aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS9
-rw-r--r--Documentation/DMA-mapping.txt2
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/block/biodoc.txt14
-rw-r--r--Documentation/cachetlb.txt21
-rw-r--r--Documentation/cpu-hotplug.txt4
-rw-r--r--Documentation/cputopology.txt4
-rw-r--r--Documentation/drivers/edac/edac.txt34
-rw-r--r--Documentation/filesystems/00-INDEX54
-rw-r--r--Documentation/ioctl-number.txt2
-rw-r--r--Documentation/m68k/README.buddha2
-rw-r--r--Documentation/networking/ifenslave.c2
-rw-r--r--Documentation/networking/vortex.txt81
-rw-r--r--Documentation/pnp.txt3
-rw-r--r--Documentation/robust-futex-ABI.txt182
-rw-r--r--Documentation/robust-futexes.txt218
-rw-r--r--Documentation/rpc-cache.txt121
-rw-r--r--Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl2
-rw-r--r--MAINTAINERS50
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/kernel/osf_sys.c1
-rw-r--r--arch/alpha/kernel/setup.c14
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/alpha/lib/ev6-memchr.S2
-rw-r--r--arch/alpha/lib/fpreg.c8
-rw-r--r--arch/alpha/mm/numa.c4
-rw-r--r--arch/arm/Kconfig7
-rw-r--r--arch/arm/common/rtctime.c108
-rw-r--r--arch/arm/lib/copy_template.S2
-rw-r--r--arch/arm/mach-footbridge/time.c17
-rw-r--r--arch/arm/mach-integrator/time.c16
-rw-r--r--arch/arm/mach-omap1/board-netstar.c2
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c2
-rw-r--r--arch/arm/mach-pxa/generic.c6
-rw-r--r--arch/arm/mach-sa1100/generic.c6
-rw-r--r--arch/arm26/Kconfig4
-rw-r--r--arch/arm26/kernel/traps.c12
-rw-r--r--arch/arm26/mm/init.c7
-rw-r--r--arch/cris/Kconfig8
-rw-r--r--arch/cris/kernel/setup.c5
-rw-r--r--arch/frv/Kconfig4
-rw-r--r--arch/frv/mm/mmu-context.c6
-rw-r--r--arch/h8300/Kconfig8
-rw-r--r--arch/i386/Kconfig13
-rw-r--r--arch/i386/Makefile9
-rw-r--r--arch/i386/boot/Makefile36
-rw-r--r--arch/i386/boot/video.S2
-rw-r--r--arch/i386/kernel/acpi/boot.c8
-rw-r--r--arch/i386/kernel/cpu/common.c10
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c16
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.h4
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c77
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c13
-rw-r--r--arch/i386/kernel/dmi_scan.c90
-rw-r--r--arch/i386/kernel/efi.c23
-rw-r--r--arch/i386/kernel/io_apic.c4
-rw-r--r--arch/i386/kernel/kprobes.c253
-rw-r--r--arch/i386/kernel/microcode.c24
-rw-r--r--arch/i386/kernel/nmi.c9
-rw-r--r--arch/i386/kernel/process.c8
-rw-r--r--arch/i386/kernel/setup.c9
-rw-r--r--arch/i386/kernel/smpboot.c24
-rw-r--r--arch/i386/kernel/syscall_table.S2
-rw-r--r--arch/i386/kernel/timers/timer_pm.c104
-rw-r--r--arch/i386/kernel/traps.c34
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c2
-rw-r--r--arch/i386/mm/discontig.c12
-rw-r--r--arch/i386/mm/pgtable.c2
-rw-r--r--arch/i386/oprofile/nmi_int.c2
-rw-r--r--arch/ia64/Kconfig8
-rw-r--r--arch/ia64/ia32/ia32priv.h4
-rw-r--r--arch/ia64/ia32/sys_ia32.c75
-rw-r--r--arch/ia64/kernel/Makefile3
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/kernel/efi.c62
-rw-r--r--arch/ia64/kernel/kprobes.c51
-rw-r--r--arch/ia64/kernel/mca.c3
-rw-r--r--arch/ia64/kernel/process.c8
-rw-r--r--arch/ia64/kernel/setup.c10
-rw-r--r--arch/ia64/kernel/traps.c6
-rw-r--r--arch/ia64/lib/Makefile2
-rw-r--r--arch/ia64/lib/bitop.c88
-rw-r--r--arch/ia64/mm/Makefile2
-rw-r--r--arch/ia64/mm/discontig.c33
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/ia64/mm/ioremap.c43
-rw-r--r--arch/ia64/sn/kernel/setup.c5
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_proc_fs.c39
-rw-r--r--arch/m32r/Kconfig8
-rw-r--r--arch/m32r/kernel/setup.c1
-rw-r--r--arch/m32r/mm/discontig.c7
-rw-r--r--arch/m32r/mm/init.c3
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/bvme6000/config.c2
-rw-r--r--arch/m68k/mvme16x/rtc.c4
-rw-r--r--arch/m68knommu/Kconfig8
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/ddb5xxx/common/rtc_ds1386.c4
-rw-r--r--arch/mips/dec/time.c51
-rw-r--r--arch/mips/ite-boards/generic/time.c4
-rw-r--r--arch/mips/ite-boards/ivr/init.c3
-rw-r--r--arch/mips/ite-boards/qed-4n-s01b/init.c3
-rw-r--r--arch/mips/jmr3927/common/rtc_ds1742.c4
-rw-r--r--arch/mips/kernel/linux32.c74
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c9
-rw-r--r--arch/mips/kernel/time.c22
-rw-r--r--arch/mips/lasat/setup.c7
-rw-r--r--arch/mips/lasat/sysctl.c63
-rw-r--r--arch/mips/mips-boards/atlas/atlas_setup.c2
-rw-r--r--arch/mips/mips-boards/generic/memory.c7
-rw-r--r--arch/mips/mips-boards/malta/malta_setup.c2
-rw-r--r--arch/mips/mips-boards/sim/sim_mem.c7
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/momentum/jaguar_atx/setup.c4
-rw-r--r--arch/mips/momentum/ocelot_3/setup.c38
-rw-r--r--arch/mips/momentum/ocelot_c/setup.c4
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-time.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-setup.c4
-rw-r--r--arch/mips/sibyte/swarm/setup.c8
-rw-r--r--arch/mips/sni/setup.c4
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c4
-rw-r--r--arch/mips/tx4938/common/rtc_rx5c348.c16
-rw-r--r--arch/parisc/Kconfig8
-rw-r--r--arch/parisc/kernel/pdc_chassis.c3
-rw-r--r--arch/parisc/kernel/sys_parisc32.c58
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/kprobes.c66
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c3
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c73
-rw-r--r--arch/powerpc/kernel/traps.c16
-rw-r--r--arch/powerpc/mm/imalloc.c18
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c22
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/powerpc/platforms/chrp/time.c22
-rw-r--r--arch/powerpc/platforms/maple/time.c24
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c7
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c10
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/ppc/kernel/ppc_htab.c2
-rw-r--r--arch/ppc/platforms/chrp_time.c42
-rw-r--r--arch/ppc/platforms/prep_setup.c2
-rw-r--r--arch/s390/Kconfig4
-rw-r--r--arch/s390/crypto/crypt_s390_query.c2
-rw-r--r--arch/s390/kernel/compat_linux.c74
-rw-r--r--arch/s390/kernel/compat_wrapper.S8
-rw-r--r--arch/s390/kernel/process.c11
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/sh/Kconfig8
-rw-r--r--arch/sh/boards/mpc1211/rtc.c50
-rw-r--r--arch/sh/boards/sh03/rtc.c13
-rw-r--r--arch/sh/kernel/cpu/rtc.c10
-rw-r--r--arch/sh/kernel/setup.c5
-rw-r--r--arch/sh64/Kconfig8
-rw-r--r--arch/sh64/kernel/setup.c1
-rw-r--r--arch/sh64/kernel/time.c9
-rw-r--r--arch/sparc/Kconfig8
-rw-r--r--arch/sparc64/Kconfig8
-rw-r--r--arch/sparc64/defconfig39
-rw-r--r--arch/sparc64/kernel/kprobes.c69
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c15
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c74
-rw-r--r--arch/sparc64/kernel/systbls.S2
-rw-r--r--arch/sparc64/kernel/time.c16
-rw-r--r--arch/sparc64/kernel/traps.c17
-rw-r--r--arch/sparc64/lib/Makefile2
-rw-r--r--arch/sparc64/lib/find_bit.c127
-rw-r--r--arch/sparc64/mm/fault.c4
-rw-r--r--arch/um/Kconfig.i3865
-rw-r--r--arch/um/Kconfig.x86_645
-rw-r--r--arch/um/drivers/mconsole_kern.c3
-rw-r--r--arch/um/drivers/ubd_kern.c76
-rw-r--r--arch/um/include/irq_user.h15
-rw-r--r--arch/um/include/kern.h2
-rw-r--r--arch/um/include/misc_constants.h6
-rw-r--r--arch/um/include/os.h36
-rw-r--r--arch/um/include/sigio.h3
-rw-r--r--arch/um/include/skas/mode-skas.h1
-rw-r--r--arch/um/include/skas/skas.h1
-rw-r--r--arch/um/include/user_util.h1
-rw-r--r--arch/um/kernel/Makefile9
-rw-r--r--arch/um/kernel/exec_kern.c2
-rw-r--r--arch/um/kernel/irq.c294
-rw-r--r--arch/um/kernel/irq_user.c412
-rw-r--r--arch/um/kernel/physmem.c3
-rw-r--r--arch/um/kernel/sigio_kern.c10
-rw-r--r--arch/um/kernel/smp.c14
-rw-r--r--arch/um/kernel/um_arch.c12
-rw-r--r--arch/um/os-Linux/Makefile13
-rw-r--r--arch/um/os-Linux/irq.c162
-rw-r--r--arch/um/os-Linux/sigio.c (renamed from arch/um/kernel/sigio_user.c)233
-rw-r--r--arch/um/os-Linux/start_up.c128
-rw-r--r--arch/um/os-Linux/tt.c10
-rw-r--r--arch/um/os-Linux/tty_log.c (renamed from arch/um/kernel/tty_log.c)18
-rw-r--r--arch/um/os-Linux/umid.c33
-rw-r--r--arch/um/sys-i386/ptrace.c15
-rw-r--r--arch/um/sys-i386/signal.c59
-rw-r--r--arch/um/sys-i386/user-offsets.c10
-rw-r--r--arch/um/sys-x86_64/ptrace.c22
-rw-r--r--arch/um/sys-x86_64/signal.c56
-rw-r--r--arch/um/sys-x86_64/user-offsets.c6
-rw-r--r--arch/v850/Kconfig6
-rw-r--r--arch/x86_64/Kconfig17
-rw-r--r--arch/x86_64/Makefile17
-rw-r--r--arch/x86_64/boot/Makefile36
-rw-r--r--arch/x86_64/ia32/ia32entry.S4
-rw-r--r--arch/x86_64/ia32/sys_ia32.c77
-rw-r--r--arch/x86_64/kernel/kprobes.c65
-rw-r--r--arch/x86_64/kernel/process.c26
-rw-r--r--arch/x86_64/kernel/setup.c3
-rw-r--r--arch/x86_64/kernel/smpboot.c24
-rw-r--r--arch/x86_64/kernel/time.c41
-rw-r--r--arch/x86_64/kernel/traps.c18
-rw-r--r--arch/x86_64/mm/init.c2
-rw-r--r--arch/x86_64/mm/numa.c15
-rw-r--r--arch/xtensa/Kconfig8
-rw-r--r--arch/xtensa/platform-iss/setup.c2
-rw-r--r--block/Kconfig10
-rw-r--r--block/cfq-iosched.c359
-rw-r--r--block/genhd.c6
-rw-r--r--block/ll_rw_blk.c39
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/osl.c64
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/base/memory.c8
-rw-r--r--drivers/block/DAC960.c24
-rw-r--r--drivers/block/Kconfig18
-rw-r--r--drivers/block/acsi_slm.c2
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/cciss_scsi.c3
-rw-r--r--drivers/block/floppy.c32
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/paride/bpck6.c3
-rw-r--r--drivers/block/paride/pd.c3
-rw-r--r--drivers/block/paride/pg.c8
-rw-r--r--drivers/block/paride/pt.c8
-rw-r--r--drivers/block/pktcdvd.c26
-rw-r--r--drivers/block/umem.c5
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/agp/nvidia-agp.c3
-rw-r--r--drivers/char/drm/drm_fops.c2
-rw-r--r--drivers/char/drm/i810_dma.c2
-rw-r--r--drivers/char/drm/i830_dma.c2
-rw-r--r--drivers/char/epca.c3
-rw-r--r--drivers/char/ftape/lowlevel/fdc-io.c2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c48
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c558
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c1064
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/mem.c20
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/mxser.h2
-rw-r--r--drivers/char/synclink.c33
-rw-r--r--drivers/char/synclink_gt.c250
-rw-r--r--drivers/char/tlclk.c1
-rw-r--r--drivers/char/tty_io.c11
-rw-r--r--drivers/cpufreq/cpufreq.c61
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c155
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c11
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/amd76x_edac.c126
-rw-r--r--drivers/edac/e752x_edac.c354
-rw-r--r--drivers/edac/e7xxx_edac.c228
-rw-r--r--drivers/edac/edac_mc.c810
-rw-r--r--drivers/edac/edac_mc.h133
-rw-r--r--drivers/edac/i82860_edac.c127
-rw-r--r--drivers/edac/i82875p_edac.c208
-rw-r--r--drivers/edac/r82600_edac.c140
-rw-r--r--drivers/firmware/dcdbas.c19
-rw-r--r--drivers/firmware/efivars.c28
-rw-r--r--drivers/firmware/pcdp.c19
-rw-r--r--drivers/i2c/chips/Kconfig18
-rw-r--r--drivers/i2c/chips/Makefile2
-rw-r--r--drivers/i2c/chips/rtc8564.c385
-rw-r--r--drivers/i2c/chips/rtc8564.h78
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-tape.c5
-rw-r--r--drivers/ide/pci/amd74xx.c8
-rw-r--r--drivers/ide/pci/generic.c3
-rw-r--r--drivers/ide/pci/sis5513.c2
-rw-r--r--drivers/ieee1394/highlevel.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/serio/hp_sdc_mlc.c7
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/isdn/Makefile1
-rw-r--r--drivers/isdn/capi/capi.c8
-rw-r--r--drivers/isdn/capi/kcapi_proc.c2
-rw-r--r--drivers/isdn/gigaset/Kconfig42
-rw-r--r--drivers/isdn/gigaset/Makefile6
-rw-r--r--drivers/isdn/gigaset/asyncdata.c597
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c2365
-rw-r--r--drivers/isdn/gigaset/common.c1203
-rw-r--r--drivers/isdn/gigaset/ev-layer.c1983
-rw-r--r--drivers/isdn/gigaset/gigaset.h938
-rw-r--r--drivers/isdn/gigaset/i4l.c567
-rw-r--r--drivers/isdn/gigaset/interface.c718
-rw-r--r--drivers/isdn/gigaset/isocdata.c1009
-rw-r--r--drivers/isdn/gigaset/proc.c81
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c1008
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h4
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c7
-rw-r--r--drivers/isdn/hisax/hisax_isac.c9
-rw-r--r--drivers/isdn/hisax/st5481_b.c4
-rw-r--r--drivers/isdn/hisax/st5481_d.c4
-rw-r--r--drivers/isdn/i4l/Kconfig1
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c6
-rw-r--r--drivers/macintosh/adb.c11
-rw-r--r--drivers/macintosh/adbhid.c3
-rw-r--r--drivers/macintosh/smu.c9
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/macintosh/via-pmu68k.c7
-rw-r--r--drivers/macintosh/windfarm_core.c8
-rw-r--r--drivers/md/Kconfig26
-rw-r--r--drivers/md/bitmap.c14
-rw-r--r--drivers/md/dm-crypt.c31
-rw-r--r--drivers/md/dm-io.c13
-rw-r--r--drivers/md/dm-ioctl.c100
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-path-selector.c3
-rw-r--r--drivers/md/dm-raid1.c43
-rw-r--r--drivers/md/dm-snap.c412
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-table.c59
-rw-r--r--drivers/md/dm.c94
-rw-r--r--drivers/md/dm.h23
-rw-r--r--drivers/md/kcopyd.c47
-rw-r--r--drivers/md/md.c235
-rw-r--r--drivers/md/multipath.c17
-rw-r--r--drivers/md/raid1.c27
-rw-r--r--drivers/md/raid5.c719
-rw-r--r--drivers/md/raid6main.c14
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/video/videodev.c2
-rw-r--r--drivers/message/i2o/i2o_block.c7
-rw-r--r--drivers/message/i2o/i2o_proc.c2
-rw-r--r--drivers/misc/ibmasm/heartbeat.c5
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/mtd/chips/Kconfig5
-rw-r--r--drivers/mtd/maps/dilnetpc.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/mtdconcat.c6
-rw-r--r--drivers/net/3c59x.c245
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/forcedeth.c3
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/irda/nsc-ircc.c4
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/ns83820.c6
-rw-r--r--drivers/net/sis900.c6
-rw-r--r--drivers/net/tulip/de4x5.c2
-rw-r--r--drivers/net/tulip/pnic2.c2
-rw-r--r--drivers/net/typhoon.c4
-rw-r--r--drivers/net/wan/dscc4.c7
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wireless/orinoco.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c3
-rw-r--r--drivers/oprofile/cpu_buffer.c13
-rw-r--r--drivers/oprofile/oprofile_stats.c4
-rw-r--r--drivers/oprofile/oprofilefs.c6
-rw-r--r--drivers/parisc/led.c14
-rw-r--r--drivers/parisc/power.c6
-rw-r--r--drivers/parport/parport_pc.c31
-rw-r--r--drivers/parport/share.c19
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c12
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c19
-rw-r--r--drivers/pnp/card.c16
-rw-r--r--drivers/pnp/driver.c19
-rw-r--r--drivers/pnp/isapnp/core.c7
-rw-r--r--drivers/rtc/Kconfig165
-rw-r--r--drivers/rtc/Makefile21
-rw-r--r--drivers/rtc/class.c145
-rw-r--r--drivers/rtc/hctosys.c69
-rw-r--r--drivers/rtc/interface.c277
-rw-r--r--drivers/rtc/rtc-dev.c382
-rw-r--r--drivers/rtc/rtc-ds1672.c233
-rw-r--r--drivers/rtc/rtc-ep93xx.c162
-rw-r--r--drivers/rtc/rtc-lib.c101
-rw-r--r--drivers/rtc/rtc-m48t86.c209
-rw-r--r--drivers/rtc/rtc-pcf8563.c353
-rw-r--r--drivers/rtc/rtc-proc.c162
-rw-r--r--drivers/rtc/rtc-rs5c372.c294
-rw-r--r--drivers/rtc/rtc-sa1100.c388
-rw-r--r--drivers/rtc/rtc-sysfs.c124
-rw-r--r--drivers/rtc/rtc-test.c204
-rw-r--r--drivers/rtc/rtc-x1205.c (renamed from drivers/i2c/chips/x1205.c)453
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/char/raw3270.c39
-rw-r--r--drivers/s390/scsi/zfcp_aux.c60
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/BusLogic.c10
-rw-r--r--drivers/scsi/a100u2w.c3
-rw-r--r--drivers/scsi/aacraid/aachba.c1
-rw-r--r--drivers/scsi/aacraid/linit.c5
-rw-r--r--drivers/scsi/atp870u.c3
-rw-r--r--drivers/scsi/dpt_i2o.c5
-rw-r--r--drivers/scsi/eata.c3
-rw-r--r--drivers/scsi/gdth.c16
-rw-r--r--drivers/scsi/initio.c3
-rw-r--r--drivers/scsi/ips.c5
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c22
-rw-r--r--drivers/scsi/megaraid.c7
-rw-r--r--drivers/scsi/nsp32.c3
-rw-r--r--drivers/scsi/osst.c5
-rw-r--r--drivers/scsi/ppa.c3
-rw-r--r--drivers/scsi/qla1280.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/qlogicfc.c8
-rw-r--r--drivers/scsi/qlogicpti.c3
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/serial/8250.c2
-rw-r--r--drivers/serial/serial_txx9.c2
-rw-r--r--drivers/serial/sunsu.c2
-rw-r--r--drivers/telephony/phonedev.c23
-rw-r--r--drivers/usb/core/file.c6
-rw-r--r--drivers/usb/core/notify.c65
-rw-r--r--drivers/usb/gadget/inode.c6
-rw-r--r--drivers/usb/host/ohci-s3c2410.c2
-rw-r--r--drivers/usb/net/zaurus.c2
-rw-r--r--drivers/video/Kconfig27
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/acornfb.c8
-rw-r--r--drivers/video/asiliantfb.c14
-rw-r--r--drivers/video/aty/aty128fb.c4
-rw-r--r--drivers/video/aty/atyfb_base.c16
-rw-r--r--drivers/video/aty/mach64_gx.c3
-rw-r--r--drivers/video/aty/radeon_base.c6
-rw-r--r--drivers/video/au1200fb.c3844
-rw-r--r--drivers/video/au1200fb.h572
-rw-r--r--drivers/video/chipsfb.c14
-rw-r--r--drivers/video/console/Kconfig24
-rw-r--r--drivers/video/console/fonts.c2
-rw-r--r--drivers/video/console/newport_con.c4
-rw-r--r--drivers/video/console/vgacon.c271
-rw-r--r--drivers/video/fbcmap.c4
-rw-r--r--drivers/video/fbmem.c31
-rw-r--r--drivers/video/fbmon.c6
-rw-r--r--drivers/video/fbsysfs.c4
-rw-r--r--drivers/video/geode/Kconfig17
-rw-r--r--drivers/video/geode/Makefile4
-rw-r--r--drivers/video/geode/display_gx.c156
-rw-r--r--drivers/video/geode/display_gx.h96
-rw-r--r--drivers/video/geode/gxfb_core.c423
-rw-r--r--drivers/video/geode/video_gx.c262
-rw-r--r--drivers/video/geode/video_gx.h47
-rw-r--r--drivers/video/i810/i810-i2c.c3
-rw-r--r--drivers/video/imsttfb.c32
-rw-r--r--drivers/video/macmodes.c2
-rw-r--r--drivers/video/matrox/matroxfb_g450.c2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c78
-rw-r--r--drivers/video/modedb.c6
-rw-r--r--drivers/video/neofb.c8
-rw-r--r--drivers/video/nvidia/nv_accel.c12
-rw-r--r--drivers/video/nvidia/nv_i2c.c3
-rw-r--r--drivers/video/nvidia/nv_type.h1
-rw-r--r--drivers/video/nvidia/nvidia.c119
-rw-r--r--drivers/video/pmagb-b-fb.c2
-rw-r--r--drivers/video/radeonfb.c2
-rw-r--r--drivers/video/riva/fbdev.c2
-rw-r--r--drivers/video/savage/savagefb-i2c.c3
-rw-r--r--drivers/video/sis/init301.c11
-rw-r--r--drivers/video/sstfb.c11
-rw-r--r--drivers/video/virgefb.c3
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_dir.c2
-rw-r--r--fs/9p/vfs_file.c2
-rw-r--r--fs/adfs/adfs.h4
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/adfs/file.c2
-rw-r--r--fs/affs/affs.h6
-rw-r--r--fs/affs/dir.c2
-rw-r--r--fs/affs/file.c2
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/file.c6
-rw-r--r--fs/afs/internal.h4
-rw-r--r--fs/afs/mntpt.c2
-rw-r--r--fs/afs/proc.c10
-rw-r--r--fs/autofs/autofs_i.h2
-rw-r--r--fs/autofs/dirhash.c2
-rw-r--r--fs/autofs/root.c2
-rw-r--r--fs/autofs4/autofs_i.h54
-rw-r--r--fs/autofs4/expire.c292
-rw-r--r--fs/autofs4/inode.c103
-rw-r--r--fs/autofs4/root.c328
-rw-r--r--fs/autofs4/waitq.c111
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/bfs.h4
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/bfs/file.c2
-rw-r--r--fs/binfmt_misc.c6
-rw-r--r--fs/bio.c25
-rw-r--r--fs/block_dev.c306
-rw-r--r--fs/buffer.c47
-rw-r--r--fs/char_dev.c6
-rw-r--r--fs/cifs/cifsfs.c28
-rw-r--r--fs/cifs/cifsfs.h10
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/coda/psdev.c2
-rw-r--r--fs/compat.c13
-rw-r--r--fs/configfs/configfs_internal.h6
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/configfs/file.c2
-rw-r--r--fs/cramfs/inode.c35
-rw-r--r--fs/dcache.c25
-rw-r--r--fs/dcookies.c25
-rw-r--r--fs/debugfs/file.c4
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/devfs/base.c12
-rw-r--r--fs/direct-io.c27
-rw-r--r--fs/dnotify.c4
-rw-r--r--fs/efs/dir.c2
-rw-r--r--fs/eventpoll.c8
-rw-r--r--fs/exec.c2
-rw-r--r--fs/ext2/dir.c8
-rw-r--r--fs/ext2/ext2.h6
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext2/inode.c14
-rw-r--r--fs/ext3/balloc.c109
-rw-r--r--fs/ext3/dir.c7
-rw-r--r--fs/ext3/file.c2
-rw-r--r--fs/ext3/inode.c582
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/fat/dir.c2
-rw-r--r--fs/fat/file.c2
-rw-r--r--fs/fat/inode.c5
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/fifo.c2
-rw-r--r--fs/file.c2
-rw-r--r--fs/freevxfs/vxfs_extern.h2
-rw-r--r--fs/freevxfs/vxfs_lookup.c2
-rw-r--r--fs/fuse/dev.c2
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/hfs/bnode.c9
-rw-r--r--fs/hfs/btree.c3
-rw-r--r--fs/hfs/dir.c2
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/inode.c17
-rw-r--r--fs/hfsplus/dir.c2
-rw-r--r--fs/hfsplus/inode.c15
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hostfs/hostfs_user.c1
-rw-r--r--fs/hpfs/dir.c2
-rw-r--r--fs/hpfs/file.c2
-rw-r--r--fs/hpfs/hpfs_fn.h4
-rw-r--r--fs/hppfs/hppfs_kern.c4
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c10
-rw-r--r--fs/inotify.c14
-rw-r--r--fs/isofs/dir.c2
-rw-r--r--fs/isofs/isofs.h2
-rw-r--r--fs/jbd/transaction.c13
-rw-r--r--fs/jffs/inode-v23.c8
-rw-r--r--fs/jffs2/compr_zlib.c19
-rw-r--r--fs/jffs2/dir.c2
-rw-r--r--fs/jffs2/file.c2
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/inode.c5
-rw-r--r--fs/jfs/jfs_inode.h4
-rw-r--r--fs/jfs/jfs_logmgr.c27
-rw-r--r--fs/jfs/jfs_metapage.c11
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/libfs.c2
-rw-r--r--fs/lockd/host.c19
-rw-r--r--fs/lockd/svc.c17
-rw-r--r--fs/lockd/svcsubs.c17
-rw-r--r--fs/locks.c41
-rw-r--r--fs/mbcache.c2
-rw-r--r--fs/minix/dir.c2
-rw-r--r--fs/minix/file.c2
-rw-r--r--fs/minix/minix.h4
-rw-r--r--fs/mpage.c104
-rw-r--r--fs/namei.c39
-rw-r--r--fs/namespace.c12
-rw-r--r--fs/ncpfs/dir.c2
-rw-r--r--fs/ncpfs/file.c2
-rw-r--r--fs/nfs/callback.c11
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/file.c5
-rw-r--r--fs/nfs/read.c6
-rw-r--r--fs/nfs/write.c12
-rw-r--r--fs/nfsd/export.c368
-rw-r--r--fs/nfsd/nfs4idmap.c146
-rw-r--r--fs/nfsd/nfs4state.c47
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/stats.c2
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/ntfs/dir.c2
-rw-r--r--fs/ntfs/file.c4
-rw-r--r--fs/ntfs/logfile.c4
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/ntfs/ntfs.h35
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/file.c4
-rw-r--r--fs/ocfs2/file.h4
-rw-r--r--fs/ocfs2/journal.c10
-rw-r--r--fs/ocfs2/namei.c5
-rw-r--r--fs/openpromfs/inode.c6
-rw-r--r--fs/partitions/check.c27
-rw-r--r--fs/partitions/devfs.c12
-rw-r--r--fs/pipe.c8
-rw-r--r--fs/proc/array.c5
-rw-r--r--fs/proc/generic.c34
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/kmsg.c2
-rw-r--r--fs/proc/proc_devtree.c2
-rw-r--r--fs/proc/proc_misc.c4
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/qnx4/dir.c2
-rw-r--r--fs/qnx4/file.c2
-rw-r--r--fs/ramfs/file-mmu.c2
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/ramfs/internal.h2
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/dir.c2
-rw-r--r--fs/reiserfs/file.c2
-rw-r--r--fs/reiserfs/inode.c9
-rw-r--r--fs/reiserfs/prints.c2
-rw-r--r--fs/reiserfs/procfs.c2
-rw-r--r--fs/romfs/inode.c2
-rw-r--r--fs/select.c118
-rw-r--r--fs/smbfs/dir.c2
-rw-r--r--fs/smbfs/file.c2
-rw-r--r--fs/smbfs/proto.h4
-rw-r--r--fs/super.c7
-rw-r--r--fs/sysfs/bin.c2
-rw-r--r--fs/sysfs/dir.c2
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/sysfs/sysfs.h6
-rw-r--r--fs/sysv/dir.c2
-rw-r--r--fs/sysv/file.c2
-rw-r--r--fs/sysv/sysv.h4
-rw-r--r--fs/udf/dir.c2
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/udfdecl.h4
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/file.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c15
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.h6
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c5
-rw-r--r--include/asm-alpha/bitops.h133
-rw-r--r--include/asm-alpha/fpu.h4
-rw-r--r--include/asm-alpha/mmzone.h19
-rw-r--r--include/asm-alpha/page.h4
-rw-r--r--include/asm-alpha/poll.h4
-rw-r--r--include/asm-arm/bitops.h175
-rw-r--r--include/asm-arm/memory.h15
-rw-r--r--include/asm-arm/rtc.h3
-rw-r--r--include/asm-arm26/bitops.h146
-rw-r--r--include/asm-arm26/memory.h4
-rw-r--r--include/asm-cris/bitops.h235
-rw-r--r--include/asm-cris/page.h4
-rw-r--r--include/asm-frv/bitops.h174
-rw-r--r--include/asm-frv/futex.h6
-rw-r--r--include/asm-frv/page.h7
-rw-r--r--include/asm-generic/bitops.h76
-rw-r--r--include/asm-generic/bitops/__ffs.h43
-rw-r--r--include/asm-generic/bitops/atomic.h191
-rw-r--r--include/asm-generic/bitops/ext2-atomic.h22
-rw-r--r--include/asm-generic/bitops/ext2-non-atomic.h18
-rw-r--r--include/asm-generic/bitops/ffs.h41
-rw-r--r--include/asm-generic/bitops/ffz.h12
-rw-r--r--include/asm-generic/bitops/find.h13
-rw-r--r--include/asm-generic/bitops/fls.h41
-rw-r--r--include/asm-generic/bitops/fls64.h14
-rw-r--r--include/asm-generic/bitops/hweight.h11
-rw-r--r--include/asm-generic/bitops/le.h53
-rw-r--r--include/asm-generic/bitops/minix-le.h17
-rw-r--r--include/asm-generic/bitops/minix.h15
-rw-r--r--include/asm-generic/bitops/non-atomic.h111
-rw-r--r--include/asm-generic/bitops/sched.h36
-rw-r--r--include/asm-generic/futex.h6
-rw-r--r--include/asm-generic/local.h80
-rw-r--r--include/asm-generic/memory_model.h77
-rw-r--r--include/asm-generic/percpu.h2
-rw-r--r--include/asm-h8300/bitops.h222
-rw-r--r--include/asm-h8300/page.h4
-rw-r--r--include/asm-h8300/types.h3
-rw-r--r--include/asm-i386/bitops.h55
-rw-r--r--include/asm-i386/futex.h27
-rw-r--r--include/asm-i386/kdebug.h10
-rw-r--r--include/asm-i386/kprobes.h6
-rw-r--r--include/asm-i386/mach-default/mach_time.h37
-rw-r--r--include/asm-i386/mmzone.h17
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/processor.h7
-rw-r--r--include/asm-i386/setup.h4
-rw-r--r--include/asm-i386/stat.h3
-rw-r--r--include/asm-i386/topology.h2
-rw-r--r--include/asm-i386/types.h5
-rw-r--r--include/asm-i386/unistd.h4
-rw-r--r--include/asm-ia64/bitops.h67
-rw-r--r--include/asm-ia64/compat.h6
-rw-r--r--include/asm-ia64/dmi.h6
-rw-r--r--include/asm-ia64/io.h22
-rw-r--r--include/asm-ia64/kdebug.h4
-rw-r--r--include/asm-ia64/page.h18
-rw-r--r--include/asm-ia64/sn/sn_sal.h2
-rw-r--r--include/asm-m32r/bitops.h457
-rw-r--r--include/asm-m32r/mmzone.h14
-rw-r--r--include/asm-m32r/page.h5
-rw-r--r--include/asm-m32r/setup.h4
-rw-r--r--include/asm-m68k/bitops.h42
-rw-r--r--include/asm-m68k/stat.h3
-rw-r--r--include/asm-m68knommu/bitops.h221
-rw-r--r--include/asm-mips/bitops.h465
-rw-r--r--include/asm-mips/compat.h5
-rw-r--r--include/asm-mips/futex.h6
-rw-r--r--include/asm-mips/mc146818-time.h33
-rw-r--r--include/asm-mips/mmzone.h14
-rw-r--r--include/asm-mips/page.h3
-rw-r--r--include/asm-mips/time.h12
-rw-r--r--include/asm-mips/types.h5
-rw-r--r--include/asm-parisc/bitops.h286
-rw-r--r--include/asm-parisc/compat.h5
-rw-r--r--include/asm-parisc/mmzone.h17
-rw-r--r--include/asm-parisc/page.h3
-rw-r--r--include/asm-parisc/pdc.h2
-rw-r--r--include/asm-powerpc/bitops.h105
-rw-r--r--include/asm-powerpc/futex.h6
-rw-r--r--include/asm-powerpc/kdebug.h12
-rw-r--r--include/asm-powerpc/page.h3
-rw-r--r--include/asm-powerpc/types.h5
-rw-r--r--include/asm-ppc/page.h4
-rw-r--r--include/asm-s390/bitops.h48
-rw-r--r--include/asm-s390/compat.h5
-rw-r--r--include/asm-s390/page.h3
-rw-r--r--include/asm-s390/types.h5
-rw-r--r--include/asm-sh/addrspace.h2
-rw-r--r--include/asm-sh/bitops.h348
-rw-r--r--include/asm-sh/page.h5
-rw-r--r--include/asm-sh/stat.h8
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sh/types.h5
-rw-r--r--include/asm-sh64/bitops.h390
-rw-r--r--include/asm-sh64/page.h5
-rw-r--r--include/asm-sh64/platform.h5
-rw-r--r--include/asm-sparc/bitops.h388
-rw-r--r--include/asm-sparc/page.h4
-rw-r--r--include/asm-sparc64/bitops.h219
-rw-r--r--include/asm-sparc64/floppy.h2
-rw-r--r--include/asm-sparc64/futex.h24
-rw-r--r--include/asm-sparc64/kdebug.h11
-rw-r--r--include/asm-sparc64/page.h2
-rw-r--r--include/asm-um/page.h4
-rw-r--r--include/asm-um/uaccess.h31
-rw-r--r--include/asm-v850/bitops.h220
-rw-r--r--include/asm-v850/page.h4
-rw-r--r--include/asm-x86_64/bitops.h42
-rw-r--r--include/asm-x86_64/futex.h27
-rw-r--r--include/asm-x86_64/kdebug.h23
-rw-r--r--include/asm-x86_64/mmzone.h4
-rw-r--r--include/asm-x86_64/page.h3
-rw-r--r--include/asm-x86_64/processor.h7
-rw-r--r--include/asm-x86_64/smp.h1
-rw-r--r--include/asm-x86_64/topology.h2
-rw-r--r--include/asm-x86_64/unistd.h6
-rw-r--r--include/asm-xtensa/bitops.h340
-rw-r--r--include/asm-xtensa/page.h6
-rw-r--r--include/linux/adb.h2
-rw-r--r--include/linux/auto_fs4.h51
-rw-r--r--include/linux/bitops.h124
-rw-r--r--include/linux/blkdev.h22
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/buffer_head.h26
-rw-r--r--include/linux/cdev.h4
-rw-r--r--include/linux/coda_linux.h6
-rw-r--r--include/linux/compat.h46
-rw-r--r--include/linux/compat_ioctl.h2
-rw-r--r--include/linux/cpumask.h5
-rw-r--r--include/linux/crash_dump.h2
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/dm-ioctl.h17
-rw-r--r--include/linux/dma-mapping.h1
-rw-r--r--include/linux/efi.h20
-rw-r--r--include/linux/efs_fs.h2
-rw-r--r--include/linux/ext3_fs.h15
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h60
-rw-r--r--include/linux/futex.h89
-rw-r--r--include/linux/gameport.h4
-rw-r--r--include/linux/genhd.h19
-rw-r--r--include/linux/gigaset_dev.h32
-rw-r--r--include/linux/highmem.h12
-rw-r--r--include/linux/hpet.h36
-rw-r--r--include/linux/hrtimer.h41
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/i2c-id.h4
-rw-r--r--include/linux/i2o.h4
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/ipmi.h3
-rw-r--r--include/linux/ipmi_msgdefs.h1
-rw-r--r--include/linux/ipmi_smi.h47
-rw-r--r--include/linux/jbd.h2
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kernel_stat.h2
-rw-r--r--include/linux/ktime.h20
-rw-r--r--include/linux/m48t86.h16
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/mempool.h38
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mmzone.h91
-rw-r--r--include/linux/msdos_fs.h7
-rw-r--r--include/linux/ncp_fs.h4
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h17
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/nfsd/export.h29
-rw-r--r--include/linux/nodemask.h4
-rw-r--r--include/linux/notifier.h96
-rw-r--r--include/linux/oprofile.h14
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pfn.h9
-rw-r--r--include/linux/poll.h17
-rw-r--r--include/linux/proc_fs.h15
-rw-r--r--include/linux/qnx4_fs.h4
-rw-r--r--include/linux/raid/md.h3
-rw-r--r--include/linux/raid/md_k.h21
-rw-r--r--include/linux/raid/md_p.h32
-rw-r--r--include/linux/raid/raid5.h25
-rw-r--r--include/linux/ramfs.h2
-rw-r--r--include/linux/reiserfs_fs.h4
-rw-r--r--include/linux/relayfs_fs.h287
-rw-r--r--include/linux/rtc.h92
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/serio.h6
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/sound.h12
-rw-r--r--include/linux/stat.h2
-rw-r--r--include/linux/statfs.h10
-rw-r--r--include/linux/sunrpc/cache.h145
-rw-r--r--include/linux/sunrpc/stats.h4
-rw-r--r--include/linux/sunrpc/svcauth.h12
-rw-r--r--include/linux/synclink.h11
-rw-r--r--include/linux/threads.h3
-rw-r--r--include/linux/time.h18
-rw-r--r--include/linux/timer.h3
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/topology.h9
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/ufs_fs.h4
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/videodev2.h2
-rw-r--r--include/linux/x1205.h31
-rw-r--r--include/net/netfilter/nf_conntrack.h17
-rw-r--r--include/net/request_sock.h2
-rw-r--r--include/sound/core.h6
-rw-r--r--init/do_mounts.c1
-rw-r--r--init/initramfs.c2
-rw-r--r--init/main.c21
-rw-r--r--ipc/compat.c2
-rw-r--r--ipc/mqueue.c4
-rw-r--r--ipc/msg.c18
-rw-r--r--ipc/sem.c40
-rw-r--r--ipc/shm.c30
-rw-r--r--ipc/util.c29
-rw-r--r--ipc/util.h4
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/compat.c82
-rw-r--r--kernel/cpu.c29
-rw-r--r--kernel/exit.c8
-rw-r--r--kernel/fork.c10
-rw-r--r--kernel/futex.c170
-rw-r--r--kernel/futex_compat.c142
-rw-r--r--kernel/hrtimer.c193
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/itimer.c14
-rw-r--r--kernel/kprobes.c10
-rw-r--r--kernel/module.c22
-rw-r--r--kernel/panic.c4
-rw-r--r--kernel/params.c2
-rw-r--r--kernel/posix-timers.c67
-rw-r--r--kernel/power/swap.c7
-rw-r--r--kernel/profile.c53
-rw-r--r--kernel/rcutorture.c4
-rw-r--r--kernel/sched.c163
-rw-r--r--kernel/softlockup.c2
-rw-r--r--kernel/sys.c327
-rw-r--r--kernel/sys_ni.c4
-rw-r--r--kernel/time.c4
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile1
-rw-r--r--lib/bitmap.c19
-rw-r--r--lib/find_next_bit.c177
-rw-r--r--lib/hweight.c53
-rw-r--r--mm/Makefile2
-rw-r--r--mm/bootmem.c39
-rw-r--r--mm/highmem.c23
-rw-r--r--mm/memory.c8
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/mempool.c46
-rw-r--r--mm/mmzone.c50
-rw-r--r--mm/page_alloc.c56
-rw-r--r--mm/slab.c4
-rw-r--r--mm/swap.c2
-rw-r--r--mm/vmscan.c6
-rw-r--r--net/bluetooth/hci_core.c8
-rw-r--r--net/core/dev.c42
-rw-r--r--net/core/request_sock.c2
-rw-r--r--net/decnet/dn_dev.c10
-rw-r--r--net/ipv4/devinet.c16
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c11
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c6
-rw-r--r--net/ipv4/netfilter/ipt_hashlimit.c9
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/ipcomp6.c5
-rw-r--r--net/irda/af_irda.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netlink/af_netlink.c9
-rw-r--r--net/netlink/genetlink.c9
-rw-r--r--net/nonet.c2
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c182
-rw-r--r--net/sunrpc/cache.c163
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/sched.c12
-rw-r--r--net/sunrpc/stats.c4
-rw-r--r--net/sunrpc/sunrpc_syms.c6
-rw-r--r--net/sunrpc/svcauth.c122
-rw-r--r--net/sunrpc/svcauth_unix.c229
-rw-r--r--sound/core/init.c3
-rw-r--r--sound/core/rawmidi.c6
-rw-r--r--sound/core/sound.c4
-rw-r--r--sound/core/sound_oss.c2
-rw-r--r--sound/drivers/mpu401/mpu401.c16
-rw-r--r--sound/isa/ad1816a/ad1816a.c12
-rw-r--r--sound/isa/als100.c14
-rw-r--r--sound/isa/azt2320.c12
-rw-r--r--sound/isa/cmi8330.c15
-rw-r--r--sound/isa/cs423x/cs4236.c25
-rw-r--r--sound/isa/dt019x.c12
-rw-r--r--sound/isa/es18xx.c13
-rw-r--r--sound/isa/gus/interwave.c12
-rw-r--r--sound/isa/opl3sa2.c17
-rw-r--r--sound/isa/sb/es968.c12
-rw-r--r--sound/isa/sb/sb16.c14
-rw-r--r--sound/isa/sscape.c4
-rw-r--r--sound/isa/wavefront/wavefront.c10
-rw-r--r--sound/oss/cmpci.c2
-rw-r--r--sound/oss/cs4232.c15
-rw-r--r--sound/oss/dmasound/dmasound_awacs.c2
-rw-r--r--sound/oss/emu10k1/midi.c9
-rw-r--r--sound/oss/esssolo1.c2
-rw-r--r--sound/oss/maestro3.c10
-rw-r--r--sound/oss/msnd.c6
-rw-r--r--sound/oss/sb_card.c35
-rw-r--r--sound/oss/sequencer.c13
-rw-r--r--sound/oss/sh_dac_audio.c2
-rw-r--r--sound/oss/sonicvibes.c21
-rw-r--r--sound/oss/vwsnd.c40
-rw-r--r--sound/pci/ad1889.c1
-rw-r--r--sound/pci/ali5451/ali5451.c5
-rw-r--r--sound/pci/als4000.c5
-rw-r--r--sound/pci/azt3328.c5
-rw-r--r--sound/pci/emu10k1/emu10k1x.c1
-rw-r--r--sound/pci/es1938.c5
-rw-r--r--sound/pci/es1968.c1
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/maestro3.c1
-rw-r--r--sound/pci/mixart/mixart.c2
-rw-r--r--sound/pci/pcxhr/pcxhr.c1
-rw-r--r--sound/pci/rme32.c8
-rw-r--r--sound/pci/rme96.c8
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sonicvibes.c5
-rw-r--r--sound/pci/trident/trident_main.c5
-rw-r--r--sound/sound_core.c22
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
996 files changed, 34691 insertions, 15187 deletions
diff --git a/CREDITS b/CREDITS
index c6d69bf10e15..0bf31eac6dc2 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1127,8 +1127,10 @@ S: Carnegie, Pennsylvania 15106-4304
1127S: USA 1127S: USA
1128 1128
1129N: Philip Gladstone 1129N: Philip Gladstone
1130E: philip@raptor.com 1130E: philip@gladstonefamily.net
1131D: Kernel / timekeeping stuff 1131D: Kernel / timekeeping stuff
1132S: Carlisle, MA 01741
1133S: USA
1132 1134
1133N: Jan-Benedict Glaw 1135N: Jan-Benedict Glaw
1134E: jbglaw@lug-owl.de 1136E: jbglaw@lug-owl.de
@@ -3741,10 +3743,11 @@ D: Mylex DAC960 PCI RAID driver
3741D: Miscellaneous kernel fixes 3743D: Miscellaneous kernel fixes
3742 3744
3743N: Alessandro Zummo 3745N: Alessandro Zummo
3744E: azummo@ita.flashnet.it 3746E: a.zummo@towertech.it
3745W: http://freepage.logicom.it/azummo/
3746D: CMI8330 support is sb_card.c 3747D: CMI8330 support is sb_card.c
3747D: ISAPnP fixes in sb_card.c 3748D: ISAPnP fixes in sb_card.c
3749D: ZyXEL omni.net lcd plus driver
3750D: RTC subsystem
3748S: Italy 3751S: Italy
3749 3752
3750N: Marc Zyngier 3753N: Marc Zyngier
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index 684557474c15..ee4bb73683cd 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -199,6 +199,8 @@ address during PCI bus mastering you might do something like:
199 "mydev: 24-bit DMA addressing not available.\n"); 199 "mydev: 24-bit DMA addressing not available.\n");
200 goto ignore_this_device; 200 goto ignore_this_device;
201 } 201 }
202[Better use DMA_24BIT_MASK instead of 0x00ffffff.
203See linux/include/dma-mapping.h for reference.]
202 204
203When pci_set_dma_mask() is successful, and returns zero, the PCI layer 205When pci_set_dma_mask() is successful, and returns zero, the PCI layer
204saves away this mask you have provided. The PCI layer will use this 206saves away this mask you have provided. The PCI layer will use this
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index b4ea51ad3610..07cb93b82ba9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -605,7 +605,7 @@ are the same as those shown in the preceding section, so they are omitted.
605 { 605 {
606 int cpu; 606 int cpu;
607 607
608 for_each_cpu(cpu) 608 for_each_possible_cpu(cpu)
609 run_on(cpu); 609 run_on(cpu);
610 } 610 }
611 611
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8e63831971d5..f989a9e839b4 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -132,8 +132,18 @@ Some new queue property settings:
132 limit. No highmem default. 132 limit. No highmem default.
133 133
134 blk_queue_max_sectors(q, max_sectors) 134 blk_queue_max_sectors(q, max_sectors)
135 Maximum size request you can handle in units of 512 byte 135 Sets two variables that limit the size of the request.
136 sectors. 255 default. 136
137 - The request queue's max_sectors, which is a soft size in
138 in units of 512 byte sectors, and could be dynamically varied
139 by the core kernel.
140
141 - The request queue's max_hw_sectors, which is a hard limit
142 and reflects the maximum size request a driver can handle
143 in units of 512 byte sectors.
144
145 The default for both max_sectors and max_hw_sectors is
146 255. The upper limit of max_sectors is 1024.
137 147
138 blk_queue_max_phys_segments(q, max_segments) 148 blk_queue_max_phys_segments(q, max_segments)
139 Maximum physical segments you can handle in a request. 128 149 Maximum physical segments you can handle in a request. 128
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 4ae418889b88..53245c429f7d 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -362,6 +362,27 @@ maps this page at its virtual address.
362 likely that you will need to flush the instruction cache 362 likely that you will need to flush the instruction cache
363 for copy_to_user_page(). 363 for copy_to_user_page().
364 364
365 void flush_anon_page(struct page *page, unsigned long vmaddr)
366 When the kernel needs to access the contents of an anonymous
367 page, it calls this function (currently only
368 get_user_pages()). Note: flush_dcache_page() deliberately
369 doesn't work for an anonymous page. The default
370 implementation is a nop (and should remain so for all coherent
371 architectures). For incoherent architectures, it should flush
372 the cache of the page at vmaddr in the current user process.
373
374 void flush_kernel_dcache_page(struct page *page)
 375 When the kernel needs to modify a user page it has obtained
376 with kmap, it calls this function after all modifications are
377 complete (but before kunmapping it) to bring the underlying
378 page up to date. It is assumed here that the user has no
379 incoherent cached copies (i.e. the original page was obtained
380 from a mechanism like get_user_pages()). The default
381 implementation is a nop and should remain so on all coherent
382 architectures. On incoherent architectures, this should flush
383 the kernel cache for page (using page_address(page)).
384
385
365 void flush_icache_range(unsigned long start, unsigned long end) 386 void flush_icache_range(unsigned long start, unsigned long end)
366 When the kernel stores into addresses that it will execute 387 When the kernel stores into addresses that it will execute
367 out of (eg when loading modules), this function is called. 388 out of (eg when loading modules), this function is called.
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 57a09f99ecb0..1bcf69996c9d 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -97,13 +97,13 @@ at which time hotplug is disabled.
97 97
98You really dont need to manipulate any of the system cpu maps. They should 98You really dont need to manipulate any of the system cpu maps. They should
99be read-only for most use. When setting up per-cpu resources almost always use 99be read-only for most use. When setting up per-cpu resources almost always use
100cpu_possible_map/for_each_cpu() to iterate. 100cpu_possible_map/for_each_possible_cpu() to iterate.
101 101
102Never use anything other than cpumask_t to represent bitmap of CPUs. 102Never use anything other than cpumask_t to represent bitmap of CPUs.
103 103
104#include <linux/cpumask.h> 104#include <linux/cpumask.h>
105 105
106for_each_cpu - Iterate over cpu_possible_map 106for_each_possible_cpu - Iterate over cpu_possible_map
107for_each_online_cpu - Iterate over cpu_online_map 107for_each_online_cpu - Iterate over cpu_online_map
108for_each_present_cpu - Iterate over cpu_present_map 108for_each_present_cpu - Iterate over cpu_present_map
109for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. 109for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index ff280e2e1613..2b28e9ec4e3a 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -1,5 +1,5 @@
1 1
2Export cpu topology info by sysfs. Items (attributes) are similar 2Export cpu topology info via sysfs. Items (attributes) are similar
3to /proc/cpuinfo. 3to /proc/cpuinfo.
4 4
51) /sys/devices/system/cpu/cpuX/topology/physical_package_id: 51) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
@@ -12,7 +12,7 @@ represent the thread siblings to cpu X in the same core;
12represent the thread siblings to cpu X in the same physical package; 12represent the thread siblings to cpu X in the same physical package;
13 13
14To implement it in an architecture-neutral way, a new source file, 14To implement it in an architecture-neutral way, a new source file,
15driver/base/topology.c, is to export the 5 attributes. 15drivers/base/topology.c, is to export the 4 attributes.
16 16
17If one architecture wants to support this feature, it just needs to 17If one architecture wants to support this feature, it just needs to
18implement 4 defines, typically in file include/asm-XXX/topology.h. 18implement 4 defines, typically in file include/asm-XXX/topology.h.
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
index d37191fe5681..70d96a62e5e1 100644
--- a/Documentation/drivers/edac/edac.txt
+++ b/Documentation/drivers/edac/edac.txt
@@ -21,7 +21,7 @@ within the computer system. In the initial release, memory Correctable Errors
21 21
22Detecting CE events, then harvesting those events and reporting them, 22Detecting CE events, then harvesting those events and reporting them,
23CAN be a predictor of future UE events. With CE events, the system can 23CAN be a predictor of future UE events. With CE events, the system can
24continue to operate, but with less safety. Preventive maintainence and 24continue to operate, but with less safety. Preventive maintenance and
25proactive part replacement of memory DIMMs exhibiting CEs can reduce 25proactive part replacement of memory DIMMs exhibiting CEs can reduce
26the likelihood of the dreaded UE events and system 'panics'. 26the likelihood of the dreaded UE events and system 'panics'.
27 27
@@ -29,13 +29,13 @@ the likelihood of the dreaded UE events and system 'panics'.
29In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices 29In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices
30in order to determine if errors are occurring on data transfers. 30in order to determine if errors are occurring on data transfers.
31The presence of PCI Parity errors must be examined with a grain of salt. 31The presence of PCI Parity errors must be examined with a grain of salt.
32There are several addin adapters that do NOT follow the PCI specification 32There are several add-in adapters that do NOT follow the PCI specification
33with regards to Parity generation and reporting. The specification says 33with regards to Parity generation and reporting. The specification says
34the vendor should tie the parity status bits to 0 if they do not intend 34the vendor should tie the parity status bits to 0 if they do not intend
35to generate parity. Some vendors do not do this, and thus the parity bit 35to generate parity. Some vendors do not do this, and thus the parity bit
36can "float" giving false positives. 36can "float" giving false positives.
37 37
38The PCI Parity EDAC device has the ability to "skip" known flakey 38The PCI Parity EDAC device has the ability to "skip" known flaky
39cards during the parity scan. These are set by the parity "blacklist" 39cards during the parity scan. These are set by the parity "blacklist"
40interface in the sysfs for PCI Parity. (See the PCI section in the sysfs 40interface in the sysfs for PCI Parity. (See the PCI section in the sysfs
41section below.) There is also a parity "whitelist" which is used as 41section below.) There is also a parity "whitelist" which is used as
@@ -101,7 +101,7 @@ Memory Controller (mc) Model
101 101
102First a background on the memory controller's model abstracted in EDAC. 102First a background on the memory controller's model abstracted in EDAC.
103Each mc device controls a set of DIMM memory modules. These modules are 103Each mc device controls a set of DIMM memory modules. These modules are
104layed out in a Chip-Select Row (csrowX) and Channel table (chX). There can 104laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can
105be multiple csrows and two channels. 105be multiple csrows and two channels.
106 106
107Memory controllers allow for several csrows, with 8 csrows being a typical value. 107Memory controllers allow for several csrows, with 8 csrows being a typical value.
@@ -131,7 +131,7 @@ for memory DIMMs:
131 DIMM_B1 131 DIMM_B1
132 132
133Labels for these slots are usually silk screened on the motherboard. Slots 133Labels for these slots are usually silk screened on the motherboard. Slots
134labeled 'A' are channel 0 in this example. Slots labled 'B' 134labeled 'A' are channel 0 in this example. Slots labeled 'B'
135are channel 1. Notice that there are two csrows possible on a 135are channel 1. Notice that there are two csrows possible on a
136physical DIMM. These csrows are allocated their csrow assignment 136physical DIMM. These csrows are allocated their csrow assignment
137based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM 137based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM
@@ -140,7 +140,7 @@ is placed in each Channel, the csrows cross both DIMMs.
140Memory DIMMs come single or dual "ranked". A rank is a populated csrow. 140Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
141Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above 141Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
142will have 1 csrow, csrow0. csrow1 will be empty. On the other hand, 142will have 1 csrow, csrow0. csrow1 will be empty. On the other hand,
143when 2 dual ranked DIMMs are similiaryly placed, then both csrow0 and 143when 2 dual ranked DIMMs are similarly placed, then both csrow0 and
144csrow1 will be populated. The pattern repeats itself for csrow2 and 144csrow1 will be populated. The pattern repeats itself for csrow2 and
145csrow3. 145csrow3.
146 146
@@ -246,7 +246,7 @@ Module Version read-only attribute file:
246 246
247 'mc_version' 247 'mc_version'
248 248
249 The EDAC CORE modules's version and compile date are shown here to 249 The EDAC CORE module's version and compile date are shown here to
250 indicate what EDAC is running. 250 indicate what EDAC is running.
251 251
252 252
@@ -423,7 +423,7 @@ Total memory managed by this csrow attribute file:
423 'size_mb' 423 'size_mb'
424 424
425 This attribute file displays, in count of megabytes, of memory 425 This attribute file displays, in count of megabytes, of memory
426 that this csrow contatins. 426 that this csrow contains.
427 427
428 428
429Memory Type attribute file: 429Memory Type attribute file:
@@ -557,7 +557,7 @@ On Header Type 00 devices the primary status is looked at
557for any parity error regardless of whether Parity is enabled on the 557for any parity error regardless of whether Parity is enabled on the
558device. (The spec indicates parity is generated in some cases). 558device. (The spec indicates parity is generated in some cases).
559On Header Type 01 bridges, the secondary status register is also 559On Header Type 01 bridges, the secondary status register is also
560looked at to see if parity ocurred on the bus on the other side of 560looked at to see if parity occurred on the bus on the other side of
561the bridge. 561the bridge.
562 562
563 563
@@ -588,7 +588,7 @@ Panic on PCI PARITY Error:
588 'panic_on_pci_parity' 588 'panic_on_pci_parity'
589 589
590 590
591 This control files enables or disables panic'ing when a parity 591 This control files enables or disables panicking when a parity
592 error has been detected. 592 error has been detected.
593 593
594 594
@@ -616,12 +616,12 @@ PCI Device Whitelist:
616 616
617 This control file allows for an explicit list of PCI devices to be 617 This control file allows for an explicit list of PCI devices to be
618 scanned for parity errors. Only devices found on this list will 618 scanned for parity errors. Only devices found on this list will
619 be examined. The list is a line of hexadecimel VENDOR and DEVICE 619 be examined. The list is a line of hexadecimal VENDOR and DEVICE
620 ID tuples: 620 ID tuples:
621 621
622 1022:7450,1434:16a6 622 1022:7450,1434:16a6
623 623
624 One or more can be inserted, seperated by a comma. 624 One or more can be inserted, separated by a comma.
625 625
626 To write the above list doing the following as one command line: 626 To write the above list doing the following as one command line:
627 627
@@ -639,11 +639,11 @@ PCI Device Blacklist:
639 639
640 This control file allows for a list of PCI devices to be 640 This control file allows for a list of PCI devices to be
641 skipped for scanning. 641 skipped for scanning.
642 The list is a line of hexadecimel VENDOR and DEVICE ID tuples: 642 The list is a line of hexadecimal VENDOR and DEVICE ID tuples:
643 643
644 1022:7450,1434:16a6 644 1022:7450,1434:16a6
645 645
646 One or more can be inserted, seperated by a comma. 646 One or more can be inserted, separated by a comma.
647 647
648 To write the above list doing the following as one command line: 648 To write the above list doing the following as one command line:
649 649
@@ -651,14 +651,14 @@ PCI Device Blacklist:
651 > /sys/devices/system/edac/pci/pci_parity_blacklist 651 > /sys/devices/system/edac/pci/pci_parity_blacklist
652 652
653 653
654 To display what the whitelist current contatins, 654 To display what the whitelist currently contains,
655 simply 'cat' the same file. 655 simply 'cat' the same file.
656 656
657======================================================================= 657=======================================================================
658 658
659PCI Vendor and Devices IDs can be obtained with the lspci command. Using 659PCI Vendor and Devices IDs can be obtained with the lspci command. Using
660the -n option lspci will display the vendor and device IDs. The system 660the -n option lspci will display the vendor and device IDs. The system
661adminstrator will have to determine which devices should be scanned or 661administrator will have to determine which devices should be scanned or
662skipped. 662skipped.
663 663
664 664
@@ -669,5 +669,5 @@ Turn OFF a whitelist by an empty echo command:
669 669
670 echo > /sys/devices/system/edac/pci/pci_parity_whitelist 670 echo > /sys/devices/system/edac/pci/pci_parity_whitelist
671 671
672and any previous blacklist will be utililzed. 672and any previous blacklist will be utilized.
673 673
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 74052d22d868..66fdc0744fe0 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -1,27 +1,47 @@
100-INDEX 100-INDEX
2 - this file (info on some of the filesystems supported by linux). 2 - this file (info on some of the filesystems supported by linux).
3Exporting
4 - explanation of how to make filesystems exportable.
3Locking 5Locking
4 - info on locking rules as they pertain to Linux VFS. 6 - info on locking rules as they pertain to Linux VFS.
5adfs.txt 7adfs.txt
6 - info and mount options for the Acorn Advanced Disc Filing System. 8 - info and mount options for the Acorn Advanced Disc Filing System.
9afs.txt
10 - info and examples for the distributed AFS (Andrew File System) fs.
7affs.txt 11affs.txt
8 - info and mount options for the Amiga Fast File System. 12 - info and mount options for the Amiga Fast File System.
13automount-support.txt
14 - information about filesystem automount support.
15befs.txt
16 - information about the BeOS filesystem for Linux.
9bfs.txt 17bfs.txt
10 - info for the SCO UnixWare Boot Filesystem (BFS). 18 - info for the SCO UnixWare Boot Filesystem (BFS).
11cifs.txt 19cifs.txt
12 - description of the CIFS filesystem 20 - description of the CIFS filesystem.
13coda.txt 21coda.txt
14 - description of the CODA filesystem. 22 - description of the CODA filesystem.
15configfs/ 23configfs/
16 - directory containing configfs documentation and example code. 24 - directory containing configfs documentation and example code.
17cramfs.txt 25cramfs.txt
18 - info on the cram filesystem for small storage (ROMs etc) 26 - info on the cram filesystem for small storage (ROMs etc).
27dentry-locking.txt
28 - info on the RCU-based dcache locking model.
19devfs/ 29devfs/
20 - directory containing devfs documentation. 30 - directory containing devfs documentation.
31directory-locking
32 - info about the locking scheme used for directory operations.
21dlmfs.txt 33dlmfs.txt
22 - info on the userspace interface to the OCFS2 DLM. 34 - info on the userspace interface to the OCFS2 DLM.
23ext2.txt 35ext2.txt
24 - info, mount options and specifications for the Ext2 filesystem. 36 - info, mount options and specifications for the Ext2 filesystem.
37ext3.txt
38 - info, mount options and specifications for the Ext3 filesystem.
39files.txt
40 - info on file management in the Linux kernel.
41fuse.txt
42 - info on the Filesystem in User SpacE including mount options.
43hfs.txt
44 - info on the Macintosh HFS Filesystem for Linux.
25hpfs.txt 45hpfs.txt
26 - info and mount options for the OS/2 HPFS. 46 - info and mount options for the OS/2 HPFS.
27isofs.txt 47isofs.txt
@@ -32,23 +52,43 @@ ncpfs.txt
32 - info on Novell Netware(tm) filesystem using NCP protocol. 52 - info on Novell Netware(tm) filesystem using NCP protocol.
33ntfs.txt 53ntfs.txt
34 - info and mount options for the NTFS filesystem (Windows NT). 54 - info and mount options for the NTFS filesystem (Windows NT).
35proc.txt
36 - info on Linux's /proc filesystem.
37ocfs2.txt 55ocfs2.txt
38 - info and mount options for the OCFS2 clustered filesystem. 56 - info and mount options for the OCFS2 clustered filesystem.
57porting
58 - various information on filesystem porting.
59proc.txt
60 - info on Linux's /proc filesystem.
61ramfs-rootfs-initramfs.txt
62 - info on the 'in memory' filesystems ramfs, rootfs and initramfs.
63reiser4.txt
64 - info on the Reiser4 filesystem based on dancing tree algorithms.
65relayfs.txt
66 - info on relayfs, for efficient streaming from kernel to user space.
39romfs.txt 67romfs.txt
40 - Description of the ROMFS filesystem. 68 - description of the ROMFS filesystem.
41smbfs.txt 69smbfs.txt
42 - info on using filesystems with the SMB protocol (Windows 3.11 and NT) 70 - info on using filesystems with the SMB protocol (Win 3.11 and NT).
71spufs.txt
72 - info and mount options for the SPU filesystem used on Cell.
73sysfs-pci.txt
74 - info on accessing PCI device resources through sysfs.
75sysfs.txt
76 - info on sysfs, a ram-based filesystem for exporting kernel objects.
43sysv-fs.txt 77sysv-fs.txt
44 - info on the SystemV/V7/Xenix/Coherent filesystem. 78 - info on the SystemV/V7/Xenix/Coherent filesystem.
79tmpfs.txt
80 - info on tmpfs, a filesystem that holds all files in virtual memory.
45udf.txt 81udf.txt
46 - info and mount options for the UDF filesystem. 82 - info and mount options for the UDF filesystem.
47ufs.txt 83ufs.txt
48 - info on the ufs filesystem. 84 - info on the ufs filesystem.
85v9fs.txt
86 - v9fs is a Unix implementation of the Plan 9 9p remote fs protocol.
49vfat.txt 87vfat.txt
50 - info on using the VFAT filesystem used in Windows NT and Windows 95 88 - info on using the VFAT filesystem used in Windows NT and Windows 95
51vfs.txt 89vfs.txt
52 - Overview of the Virtual File System 90 - overview of the Virtual File System
53xfs.txt 91xfs.txt
54 - info and mount options for the XFS filesystem. 92 - info and mount options for the XFS filesystem.
93xip.txt
94 - info on execute-in-place for file mappings.
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index aa7ba00ec082..171a44ebd939 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -78,8 +78,6 @@ Code Seq# Include File Comments
78'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem 78'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem
79'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl 79'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl
80 <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> 80 <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
81'6' 00-10 <asm-i386/processor.h> Intel IA32 microcode update driver
82 <mailto:tigran@veritas.com>
83'8' all SNP8023 advanced NIC card 81'8' all SNP8023 advanced NIC card
84 <mailto:mcr@solidum.com> 82 <mailto:mcr@solidum.com>
85'A' 00-1F linux/apm_bios.h 83'A' 00-1F linux/apm_bios.h
diff --git a/Documentation/m68k/README.buddha b/Documentation/m68k/README.buddha
index bf802ffc98ad..ef484a719bb9 100644
--- a/Documentation/m68k/README.buddha
+++ b/Documentation/m68k/README.buddha
@@ -29,7 +29,7 @@ address is written to $4a, then the whole Byte is written to
29$48, while it doesn't matter how often you're writing to $4a 29$48, while it doesn't matter how often you're writing to $4a
30as long as $48 is not touched. After $48 has been written, 30as long as $48 is not touched. After $48 has been written,
31the whole card disappears from $e8 and is mapped to the new 31the whole card disappears from $e8 and is mapped to the new
32address just written. Make shure $4a is written before $48, 32address just written. Make sure $4a is written before $48,
33otherwise your chance is only 1:16 to find the board :-). 33otherwise your chance is only 1:16 to find the board :-).
34 34
35The local memory-map is even active when mapped to $e8: 35The local memory-map is even active when mapped to $e8:
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 545447ac503a..a12059886755 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -87,7 +87,7 @@
87 * would fail and generate an error message in the system log. 87 * would fail and generate an error message in the system log.
88 * - For opt_c: slave should not be set to the master's setting 88 * - For opt_c: slave should not be set to the master's setting
89 * while it is running. It was already set during enslave. To 89 * while it is running. It was already set during enslave. To
90 * simplify things, it is now handeled separately. 90 * simplify things, it is now handled separately.
91 * 91 *
92 * - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com> 92 * - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
93 * - Code cleanup and style changes 93 * - Code cleanup and style changes
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 3759acf95b29..6091e5f6794f 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -24,36 +24,44 @@ Since kernel 2.3.99-pre6, this driver incorporates the support for the
24 24
25This driver supports the following hardware: 25This driver supports the following hardware:
26 26
27 3c590 Vortex 10Mbps 27 3c590 Vortex 10Mbps
28 3c592 EISA 10mbps Demon/Vortex 28 3c592 EISA 10Mbps Demon/Vortex
29 3c597 EISA Fast Demon/Vortex 29 3c597 EISA Fast Demon/Vortex
30 3c595 Vortex 100baseTx 30 3c595 Vortex 100baseTx
31 3c595 Vortex 100baseT4 31 3c595 Vortex 100baseT4
32 3c595 Vortex 100base-MII 32 3c595 Vortex 100base-MII
33 3Com Vortex 33 3c900 Boomerang 10baseT
34 3c900 Boomerang 10baseT 34 3c900 Boomerang 10Mbps Combo
35 3c900 Boomerang 10Mbps Combo 35 3c900 Cyclone 10Mbps TPO
36 3c900 Cyclone 10Mbps TPO 36 3c900 Cyclone 10Mbps Combo
37 3c900B Cyclone 10Mbps T 37 3c900 Cyclone 10Mbps TPC
38 3c900 Cyclone 10Mbps Combo 38 3c900B-FL Cyclone 10base-FL
39 3c900 Cyclone 10Mbps TPC 39 3c905 Boomerang 100baseTx
40 3c900B-FL Cyclone 10base-FL 40 3c905 Boomerang 100baseT4
41 3c905 Boomerang 100baseTx 41 3c905B Cyclone 100baseTx
42 3c905 Boomerang 100baseT4 42 3c905B Cyclone 10/100/BNC
43 3c905B Cyclone 100baseTx 43 3c905B-FX Cyclone 100baseFx
44 3c905B Cyclone 10/100/BNC 44 3c905C Tornado
45 3c905B-FX Cyclone 100baseFx 45 3c920B-EMB-WNM (ATI Radeon 9100 IGP)
46 3c905C Tornado 46 3c980 Cyclone
47 3c980 Cyclone 47 3c980C Python-T
48 3cSOHO100-TX Hurricane 48 3cSOHO100-TX Hurricane
49 3c555 Laptop Hurricane 49 3c555 Laptop Hurricane
50 3c575 Boomerang CardBus 50 3c556 Laptop Tornado
51 3CCFE575 Cyclone CardBus 51 3c556B Laptop Hurricane
52 3CCFE575CT Cyclone CardBus 52 3c575 [Megahertz] 10/100 LAN CardBus
53 3CCFE656 Cyclone CardBus 53 3c575 Boomerang CardBus
54 3CCFEM656 Cyclone CardBus 54 3CCFE575BT Cyclone CardBus
55 3c450 Cyclone/unknown 55 3CCFE575CT Tornado CardBus
56 56 3CCFE656 Cyclone CardBus
57 3CCFEM656B Cyclone+Winmodem CardBus
58 3CXFEM656C Tornado+Winmodem CardBus
59 3c450 HomePNA Tornado
60 3c920 Tornado
61 3c982 Hydra Dual Port A
62 3c982 Hydra Dual Port B
63 3c905B-T4
64 3c920B-EMB-WNM Tornado
57 65
58Module parameters 66Module parameters
59================= 67=================
@@ -293,11 +301,6 @@ Donald's wake-on-LAN page:
293 301
294 http://www.scyld.com/wakeonlan.html 302 http://www.scyld.com/wakeonlan.html
295 303
2963Com's documentation for many NICs, including the ones supported by
297this driver is available at
298
299 http://support.3com.com/partners/developer/developer_form.html
300
3013Com's DOS-based application for setting up the NICs EEPROMs: 3043Com's DOS-based application for setting up the NICs EEPROMs:
302 305
303 ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe 306 ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe
@@ -312,10 +315,10 @@ Autonegotiation notes
312--------------------- 315---------------------
313 316
314 The driver uses a one-minute heartbeat for adapting to changes in 317 The driver uses a one-minute heartbeat for adapting to changes in
315 the external LAN environment. This means that when, for example, a 318 the external LAN environment if link is up and 5 seconds if link is down.
316 machine is unplugged from a hubbed 10baseT LAN plugged into a 319 This means that when, for example, a machine is unplugged from a hubbed
317 switched 100baseT LAN, the throughput will be quite dreadful for up 320 10baseT LAN plugged into a switched 100baseT LAN, the throughput
318 to sixty seconds. Be patient. 321 will be quite dreadful for up to sixty seconds. Be patient.
319 322
320 Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>: 323 Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>:
321 324
diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt
index af0f6eabfa1c..9529c9c9fd59 100644
--- a/Documentation/pnp.txt
+++ b/Documentation/pnp.txt
@@ -115,6 +115,9 @@ pnp_unregister_protocol
115pnp_register_driver 115pnp_register_driver
116- adds a PnP driver to the Plug and Play Layer 116- adds a PnP driver to the Plug and Play Layer
117- this includes driver model integration 117- this includes driver model integration
118- returns zero for success or a negative error number for failure; count
119 calls to the .add() method if you need to know how many devices bind to
120 the driver
118 121
119pnp_unregister_driver 122pnp_unregister_driver
120- removes a PnP driver from the Plug and Play Layer 123- removes a PnP driver from the Plug and Play Layer
diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt
new file mode 100644
index 000000000000..8529a17ffaa1
--- /dev/null
+++ b/Documentation/robust-futex-ABI.txt
@@ -0,0 +1,182 @@
1Started by Paul Jackson <pj@sgi.com>
2
3The robust futex ABI
4--------------------
5
6Robust_futexes provide a mechanism that is used in addition to normal
7futexes, for kernel assist of cleanup of held locks on task exit.
8
9The interesting data as to what futexes a thread is holding is kept on a
10linked list in user space, where it can be updated efficiently as locks
11are taken and dropped, without kernel intervention. The only additional
12kernel intervention required for robust_futexes above and beyond what is
13required for futexes is:
14
15 1) a one time call, per thread, to tell the kernel where its list of
16 held robust_futexes begins, and
17 2) internal kernel code at exit, to handle any listed locks held
18 by the exiting thread.
19
20The existing normal futexes already provide a "Fast Userspace Locking"
21mechanism, which handles uncontested locking without needing a system
22call, and handles contested locking by maintaining a list of waiting
23threads in the kernel. Options on the sys_futex(2) system call support
24waiting on a particular futex, and waking up the next waiter on a
25particular futex.
26
27For robust_futexes to work, the user code (typically in a library such
28as glibc linked with the application) has to manage and place the
29necessary list elements exactly as the kernel expects them. If it fails
30to do so, then improperly listed locks will not be cleaned up on exit,
31probably causing deadlock or other such failure of the other threads
32waiting on the same locks.
33
34A thread that anticipates possibly using robust_futexes should first
35issue the system call:
36
37 asmlinkage long
38 sys_set_robust_list(struct robust_list_head __user *head, size_t len);
39
40The pointer 'head' points to a structure in the threads address space
41consisting of three words. Each word is 32 bits on 32 bit arch's, or 64
42bits on 64 bit arch's, and local byte order. Each thread should have
43its own thread private 'head'.
44
45If a thread is running in 32 bit compatibility mode on a 64 native arch
46kernel, then it can actually have two such structures - one using 32 bit
47words for 32 bit compatibility mode, and one using 64 bit words for 64
48bit native mode. The kernel, if it is a 64 bit kernel supporting 32 bit
49compatibility mode, will attempt to process both lists on each task
50exit, if the corresponding sys_set_robust_list() call has been made to
51setup that list.
52
53 The first word in the memory structure at 'head' contains a
54 pointer to a single linked list of 'lock entries', one per lock,
55 as described below. If the list is empty, the pointer will point
56 to itself, 'head'. The last 'lock entry' points back to the 'head'.
57
58 The second word, called 'offset', specifies the offset from the
59 address of the associated 'lock entry', plus or minus, of what will
60 be called the 'lock word', from that 'lock entry'. The 'lock word'
61 is always a 32 bit word, unlike the other words above. The 'lock
62 word' holds 3 flag bits in the upper 3 bits, and the thread id (TID)
63 of the thread holding the lock in the bottom 29 bits. See further
64 below for a description of the flag bits.
65
66 The third word, called 'list_op_pending', contains transient copy of
67 the address of the 'lock entry', during list insertion and removal,
68 and is needed to correctly resolve races should a thread exit while
69 in the middle of a locking or unlocking operation.
70
71Each 'lock entry' on the single linked list starting at 'head' consists
72of just a single word, pointing to the next 'lock entry', or back to
73'head' if there are no more entries. In addition, nearby to each 'lock
74entry', at an offset from the 'lock entry' specified by the 'offset'
75word, is one 'lock word'.
76
77The 'lock word' is always 32 bits, and is intended to be the same 32 bit
78lock variable used by the futex mechanism, in conjunction with
79robust_futexes. The kernel will only be able to wakeup the next thread
80waiting for a lock on a threads exit if that next thread used the futex
81mechanism to register the address of that 'lock word' with the kernel.
82
83For each futex lock currently held by a thread, if it wants this
84robust_futex support for exit cleanup of that lock, it should have one
85'lock entry' on this list, with its associated 'lock word' at the
86specified 'offset'. Should a thread die while holding any such locks,
87the kernel will walk this list, mark any such locks with a bit
88indicating their holder died, and wakeup the next thread waiting for
89that lock using the futex mechanism.
90
91When a thread has invoked the above system call to indicate it
92anticipates using robust_futexes, the kernel stores the passed in 'head'
93pointer for that task. The task may retrieve that value later on by
94using the system call:
95
96 asmlinkage long
97 sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
98 size_t __user *len_ptr);
99
100It is anticipated that threads will use robust_futexes embedded in
101larger, user level locking structures, one per lock. The kernel
102robust_futex mechanism doesn't care what else is in that structure, so
103long as the 'offset' to the 'lock word' is the same for all
104robust_futexes used by that thread. The thread should link those locks
105it currently holds using the 'lock entry' pointers. It may also have
106other links between the locks, such as the reverse side of a double
107linked list, but that doesn't matter to the kernel.
108
109By keeping its locks linked this way, on a list starting with a 'head'
110pointer known to the kernel, the kernel can provide to a thread the
111essential service available for robust_futexes, which is to help clean
112up locks held at the time of (a perhaps unexpected) exit.
113
114Actual locking and unlocking, during normal operations, is handled
115entirely by user level code in the contending threads, and by the
116existing futex mechanism to wait for, and wakeup, locks. The kernels
117only essential involvement in robust_futexes is to remember where the
118list 'head' is, and to walk the list on thread exit, handling locks
119still held by the departing thread, as described below.
120
121There may exist thousands of futex lock structures in a threads shared
122memory, on various data structures, at a given point in time. Only those
123lock structures for locks currently held by that thread should be on
124that thread's robust_futex linked lock list at a given time.
125
126A given futex lock structure in a user shared memory region may be held
127at different times by any of the threads with access to that region. The
128thread currently holding such a lock, if any, is marked with the threads
129TID in the lower 29 bits of the 'lock word'.
130
131When adding or removing a lock from its list of held locks, in order for
132the kernel to correctly handle lock cleanup regardless of when the task
133exits (perhaps it gets an unexpected signal 9 in the middle of
134manipulating this list), the user code must observe the following
135protocol on 'lock entry' insertion and removal:
136
137On insertion:
138 1) set the 'list_op_pending' word to the address of the 'lock word'
139 to be inserted,
140 2) acquire the futex lock,
141 3) add the lock entry, with its thread id (TID) in the bottom 29 bits
142 of the 'lock word', to the linked list starting at 'head', and
143 4) clear the 'list_op_pending' word.
144
145On removal:
146 1) set the 'list_op_pending' word to the address of the 'lock word'
147 to be removed,
148 2) remove the lock entry for this lock from the 'head' list,
149 3) release the futex lock, and
150 4) clear the 'list_op_pending' word.
151
152On exit, the kernel will consider the address stored in
153'list_op_pending' and the address of each 'lock word' found by walking
154the list starting at 'head'. For each such address, if the bottom 29
155bits of the 'lock word' at offset 'offset' from that address equals the
156exiting threads TID, then the kernel will do two things:
157
158 1) if bit 31 (0x80000000) is set in that word, then attempt a futex
159 wakeup on that address, which will waken the next thread that has
160 used the futex mechanism to wait on that address, and
161 2) atomically set bit 30 (0x40000000) in the 'lock word'.
162
163In the above, bit 31 was set by futex waiters on that lock to indicate
164they were waiting, and bit 30 is set by the kernel to indicate that the
165lock owner died holding the lock.
166
167The kernel exit code will silently stop scanning the list further if at
168any point:
169
170 1) the 'head' pointer or a subsequent linked list pointer
171 is not a valid address of a user space word
172 2) the calculated location of the 'lock word' (address plus
173 'offset') is not the valid address of a 32 bit user space
174 word
175 3) if the list contains more than 1 million (subject to
176 future kernel configuration changes) elements.
177
178When the kernel sees a list entry whose 'lock word' doesn't have the
179current threads TID in the lower 29 bits, it does nothing with that
180entry, and goes on to the next entry.
181
182Bit 29 (0x20000000) of the 'lock word' is reserved for future use.
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
new file mode 100644
index 000000000000..df82d75245a0
--- /dev/null
+++ b/Documentation/robust-futexes.txt
@@ -0,0 +1,218 @@
1Started by: Ingo Molnar <mingo@redhat.com>
2
3Background
4----------
5
6what are robust futexes? To answer that, we first need to understand
7what futexes are: normal futexes are special types of locks that in the
8noncontended case can be acquired/released from userspace without having
9to enter the kernel.
10
11A futex is in essence a user-space address, e.g. a 32-bit lock variable
12field. If userspace notices contention (the lock is already owned and
13someone else wants to grab it too) then the lock is marked with a value
14that says "there's a waiter pending", and the sys_futex(FUTEX_WAIT)
15syscall is used to wait for the other guy to release it. The kernel
16creates a 'futex queue' internally, so that it can later on match up the
17waiter with the waker - without them having to know about each other.
18When the owner thread releases the futex, it notices (via the variable
19value) that there were waiter(s) pending, and does the
20sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have
21taken and released the lock, the futex is again back to 'uncontended'
22state, and there's no in-kernel state associated with it. The kernel
23completely forgets that there ever was a futex at that address. This
24method makes futexes very lightweight and scalable.
25
26"Robustness" is about dealing with crashes while holding a lock: if a
27process exits prematurely while holding a pthread_mutex_t lock that is
28also shared with some other process (e.g. yum segfaults while holding a
29pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need
30to be notified that the last owner of the lock exited in some irregular
31way.
32
33To solve such types of problems, "robust mutex" userspace APIs were
34created: pthread_mutex_lock() returns an error value if the owner exits
35prematurely - and the new owner can decide whether the data protected by
36the lock can be recovered safely.
37
38There is a big conceptual problem with futex based mutexes though: it is
39the kernel that destroys the owner task (e.g. due to a SEGFAULT), but
40the kernel cannot help with the cleanup: if there is no 'futex queue'
41(and in most cases there is none, futexes being fast lightweight locks)
42then the kernel has no information to clean up after the held lock!
43Userspace has no chance to clean up after the lock either - userspace is
44the one that crashes, so it has no opportunity to clean up. Catch-22.
45
46In practice, when e.g. yum is kill -9-ed (or segfaults), a system reboot
47is needed to release that futex based lock. This is one of the leading
48bugreports against yum.
49
50To solve this problem, the traditional approach was to extend the vma
51(virtual memory area descriptor) concept to have a notion of 'pending
52robust futexes attached to this area'. This approach requires 3 new
53syscall variants to sys_futex(): FUTEX_REGISTER, FUTEX_DEREGISTER and
54FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
55they have a robust_head set. This approach has two fundamental problems
56left:
57
58 - it has quite complex locking and race scenarios. The vma-based
59 approach had been pending for years, but they are still not completely
60 reliable.
61
62 - they have to scan _every_ vma at sys_exit() time, per thread!
63
64The second disadvantage is a real killer: pthread_exit() takes around 1
65microsecond on Linux, but with thousands (or tens of thousands) of vmas
66every pthread_exit() takes a millisecond or more, also totally
67destroying the CPU's L1 and L2 caches!
68
69This is very much noticeable even for normal process sys_exit_group()
70calls: the kernel has to do the vma scanning unconditionally! (this is
71because the kernel has no knowledge about how many robust futexes there
72are to be cleaned up, because a robust futex might have been registered
73in another task, and the futex variable might have been simply mmap()-ed
74into this process's address space).
75
76This huge overhead forced the creation of CONFIG_FUTEX_ROBUST so that
77normal kernels can turn it off, but worse than that: the overhead makes
78robust futexes impractical for any type of generic Linux distribution.
79
80So something had to be done.
81
82New approach to robust futexes
83------------------------------
84
85At the heart of this new approach there is a per-thread private list of
86robust locks that userspace is holding (maintained by glibc) - which
87userspace list is registered with the kernel via a new syscall [this
88registration happens at most once per thread lifetime]. At do_exit()
89time, the kernel checks this user-space list: are there any robust futex
90locks to be cleaned up?
91
92In the common case, at do_exit() time, there is no list registered, so
93the cost of robust futexes is just a simple current->robust_list != NULL
94comparison. If the thread has registered a list, then normally the list
95is empty. If the thread/process crashed or terminated in some incorrect
96way then the list might be non-empty: in this case the kernel carefully
97walks the list [not trusting it], and marks all locks that are owned by
98this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if
99any).
100
101The list is guaranteed to be private and per-thread at do_exit() time,
102so it can be accessed by the kernel in a lockless way.
103
104There is one race possible though: since adding to and removing from the
105list is done after the futex is acquired by glibc, there is a few
106instructions window for the thread (or process) to die there, leaving
107the futex hung. To protect against this possibility, userspace (glibc)
108also maintains a simple per-thread 'list_op_pending' field, to allow the
109kernel to clean up if the thread dies after acquiring the lock, but just
110before it could have added itself to the list. Glibc sets this
111list_op_pending field before it tries to acquire the futex, and clears
112it after the list-add (or list-remove) has finished.
113
114That's all that is needed - all the rest of robust-futex cleanup is done
115in userspace [just like with the previous patches].
116
117Ulrich Drepper has implemented the necessary glibc support for this new
118mechanism, which fully enables robust mutexes.
119
120Key differences of this userspace-list based approach, compared to the
121vma based method:
122
123 - it's much, much faster: at thread exit time, there's no need to loop
124 over every vma (!), which the VM-based method has to do. Only a very
125 simple 'is the list empty' op is done.
126
127 - no VM changes are needed - 'struct address_space' is left alone.
128
129 - no registration of individual locks is needed: robust mutexes don't
130 need any extra per-lock syscalls. Robust mutexes thus become a very
131 lightweight primitive - so they don't force the application designer
132 to do a hard choice between performance and robustness - robust
133 mutexes are just as fast.
134
135 - no per-lock kernel allocation happens.
136
137 - no resource limits are needed.
138
139 - no kernel-space recovery call (FUTEX_RECOVER) is needed.
140
141 - the implementation and the locking is "obvious", and there are no
142 interactions with the VM.
143
144Performance
145-----------
146
147I have benchmarked the time needed for the kernel to process a list of 1
148million (!) held locks, using the new method [on a 2GHz CPU]:
149
150 - with FUTEX_WAIT set [contended mutex]: 130 msecs
151 - without FUTEX_WAIT set [uncontended mutex]: 30 msecs
152
153I have also measured an approach where glibc does the lock notification
154[which it currently does for !pshared robust mutexes], and that took 256
155msecs - clearly slower, due to the 1 million FUTEX_WAKE syscalls
156userspace had to do.
157
158(1 million held locks are unheard of - we expect at most a handful of
159locks to be held at a time. Nevertheless it's nice to know that this
160approach scales nicely.)
161
162Implementation details
163----------------------
164
165The patch adds two new syscalls: one to register the userspace list, and
166one to query the registered list pointer:
167
168 asmlinkage long
169 sys_set_robust_list(struct robust_list_head __user *head,
170 size_t len);
171
172 asmlinkage long
173 sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
174 size_t __user *len_ptr);
175
176List registration is very fast: the pointer is simply stored in
177current->robust_list. [Note that in the future, if robust futexes become
178widespread, we could extend sys_clone() to register a robust-list head
179for new threads, without the need of another syscall.]
180
181So there is virtually zero overhead for tasks not using robust futexes,
182and even for robust futex users, there is only one extra syscall per
183thread lifetime, and the cleanup operation, if it happens, is fast and
184straightforward. The kernel doesn't have any internal distinction between
185robust and normal futexes.
186
187If a futex is found to be held at exit time, the kernel sets the
188following bit of the futex word:
189
190 #define FUTEX_OWNER_DIED 0x40000000
191
192and wakes up the next futex waiter (if any). User-space does the rest of
193the cleanup.
194
195Otherwise, robust futexes are acquired by glibc by putting the TID into
196the futex field atomically. Waiters set the FUTEX_WAITERS bit:
197
198 #define FUTEX_WAITERS 0x80000000
199
200and the remaining bits are for the TID.
201
202Testing, architecture support
203-----------------------------
204
205i've tested the new syscalls on x86 and x86_64, and have made sure the
206parsing of the userspace list is robust [ ;-) ] even if the list is
207deliberately corrupted.
208
209i386 and x86_64 syscalls are wired up at the moment, and Ulrich has
210tested the new glibc code (on x86_64 and i386), and it works for his
211robust-mutex testcases.
212
213All other architectures should build just fine too - but they wont have
214the new syscalls yet.
215
216Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
217inline function before wiring up the syscalls (that function returns
218-ENOSYS right now).
diff --git a/Documentation/rpc-cache.txt b/Documentation/rpc-cache.txt
index 2b5d4434fa5a..5f757c8cf979 100644
--- a/Documentation/rpc-cache.txt
+++ b/Documentation/rpc-cache.txt
@@ -1,4 +1,4 @@
1This document gives a brief introduction to the caching 1 This document gives a brief introduction to the caching
2mechanisms in the sunrpc layer that is used, in particular, 2mechanisms in the sunrpc layer that is used, in particular,
3for NFS authentication. 3for NFS authentication.
4 4
@@ -25,25 +25,17 @@ The common code handles such things as:
25 - supporting 'NEGATIVE' as well as positive entries 25 - supporting 'NEGATIVE' as well as positive entries
26 - allowing an EXPIRED time on cache items, and removing 26 - allowing an EXPIRED time on cache items, and removing
27 items after they expire, and are no longer in-use. 27 items after they expire, and are no longer in-use.
28
29 Future code extensions are expect to handle
30 - making requests to user-space to fill in cache entries 28 - making requests to user-space to fill in cache entries
31 - allowing user-space to directly set entries in the cache 29 - allowing user-space to directly set entries in the cache
32 - delaying RPC requests that depend on as-yet incomplete 30 - delaying RPC requests that depend on as-yet incomplete
33 cache entries, and replaying those requests when the cache entry 31 cache entries, and replaying those requests when the cache entry
34 is complete. 32 is complete.
35 - maintaining last-access times on cache entries 33 - clean out old entries as they expire.
36 - clean out old entries when the caches become full
37
38The code for performing a cache lookup is also common, but in the form
39of a template. i.e. a #define.
40Each cache defines a lookup function by using the DefineCacheLookup
41macro, or the simpler DefineSimpleCacheLookup macro
42 34
43Creating a Cache 35Creating a Cache
44---------------- 36----------------
45 37
461/ A cache needs a datum to cache. This is in the form of a 381/ A cache needs a datum to store. This is in the form of a
47 structure definition that must contain a 39 structure definition that must contain a
48 struct cache_head 40 struct cache_head
49 as an element, usually the first. 41 as an element, usually the first.
@@ -51,35 +43,69 @@ Creating a Cache
51 Each cache element is reference counted and contains 43 Each cache element is reference counted and contains
52 expiry and update times for use in cache management. 44 expiry and update times for use in cache management.
532/ A cache needs a "cache_detail" structure that 452/ A cache needs a "cache_detail" structure that
54 describes the cache. This stores the hash table, and some 46 describes the cache. This stores the hash table, some
55 parameters for cache management. 47 parameters for cache management, and some operations detailing how
563/ A cache needs a lookup function. This is created using 48 to work with particular cache items.
57 the DefineCacheLookup macro. This lookup function is used both 49 The operations required are:
58 to find entries and to update entries. The normal mode for 50 struct cache_head *alloc(void)
59 updating an entry is to replace the old entry with a new 51 This simply allocates appropriate memory and returns
60 entry. However it is possible to allow update-in-place 52 a pointer to the cache_detail embedded within the
61 for those caches where it makes sense (no atomicity issues 53 structure
62 or indirect reference counting issue) 54 void cache_put(struct kref *)
634/ A cache needs to be registered using cache_register(). This 55 This is called when the last reference to an item
64 includes in on a list of caches that will be regularly 56 is dropped. The pointer passed is to the 'ref' field
65 cleaned to discard old data. For this to work, some 57 in the cache_head. cache_put should release any
66 thread must periodically call cache_clean 58 references create by 'cache_init' and, if CACHE_VALID
67 59 is set, any references created by cache_update.
60 It should then release the memory allocated by
61 'alloc'.
62 int match(struct cache_head *orig, struct cache_head *new)
63 test if the keys in the two structures match. Return
64 1 if they do, 0 if they don't.
65 void init(struct cache_head *orig, struct cache_head *new)
66 Set the 'key' fields in 'new' from 'orig'. This may
67 include taking references to shared objects.
68 void update(struct cache_head *orig, struct cache_head *new)
69 Set the 'content' fields in 'new' from 'orig'.
70 int cache_show(struct seq_file *m, struct cache_detail *cd,
71 struct cache_head *h)
72 Optional. Used to provide a /proc file that lists the
73 contents of a cache. This should show one item,
74 usually on just one line.
75 int cache_request(struct cache_detail *cd, struct cache_head *h,
76 char **bpp, int *blen)
77 Format a request to be sent to user-space for an item
78 to be instantiated. *bpp is a buffer of size *blen.
79 bpp should be moved forward over the encoded message,
80 and *blen should be reduced to show how much free
81 space remains. Return 0 on success or <0 if not
82 enough room or other problem.
83 int cache_parse(struct cache_detail *cd, char *buf, int len)
84 A message from user space has arrived to fill out a
85 cache entry. It is in 'buf' of length 'len'.
86 cache_parse should parse this, find the item in the
87 cache with sunrpc_cache_lookup, and update the item
88 with sunrpc_cache_update.
89
90
913/ A cache needs to be registered using cache_register(). This
92 includes it on a list of caches that will be regularly
93 cleaned to discard old data.
94
68Using a cache 95Using a cache
69------------- 96-------------
70 97
71To find a value in a cache, call the lookup function passing it a the 98To find a value in a cache, call sunrpc_cache_lookup passing a pointer
72datum which contains key, and possibly content, and a flag saying 99to the cache_head in a sample item with the 'key' fields filled in.
73whether to update the cache with new data from the datum. Depending 100This will be passed to ->match to identify the target entry. If no
74on how the cache lookup function was defined, it may take an extra 101entry is found, a new entry will be create, added to the cache, and
75argument to identify the particular cache in question. 102marked as not containing valid data.
76 103
77Except in cases of kmalloc failure, the lookup function 104The item returned is typically passed to cache_check which will check
78will return a new datum which will store the key and 105if the data is valid, and may initiate an up-call to get fresh data.
79may contain valid content, or may not. 106cache_check will return -ENOENT if the entry is negative or if an up
80This datum is typically passed to cache_check which determines the 107call is needed but not possible, -EAGAIN if an upcall is pending,
81validity of the datum and may later initiate an upcall to fill 108or 0 if the data is valid;
82in the data.
83 109
84cache_check can be passed a "struct cache_req *". This structure is 110cache_check can be passed a "struct cache_req *". This structure is
85typically embedded in the actual request and can be used to create a 111typically embedded in the actual request and can be used to create a
@@ -90,6 +116,13 @@ item does become valid, the deferred copy of the request will be
90revisited (->revisit). It is expected that this method will 116revisited (->revisit). It is expected that this method will
91reschedule the request for processing. 117reschedule the request for processing.
92 118
119The value returned by sunrpc_cache_lookup can also be passed to
120sunrpc_cache_update to set the content for the item. A second item is
121passed which should hold the content. If the item found by _lookup
122has valid data, then it is discarded and a new item is created. This
123saves any user of an item from worrying about content changing while
124it is being inspected. If the item found by _lookup does not contain
125valid data, then the content is copied across and CACHE_VALID is set.
93 126
94Populating a cache 127Populating a cache
95------------------ 128------------------
@@ -114,8 +147,8 @@ should be create or updated to have the given content, and the
114expiry time should be set on that item. 147expiry time should be set on that item.
115 148
116Reading from a channel is a bit more interesting. When a cache 149Reading from a channel is a bit more interesting. When a cache
117lookup fail, or when it suceeds but finds an entry that may soon 150lookup fails, or when it succeeds but finds an entry that may soon
118expiry, a request is lodged for that cache item to be updated by 151expire, a request is lodged for that cache item to be updated by
119user-space. These requests appear in the channel file. 152user-space. These requests appear in the channel file.
120 153
121Successive reads will return successive requests. 154Successive reads will return successive requests.
@@ -130,7 +163,7 @@ Thus a user-space helper is likely to:
130 write a response 163 write a response
131 loop. 164 loop.
132 165
133If it dies and needs to be restarted, any requests that have not be 166If it dies and needs to be restarted, any requests that have not been
134answered will still appear in the file and will be read by the new 167answered will still appear in the file and will be read by the new
135instance of the helper. 168instance of the helper.
136 169
@@ -142,10 +175,9 @@ Each cache should also define a "cache_request" method which
142takes a cache item and encodes a request into the buffer 175takes a cache item and encodes a request into the buffer
143provided. 176provided.
144 177
145
146Note: If a cache has no active readers on the channel, and has had no 178Note: If a cache has no active readers on the channel, and has had no
147active readers for more than 60 seconds, further requests will not be 179active readers for more than 60 seconds, further requests will not be
148added to the channel but instead all looks that do not find a valid 180added to the channel but instead all lookups that do not find a valid
149entry will fail. This is partly for backward compatibility: The 181entry will fail. This is partly for backward compatibility: The
150previous nfs exports table was deemed to be authoritative and a 182previous nfs exports table was deemed to be authoritative and a
151failed lookup meant a definite 'no'. 183failed lookup meant a definite 'no'.
@@ -154,18 +186,17 @@ request/response format
154----------------------- 186-----------------------
155 187
156While each cache is free to use its own format for requests 188While each cache is free to use its own format for requests
157and responses over channel, the following is recommended are 189and responses over channel, the following is recommended as
158appropriate and support routines are available to help: 190appropriate and support routines are available to help:
159Each request or response record should be printable ASCII 191Each request or response record should be printable ASCII
160with precisely one newline character which should be at the end. 192with precisely one newline character which should be at the end.
161Fields within the record should be separated by spaces, normally one. 193Fields within the record should be separated by spaces, normally one.
162If spaces, newlines, or nul characters are needed in a field they 194If spaces, newlines, or nul characters are needed in a field they
163much be quotes. two mechanisms are available: 195must be quoted. Two mechanisms are available:
1641/ If a field begins '\x' then it must contain an even number of 1961/ If a field begins '\x' then it must contain an even number of
165 hex digits, and pairs of these digits provide the bytes in the 197 hex digits, and pairs of these digits provide the bytes in the
166 field. 198 field.
1672/ otherwise a \ in the field must be followed by 3 octal digits 1992/ otherwise a \ in the field must be followed by 3 octal digits
168 which give the code for a byte. Other characters are treated 200 which give the code for a byte. Other characters are treated
169 as them selves. At the very least, space, newlines nul, and 201 as themselves. At the very least, space, newline, nul, and
170 '\' must be quoted in this way. 202 '\' must be quoted in this way.
171
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index 6dc9d9f622ca..6feef9e82b63 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -2836,7 +2836,7 @@ struct _snd_pcm_runtime {
2836 2836
2837 <para> 2837 <para>
2838 Note that this callback became non-atomic since the recent version. 2838 Note that this callback became non-atomic since the recent version.
2839 You can use schedule-related fucntions safely in this callback now. 2839 You can use schedule-related functions safely in this callback now.
2840 </para> 2840 </para>
2841 2841
2842 <para> 2842 <para>
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e8fbbc5566d..e5b051f0e27e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -882,13 +882,34 @@ W: http://ebtables.sourceforge.net/
882S: Maintained 882S: Maintained
883 883
884EDAC-CORE 884EDAC-CORE
885P: Doug Thompson 885P: Doug Thompson
886M: norsk5@xmission.com, dthompson@linuxnetworx.com 886M: norsk5@xmission.com, dthompson@linuxnetworx.com
887P: Dave Peterson 887P: Dave Peterson
888M: dsp@llnl.gov, dave_peterson@pobox.com 888M: dsp@llnl.gov, dave_peterson@pobox.com
889L: bluesmoke-devel@lists.sourceforge.net 889L: bluesmoke-devel@lists.sourceforge.net
890W: bluesmoke.sourceforge.net 890W: bluesmoke.sourceforge.net
891S: Maintained 891S: Maintained
892
893EDAC-E752X
894P: Dave Peterson
895M: dsp@llnl.gov, dave_peterson@pobox.com
896L: bluesmoke-devel@lists.sourceforge.net
897W: bluesmoke.sourceforge.net
898S: Maintained
899
900EDAC-E7XXX
901P: Dave Peterson
902M: dsp@llnl.gov, dave_peterson@pobox.com
903L: bluesmoke-devel@lists.sourceforge.net
904W: bluesmoke.sourceforge.net
905S: Maintained
906
907EDAC-R82600
908P: Tim Small
909M: tim@buttersideup.com
910L: bluesmoke-devel@lists.sourceforge.net
911W: bluesmoke.sourceforge.net
912S: Maintained
892 913
893EEPRO100 NETWORK DRIVER 914EEPRO100 NETWORK DRIVER
894P: Andrey V. Savochkin 915P: Andrey V. Savochkin
@@ -1039,6 +1060,15 @@ M: khc@pm.waw.pl
1039W: http://www.kernel.org/pub/linux/utils/net/hdlc/ 1060W: http://www.kernel.org/pub/linux/utils/net/hdlc/
1040S: Maintained 1061S: Maintained
1041 1062
1063GIGASET ISDN DRIVERS
1064P: Hansjoerg Lipp
1065M: hjlipp@web.de
1066P: Tilman Schmidt
1067M: tilman@imap.cc
1068L: gigaset307x-common@lists.sourceforge.net
1069W: http://gigaset307x.sourceforge.net/
1070S: Maintained
1071
1042HARDWARE MONITORING 1072HARDWARE MONITORING
1043P: Jean Delvare 1073P: Jean Delvare
1044M: khali@linux-fr.org 1074M: khali@linux-fr.org
@@ -2203,6 +2233,12 @@ M: p_gortmaker@yahoo.com
2203L: linux-kernel@vger.kernel.org 2233L: linux-kernel@vger.kernel.org
2204S: Maintained 2234S: Maintained
2205 2235
2236REAL TIME CLOCK (RTC) SUBSYSTEM
2237P: Alessandro Zummo
2238M: a.zummo@towertech.it
2239L: linux-kernel@vger.kernel.org
2240S: Maintained
2241
2206REISERFS FILE SYSTEM 2242REISERFS FILE SYSTEM
2207P: Hans Reiser 2243P: Hans Reiser
2208M: reiserfs-dev@namesys.com 2244M: reiserfs-dev@namesys.com
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index eedf41bf7057..9bef61b30367 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -25,6 +25,10 @@ config RWSEM_XCHGADD_ALGORITHM
25 bool 25 bool
26 default y 26 default y
27 27
28config GENERIC_FIND_NEXT_BIT
29 bool
30 default y
31
28config GENERIC_CALIBRATE_DELAY 32config GENERIC_CALIBRATE_DELAY
29 bool 33 bool
30 default y 34 default y
@@ -447,6 +451,10 @@ config ALPHA_IRONGATE
447 depends on ALPHA_NAUTILUS 451 depends on ALPHA_NAUTILUS
448 default y 452 default y
449 453
454config GENERIC_HWEIGHT
455 bool
456 default y if !ALPHA_EV6 && !ALPHA_EV67
457
450config ALPHA_AVANTI 458config ALPHA_AVANTI
451 bool 459 bool
452 depends on ALPHA_XL || ALPHA_AVANTI_CH 460 depends on ALPHA_XL || ALPHA_AVANTI_CH
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 7fb14f42a125..31afe3d91ac6 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -821,7 +821,6 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
821 affects all sorts of things, like timeval and itimerval. */ 821 affects all sorts of things, like timeval and itimerval. */
822 822
823extern struct timezone sys_tz; 823extern struct timezone sys_tz;
824extern int do_adjtimex(struct timex *);
825 824
826struct timeval32 825struct timeval32
827{ 826{
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index b4e5f8ff2b25..dd8769670596 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -34,6 +34,7 @@
34#include <linux/root_dev.h> 34#include <linux/root_dev.h>
35#include <linux/initrd.h> 35#include <linux/initrd.h>
36#include <linux/eisa.h> 36#include <linux/eisa.h>
37#include <linux/pfn.h>
37#ifdef CONFIG_MAGIC_SYSRQ 38#ifdef CONFIG_MAGIC_SYSRQ
38#include <linux/sysrq.h> 39#include <linux/sysrq.h>
39#include <linux/reboot.h> 40#include <linux/reboot.h>
@@ -42,7 +43,7 @@
42#include <asm/setup.h> 43#include <asm/setup.h>
43#include <asm/io.h> 44#include <asm/io.h>
44 45
45extern struct notifier_block *panic_notifier_list; 46extern struct atomic_notifier_head panic_notifier_list;
46static int alpha_panic_event(struct notifier_block *, unsigned long, void *); 47static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
47static struct notifier_block alpha_panic_block = { 48static struct notifier_block alpha_panic_block = {
48 alpha_panic_event, 49 alpha_panic_event,
@@ -241,9 +242,6 @@ reserve_std_resources(void)
241 request_resource(io, standard_io_resources+i); 242 request_resource(io, standard_io_resources+i);
242} 243}
243 244
244#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
245#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
246#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
247#define PFN_MAX PFN_DOWN(0x80000000) 245#define PFN_MAX PFN_DOWN(0x80000000)
248#define for_each_mem_cluster(memdesc, cluster, i) \ 246#define for_each_mem_cluster(memdesc, cluster, i) \
249 for ((cluster) = (memdesc)->cluster, (i) = 0; \ 247 for ((cluster) = (memdesc)->cluster, (i) = 0; \
@@ -472,11 +470,6 @@ page_is_ram(unsigned long pfn)
472 return 0; 470 return 0;
473} 471}
474 472
475#undef PFN_UP
476#undef PFN_DOWN
477#undef PFN_PHYS
478#undef PFN_MAX
479
480void __init 473void __init
481setup_arch(char **cmdline_p) 474setup_arch(char **cmdline_p)
482{ 475{
@@ -507,7 +500,8 @@ setup_arch(char **cmdline_p)
507 } 500 }
508 501
509 /* Register a call for panic conditions. */ 502 /* Register a call for panic conditions. */
510 notifier_chain_register(&panic_notifier_list, &alpha_panic_block); 503 atomic_notifier_chain_register(&panic_notifier_list,
504 &alpha_panic_block);
511 505
512#ifdef CONFIG_ALPHA_GENERIC 506#ifdef CONFIG_ALPHA_GENERIC
513 /* Assume that we've booted from SRM if we haven't booted from MILO. 507 /* Assume that we've booted from SRM if we haven't booted from MILO.
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 6b2921be1909..3859749810b4 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -314,10 +314,11 @@ time_init(void)
314 if (!est_cycle_freq) 314 if (!est_cycle_freq)
315 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); 315 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
316 316
317 cc1 = rpcc_after_update_in_progress(); 317 cc1 = rpcc();
318 318
319 /* Calibrate CPU clock -- attempt #2. */ 319 /* Calibrate CPU clock -- attempt #2. */
320 if (!est_cycle_freq) { 320 if (!est_cycle_freq) {
321 cc1 = rpcc_after_update_in_progress();
321 cc2 = rpcc_after_update_in_progress(); 322 cc2 = rpcc_after_update_in_progress();
322 est_cycle_freq = validate_cc_value(cc2 - cc1); 323 est_cycle_freq = validate_cc_value(cc2 - cc1);
323 cc1 = cc2; 324 cc1 = cc2;
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index a8e843dbcc23..1a5f71b9d8b1 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -84,7 +84,7 @@ $last_quad:
84 beq $2, $not_found # U : U L U L 84 beq $2, $not_found # U : U L U L
85 85
86$found_it: 86$found_it:
87#if defined(__alpha_fix__) && defined(__alpha_cix__) 87#ifdef CONFIG_ALPHA_EV67
88 /* 88 /*
89 * Since we are guaranteed to have set one of the bits, we don't 89 * Since we are guaranteed to have set one of the bits, we don't
90 * have to worry about coming back with a 0x40 out of cttz... 90 * have to worry about coming back with a 0x40 out of cttz...
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 97c4d9d7a4d5..05017ba34c3c 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,7 +4,7 @@
4 * (C) Copyright 1998 Linus Torvalds 4 * (C) Copyright 1998 Linus Torvalds
5 */ 5 */
6 6
7#if defined(__alpha_cix__) || defined(__alpha_fix__) 7#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
8#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); 8#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
9#else 9#else
10#define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); 10#define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val));
@@ -53,7 +53,7 @@ alpha_read_fp_reg (unsigned long reg)
53 return val; 53 return val;
54} 54}
55 55
56#if defined(__alpha_cix__) || defined(__alpha_fix__) 56#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
57#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); 57#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
58#else 58#else
59#define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); 59#define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val));
@@ -98,7 +98,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
98 } 98 }
99} 99}
100 100
101#if defined(__alpha_cix__) || defined(__alpha_fix__) 101#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
102#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); 102#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
103#else 103#else
104#define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); 104#define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val));
@@ -147,7 +147,7 @@ alpha_read_fp_reg_s (unsigned long reg)
147 return val; 147 return val;
148} 148}
149 149
150#if defined(__alpha_cix__) || defined(__alpha_fix__) 150#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
151#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); 151#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
152#else 152#else
153#define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); 153#define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val));
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 6d5251254f68..bf6b65c81bef 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -13,6 +13,7 @@
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/swap.h> 14#include <linux/swap.h>
15#include <linux/initrd.h> 15#include <linux/initrd.h>
16#include <linux/pfn.h>
16 17
17#include <asm/hwrpb.h> 18#include <asm/hwrpb.h>
18#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
@@ -27,9 +28,6 @@ bootmem_data_t node_bdata[MAX_NUMNODES];
27#define DBGDCONT(args...) 28#define DBGDCONT(args...)
28#endif 29#endif
29 30
30#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
31#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
32#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
33#define for_each_mem_cluster(memdesc, cluster, i) \ 31#define for_each_mem_cluster(memdesc, cluster, i) \
34 for ((cluster) = (memdesc)->cluster, (i) = 0; \ 32 for ((cluster) = (memdesc)->cluster, (i) = 0; \
35 (i) < (memdesc)->numclusters; (i)++, (cluster)++) 33 (i) < (memdesc)->numclusters; (i)++, (cluster)++)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0dd24ebdf6ac..9731b3f826ab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
8config ARM 8config ARM
9 bool 9 bool
10 default y 10 default y
11 select RTC_LIB
11 help 12 help
12 The ARM series is a line of low-power-consumption RISC chip designs 13 The ARM series is a line of low-power-consumption RISC chip designs
13 licensed by ARM Ltd and targeted at embedded applications and 14 licensed by ARM Ltd and targeted at embedded applications and
@@ -53,6 +54,10 @@ config RWSEM_GENERIC_SPINLOCK
53config RWSEM_XCHGADD_ALGORITHM 54config RWSEM_XCHGADD_ALGORITHM
54 bool 55 bool
55 56
57config GENERIC_HWEIGHT
58 bool
59 default y
60
56config GENERIC_CALIBRATE_DELAY 61config GENERIC_CALIBRATE_DELAY
57 bool 62 bool
58 default y 63 default y
@@ -835,6 +840,8 @@ source "drivers/usb/Kconfig"
835 840
836source "drivers/mmc/Kconfig" 841source "drivers/mmc/Kconfig"
837 842
843source "drivers/rtc/Kconfig"
844
838endmenu 845endmenu
839 846
840source "fs/Kconfig" 847source "fs/Kconfig"
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c
index e851d86c212c..35c9a64ac14c 100644
--- a/arch/arm/common/rtctime.c
+++ b/arch/arm/common/rtctime.c
@@ -20,6 +20,7 @@
20#include <linux/capability.h> 20#include <linux/capability.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/rtc.h>
23 24
24#include <asm/rtc.h> 25#include <asm/rtc.h>
25#include <asm/semaphore.h> 26#include <asm/semaphore.h>
@@ -42,89 +43,6 @@ static struct rtc_ops *rtc_ops;
42 43
43#define rtc_epoch 1900UL 44#define rtc_epoch 1900UL
44 45
45static const unsigned char days_in_month[] = {
46 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
47};
48
49#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400)
50#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400))
51
52static int month_days(unsigned int month, unsigned int year)
53{
54 return days_in_month[month] + (LEAP_YEAR(year) && month == 1);
55}
56
57/*
58 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
59 */
60void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
61{
62 int days, month, year;
63
64 days = time / 86400;
65 time -= days * 86400;
66
67 tm->tm_wday = (days + 4) % 7;
68
69 year = 1970 + days / 365;
70 days -= (year - 1970) * 365
71 + LEAPS_THRU_END_OF(year - 1)
72 - LEAPS_THRU_END_OF(1970 - 1);
73 if (days < 0) {
74 year -= 1;
75 days += 365 + LEAP_YEAR(year);
76 }
77 tm->tm_year = year - 1900;
78 tm->tm_yday = days + 1;
79
80 for (month = 0; month < 11; month++) {
81 int newdays;
82
83 newdays = days - month_days(month, year);
84 if (newdays < 0)
85 break;
86 days = newdays;
87 }
88 tm->tm_mon = month;
89 tm->tm_mday = days + 1;
90
91 tm->tm_hour = time / 3600;
92 time -= tm->tm_hour * 3600;
93 tm->tm_min = time / 60;
94 tm->tm_sec = time - tm->tm_min * 60;
95}
96EXPORT_SYMBOL(rtc_time_to_tm);
97
98/*
99 * Does the rtc_time represent a valid date/time?
100 */
101int rtc_valid_tm(struct rtc_time *tm)
102{
103 if (tm->tm_year < 70 ||
104 tm->tm_mon >= 12 ||
105 tm->tm_mday < 1 ||
106 tm->tm_mday > month_days(tm->tm_mon, tm->tm_year + 1900) ||
107 tm->tm_hour >= 24 ||
108 tm->tm_min >= 60 ||
109 tm->tm_sec >= 60)
110 return -EINVAL;
111
112 return 0;
113}
114EXPORT_SYMBOL(rtc_valid_tm);
115
116/*
117 * Convert Gregorian date to seconds since 01-01-1970 00:00:00.
118 */
119int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
120{
121 *time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
122 tm->tm_hour, tm->tm_min, tm->tm_sec);
123
124 return 0;
125}
126EXPORT_SYMBOL(rtc_tm_to_time);
127
128/* 46/*
129 * Calculate the next alarm time given the requested alarm time mask 47 * Calculate the next alarm time given the requested alarm time mask
130 * and the current time. 48 * and the current time.
@@ -151,13 +69,13 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc
151 } 69 }
152} 70}
153 71
154static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) 72static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
155{ 73{
156 memset(tm, 0, sizeof(struct rtc_time)); 74 memset(tm, 0, sizeof(struct rtc_time));
157 return ops->read_time(tm); 75 return ops->read_time(tm);
158} 76}
159 77
160static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) 78static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm)
161{ 79{
162 int ret; 80 int ret;
163 81
@@ -168,7 +86,7 @@ static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm)
168 return ret; 86 return ret;
169} 87}
170 88
171static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) 89static inline int rtc_arm_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
172{ 90{
173 int ret = -EINVAL; 91 int ret = -EINVAL;
174 if (ops->read_alarm) { 92 if (ops->read_alarm) {
@@ -178,7 +96,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
178 return ret; 96 return ret;
179} 97}
180 98
181static inline int rtc_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) 99static inline int rtc_arm_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
182{ 100{
183 int ret = -EINVAL; 101 int ret = -EINVAL;
184 if (ops->set_alarm) 102 if (ops->set_alarm)
@@ -266,7 +184,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
266 184
267 switch (cmd) { 185 switch (cmd) {
268 case RTC_ALM_READ: 186 case RTC_ALM_READ:
269 ret = rtc_read_alarm(ops, &alrm); 187 ret = rtc_arm_read_alarm(ops, &alrm);
270 if (ret) 188 if (ret)
271 break; 189 break;
272 ret = copy_to_user(uarg, &alrm.time, sizeof(tm)); 190 ret = copy_to_user(uarg, &alrm.time, sizeof(tm));
@@ -288,11 +206,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
288 alrm.time.tm_wday = -1; 206 alrm.time.tm_wday = -1;
289 alrm.time.tm_yday = -1; 207 alrm.time.tm_yday = -1;
290 alrm.time.tm_isdst = -1; 208 alrm.time.tm_isdst = -1;
291 ret = rtc_set_alarm(ops, &alrm); 209 ret = rtc_arm_set_alarm(ops, &alrm);
292 break; 210 break;
293 211
294 case RTC_RD_TIME: 212 case RTC_RD_TIME:
295 ret = rtc_read_time(ops, &tm); 213 ret = rtc_arm_read_time(ops, &tm);
296 if (ret) 214 if (ret)
297 break; 215 break;
298 ret = copy_to_user(uarg, &tm, sizeof(tm)); 216 ret = copy_to_user(uarg, &tm, sizeof(tm));
@@ -310,7 +228,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
310 ret = -EFAULT; 228 ret = -EFAULT;
311 break; 229 break;
312 } 230 }
313 ret = rtc_set_time(ops, &tm); 231 ret = rtc_arm_set_time(ops, &tm);
314 break; 232 break;
315 233
316 case RTC_EPOCH_SET: 234 case RTC_EPOCH_SET:
@@ -341,11 +259,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
341 ret = -EFAULT; 259 ret = -EFAULT;
342 break; 260 break;
343 } 261 }
344 ret = rtc_set_alarm(ops, &alrm); 262 ret = rtc_arm_set_alarm(ops, &alrm);
345 break; 263 break;
346 264
347 case RTC_WKALM_RD: 265 case RTC_WKALM_RD:
348 ret = rtc_read_alarm(ops, &alrm); 266 ret = rtc_arm_read_alarm(ops, &alrm);
349 if (ret) 267 if (ret)
350 break; 268 break;
351 ret = copy_to_user(uarg, &alrm, sizeof(alrm)); 269 ret = copy_to_user(uarg, &alrm, sizeof(alrm));
@@ -435,7 +353,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo
435 struct rtc_time tm; 353 struct rtc_time tm;
436 char *p = page; 354 char *p = page;
437 355
438 if (rtc_read_time(ops, &tm) == 0) { 356 if (rtc_arm_read_time(ops, &tm) == 0) {
439 p += sprintf(p, 357 p += sprintf(p,
440 "rtc_time\t: %02d:%02d:%02d\n" 358 "rtc_time\t: %02d:%02d:%02d\n"
441 "rtc_date\t: %04d-%02d-%02d\n" 359 "rtc_date\t: %04d-%02d-%02d\n"
@@ -445,7 +363,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo
445 rtc_epoch); 363 rtc_epoch);
446 } 364 }
447 365
448 if (rtc_read_alarm(ops, &alrm) == 0) { 366 if (rtc_arm_read_alarm(ops, &alrm) == 0) {
449 p += sprintf(p, "alrm_time\t: "); 367 p += sprintf(p, "alrm_time\t: ");
450 if ((unsigned int)alrm.time.tm_hour <= 24) 368 if ((unsigned int)alrm.time.tm_hour <= 24)
451 p += sprintf(p, "%02d:", alrm.time.tm_hour); 369 p += sprintf(p, "%02d:", alrm.time.tm_hour);
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 838e435e4922..cab355c0c1f7 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -236,7 +236,7 @@
236 236
237 237
238/* 238/*
239 * Abort preanble and completion macros. 239 * Abort preamble and completion macros.
240 * If a fixup handler is required then those macros must surround it. 240 * If a fixup handler is required then those macros must surround it.
241 * It is assumed that the fixup code will handle the private part of 241 * It is assumed that the fixup code will handle the private part of
242 * the exit macro. 242 * the exit macro.
diff --git a/arch/arm/mach-footbridge/time.c b/arch/arm/mach-footbridge/time.c
index 2c64a0b0502e..5d02e95dede3 100644
--- a/arch/arm/mach-footbridge/time.c
+++ b/arch/arm/mach-footbridge/time.c
@@ -34,27 +34,12 @@ static int rtc_base;
34static unsigned long __init get_isa_cmos_time(void) 34static unsigned long __init get_isa_cmos_time(void)
35{ 35{
36 unsigned int year, mon, day, hour, min, sec; 36 unsigned int year, mon, day, hour, min, sec;
37 int i;
38 37
39 // check to see if the RTC makes sense..... 38 // check to see if the RTC makes sense.....
40 if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0) 39 if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0)
41 return mktime(1970, 1, 1, 0, 0, 0); 40 return mktime(1970, 1, 1, 0, 0, 0);
42 41
43 /* The Linux interpretation of the CMOS clock register contents: 42 do {
44 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
45 * RTC registers show the second which has precisely just started.
46 * Let's hope other operating systems interpret the RTC the same way.
47 */
48 /* read RTC exactly on falling edge of update flag */
49 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
50 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
51 break;
52
53 for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
54 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
55 break;
56
57 do { /* Isn't this overkill ? UIP above should guarantee consistency */
58 sec = CMOS_READ(RTC_SECONDS); 43 sec = CMOS_READ(RTC_SECONDS);
59 min = CMOS_READ(RTC_MINUTES); 44 min = CMOS_READ(RTC_MINUTES);
60 hour = CMOS_READ(RTC_HOURS); 45 hour = CMOS_READ(RTC_HOURS);
diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c
index 3c22c16b38bf..bc07f52a6fd7 100644
--- a/arch/arm/mach-integrator/time.c
+++ b/arch/arm/mach-integrator/time.c
@@ -40,13 +40,13 @@ static int integrator_set_rtc(void)
40 return 1; 40 return 1;
41} 41}
42 42
43static int rtc_read_alarm(struct rtc_wkalrm *alrm) 43static int integrator_rtc_read_alarm(struct rtc_wkalrm *alrm)
44{ 44{
45 rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); 45 rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time);
46 return 0; 46 return 0;
47} 47}
48 48
49static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) 49static inline int integrator_rtc_set_alarm(struct rtc_wkalrm *alrm)
50{ 50{
51 unsigned long time; 51 unsigned long time;
52 int ret; 52 int ret;
@@ -62,7 +62,7 @@ static inline int rtc_set_alarm(struct rtc_wkalrm *alrm)
62 return ret; 62 return ret;
63} 63}
64 64
65static int rtc_read_time(struct rtc_time *tm) 65static int integrator_rtc_read_time(struct rtc_time *tm)
66{ 66{
67 rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); 67 rtc_time_to_tm(readl(rtc_base + RTC_DR), tm);
68 return 0; 68 return 0;
@@ -76,7 +76,7 @@ static int rtc_read_time(struct rtc_time *tm)
76 * edge of the 1Hz clock, we must write the time one second 76 * edge of the 1Hz clock, we must write the time one second
77 * in advance. 77 * in advance.
78 */ 78 */
79static inline int rtc_set_time(struct rtc_time *tm) 79static inline int integrator_rtc_set_time(struct rtc_time *tm)
80{ 80{
81 unsigned long time; 81 unsigned long time;
82 int ret; 82 int ret;
@@ -90,10 +90,10 @@ static inline int rtc_set_time(struct rtc_time *tm)
90 90
91static struct rtc_ops rtc_ops = { 91static struct rtc_ops rtc_ops = {
92 .owner = THIS_MODULE, 92 .owner = THIS_MODULE,
93 .read_time = rtc_read_time, 93 .read_time = integrator_rtc_read_time,
94 .set_time = rtc_set_time, 94 .set_time = integrator_rtc_set_time,
95 .read_alarm = rtc_read_alarm, 95 .read_alarm = integrator_rtc_read_alarm,
96 .set_alarm = rtc_set_alarm, 96 .set_alarm = integrator_rtc_set_alarm,
97}; 97};
98 98
99static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id, 99static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id,
diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c
index 60d5f8a3339c..7520e602d7a2 100644
--- a/arch/arm/mach-omap1/board-netstar.c
+++ b/arch/arm/mach-omap1/board-netstar.c
@@ -141,7 +141,7 @@ static int __init netstar_late_init(void)
141 /* TODO: Setup front panel switch here */ 141 /* TODO: Setup front panel switch here */
142 142
143 /* Setup panic notifier */ 143 /* Setup panic notifier */
144 notifier_chain_register(&panic_notifier_list, &panic_block); 144 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
145 145
146 return 0; 146 return 0;
147} 147}
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index bfd5fdd1a875..52e4a9d69642 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -235,7 +235,7 @@ static struct notifier_block panic_block = {
235static int __init voiceblue_setup(void) 235static int __init voiceblue_setup(void)
236{ 236{
237 /* Setup panic notifier */ 237 /* Setup panic notifier */
238 notifier_chain_register(&panic_notifier_list, &panic_block); 238 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
239 239
240 return 0; 240 return 0;
241} 241}
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 9b48a90aefce..5efa84749f37 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -319,6 +319,11 @@ void __init pxa_set_ficp_info(struct pxaficp_platform_data *info)
319 pxaficp_device.dev.platform_data = info; 319 pxaficp_device.dev.platform_data = info;
320} 320}
321 321
322static struct platform_device pxartc_device = {
323 .name = "sa1100-rtc",
324 .id = -1,
325};
326
322static struct platform_device *devices[] __initdata = { 327static struct platform_device *devices[] __initdata = {
323 &pxamci_device, 328 &pxamci_device,
324 &udc_device, 329 &udc_device,
@@ -329,6 +334,7 @@ static struct platform_device *devices[] __initdata = {
329 &pxaficp_device, 334 &pxaficp_device,
330 &i2c_device, 335 &i2c_device,
331 &i2s_device, 336 &i2s_device,
337 &pxartc_device,
332}; 338};
333 339
334static int __init pxa_init(void) 340static int __init pxa_init(void)
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 2abdc419e984..9ea71551fc04 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -324,6 +324,11 @@ void sa11x0_set_irda_data(struct irda_platform_data *irda)
324 sa11x0ir_device.dev.platform_data = irda; 324 sa11x0ir_device.dev.platform_data = irda;
325} 325}
326 326
327static struct platform_device sa11x0rtc_device = {
328 .name = "sa1100-rtc",
329 .id = -1,
330};
331
327static struct platform_device *sa11x0_devices[] __initdata = { 332static struct platform_device *sa11x0_devices[] __initdata = {
328 &sa11x0udc_device, 333 &sa11x0udc_device,
329 &sa11x0uart1_device, 334 &sa11x0uart1_device,
@@ -333,6 +338,7 @@ static struct platform_device *sa11x0_devices[] __initdata = {
333 &sa11x0pcmcia_device, 338 &sa11x0pcmcia_device,
334 &sa11x0fb_device, 339 &sa11x0fb_device,
335 &sa11x0mtd_device, 340 &sa11x0mtd_device,
341 &sa11x0rtc_device,
336}; 342};
337 343
338static int __init sa1100_init(void) 344static int __init sa1100_init(void)
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index dee23d87fc5a..cf4ebf4c274d 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -41,6 +41,10 @@ config RWSEM_GENERIC_SPINLOCK
41config RWSEM_XCHGADD_ALGORITHM 41config RWSEM_XCHGADD_ALGORITHM
42 bool 42 bool
43 43
44config GENERIC_HWEIGHT
45 bool
46 default y
47
44config GENERIC_CALIBRATE_DELAY 48config GENERIC_CALIBRATE_DELAY
45 bool 49 bool
46 default y 50 default y
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index 5847ea5d7747..a79de041b50e 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -34,7 +34,7 @@
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36#include <asm/unistd.h> 36#include <asm/unistd.h>
37#include <asm/semaphore.h> 37#include <linux/mutex.h>
38 38
39#include "ptrace.h" 39#include "ptrace.h"
40 40
@@ -207,19 +207,19 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err)
207 die(str, regs, err); 207 die(str, regs, err);
208} 208}
209 209
210static DECLARE_MUTEX(undef_sem); 210static DEFINE_MUTEX(undef_mutex);
211static int (*undef_hook)(struct pt_regs *); 211static int (*undef_hook)(struct pt_regs *);
212 212
213int request_undef_hook(int (*fn)(struct pt_regs *)) 213int request_undef_hook(int (*fn)(struct pt_regs *))
214{ 214{
215 int ret = -EBUSY; 215 int ret = -EBUSY;
216 216
217 down(&undef_sem); 217 mutex_lock(&undef_mutex);
218 if (undef_hook == NULL) { 218 if (undef_hook == NULL) {
219 undef_hook = fn; 219 undef_hook = fn;
220 ret = 0; 220 ret = 0;
221 } 221 }
222 up(&undef_sem); 222 mutex_unlock(&undef_mutex);
223 223
224 return ret; 224 return ret;
225} 225}
@@ -228,12 +228,12 @@ int release_undef_hook(int (*fn)(struct pt_regs *))
228{ 228{
229 int ret = -EINVAL; 229 int ret = -EINVAL;
230 230
231 down(&undef_sem); 231 mutex_lock(&undef_mutex);
232 if (undef_hook == fn) { 232 if (undef_hook == fn) {
233 undef_hook = NULL; 233 undef_hook = NULL;
234 ret = 0; 234 ret = 0;
235 } 235 }
236 up(&undef_sem); 236 mutex_unlock(&undef_mutex);
237 237
238 return ret; 238 return ret;
239} 239}
diff --git a/arch/arm26/mm/init.c b/arch/arm26/mm/init.c
index e3ecaa453747..7da8a5205678 100644
--- a/arch/arm26/mm/init.c
+++ b/arch/arm26/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/initrd.h> 23#include <linux/initrd.h>
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/pfn.h>
26 27
27#include <asm/segment.h> 28#include <asm/segment.h>
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
@@ -101,12 +102,6 @@ struct node_info {
101 int bootmap_pages; 102 int bootmap_pages;
102}; 103};
103 104
104#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
105#define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT)
106#define PFN_SIZE(x) ((x) >> PAGE_SHIFT)
107#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
108 (((unsigned long)(s)) & PAGE_MASK))
109
110/* 105/*
111 * FIXME: We really want to avoid allocating the bootmap bitmap 106 * FIXME: We really want to avoid allocating the bootmap bitmap
112 * over the top of the initrd. Hopefully, this is located towards 107 * over the top of the initrd. Hopefully, this is located towards
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index b83261949737..856b665020e7 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -16,6 +16,14 @@ config RWSEM_GENERIC_SPINLOCK
16config RWSEM_XCHGADD_ALGORITHM 16config RWSEM_XCHGADD_ALGORITHM
17 bool 17 bool
18 18
19config GENERIC_FIND_NEXT_BIT
20 bool
21 default y
22
23config GENERIC_HWEIGHT
24 bool
25 default y
26
19config GENERIC_CALIBRATE_DELAY 27config GENERIC_CALIBRATE_DELAY
20 bool 28 bool
21 default y 29 default y
diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c
index 1ba57efff60d..619a6eefd893 100644
--- a/arch/cris/kernel/setup.c
+++ b/arch/cris/kernel/setup.c
@@ -18,6 +18,7 @@
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/tty.h> 19#include <linux/tty.h>
20#include <linux/utsname.h> 20#include <linux/utsname.h>
21#include <linux/pfn.h>
21 22
22#include <asm/setup.h> 23#include <asm/setup.h>
23 24
@@ -88,10 +89,6 @@ setup_arch(char **cmdline_p)
88 init_mm.end_data = (unsigned long) &_edata; 89 init_mm.end_data = (unsigned long) &_edata;
89 init_mm.brk = (unsigned long) &_end; 90 init_mm.brk = (unsigned long) &_end;
90 91
91#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
92#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
93#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
94
95 /* min_low_pfn points to the start of DRAM, start_pfn points 92 /* min_low_pfn points to the start of DRAM, start_pfn points
96 * to the first DRAM pages after the kernel, and max_low_pfn 93 * to the first DRAM pages after the kernel, and max_low_pfn
97 * to the end of DRAM. 94 * to the end of DRAM.
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index e08383712370..95a3892b8d1b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -17,6 +17,10 @@ config GENERIC_FIND_NEXT_BIT
17 bool 17 bool
18 default y 18 default y
19 19
20config GENERIC_HWEIGHT
21 bool
22 default y
23
20config GENERIC_CALIBRATE_DELAY 24config GENERIC_CALIBRATE_DELAY
21 bool 25 bool
22 default n 26 default n
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index f2c6866fc88b..1530a4111e6d 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx)
54 /* find the first unallocated context number 54 /* find the first unallocated context number
55 * - 0 is reserved for the kernel 55 * - 0 is reserved for the kernel
56 */ 56 */
57 cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1); 57 cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
58 if (cxn < NR_CXN) { 58 if (cxn < NR_CXN) {
59 set_bit(cxn, &cxn_bitmap); 59 set_bit(cxn, cxn_bitmap);
60 } 60 }
61 else { 61 else {
62 /* none remaining - need to steal someone else's cxn */ 62 /* none remaining - need to steal someone else's cxn */
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm)
138 cxn_pinned = -1; 138 cxn_pinned = -1;
139 139
140 list_del_init(&ctx->id_link); 140 list_del_init(&ctx->id_link);
141 clear_bit(ctx->id, &cxn_bitmap); 141 clear_bit(ctx->id, cxn_bitmap);
142 __flush_tlb_mm(ctx->id); 142 __flush_tlb_mm(ctx->id);
143 ctx->id = 0; 143 ctx->id = 0;
144 } 144 }
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 98308b018a35..cabf0bfffc53 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -29,6 +29,14 @@ config RWSEM_XCHGADD_ALGORITHM
29 bool 29 bool
30 default n 30 default n
31 31
32config GENERIC_FIND_NEXT_BIT
33 bool
34 default y
35
36config GENERIC_HWEIGHT
37 bool
38 default y
39
32config GENERIC_CALIBRATE_DELAY 40config GENERIC_CALIBRATE_DELAY
33 bool 41 bool
34 default y 42 default y
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b008fb0cd7b7..f17bd1d2707e 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -37,6 +37,10 @@ config GENERIC_IOMAP
37 bool 37 bool
38 default y 38 default y
39 39
40config GENERIC_HWEIGHT
41 bool
42 default y
43
40config ARCH_MAY_HAVE_PC_FDC 44config ARCH_MAY_HAVE_PC_FDC
41 bool 45 bool
42 default y 46 default y
@@ -227,6 +231,15 @@ config SCHED_SMT
227 cost of slightly increased overhead in some places. If unsure say 231 cost of slightly increased overhead in some places. If unsure say
228 N here. 232 N here.
229 233
234config SCHED_MC
235 bool "Multi-core scheduler support"
236 depends on SMP
237 default y
238 help
239 Multi-core scheduler support improves the CPU scheduler's decision
240 making when dealing with multi-core CPU chips at a cost of slightly
241 increased overhead in some places. If unsure say N here.
242
230source "kernel/Kconfig.preempt" 243source "kernel/Kconfig.preempt"
231 244
232config X86_UP_APIC 245config X86_UP_APIC
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index c848a5b30391..3e4adb1e2244 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -103,7 +103,7 @@ AFLAGS += $(mflags-y)
103boot := arch/i386/boot 103boot := arch/i386/boot
104 104
105PHONY += zImage bzImage compressed zlilo bzlilo \ 105PHONY += zImage bzImage compressed zlilo bzlilo \
106 zdisk bzdisk fdimage fdimage144 fdimage288 install 106 zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
107 107
108all: bzImage 108all: bzImage
109 109
@@ -122,7 +122,7 @@ zlilo bzlilo: vmlinux
122zdisk bzdisk: vmlinux 122zdisk bzdisk: vmlinux
123 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk 123 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
124 124
125fdimage fdimage144 fdimage288: vmlinux 125fdimage fdimage144 fdimage288 isoimage: vmlinux
126 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ 126 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
127 127
128install: 128install:
@@ -139,6 +139,9 @@ define archhelp
139 echo ' install to $$(INSTALL_PATH) and run lilo' 139 echo ' install to $$(INSTALL_PATH) and run lilo'
140 echo ' bzdisk - Create a boot floppy in /dev/fd0' 140 echo ' bzdisk - Create a boot floppy in /dev/fd0'
141 echo ' fdimage - Create a boot floppy image' 141 echo ' fdimage - Create a boot floppy image'
142 echo ' isoimage - Create a boot CD-ROM image'
142endef 143endef
143 144
144CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf 145CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
146 arch/$(ARCH)/boot/image.iso \
147 arch/$(ARCH)/boot/mtools.conf
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index f136752563b1..33e55476381b 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -62,8 +62,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE
62$(obj)/compressed/vmlinux: FORCE 62$(obj)/compressed/vmlinux: FORCE
63 $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ 63 $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@
64 64
65# Set this if you want to pass append arguments to the zdisk/fdimage kernel 65# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
66FDARGS = 66FDARGS =
67# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
68FDINITRD =
69
70image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
67 71
68$(obj)/mtools.conf: $(src)/mtools.conf.in 72$(obj)/mtools.conf: $(src)/mtools.conf.in
69 sed -e 's|@OBJ@|$(obj)|g' < $< > $@ 73 sed -e 's|@OBJ@|$(obj)|g' < $< > $@
@@ -72,8 +76,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
72zdisk: $(BOOTIMAGE) $(obj)/mtools.conf 76zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
73 MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync 77 MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
74 syslinux /dev/fd0 ; sync 78 syslinux /dev/fd0 ; sync
75 echo 'default linux $(FDARGS)' | \ 79 echo '$(image_cmdline)' | \
76 MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg 80 MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg
81 if [ -f '$(FDINITRD)' ] ; then \
82 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
83 fi
77 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync 84 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync
78 85
79# These require being root or having syslinux 2.02 or higher installed 86# These require being root or having syslinux 2.02 or higher installed
@@ -81,18 +88,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
81 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 88 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
82 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync 89 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
83 syslinux $(obj)/fdimage ; sync 90 syslinux $(obj)/fdimage ; sync
84 echo 'default linux $(FDARGS)' | \ 91 echo '$(image_cmdline)' | \
85 MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg 92 MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg
93 if [ -f '$(FDINITRD)' ] ; then \
94 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
95 fi
86 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync 96 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync
87 97
88fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf 98fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
89 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 99 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
90 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync 100 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
91 syslinux $(obj)/fdimage ; sync 101 syslinux $(obj)/fdimage ; sync
92 echo 'default linux $(FDARGS)' | \ 102 echo '$(image_cmdline)' | \
93 MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg 103 MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg
104 if [ -f '$(FDINITRD)' ] ; then \
105 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
106 fi
94 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync 107 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync
95 108
109isoimage: $(BOOTIMAGE)
110 -rm -rf $(obj)/isoimage
111 mkdir $(obj)/isoimage
112 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
113 $(obj)/isoimage
114 cp $(BOOTIMAGE) $(obj)/isoimage/linux
115 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
116 if [ -f '$(FDINITRD)' ] ; then \
117 cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
118 fi
119 mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
120 -no-emul-boot -boot-load-size 4 -boot-info-table \
121 $(obj)/isoimage
122 rm -rf $(obj)/isoimage
123
96zlilo: $(BOOTIMAGE) 124zlilo: $(BOOTIMAGE)
97 if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi 125 if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
98 if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi 126 if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 2ac40c8244c4..0000a2674537 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1924,6 +1924,7 @@ skip10: movb %ah, %al
1924 ret 1924 ret
1925 1925
1926store_edid: 1926store_edid:
1927#ifdef CONFIG_FB_FIRMWARE_EDID
1927 pushw %es # just save all registers 1928 pushw %es # just save all registers
1928 pushw %ax 1929 pushw %ax
1929 pushw %bx 1930 pushw %bx
@@ -1954,6 +1955,7 @@ store_edid:
1954 popw %bx 1955 popw %bx
1955 popw %ax 1956 popw %ax
1956 popw %es 1957 popw %es
1958#endif
1957 ret 1959 ret
1958 1960
1959# VIDEO_SELECT-only variables 1961# VIDEO_SELECT-only variables
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index f1a21945963d..033066176b3e 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -668,10 +668,10 @@ unsigned long __init acpi_find_rsdp(void)
668 unsigned long rsdp_phys = 0; 668 unsigned long rsdp_phys = 0;
669 669
670 if (efi_enabled) { 670 if (efi_enabled) {
671 if (efi.acpi20) 671 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
672 return __pa(efi.acpi20); 672 return efi.acpi20;
673 else if (efi.acpi) 673 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
674 return __pa(efi.acpi); 674 return efi.acpi;
675 } 675 }
676 /* 676 /*
677 * Scan memory looking for the RSDP signature. First search EBDA (low 677 * Scan memory looking for the RSDP signature. First search EBDA (low
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 7e3d6b6a4e96..a06a49075f10 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -266,7 +266,7 @@ static void __init early_cpu_detect(void)
266void __cpuinit generic_identify(struct cpuinfo_x86 * c) 266void __cpuinit generic_identify(struct cpuinfo_x86 * c)
267{ 267{
268 u32 tfms, xlvl; 268 u32 tfms, xlvl;
269 int junk; 269 int ebx;
270 270
271 if (have_cpuid_p()) { 271 if (have_cpuid_p()) {
272 /* Get vendor name */ 272 /* Get vendor name */
@@ -282,7 +282,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
282 /* Intel-defined flags: level 0x00000001 */ 282 /* Intel-defined flags: level 0x00000001 */
283 if ( c->cpuid_level >= 0x00000001 ) { 283 if ( c->cpuid_level >= 0x00000001 ) {
284 u32 capability, excap; 284 u32 capability, excap;
285 cpuid(0x00000001, &tfms, &junk, &excap, &capability); 285 cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
286 c->x86_capability[0] = capability; 286 c->x86_capability[0] = capability;
287 c->x86_capability[4] = excap; 287 c->x86_capability[4] = excap;
288 c->x86 = (tfms >> 8) & 15; 288 c->x86 = (tfms >> 8) & 15;
@@ -292,6 +292,11 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
292 if (c->x86 >= 0x6) 292 if (c->x86 >= 0x6)
293 c->x86_model += ((tfms >> 16) & 0xF) << 4; 293 c->x86_model += ((tfms >> 16) & 0xF) << 4;
294 c->x86_mask = tfms & 15; 294 c->x86_mask = tfms & 15;
295#ifdef CONFIG_SMP
296 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
297#else
298 c->apicid = (ebx >> 24) & 0xFF;
299#endif
295 } else { 300 } else {
296 /* Have CPUID level 0 only - unheard of */ 301 /* Have CPUID level 0 only - unheard of */
297 c->x86 = 4; 302 c->x86 = 4;
@@ -474,7 +479,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
474 479
475 cpuid(1, &eax, &ebx, &ecx, &edx); 480 cpuid(1, &eax, &ebx, &ecx, &edx);
476 481
477 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
478 482
479 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 483 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
480 return; 484 return;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e5bc06480ff9..712a26bd4457 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -40,6 +40,7 @@
40 40
41#ifdef CONFIG_X86_POWERNOW_K8_ACPI 41#ifdef CONFIG_X86_POWERNOW_K8_ACPI
42#include <linux/acpi.h> 42#include <linux/acpi.h>
43#include <linux/mutex.h>
43#include <acpi/processor.h> 44#include <acpi/processor.h>
44#endif 45#endif
45 46
@@ -49,7 +50,7 @@
49#include "powernow-k8.h" 50#include "powernow-k8.h"
50 51
51/* serialize freq changes */ 52/* serialize freq changes */
52static DECLARE_MUTEX(fidvid_sem); 53static DEFINE_MUTEX(fidvid_mutex);
53 54
54static struct powernow_k8_data *powernow_data[NR_CPUS]; 55static struct powernow_k8_data *powernow_data[NR_CPUS];
55 56
@@ -943,17 +944,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
943 if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) 944 if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
944 goto err_out; 945 goto err_out;
945 946
946 down(&fidvid_sem); 947 mutex_lock(&fidvid_mutex);
947 948
948 powernow_k8_acpi_pst_values(data, newstate); 949 powernow_k8_acpi_pst_values(data, newstate);
949 950
950 if (transition_frequency(data, newstate)) { 951 if (transition_frequency(data, newstate)) {
951 printk(KERN_ERR PFX "transition frequency failed\n"); 952 printk(KERN_ERR PFX "transition frequency failed\n");
952 ret = 1; 953 ret = 1;
953 up(&fidvid_sem); 954 mutex_unlock(&fidvid_mutex);
954 goto err_out; 955 goto err_out;
955 } 956 }
956 up(&fidvid_sem); 957 mutex_unlock(&fidvid_mutex);
957 958
958 pol->cur = find_khz_freq_from_fid(data->currfid); 959 pol->cur = find_khz_freq_from_fid(data->currfid);
959 ret = 0; 960 ret = 0;
@@ -1094,10 +1095,15 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
1094 1095
1095static unsigned int powernowk8_get (unsigned int cpu) 1096static unsigned int powernowk8_get (unsigned int cpu)
1096{ 1097{
1097 struct powernow_k8_data *data = powernow_data[cpu]; 1098 struct powernow_k8_data *data;
1098 cpumask_t oldmask = current->cpus_allowed; 1099 cpumask_t oldmask = current->cpus_allowed;
1099 unsigned int khz = 0; 1100 unsigned int khz = 0;
1100 1101
1102 data = powernow_data[first_cpu(cpu_core_map[cpu])];
1103
1104 if (!data)
1105 return -EINVAL;
1106
1101 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 1107 set_cpus_allowed(current, cpumask_of_cpu(cpu));
1102 if (smp_processor_id() != cpu) { 1108 if (smp_processor_id() != cpu) {
1103 printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); 1109 printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 00ea899c17e1..79a7c5c87edc 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -182,10 +182,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
182 182
183static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); 183static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
184 184
185#ifndef for_each_cpu_mask
186#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
187#endif
188
189#ifdef CONFIG_SMP 185#ifdef CONFIG_SMP
190static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) 186static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
191{ 187{
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ce61921369e5..9df87b03612c 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
173 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 173 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
176 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
177#ifdef CONFIG_SMP
178 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
179#endif
176 180
177 if (c->cpuid_level > 3) { 181 if (c->cpuid_level > 3) {
178 static int is_initialized; 182 static int is_initialized;
@@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
205 break; 209 break;
206 case 2: 210 case 2:
207 new_l2 = this_leaf.size/1024; 211 new_l2 = this_leaf.size/1024;
212 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
213 index_msb = get_count_order(num_threads_sharing);
214 l2_id = c->apicid >> index_msb;
208 break; 215 break;
209 case 3: 216 case 3:
210 new_l3 = this_leaf.size/1024; 217 new_l3 = this_leaf.size/1024;
218 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
219 index_msb = get_count_order(num_threads_sharing);
220 l3_id = c->apicid >> index_msb;
211 break; 221 break;
212 default: 222 default:
213 break; 223 break;
@@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
215 } 225 }
216 } 226 }
217 } 227 }
218 if (c->cpuid_level > 1) { 228 /*
229 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
230 * trace cache
231 */
232 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
219 /* supports eax=2 call */ 233 /* supports eax=2 call */
220 int i, j, n; 234 int i, j, n;
221 int regs[4]; 235 int regs[4];
222 unsigned char *dp = (unsigned char *)regs; 236 unsigned char *dp = (unsigned char *)regs;
237 int only_trace = 0;
238
239 if (num_cache_leaves != 0 && c->x86 == 15)
240 only_trace = 1;
223 241
224 /* Number of times to iterate */ 242 /* Number of times to iterate */
225 n = cpuid_eax(2) & 0xFF; 243 n = cpuid_eax(2) & 0xFF;
@@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
241 while (cache_table[k].descriptor != 0) 259 while (cache_table[k].descriptor != 0)
242 { 260 {
243 if (cache_table[k].descriptor == des) { 261 if (cache_table[k].descriptor == des) {
262 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
263 break;
244 switch (cache_table[k].cache_type) { 264 switch (cache_table[k].cache_type) {
245 case LVL_1_INST: 265 case LVL_1_INST:
246 l1i += cache_table[k].size; 266 l1i += cache_table[k].size;
@@ -266,34 +286,45 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
266 } 286 }
267 } 287 }
268 } 288 }
289 }
269 290
270 if (new_l1d) 291 if (new_l1d)
271 l1d = new_l1d; 292 l1d = new_l1d;
272 293
273 if (new_l1i) 294 if (new_l1i)
274 l1i = new_l1i; 295 l1i = new_l1i;
275 296
276 if (new_l2) 297 if (new_l2) {
277 l2 = new_l2; 298 l2 = new_l2;
299#ifdef CONFIG_SMP
300 cpu_llc_id[cpu] = l2_id;
301#endif
302 }
278 303
279 if (new_l3) 304 if (new_l3) {
280 l3 = new_l3; 305 l3 = new_l3;
306#ifdef CONFIG_SMP
307 cpu_llc_id[cpu] = l3_id;
308#endif
309 }
281 310
282 if ( trace ) 311 if (trace)
283 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); 312 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
284 else if ( l1i ) 313 else if ( l1i )
285 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); 314 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
286 if ( l1d )
287 printk(", L1 D cache: %dK\n", l1d);
288 else
289 printk("\n");
290 if ( l2 )
291 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
292 if ( l3 )
293 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
294 315
295 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); 316 if (l1d)
296 } 317 printk(", L1 D cache: %dK\n", l1d);
318 else
319 printk("\n");
320
321 if (l2)
322 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
323
324 if (l3)
325 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
326
327 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
297 328
298 return l2; 329 return l2;
299} 330}
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 3b4618bed70d..fff90bda4733 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/smp.h> 37#include <linux/smp.h>
38#include <linux/cpu.h> 38#include <linux/cpu.h>
39#include <linux/mutex.h>
39 40
40#include <asm/mtrr.h> 41#include <asm/mtrr.h>
41 42
@@ -47,7 +48,7 @@
47u32 num_var_ranges = 0; 48u32 num_var_ranges = 0;
48 49
49unsigned int *usage_table; 50unsigned int *usage_table;
50static DECLARE_MUTEX(mtrr_sem); 51static DEFINE_MUTEX(mtrr_mutex);
51 52
52u32 size_or_mask, size_and_mask; 53u32 size_or_mask, size_and_mask;
53 54
@@ -333,7 +334,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
333 /* No CPU hotplug when we change MTRR entries */ 334 /* No CPU hotplug when we change MTRR entries */
334 lock_cpu_hotplug(); 335 lock_cpu_hotplug();
335 /* Search for existing MTRR */ 336 /* Search for existing MTRR */
336 down(&mtrr_sem); 337 mutex_lock(&mtrr_mutex);
337 for (i = 0; i < num_var_ranges; ++i) { 338 for (i = 0; i < num_var_ranges; ++i) {
338 mtrr_if->get(i, &lbase, &lsize, &ltype); 339 mtrr_if->get(i, &lbase, &lsize, &ltype);
339 if (base >= lbase + lsize) 340 if (base >= lbase + lsize)
@@ -371,7 +372,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
371 printk(KERN_INFO "mtrr: no more MTRRs available\n"); 372 printk(KERN_INFO "mtrr: no more MTRRs available\n");
372 error = i; 373 error = i;
373 out: 374 out:
374 up(&mtrr_sem); 375 mutex_unlock(&mtrr_mutex);
375 unlock_cpu_hotplug(); 376 unlock_cpu_hotplug();
376 return error; 377 return error;
377} 378}
@@ -464,7 +465,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
464 max = num_var_ranges; 465 max = num_var_ranges;
465 /* No CPU hotplug when we change MTRR entries */ 466 /* No CPU hotplug when we change MTRR entries */
466 lock_cpu_hotplug(); 467 lock_cpu_hotplug();
467 down(&mtrr_sem); 468 mutex_lock(&mtrr_mutex);
468 if (reg < 0) { 469 if (reg < 0) {
469 /* Search for existing MTRR */ 470 /* Search for existing MTRR */
470 for (i = 0; i < max; ++i) { 471 for (i = 0; i < max; ++i) {
@@ -503,7 +504,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
503 set_mtrr(reg, 0, 0, 0); 504 set_mtrr(reg, 0, 0, 0);
504 error = reg; 505 error = reg;
505 out: 506 out:
506 up(&mtrr_sem); 507 mutex_unlock(&mtrr_mutex);
507 unlock_cpu_hotplug(); 508 unlock_cpu_hotplug();
508 return error; 509 return error;
509} 510}
@@ -685,7 +686,7 @@ void mtrr_ap_init(void)
685 if (!mtrr_if || !use_intel()) 686 if (!mtrr_if || !use_intel())
686 return; 687 return;
687 /* 688 /*
688 * Ideally we should hold mtrr_sem here to avoid mtrr entries changed, 689 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
689 * but this routine will be called in cpu boot time, holding the lock 690 * but this routine will be called in cpu boot time, holding the lock
690 * breaks it. This routine is called in two cases: 1.very earily time 691 * breaks it. This routine is called in two cases: 1.very earily time
691 * of software resume, when there absolutely isn't mtrr entry changes; 692 * of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index ebc8dc116c43..5efceebc48dc 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -3,6 +3,7 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/dmi.h> 5#include <linux/dmi.h>
6#include <linux/efi.h>
6#include <linux/bootmem.h> 7#include <linux/bootmem.h>
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <asm/dmi.h> 9#include <asm/dmi.h>
@@ -185,47 +186,72 @@ static void __init dmi_decode(struct dmi_header *dm)
185 } 186 }
186} 187}
187 188
188void __init dmi_scan_machine(void) 189static int __init dmi_present(char __iomem *p)
189{ 190{
190 u8 buf[15]; 191 u8 buf[15];
191 char __iomem *p, *q; 192 memcpy_fromio(buf, p, 15);
193 if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
194 u16 num = (buf[13] << 8) | buf[12];
195 u16 len = (buf[7] << 8) | buf[6];
196 u32 base = (buf[11] << 24) | (buf[10] << 16) |
197 (buf[9] << 8) | buf[8];
192 198
193 /* 199 /*
194 * no iounmap() for that ioremap(); it would be a no-op, but it's 200 * DMI version 0.0 means that the real version is taken from
195 * so early in setup that sucker gets confused into doing what 201 * the SMBIOS version, which we don't know at this point.
196 * it shouldn't if we actually call it. 202 */
197 */ 203 if (buf[14] != 0)
198 p = ioremap(0xF0000, 0x10000); 204 printk(KERN_INFO "DMI %d.%d present.\n",
199 if (p == NULL) 205 buf[14] >> 4, buf[14] & 0xF);
200 goto out; 206 else
201 207 printk(KERN_INFO "DMI present.\n");
202 for (q = p; q < p + 0x10000; q += 16) { 208 if (dmi_table(base,len, num, dmi_decode) == 0)
203 memcpy_fromio(buf, q, 15); 209 return 0;
204 if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { 210 }
205 u16 num = (buf[13] << 8) | buf[12]; 211 return 1;
206 u16 len = (buf[7] << 8) | buf[6]; 212}
207 u32 base = (buf[11] << 24) | (buf[10] << 16) |
208 (buf[9] << 8) | buf[8];
209
210 /*
211 * DMI version 0.0 means that the real version is taken from
212 * the SMBIOS version, which we don't know at this point.
213 */
214 if (buf[14] != 0)
215 printk(KERN_INFO "DMI %d.%d present.\n",
216 buf[14] >> 4, buf[14] & 0xF);
217 else
218 printk(KERN_INFO "DMI present.\n");
219 213
220 if (dmi_table(base,len, num, dmi_decode) == 0) 214void __init dmi_scan_machine(void)
215{
216 char __iomem *p, *q;
217 int rc;
218
219 if (efi_enabled) {
220 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
221 goto out;
222
223 /* This is called as a core_initcall() because it isn't
224 * needed during early boot. This also means we can
225 * iounmap the space when we're done with it.
226 */
227 p = dmi_ioremap(efi.smbios, 32);
228 if (p == NULL)
229 goto out;
230
231 rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
232 dmi_iounmap(p, 32);
233 if (!rc)
234 return;
235 }
236 else {
237 /*
238 * no iounmap() for that ioremap(); it would be a no-op, but
239 * it's so early in setup that sucker gets confused into doing
240 * what it shouldn't if we actually call it.
241 */
242 p = dmi_ioremap(0xF0000, 0x10000);
243 if (p == NULL)
244 goto out;
245
246 for (q = p; q < p + 0x10000; q += 16) {
247 rc = dmi_present(q);
248 if (!rc)
221 return; 249 return;
222 } 250 }
223 } 251 }
224 252 out: printk(KERN_INFO "DMI not present or invalid.\n");
225out: printk(KERN_INFO "DMI not present or invalid.\n");
226} 253}
227 254
228
229/** 255/**
230 * dmi_check_system - check system DMI data 256 * dmi_check_system - check system DMI data
231 * @list: array of dmi_system_id structures to match against 257 * @list: array of dmi_system_id structures to match against
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 7ec6cfa01fb3..9202b67c4b2e 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -361,7 +361,7 @@ void __init efi_init(void)
361 */ 361 */
362 c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2); 362 c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2);
363 if (c16) { 363 if (c16) {
364 for (i = 0; i < sizeof(vendor) && *c16; ++i) 364 for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i)
365 vendor[i] = *c16++; 365 vendor[i] = *c16++;
366 vendor[i] = '\0'; 366 vendor[i] = '\0';
367 } else 367 } else
@@ -381,29 +381,38 @@ void __init efi_init(void)
381 if (config_tables == NULL) 381 if (config_tables == NULL)
382 printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n"); 382 printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n");
383 383
384 efi.mps = EFI_INVALID_TABLE_ADDR;
385 efi.acpi = EFI_INVALID_TABLE_ADDR;
386 efi.acpi20 = EFI_INVALID_TABLE_ADDR;
387 efi.smbios = EFI_INVALID_TABLE_ADDR;
388 efi.sal_systab = EFI_INVALID_TABLE_ADDR;
389 efi.boot_info = EFI_INVALID_TABLE_ADDR;
390 efi.hcdp = EFI_INVALID_TABLE_ADDR;
391 efi.uga = EFI_INVALID_TABLE_ADDR;
392
384 for (i = 0; i < num_config_tables; i++) { 393 for (i = 0; i < num_config_tables; i++) {
385 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { 394 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
386 efi.mps = (void *)config_tables[i].table; 395 efi.mps = config_tables[i].table;
387 printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table); 396 printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table);
388 } else 397 } else
389 if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { 398 if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
390 efi.acpi20 = __va(config_tables[i].table); 399 efi.acpi20 = config_tables[i].table;
391 printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table); 400 printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table);
392 } else 401 } else
393 if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { 402 if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
394 efi.acpi = __va(config_tables[i].table); 403 efi.acpi = config_tables[i].table;
395 printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table); 404 printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table);
396 } else 405 } else
397 if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { 406 if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
398 efi.smbios = (void *) config_tables[i].table; 407 efi.smbios = config_tables[i].table;
399 printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table); 408 printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table);
400 } else 409 } else
401 if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { 410 if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
402 efi.hcdp = (void *)config_tables[i].table; 411 efi.hcdp = config_tables[i].table;
403 printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table); 412 printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table);
404 } else 413 } else
405 if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) { 414 if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) {
406 efi.uga = (void *)config_tables[i].table; 415 efi.uga = config_tables[i].table;
407 printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table); 416 printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table);
408 } 417 }
409 } 418 }
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 311b4e7266f1..3b329af4afc5 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
381 unsigned long imbalance = 0; 381 unsigned long imbalance = 0;
382 cpumask_t allowed_mask, target_cpu_mask, tmp; 382 cpumask_t allowed_mask, target_cpu_mask, tmp;
383 383
384 for_each_cpu(i) { 384 for_each_possible_cpu(i) {
385 int package_index; 385 int package_index;
386 CPU_IRQ(i) = 0; 386 CPU_IRQ(i) = 0;
387 if (!cpu_online(i)) 387 if (!cpu_online(i))
@@ -632,7 +632,7 @@ static int __init balanced_irq_init(void)
632 else 632 else
633 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); 633 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
634failed: 634failed:
635 for_each_cpu(i) { 635 for_each_possible_cpu(i) {
636 kfree(irq_cpu_data[i].irq_delta); 636 kfree(irq_cpu_data[i].irq_delta);
637 irq_cpu_data[i].irq_delta = NULL; 637 irq_cpu_data[i].irq_delta = NULL;
638 kfree(irq_cpu_data[i].last_irq); 638 kfree(irq_cpu_data[i].last_irq);
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 7a59050242a7..f19768789e8a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -35,12 +35,56 @@
35#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
36#include <asm/kdebug.h> 36#include <asm/kdebug.h>
37#include <asm/desc.h> 37#include <asm/desc.h>
38#include <asm/uaccess.h>
38 39
39void jprobe_return_end(void); 40void jprobe_return_end(void);
40 41
41DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 42DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
42DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 43DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
43 44
45/* insert a jmp code */
46static inline void set_jmp_op(void *from, void *to)
47{
48 struct __arch_jmp_op {
49 char op;
50 long raddr;
51 } __attribute__((packed)) *jop;
52 jop = (struct __arch_jmp_op *)from;
53 jop->raddr = (long)(to) - ((long)(from) + 5);
54 jop->op = RELATIVEJUMP_INSTRUCTION;
55}
56
57/*
58 * returns non-zero if opcodes can be boosted.
59 */
60static inline int can_boost(kprobe_opcode_t opcode)
61{
62 switch (opcode & 0xf0 ) {
63 case 0x70:
64 return 0; /* can't boost conditional jump */
65 case 0x90:
66 /* can't boost call and pushf */
67 return opcode != 0x9a && opcode != 0x9c;
68 case 0xc0:
69 /* can't boost undefined opcodes and soft-interruptions */
70 return (0xc1 < opcode && opcode < 0xc6) ||
71 (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
72 case 0xd0:
73 /* can boost AA* and XLAT */
74 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
75 case 0xe0:
76 /* can boost in/out and (may be) jmps */
77 return (0xe3 < opcode && opcode != 0xe8);
78 case 0xf0:
79 /* clear and set flags can be boost */
80 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
81 default:
82 /* currently, can't boost 2 bytes opcodes */
83 return opcode != 0x0f;
84 }
85}
86
87
44/* 88/*
45 * returns non-zero if opcode modifies the interrupt flag. 89 * returns non-zero if opcode modifies the interrupt flag.
46 */ 90 */
@@ -65,6 +109,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
65 109
66 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 110 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
67 p->opcode = *p->addr; 111 p->opcode = *p->addr;
112 if (can_boost(p->opcode)) {
113 p->ainsn.boostable = 0;
114 } else {
115 p->ainsn.boostable = -1;
116 }
68 return 0; 117 return 0;
69} 118}
70 119
@@ -155,9 +204,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
155{ 204{
156 struct kprobe *p; 205 struct kprobe *p;
157 int ret = 0; 206 int ret = 0;
158 kprobe_opcode_t *addr = NULL; 207 kprobe_opcode_t *addr;
159 unsigned long *lp;
160 struct kprobe_ctlblk *kcb; 208 struct kprobe_ctlblk *kcb;
209#ifdef CONFIG_PREEMPT
210 unsigned pre_preempt_count = preempt_count();
211#endif /* CONFIG_PREEMPT */
212
213 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
161 214
162 /* 215 /*
163 * We don't want to be preempted for the entire 216 * We don't want to be preempted for the entire
@@ -166,17 +219,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
166 preempt_disable(); 219 preempt_disable();
167 kcb = get_kprobe_ctlblk(); 220 kcb = get_kprobe_ctlblk();
168 221
169 /* Check if the application is using LDT entry for its code segment and
170 * calculate the address by reading the base address from the LDT entry.
171 */
172 if ((regs->xcs & 4) && (current->mm)) {
173 lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
174 + (char *) current->mm->context.ldt);
175 addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
176 sizeof(kprobe_opcode_t));
177 } else {
178 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
179 }
180 /* Check we're not actually recursing */ 222 /* Check we're not actually recursing */
181 if (kprobe_running()) { 223 if (kprobe_running()) {
182 p = get_kprobe(addr); 224 p = get_kprobe(addr);
@@ -252,6 +294,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
252 /* handler has already set things up, so skip ss setup */ 294 /* handler has already set things up, so skip ss setup */
253 return 1; 295 return 1;
254 296
297 if (p->ainsn.boostable == 1 &&
298#ifdef CONFIG_PREEMPT
299 !(pre_preempt_count) && /*
300 * This enables booster when the direct
301 * execution path aren't preempted.
302 */
303#endif /* CONFIG_PREEMPT */
304 !p->post_handler && !p->break_handler ) {
305 /* Boost up -- we can execute copied instructions directly */
306 reset_current_kprobe();
307 regs->eip = (unsigned long)p->ainsn.insn;
308 preempt_enable_no_resched();
309 return 1;
310 }
311
255ss_probe: 312ss_probe:
256 prepare_singlestep(p, regs); 313 prepare_singlestep(p, regs);
257 kcb->kprobe_status = KPROBE_HIT_SS; 314 kcb->kprobe_status = KPROBE_HIT_SS;
@@ -267,17 +324,44 @@ no_kprobe:
267 * here. When a retprobed function returns, this probe is hit and 324 * here. When a retprobed function returns, this probe is hit and
268 * trampoline_probe_handler() runs, calling the kretprobe's handler. 325 * trampoline_probe_handler() runs, calling the kretprobe's handler.
269 */ 326 */
270 void kretprobe_trampoline_holder(void) 327 void __kprobes kretprobe_trampoline_holder(void)
271 { 328 {
272 asm volatile ( ".global kretprobe_trampoline\n" 329 asm volatile ( ".global kretprobe_trampoline\n"
273 "kretprobe_trampoline: \n" 330 "kretprobe_trampoline: \n"
274 "nop\n"); 331 " pushf\n"
275 } 332 /* skip cs, eip, orig_eax, es, ds */
333 " subl $20, %esp\n"
334 " pushl %eax\n"
335 " pushl %ebp\n"
336 " pushl %edi\n"
337 " pushl %esi\n"
338 " pushl %edx\n"
339 " pushl %ecx\n"
340 " pushl %ebx\n"
341 " movl %esp, %eax\n"
342 " call trampoline_handler\n"
343 /* move eflags to cs */
344 " movl 48(%esp), %edx\n"
345 " movl %edx, 44(%esp)\n"
346 /* save true return address on eflags */
347 " movl %eax, 48(%esp)\n"
348 " popl %ebx\n"
349 " popl %ecx\n"
350 " popl %edx\n"
351 " popl %esi\n"
352 " popl %edi\n"
353 " popl %ebp\n"
354 " popl %eax\n"
355 /* skip eip, orig_eax, es, ds */
356 " addl $16, %esp\n"
357 " popf\n"
358 " ret\n");
359}
276 360
277/* 361/*
278 * Called when we hit the probe point at kretprobe_trampoline 362 * Called from kretprobe_trampoline
279 */ 363 */
280int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 364fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
281{ 365{
282 struct kretprobe_instance *ri = NULL; 366 struct kretprobe_instance *ri = NULL;
283 struct hlist_head *head; 367 struct hlist_head *head;
@@ -306,8 +390,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
306 /* another task is sharing our hash bucket */ 390 /* another task is sharing our hash bucket */
307 continue; 391 continue;
308 392
309 if (ri->rp && ri->rp->handler) 393 if (ri->rp && ri->rp->handler){
394 __get_cpu_var(current_kprobe) = &ri->rp->kp;
310 ri->rp->handler(ri, regs); 395 ri->rp->handler(ri, regs);
396 __get_cpu_var(current_kprobe) = NULL;
397 }
311 398
312 orig_ret_address = (unsigned long)ri->ret_addr; 399 orig_ret_address = (unsigned long)ri->ret_addr;
313 recycle_rp_inst(ri); 400 recycle_rp_inst(ri);
@@ -322,18 +409,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
322 } 409 }
323 410
324 BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); 411 BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
325 regs->eip = orig_ret_address;
326 412
327 reset_current_kprobe();
328 spin_unlock_irqrestore(&kretprobe_lock, flags); 413 spin_unlock_irqrestore(&kretprobe_lock, flags);
329 preempt_enable_no_resched();
330 414
331 /* 415 return (void*)orig_ret_address;
332 * By returning a non-zero value, we are telling
333 * kprobe_handler() that we don't want the post_handler
334 * to run (and have re-enabled preemption)
335 */
336 return 1;
337} 416}
338 417
339/* 418/*
@@ -357,15 +436,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
357 * 2) If the single-stepped instruction was a call, the return address 436 * 2) If the single-stepped instruction was a call, the return address
358 * that is atop the stack is the address following the copied instruction. 437 * that is atop the stack is the address following the copied instruction.
359 * We need to make it the address following the original instruction. 438 * We need to make it the address following the original instruction.
439 *
440 * This function also checks instruction size for preparing direct execution.
360 */ 441 */
361static void __kprobes resume_execution(struct kprobe *p, 442static void __kprobes resume_execution(struct kprobe *p,
362 struct pt_regs *regs, struct kprobe_ctlblk *kcb) 443 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
363{ 444{
364 unsigned long *tos = (unsigned long *)&regs->esp; 445 unsigned long *tos = (unsigned long *)&regs->esp;
365 unsigned long next_eip = 0;
366 unsigned long copy_eip = (unsigned long)p->ainsn.insn; 446 unsigned long copy_eip = (unsigned long)p->ainsn.insn;
367 unsigned long orig_eip = (unsigned long)p->addr; 447 unsigned long orig_eip = (unsigned long)p->addr;
368 448
449 regs->eflags &= ~TF_MASK;
369 switch (p->ainsn.insn[0]) { 450 switch (p->ainsn.insn[0]) {
370 case 0x9c: /* pushfl */ 451 case 0x9c: /* pushfl */
371 *tos &= ~(TF_MASK | IF_MASK); 452 *tos &= ~(TF_MASK | IF_MASK);
@@ -375,37 +456,51 @@ static void __kprobes resume_execution(struct kprobe *p,
375 case 0xcb: 456 case 0xcb:
376 case 0xc2: 457 case 0xc2:
377 case 0xca: 458 case 0xca:
378 regs->eflags &= ~TF_MASK; 459 case 0xea: /* jmp absolute -- eip is correct */
379 /* eip is already adjusted, no more changes required*/ 460 /* eip is already adjusted, no more changes required */
380 return; 461 p->ainsn.boostable = 1;
462 goto no_change;
381 case 0xe8: /* call relative - Fix return addr */ 463 case 0xe8: /* call relative - Fix return addr */
382 *tos = orig_eip + (*tos - copy_eip); 464 *tos = orig_eip + (*tos - copy_eip);
383 break; 465 break;
384 case 0xff: 466 case 0xff:
385 if ((p->ainsn.insn[1] & 0x30) == 0x10) { 467 if ((p->ainsn.insn[1] & 0x30) == 0x10) {
386 /* call absolute, indirect */ 468 /* call absolute, indirect */
387 /* Fix return addr; eip is correct. */ 469 /*
388 next_eip = regs->eip; 470 * Fix return addr; eip is correct.
471 * But this is not boostable
472 */
389 *tos = orig_eip + (*tos - copy_eip); 473 *tos = orig_eip + (*tos - copy_eip);
474 goto no_change;
390 } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ 475 } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
391 ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 476 ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
392 /* eip is correct. */ 477 /* eip is correct. And this is boostable */
393 next_eip = regs->eip; 478 p->ainsn.boostable = 1;
479 goto no_change;
394 } 480 }
395 break;
396 case 0xea: /* jmp absolute -- eip is correct */
397 next_eip = regs->eip;
398 break;
399 default: 481 default:
400 break; 482 break;
401 } 483 }
402 484
403 regs->eflags &= ~TF_MASK; 485 if (p->ainsn.boostable == 0) {
404 if (next_eip) { 486 if ((regs->eip > copy_eip) &&
405 regs->eip = next_eip; 487 (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
406 } else { 488 /*
407 regs->eip = orig_eip + (regs->eip - copy_eip); 489 * These instructions can be executed directly if it
490 * jumps back to correct address.
491 */
492 set_jmp_op((void *)regs->eip,
493 (void *)orig_eip + (regs->eip - copy_eip));
494 p->ainsn.boostable = 1;
495 } else {
496 p->ainsn.boostable = -1;
497 }
408 } 498 }
499
500 regs->eip = orig_eip + (regs->eip - copy_eip);
501
502no_change:
503 return;
409} 504}
410 505
411/* 506/*
@@ -453,15 +548,57 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
453 struct kprobe *cur = kprobe_running(); 548 struct kprobe *cur = kprobe_running();
454 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 549 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
455 550
456 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 551 switch(kcb->kprobe_status) {
457 return 1; 552 case KPROBE_HIT_SS:
458 553 case KPROBE_REENTER:
459 if (kcb->kprobe_status & KPROBE_HIT_SS) { 554 /*
460 resume_execution(cur, regs, kcb); 555 * We are here because the instruction being single
556 * stepped caused a page fault. We reset the current
557 * kprobe and the eip points back to the probe address
558 * and allow the page fault handler to continue as a
559 * normal page fault.
560 */
561 regs->eip = (unsigned long)cur->addr;
461 regs->eflags |= kcb->kprobe_old_eflags; 562 regs->eflags |= kcb->kprobe_old_eflags;
462 563 if (kcb->kprobe_status == KPROBE_REENTER)
463 reset_current_kprobe(); 564 restore_previous_kprobe(kcb);
565 else
566 reset_current_kprobe();
464 preempt_enable_no_resched(); 567 preempt_enable_no_resched();
568 break;
569 case KPROBE_HIT_ACTIVE:
570 case KPROBE_HIT_SSDONE:
571 /*
572 * We increment the nmissed count for accounting,
573 * we can also use npre/npostfault count for accouting
574 * these specific fault cases.
575 */
576 kprobes_inc_nmissed_count(cur);
577
578 /*
579 * We come here because instructions in the pre/post
580 * handler caused the page_fault, this could happen
581 * if handler tries to access user space by
582 * copy_from_user(), get_user() etc. Let the
583 * user-specified handler try to fix it first.
584 */
585 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
586 return 1;
587
588 /*
589 * In case the user-specified fault handler returned
590 * zero, try to fix up.
591 */
592 if (fixup_exception(regs))
593 return 1;
594
595 /*
596 * fixup_exception() could not handle it,
597 * Let do_page_fault() fix it.
598 */
599 break;
600 default:
601 break;
465 } 602 }
466 return 0; 603 return 0;
467} 604}
@@ -475,6 +612,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
475 struct die_args *args = (struct die_args *)data; 612 struct die_args *args = (struct die_args *)data;
476 int ret = NOTIFY_DONE; 613 int ret = NOTIFY_DONE;
477 614
615 if (args->regs && user_mode(args->regs))
616 return ret;
617
478 switch (val) { 618 switch (val) {
479 case DIE_INT3: 619 case DIE_INT3:
480 if (kprobe_handler(args->regs)) 620 if (kprobe_handler(args->regs))
@@ -564,12 +704,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
564 return 0; 704 return 0;
565} 705}
566 706
567static struct kprobe trampoline_p = {
568 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
569 .pre_handler = trampoline_probe_handler
570};
571
572int __init arch_init_kprobes(void) 707int __init arch_init_kprobes(void)
573{ 708{
574 return register_kprobe(&trampoline_p); 709 return 0;
575} 710}
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 55bc365b8753..e7c138f66c5a 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -81,6 +81,7 @@
81#include <linux/miscdevice.h> 81#include <linux/miscdevice.h>
82#include <linux/spinlock.h> 82#include <linux/spinlock.h>
83#include <linux/mm.h> 83#include <linux/mm.h>
84#include <linux/mutex.h>
84 85
85#include <asm/msr.h> 86#include <asm/msr.h>
86#include <asm/uaccess.h> 87#include <asm/uaccess.h>
@@ -114,7 +115,7 @@ MODULE_LICENSE("GPL");
114static DEFINE_SPINLOCK(microcode_update_lock); 115static DEFINE_SPINLOCK(microcode_update_lock);
115 116
116/* no concurrent ->write()s are allowed on /dev/cpu/microcode */ 117/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
117static DECLARE_MUTEX(microcode_sem); 118static DEFINE_MUTEX(microcode_mutex);
118 119
119static void __user *user_buffer; /* user area microcode data buffer */ 120static void __user *user_buffer; /* user area microcode data buffer */
120static unsigned int user_buffer_size; /* it's size */ 121static unsigned int user_buffer_size; /* it's size */
@@ -444,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
444 return -EINVAL; 445 return -EINVAL;
445 } 446 }
446 447
447 down(&microcode_sem); 448 mutex_lock(&microcode_mutex);
448 449
449 user_buffer = (void __user *) buf; 450 user_buffer = (void __user *) buf;
450 user_buffer_size = (int) len; 451 user_buffer_size = (int) len;
@@ -453,31 +454,14 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
453 if (!ret) 454 if (!ret)
454 ret = (ssize_t)len; 455 ret = (ssize_t)len;
455 456
456 up(&microcode_sem); 457 mutex_unlock(&microcode_mutex);
457 458
458 return ret; 459 return ret;
459} 460}
460 461
461static int microcode_ioctl (struct inode *inode, struct file *file,
462 unsigned int cmd, unsigned long arg)
463{
464 switch (cmd) {
465 /*
466 * XXX: will be removed after microcode_ctl
467 * is updated to ignore failure of this ioctl()
468 */
469 case MICROCODE_IOCFREE:
470 return 0;
471 default:
472 return -EINVAL;
473 }
474 return -EINVAL;
475}
476
477static struct file_operations microcode_fops = { 462static struct file_operations microcode_fops = {
478 .owner = THIS_MODULE, 463 .owner = THIS_MODULE,
479 .write = microcode_write, 464 .write = microcode_write,
480 .ioctl = microcode_ioctl,
481 .open = microcode_open, 465 .open = microcode_open,
482}; 466};
483 467
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 9074818b9473..d43b498ec745 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void)
138 if (nmi_watchdog == NMI_LOCAL_APIC) 138 if (nmi_watchdog == NMI_LOCAL_APIC)
139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
140 140
141 for_each_cpu(cpu) 141 for_each_possible_cpu(cpu)
142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; 142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
143 local_irq_enable(); 143 local_irq_enable();
144 mdelay((10*1000)/nmi_hz); // wait 10 ticks 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks
145 145
146 for_each_cpu(cpu) { 146 for_each_possible_cpu(cpu) {
147#ifdef CONFIG_SMP 147#ifdef CONFIG_SMP
148 /* Check cpu_callin_map here because that is set 148 /* Check cpu_callin_map here because that is set
149 after the timer is started. */ 149 after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
510 * Just reset the alert counters, (other CPUs might be 510 * Just reset the alert counters, (other CPUs might be
511 * spinning on locks we hold): 511 * spinning on locks we hold):
512 */ 512 */
513 for_each_cpu(i) 513 for_each_possible_cpu(i)
514 alert_counter[i] = 0; 514 alert_counter[i] = 0;
515 515
516 /* 516 /*
@@ -529,7 +529,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
529 * always switch the stack NMI-atomically, it's safe to use 529 * always switch the stack NMI-atomically, it's safe to use
530 * smp_processor_id(). 530 * smp_processor_id().
531 */ 531 */
532 int sum, cpu = smp_processor_id(); 532 unsigned int sum;
533 int cpu = smp_processor_id();
533 534
534 sum = per_cpu(irq_stat, cpu).apic_timer_irqs; 535 sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
535 536
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 299e61674084..24b3e745478b 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -38,7 +38,6 @@
38#include <linux/kallsyms.h> 38#include <linux/kallsyms.h>
39#include <linux/ptrace.h> 39#include <linux/ptrace.h>
40#include <linux/random.h> 40#include <linux/random.h>
41#include <linux/kprobes.h>
42 41
43#include <asm/uaccess.h> 42#include <asm/uaccess.h>
44#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -364,13 +363,6 @@ void exit_thread(void)
364 struct task_struct *tsk = current; 363 struct task_struct *tsk = current;
365 struct thread_struct *t = &tsk->thread; 364 struct thread_struct *t = &tsk->thread;
366 365
367 /*
368 * Remove function-return probe instances associated with this task
369 * and put them back on the free list. Do not insert an exit probe for
370 * this function, it will be disabled by kprobe_flush_task if you do.
371 */
372 kprobe_flush_task(tsk);
373
374 /* The process may have allocated an io port bitmap... nuke it. */ 366 /* The process may have allocated an io port bitmap... nuke it. */
375 if (unlikely(NULL != t->io_bitmap_ptr)) { 367 if (unlikely(NULL != t->io_bitmap_ptr)) {
376 int cpu = get_cpu(); 368 int cpu = get_cpu();
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index d313a11acafa..8c08660b4e5d 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -46,6 +46,7 @@
46#include <linux/kexec.h> 46#include <linux/kexec.h>
47#include <linux/crash_dump.h> 47#include <linux/crash_dump.h>
48#include <linux/dmi.h> 48#include <linux/dmi.h>
49#include <linux/pfn.h>
49 50
50#include <video/edid.h> 51#include <video/edid.h>
51 52
@@ -1058,10 +1059,10 @@ static int __init
1058free_available_memory(unsigned long start, unsigned long end, void *arg) 1059free_available_memory(unsigned long start, unsigned long end, void *arg)
1059{ 1060{
1060 /* check max_low_pfn */ 1061 /* check max_low_pfn */
1061 if (start >= ((max_low_pfn + 1) << PAGE_SHIFT)) 1062 if (start >= (max_low_pfn << PAGE_SHIFT))
1062 return 0; 1063 return 0;
1063 if (end >= ((max_low_pfn + 1) << PAGE_SHIFT)) 1064 if (end >= (max_low_pfn << PAGE_SHIFT))
1064 end = (max_low_pfn + 1) << PAGE_SHIFT; 1065 end = max_low_pfn << PAGE_SHIFT;
1065 if (start < end) 1066 if (start < end)
1066 free_bootmem(start, end - start); 1067 free_bootmem(start, end - start);
1067 1068
@@ -1286,8 +1287,6 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
1286 probe_roms(); 1287 probe_roms();
1287 for (i = 0; i < e820.nr_map; i++) { 1288 for (i = 0; i < e820.nr_map; i++) {
1288 struct resource *res; 1289 struct resource *res;
1289 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
1290 continue;
1291 res = kzalloc(sizeof(struct resource), GFP_ATOMIC); 1290 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1292 switch (e820.map[i].type) { 1291 switch (e820.map[i].type) {
1293 case E820_RAM: res->name = "System RAM"; break; 1292 case E820_RAM: res->name = "System RAM"; break;
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 82371d83bfa9..a6969903f2d6 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -72,6 +72,9 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
72/* Core ID of each logical CPU */ 72/* Core ID of each logical CPU */
73int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; 73int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
74 74
75/* Last level cache ID of each logical CPU */
76int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
77
75/* representing HT siblings of each logical CPU */ 78/* representing HT siblings of each logical CPU */
76cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 79cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
77EXPORT_SYMBOL(cpu_sibling_map); 80EXPORT_SYMBOL(cpu_sibling_map);
@@ -440,6 +443,18 @@ static void __devinit smp_callin(void)
440 443
441static int cpucount; 444static int cpucount;
442 445
446/* maps the cpu to the sched domain representing multi-core */
447cpumask_t cpu_coregroup_map(int cpu)
448{
449 struct cpuinfo_x86 *c = cpu_data + cpu;
450 /*
451 * For perf, we return last level cache shared map.
452 * TBD: when power saving sched policy is added, we will return
453 * cpu_core_map when power saving policy is enabled
454 */
455 return c->llc_shared_map;
456}
457
443/* representing cpus for which sibling maps can be computed */ 458/* representing cpus for which sibling maps can be computed */
444static cpumask_t cpu_sibling_setup_map; 459static cpumask_t cpu_sibling_setup_map;
445 460
@@ -459,12 +474,16 @@ set_cpu_sibling_map(int cpu)
459 cpu_set(cpu, cpu_sibling_map[i]); 474 cpu_set(cpu, cpu_sibling_map[i]);
460 cpu_set(i, cpu_core_map[cpu]); 475 cpu_set(i, cpu_core_map[cpu]);
461 cpu_set(cpu, cpu_core_map[i]); 476 cpu_set(cpu, cpu_core_map[i]);
477 cpu_set(i, c[cpu].llc_shared_map);
478 cpu_set(cpu, c[i].llc_shared_map);
462 } 479 }
463 } 480 }
464 } else { 481 } else {
465 cpu_set(cpu, cpu_sibling_map[cpu]); 482 cpu_set(cpu, cpu_sibling_map[cpu]);
466 } 483 }
467 484
485 cpu_set(cpu, c[cpu].llc_shared_map);
486
468 if (current_cpu_data.x86_max_cores == 1) { 487 if (current_cpu_data.x86_max_cores == 1) {
469 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 488 cpu_core_map[cpu] = cpu_sibling_map[cpu];
470 c[cpu].booted_cores = 1; 489 c[cpu].booted_cores = 1;
@@ -472,6 +491,11 @@ set_cpu_sibling_map(int cpu)
472 } 491 }
473 492
474 for_each_cpu_mask(i, cpu_sibling_setup_map) { 493 for_each_cpu_mask(i, cpu_sibling_setup_map) {
494 if (cpu_llc_id[cpu] != BAD_APICID &&
495 cpu_llc_id[cpu] == cpu_llc_id[i]) {
496 cpu_set(i, c[cpu].llc_shared_map);
497 cpu_set(cpu, c[i].llc_shared_map);
498 }
475 if (phys_proc_id[cpu] == phys_proc_id[i]) { 499 if (phys_proc_id[cpu] == phys_proc_id[i]) {
476 cpu_set(i, cpu_core_map[cpu]); 500 cpu_set(i, cpu_core_map[cpu]);
477 cpu_set(cpu, cpu_core_map[i]); 501 cpu_set(cpu, cpu_core_map[i]);
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index ac687d00a1ce..326595f3fa4d 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -310,3 +310,5 @@ ENTRY(sys_call_table)
310 .long sys_pselect6 310 .long sys_pselect6
311 .long sys_ppoll 311 .long sys_ppoll
312 .long sys_unshare /* 310 */ 312 .long sys_unshare /* 310 */
313 .long sys_set_robust_list
314 .long sys_get_robust_list
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index 264edaaac315..144e94a04933 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pci.h>
18#include <asm/types.h> 19#include <asm/types.h>
19#include <asm/timer.h> 20#include <asm/timer.h>
20#include <asm/smp.h> 21#include <asm/smp.h>
@@ -45,24 +46,31 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
45 46
46#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */ 47#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
47 48
49static int pmtmr_need_workaround __read_mostly = 1;
50
48/*helper function to safely read acpi pm timesource*/ 51/*helper function to safely read acpi pm timesource*/
49static inline u32 read_pmtmr(void) 52static inline u32 read_pmtmr(void)
50{ 53{
51 u32 v1=0,v2=0,v3=0; 54 if (pmtmr_need_workaround) {
52 /* It has been reported that because of various broken 55 u32 v1, v2, v3;
53 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time 56
54 * source is not latched, so you must read it multiple 57 /* It has been reported that because of various broken
55 * times to insure a safe value is read. 58 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time
56 */ 59 * source is not latched, so you must read it multiple
57 do { 60 * times to insure a safe value is read.
58 v1 = inl(pmtmr_ioport); 61 */
59 v2 = inl(pmtmr_ioport); 62 do {
60 v3 = inl(pmtmr_ioport); 63 v1 = inl(pmtmr_ioport);
61 } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) 64 v2 = inl(pmtmr_ioport);
62 || (v3 > v1 && v3 < v2)); 65 v3 = inl(pmtmr_ioport);
63 66 } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
64 /* mask the output to 24 bits */ 67 || (v3 > v1 && v3 < v2));
65 return v2 & ACPI_PM_MASK; 68
69 /* mask the output to 24 bits */
70 return v2 & ACPI_PM_MASK;
71 }
72
73 return inl(pmtmr_ioport) & ACPI_PM_MASK;
66} 74}
67 75
68 76
@@ -263,6 +271,72 @@ struct init_timer_opts __initdata timer_pmtmr_init = {
263 .opts = &timer_pmtmr, 271 .opts = &timer_pmtmr,
264}; 272};
265 273
274#ifdef CONFIG_PCI
275/*
276 * PIIX4 Errata:
277 *
278 * The power management timer may return improper results when read.
279 * Although the timer value settles properly after incrementing,
280 * while incrementing there is a 3 ns window every 69.8 ns where the
281 * timer value is indeterminate (a 4.2% chance that the data will be
282 * incorrect when read). As a result, the ACPI free running count up
283 * timer specification is violated due to erroneous reads.
284 */
285static int __init pmtmr_bug_check(void)
286{
287 static struct pci_device_id gray_list[] __initdata = {
288 /* these chipsets may have bug. */
289 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
290 PCI_DEVICE_ID_INTEL_82801DB_0) },
291 { },
292 };
293 struct pci_dev *dev;
294 int pmtmr_has_bug = 0;
295 u8 rev;
296
297 if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
298 return 0;
299
300 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
301 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
302 if (dev) {
303 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
304 /* the bug has been fixed in PIIX4M */
305 if (rev < 3) {
306 printk(KERN_WARNING "* Found PM-Timer Bug on this "
307 "chipset. Due to workarounds for a bug,\n"
308 "* this time source is slow. Consider trying "
309 "other time sources (clock=)\n");
310 pmtmr_has_bug = 1;
311 }
312 pci_dev_put(dev);
313 }
314
315 if (pci_dev_present(gray_list)) {
316 printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
317 " to workarounds for a bug,\n"
318 "* this time source is slow. If you are sure your timer"
319 " does not have\n"
320 "* this bug, please use \"pmtmr_good\" to disable the "
321 "workaround\n");
322 pmtmr_has_bug = 1;
323 }
324
325 if (!pmtmr_has_bug)
326 pmtmr_need_workaround = 0;
327
328 return 0;
329}
330device_initcall(pmtmr_bug_check);
331#endif
332
333static int __init pmtr_good_setup(char *__str)
334{
335 pmtmr_need_workaround = 0;
336 return 1;
337}
338__setup("pmtmr_good", pmtr_good_setup);
339
266MODULE_LICENSE("GPL"); 340MODULE_LICENSE("GPL");
267MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); 341MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
268MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86"); 342MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index de5386b01d38..6b63a5aa1e46 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void);
92asmlinkage void machine_check(void); 92asmlinkage void machine_check(void);
93 93
94static int kstack_depth_to_print = 24; 94static int kstack_depth_to_print = 24;
95struct notifier_block *i386die_chain; 95ATOMIC_NOTIFIER_HEAD(i386die_chain);
96static DEFINE_SPINLOCK(die_notifier_lock);
97 96
98int register_die_notifier(struct notifier_block *nb) 97int register_die_notifier(struct notifier_block *nb)
99{ 98{
100 int err = 0;
101 unsigned long flags;
102
103 vmalloc_sync_all(); 99 vmalloc_sync_all();
104 spin_lock_irqsave(&die_notifier_lock, flags); 100 return atomic_notifier_chain_register(&i386die_chain, nb);
105 err = notifier_chain_register(&i386die_chain, nb);
106 spin_unlock_irqrestore(&die_notifier_lock, flags);
107 return err;
108} 101}
109EXPORT_SYMBOL(register_die_notifier); 102EXPORT_SYMBOL(register_die_notifier);
110 103
104int unregister_die_notifier(struct notifier_block *nb)
105{
106 return atomic_notifier_chain_unregister(&i386die_chain, nb);
107}
108EXPORT_SYMBOL(unregister_die_notifier);
109
111static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) 110static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
112{ 111{
113 return p > (void *)tinfo && 112 return p > (void *)tinfo &&
@@ -386,8 +385,12 @@ void die(const char * str, struct pt_regs * regs, long err)
386#endif 385#endif
387 if (nl) 386 if (nl)
388 printk("\n"); 387 printk("\n");
389 notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); 388 if (notify_die(DIE_OOPS, str, regs, err,
390 show_registers(regs); 389 current->thread.trap_no, SIGSEGV) !=
390 NOTIFY_STOP)
391 show_registers(regs);
392 else
393 regs = NULL;
391 } else 394 } else
392 printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); 395 printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
393 396
@@ -395,6 +398,9 @@ void die(const char * str, struct pt_regs * regs, long err)
395 die.lock_owner = -1; 398 die.lock_owner = -1;
396 spin_unlock_irqrestore(&die.lock, flags); 399 spin_unlock_irqrestore(&die.lock, flags);
397 400
401 if (!regs)
402 return;
403
398 if (kexec_should_crash(current)) 404 if (kexec_should_crash(current))
399 crash_kexec(regs); 405 crash_kexec(regs);
400 406
@@ -623,7 +629,7 @@ static DEFINE_SPINLOCK(nmi_print_lock);
623 629
624void die_nmi (struct pt_regs *regs, const char *msg) 630void die_nmi (struct pt_regs *regs, const char *msg)
625{ 631{
626 if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) == 632 if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
627 NOTIFY_STOP) 633 NOTIFY_STOP)
628 return; 634 return;
629 635
@@ -662,7 +668,7 @@ static void default_do_nmi(struct pt_regs * regs)
662 reason = get_nmi_reason(); 668 reason = get_nmi_reason();
663 669
664 if (!(reason & 0xc0)) { 670 if (!(reason & 0xc0)) {
665 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT) 671 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
666 == NOTIFY_STOP) 672 == NOTIFY_STOP)
667 return; 673 return;
668#ifdef CONFIG_X86_LOCAL_APIC 674#ifdef CONFIG_X86_LOCAL_APIC
@@ -678,7 +684,7 @@ static void default_do_nmi(struct pt_regs * regs)
678 unknown_nmi_error(reason, regs); 684 unknown_nmi_error(reason, regs);
679 return; 685 return;
680 } 686 }
681 if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP) 687 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
682 return; 688 return;
683 if (reason & 0x80) 689 if (reason & 0x80)
684 mem_parity_error(reason, regs); 690 mem_parity_error(reason, regs);
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 8165626a5c30..70e560a1b79a 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1700,7 +1700,7 @@ after_handle_vic_irq(unsigned int irq)
1700 1700
1701 printk("VOYAGER SMP: CPU%d lost interrupt %d\n", 1701 printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
1702 cpu, irq); 1702 cpu, irq);
1703 for_each_cpu(real_cpu, mask) { 1703 for_each_possible_cpu(real_cpu, mask) {
1704 1704
1705 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, 1705 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
1706 VIC_PROCESSOR_ID); 1706 VIC_PROCESSOR_ID);
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index c4af9638dbfa..fe6eb901326e 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -31,6 +31,7 @@
31#include <linux/nodemask.h> 31#include <linux/nodemask.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kexec.h> 33#include <linux/kexec.h>
34#include <linux/pfn.h>
34 35
35#include <asm/e820.h> 36#include <asm/e820.h>
36#include <asm/setup.h> 37#include <asm/setup.h>
@@ -352,17 +353,6 @@ void __init zone_sizes_init(void)
352{ 353{
353 int nid; 354 int nid;
354 355
355 /*
356 * Insert nodes into pgdat_list backward so they appear in order.
357 * Clobber node 0's links and NULL out pgdat_list before starting.
358 */
359 pgdat_list = NULL;
360 for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) {
361 if (!node_online(nid))
362 continue;
363 NODE_DATA(nid)->pgdat_next = pgdat_list;
364 pgdat_list = NODE_DATA(nid);
365 }
366 356
367 for_each_online_node(nid) { 357 for_each_online_node(nid) {
368 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; 358 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 9db3242103be..2889567e21a1 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -36,7 +36,7 @@ void show_mem(void)
36 printk(KERN_INFO "Mem-info:\n"); 36 printk(KERN_INFO "Mem-info:\n");
37 show_free_areas(); 37 show_free_areas();
38 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 38 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
39 for_each_pgdat(pgdat) { 39 for_each_online_pgdat(pgdat) {
40 pgdat_resize_lock(pgdat, &flags); 40 pgdat_resize_lock(pgdat, &flags);
41 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 41 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
42 page = pgdat_page_nr(pgdat, i); 42 page = pgdat_page_nr(pgdat, i);
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 1accce50c2c7..1a2076ce6f6a 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
122static void free_msrs(void) 122static void free_msrs(void)
123{ 123{
124 int i; 124 int i;
125 for_each_cpu(i) { 125 for_each_possible_cpu(i) {
126 kfree(cpu_msrs[i].counters); 126 kfree(cpu_msrs[i].counters);
127 cpu_msrs[i].counters = NULL; 127 cpu_msrs[i].counters = NULL;
128 kfree(cpu_msrs[i].controls); 128 kfree(cpu_msrs[i].controls);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 10b6b9e7716b..edffe25a477a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -34,6 +34,10 @@ config RWSEM_XCHGADD_ALGORITHM
34 bool 34 bool
35 default y 35 default y
36 36
37config GENERIC_FIND_NEXT_BIT
38 bool
39 default y
40
37config GENERIC_CALIBRATE_DELAY 41config GENERIC_CALIBRATE_DELAY
38 bool 42 bool
39 default y 43 default y
@@ -42,6 +46,10 @@ config TIME_INTERPOLATION
42 bool 46 bool
43 default y 47 default y
44 48
49config DMI
50 bool
51 default y
52
45config EFI 53config EFI
46 bool 54 bool
47 default y 55 default y
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 68ceb4e690c7..ccb98ed48e58 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -29,9 +29,9 @@
29struct partial_page { 29struct partial_page {
30 struct partial_page *next; /* linked list, sorted by address */ 30 struct partial_page *next; /* linked list, sorted by address */
31 struct rb_node pp_rb; 31 struct rb_node pp_rb;
32 /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*32 32 /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
33 * should suffice.*/ 33 * should suffice.*/
34 unsigned int bitmap; 34 unsigned long bitmap;
35 unsigned int base; 35 unsigned int base;
36}; 36};
37 37
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 13e739e4c84d..5366b3b23d09 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -25,7 +25,6 @@
25#include <linux/resource.h> 25#include <linux/resource.h>
26#include <linux/times.h> 26#include <linux/times.h>
27#include <linux/utsname.h> 27#include <linux/utsname.h>
28#include <linux/timex.h>
29#include <linux/smp.h> 28#include <linux/smp.h>
30#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
31#include <linux/sem.h> 30#include <linux/sem.h>
@@ -2591,78 +2590,4 @@ sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
2591 ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); 2590 ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
2592 return sys_setresgid(srgid, segid, ssgid); 2591 return sys_setresgid(srgid, segid, ssgid);
2593} 2592}
2594
2595/* Handle adjtimex compatibility. */
2596
2597struct timex32 {
2598 u32 modes;
2599 s32 offset, freq, maxerror, esterror;
2600 s32 status, constant, precision, tolerance;
2601 struct compat_timeval time;
2602 s32 tick;
2603 s32 ppsfreq, jitter, shift, stabil;
2604 s32 jitcnt, calcnt, errcnt, stbcnt;
2605 s32 :32; s32 :32; s32 :32; s32 :32;
2606 s32 :32; s32 :32; s32 :32; s32 :32;
2607 s32 :32; s32 :32; s32 :32; s32 :32;
2608};
2609
2610extern int do_adjtimex(struct timex *);
2611
2612asmlinkage long
2613sys32_adjtimex(struct timex32 *utp)
2614{
2615 struct timex txc;
2616 int ret;
2617
2618 memset(&txc, 0, sizeof(struct timex));
2619
2620 if(get_user(txc.modes, &utp->modes) ||
2621 __get_user(txc.offset, &utp->offset) ||
2622 __get_user(txc.freq, &utp->freq) ||
2623 __get_user(txc.maxerror, &utp->maxerror) ||
2624 __get_user(txc.esterror, &utp->esterror) ||
2625 __get_user(txc.status, &utp->status) ||
2626 __get_user(txc.constant, &utp->constant) ||
2627 __get_user(txc.precision, &utp->precision) ||
2628 __get_user(txc.tolerance, &utp->tolerance) ||
2629 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
2630 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
2631 __get_user(txc.tick, &utp->tick) ||
2632 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
2633 __get_user(txc.jitter, &utp->jitter) ||
2634 __get_user(txc.shift, &utp->shift) ||
2635 __get_user(txc.stabil, &utp->stabil) ||
2636 __get_user(txc.jitcnt, &utp->jitcnt) ||
2637 __get_user(txc.calcnt, &utp->calcnt) ||
2638 __get_user(txc.errcnt, &utp->errcnt) ||
2639 __get_user(txc.stbcnt, &utp->stbcnt))
2640 return -EFAULT;
2641
2642 ret = do_adjtimex(&txc);
2643
2644 if(put_user(txc.modes, &utp->modes) ||
2645 __put_user(txc.offset, &utp->offset) ||
2646 __put_user(txc.freq, &utp->freq) ||
2647 __put_user(txc.maxerror, &utp->maxerror) ||
2648 __put_user(txc.esterror, &utp->esterror) ||
2649 __put_user(txc.status, &utp->status) ||
2650 __put_user(txc.constant, &utp->constant) ||
2651 __put_user(txc.precision, &utp->precision) ||
2652 __put_user(txc.tolerance, &utp->tolerance) ||
2653 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
2654 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
2655 __put_user(txc.tick, &utp->tick) ||
2656 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
2657 __put_user(txc.jitter, &utp->jitter) ||
2658 __put_user(txc.shift, &utp->shift) ||
2659 __put_user(txc.stabil, &utp->stabil) ||
2660 __put_user(txc.jitcnt, &utp->jitcnt) ||
2661 __put_user(txc.calcnt, &utp->calcnt) ||
2662 __put_user(txc.errcnt, &utp->errcnt) ||
2663 __put_user(txc.stbcnt, &utp->stbcnt))
2664 ret = -EFAULT;
2665
2666 return ret;
2667}
2668#endif /* NOTYET */ 2593#endif /* NOTYET */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 09a0dbc17fb6..59e871dae742 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
7obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 7obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
8 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ 8 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
9 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ 9 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
10 unwind.o mca.o mca_asm.o topology.o 10 unwind.o mca.o mca_asm.o topology.o dmi_scan.o
11 11
12obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o 12obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
13obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o 13obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
31obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 31obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
32mca_recovery-y += mca_drv.o mca_drv_asm.o 32mca_recovery-y += mca_drv.o mca_drv_asm.o
33dmi_scan-y += ../../i386/kernel/dmi_scan.o
33 34
34# The gate DSO image is built using a special linker script. 35# The gate DSO image is built using a special linker script.
35targets += gate.so gate-syms.o 36targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a4e218ce2edb..58c93a30348c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -651,9 +651,9 @@ unsigned long __init acpi_find_rsdp(void)
651{ 651{
652 unsigned long rsdp_phys = 0; 652 unsigned long rsdp_phys = 0;
653 653
654 if (efi.acpi20) 654 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
655 rsdp_phys = __pa(efi.acpi20); 655 rsdp_phys = efi.acpi20;
656 else if (efi.acpi) 656 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
657 printk(KERN_WARNING PREFIX 657 printk(KERN_WARNING PREFIX
658 "v1.0/r0.71 tables no longer supported\n"); 658 "v1.0/r0.71 tables no longer supported\n");
659 return rsdp_phys; 659 return rsdp_phys;
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 9990320b6f9a..12cfedce73b1 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -458,24 +458,33 @@ efi_init (void)
458 printk(KERN_INFO "EFI v%u.%.02u by %s:", 458 printk(KERN_INFO "EFI v%u.%.02u by %s:",
459 efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); 459 efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
460 460
461 efi.mps = EFI_INVALID_TABLE_ADDR;
462 efi.acpi = EFI_INVALID_TABLE_ADDR;
463 efi.acpi20 = EFI_INVALID_TABLE_ADDR;
464 efi.smbios = EFI_INVALID_TABLE_ADDR;
465 efi.sal_systab = EFI_INVALID_TABLE_ADDR;
466 efi.boot_info = EFI_INVALID_TABLE_ADDR;
467 efi.hcdp = EFI_INVALID_TABLE_ADDR;
468 efi.uga = EFI_INVALID_TABLE_ADDR;
469
461 for (i = 0; i < (int) efi.systab->nr_tables; i++) { 470 for (i = 0; i < (int) efi.systab->nr_tables; i++) {
462 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { 471 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
463 efi.mps = __va(config_tables[i].table); 472 efi.mps = config_tables[i].table;
464 printk(" MPS=0x%lx", config_tables[i].table); 473 printk(" MPS=0x%lx", config_tables[i].table);
465 } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { 474 } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
466 efi.acpi20 = __va(config_tables[i].table); 475 efi.acpi20 = config_tables[i].table;
467 printk(" ACPI 2.0=0x%lx", config_tables[i].table); 476 printk(" ACPI 2.0=0x%lx", config_tables[i].table);
468 } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { 477 } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
469 efi.acpi = __va(config_tables[i].table); 478 efi.acpi = config_tables[i].table;
470 printk(" ACPI=0x%lx", config_tables[i].table); 479 printk(" ACPI=0x%lx", config_tables[i].table);
471 } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { 480 } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
472 efi.smbios = __va(config_tables[i].table); 481 efi.smbios = config_tables[i].table;
473 printk(" SMBIOS=0x%lx", config_tables[i].table); 482 printk(" SMBIOS=0x%lx", config_tables[i].table);
474 } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { 483 } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
475 efi.sal_systab = __va(config_tables[i].table); 484 efi.sal_systab = config_tables[i].table;
476 printk(" SALsystab=0x%lx", config_tables[i].table); 485 printk(" SALsystab=0x%lx", config_tables[i].table);
477 } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { 486 } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
478 efi.hcdp = __va(config_tables[i].table); 487 efi.hcdp = config_tables[i].table;
479 printk(" HCDP=0x%lx", config_tables[i].table); 488 printk(" HCDP=0x%lx", config_tables[i].table);
480 } 489 }
481 } 490 }
@@ -677,27 +686,34 @@ EXPORT_SYMBOL(efi_mem_attributes);
677/* 686/*
678 * Determines whether the memory at phys_addr supports the desired 687 * Determines whether the memory at phys_addr supports the desired
679 * attribute (WB, UC, etc). If this returns 1, the caller can safely 688 * attribute (WB, UC, etc). If this returns 1, the caller can safely
680 * access *size bytes at phys_addr with the specified attribute. 689 * access size bytes at phys_addr with the specified attribute.
681 */ 690 */
682static int 691int
683efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) 692efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr)
684{ 693{
694 unsigned long end = phys_addr + size;
685 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); 695 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
686 unsigned long md_end;
687 696
688 if (!md || (md->attribute & attr) != attr) 697 /*
698 * Some firmware doesn't report MMIO regions in the EFI memory
699 * map. The Intel BigSur (a.k.a. HP i2000) has this problem.
700 * On those platforms, we have to assume UC is valid everywhere.
701 */
702 if (!md || (md->attribute & attr) != attr) {
703 if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio())
704 return 1;
689 return 0; 705 return 0;
706 }
690 707
691 do { 708 do {
692 md_end = efi_md_end(md); 709 unsigned long md_end = efi_md_end(md);
693 if (phys_addr + *size <= md_end) 710
711 if (end <= md_end)
694 return 1; 712 return 1;
695 713
696 md = efi_memory_descriptor(md_end); 714 md = efi_memory_descriptor(md_end);
697 if (!md || (md->attribute & attr) != attr) { 715 if (!md || (md->attribute & attr) != attr)
698 *size = md_end - phys_addr; 716 return 0;
699 return 1;
700 }
701 } while (md); 717 } while (md);
702 return 0; 718 return 0;
703} 719}
@@ -708,7 +724,7 @@ efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
708 * control access size. 724 * control access size.
709 */ 725 */
710int 726int
711valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) 727valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
712{ 728{
713 return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); 729 return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
714} 730}
@@ -723,7 +739,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
723 * because that doesn't appear in the boot-time EFI memory map. 739 * because that doesn't appear in the boot-time EFI memory map.
724 */ 740 */
725int 741int
726valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) 742valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
727{ 743{
728 if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) 744 if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
729 return 1; 745 return 1;
@@ -731,14 +747,6 @@ valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
731 if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) 747 if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
732 return 1; 748 return 1;
733 749
734 /*
735 * Some firmware doesn't report MMIO regions in the EFI memory map.
736 * The Intel BigSur (a.k.a. HP i2000) has this problem. In this
737 * case, we can't use the EFI memory map to validate mmap requests.
738 */
739 if (!efi_memmap_has_mmio())
740 return 1;
741
742 return 0; 750 return 0;
743} 751}
744 752
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 50ae8c7d453d..789881ca83d4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -34,6 +34,7 @@
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/kdebug.h> 35#include <asm/kdebug.h>
36#include <asm/sections.h> 36#include <asm/sections.h>
37#include <asm/uaccess.h>
37 38
38extern void jprobe_inst_return(void); 39extern void jprobe_inst_return(void);
39 40
@@ -722,13 +723,50 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
722 struct kprobe *cur = kprobe_running(); 723 struct kprobe *cur = kprobe_running();
723 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 724 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
724 725
725 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
726 return 1;
727 726
728 if (kcb->kprobe_status & KPROBE_HIT_SS) { 727 switch(kcb->kprobe_status) {
729 resume_execution(cur, regs); 728 case KPROBE_HIT_SS:
730 reset_current_kprobe(); 729 case KPROBE_REENTER:
730 /*
731 * We are here because the instruction being single
732 * stepped caused a page fault. We reset the current
733 * kprobe and the instruction pointer points back to
734 * the probe address and allow the page fault handler
735 * to continue as a normal page fault.
736 */
737 regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
738 ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
739 if (kcb->kprobe_status == KPROBE_REENTER)
740 restore_previous_kprobe(kcb);
741 else
742 reset_current_kprobe();
731 preempt_enable_no_resched(); 743 preempt_enable_no_resched();
744 break;
745 case KPROBE_HIT_ACTIVE:
746 case KPROBE_HIT_SSDONE:
747 /*
748 * We increment the nmissed count for accounting,
749 * we can also use npre/npostfault count for accouting
750 * these specific fault cases.
751 */
752 kprobes_inc_nmissed_count(cur);
753
754 /*
755 * We come here because instructions in the pre/post
756 * handler caused the page_fault, this could happen
757 * if handler tries to access user space by
758 * copy_from_user(), get_user() etc. Let the
759 * user-specified handler try to fix it first.
760 */
761 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
762 return 1;
763
764 /*
765 * Let ia64_do_page_fault() fix it.
766 */
767 break;
768 default:
769 break;
732 } 770 }
733 771
734 return 0; 772 return 0;
@@ -740,6 +778,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
740 struct die_args *args = (struct die_args *)data; 778 struct die_args *args = (struct die_args *)data;
741 int ret = NOTIFY_DONE; 779 int ret = NOTIFY_DONE;
742 780
781 if (args->regs && user_mode(args->regs))
782 return ret;
783
743 switch(val) { 784 switch(val) {
744 case DIE_BREAK: 785 case DIE_BREAK:
745 /* err is break number from ia64_bad_break() */ 786 /* err is break number from ia64_bad_break() */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 87ff7fe33cfb..8963171788d5 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
69#include <linux/kernel.h> 69#include <linux/kernel.h>
70#include <linux/smp.h> 70#include <linux/smp.h>
71#include <linux/workqueue.h> 71#include <linux/workqueue.h>
72#include <linux/cpumask.h>
72 73
73#include <asm/delay.h> 74#include <asm/delay.h>
74#include <asm/kdebug.h> 75#include <asm/kdebug.h>
@@ -1505,7 +1506,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
1505 ti->cpu = cpu; 1506 ti->cpu = cpu;
1506 p->thread_info = ti; 1507 p->thread_info = ti;
1507 p->state = TASK_UNINTERRUPTIBLE; 1508 p->state = TASK_UNINTERRUPTIBLE;
1508 __set_bit(cpu, &p->cpus_allowed); 1509 cpu_set(cpu, p->cpus_allowed);
1509 INIT_LIST_HEAD(&p->tasks); 1510 INIT_LIST_HEAD(&p->tasks);
1510 p->parent = p->real_parent = p->group_leader = p; 1511 p->parent = p->real_parent = p->group_leader = p;
1511 INIT_LIST_HEAD(&p->children); 1512 INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 309d59658e5f..355d57970ba3 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -30,7 +30,6 @@
30#include <linux/efi.h> 30#include <linux/efi.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/kprobes.h>
34 33
35#include <asm/cpu.h> 34#include <asm/cpu.h>
36#include <asm/delay.h> 35#include <asm/delay.h>
@@ -738,13 +737,6 @@ void
738exit_thread (void) 737exit_thread (void)
739{ 738{
740 739
741 /*
742 * Remove function-return probe instances associated with this task
743 * and put them back on the free list. Do not insert an exit probe for
744 * this function, it will be disabled by kprobe_flush_task if you do.
745 */
746 kprobe_flush_task(current);
747
748 ia64_drop_fpu(current); 740 ia64_drop_fpu(current);
749#ifdef CONFIG_PERFMON 741#ifdef CONFIG_PERFMON
750 /* if needed, stop monitoring and flush state to perfmon context */ 742 /* if needed, stop monitoring and flush state to perfmon context */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index eb388e271b2b..e4dfda1eb7dd 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -37,6 +37,7 @@
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/tty.h> 39#include <linux/tty.h>
40#include <linux/dmi.h>
40#include <linux/serial.h> 41#include <linux/serial.h>
41#include <linux/serial_core.h> 42#include <linux/serial_core.h>
42#include <linux/efi.h> 43#include <linux/efi.h>
@@ -433,7 +434,7 @@ setup_arch (char **cmdline_p)
433 find_memory(); 434 find_memory();
434 435
435 /* process SAL system table: */ 436 /* process SAL system table: */
436 ia64_sal_init(efi.sal_systab); 437 ia64_sal_init(__va(efi.sal_systab));
437 438
438 ia64_setup_printk_clock(); 439 ia64_setup_printk_clock();
439 440
@@ -887,3 +888,10 @@ check_bugs (void)
887 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 888 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
888 (unsigned long) __end___mckinley_e9_bundles); 889 (unsigned long) __end___mckinley_e9_bundles);
889} 890}
891
892static int __init run_dmi_scan(void)
893{
894 dmi_scan_machine();
895 return 0;
896}
897core_initcall(run_dmi_scan);
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index dabd6c32641e..7c1ddc8ac443 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock;
30fpswa_interface_t *fpswa_interface; 30fpswa_interface_t *fpswa_interface;
31EXPORT_SYMBOL(fpswa_interface); 31EXPORT_SYMBOL(fpswa_interface);
32 32
33struct notifier_block *ia64die_chain; 33ATOMIC_NOTIFIER_HEAD(ia64die_chain);
34 34
35int 35int
36register_die_notifier(struct notifier_block *nb) 36register_die_notifier(struct notifier_block *nb)
37{ 37{
38 return notifier_chain_register(&ia64die_chain, nb); 38 return atomic_notifier_chain_register(&ia64die_chain, nb);
39} 39}
40EXPORT_SYMBOL_GPL(register_die_notifier); 40EXPORT_SYMBOL_GPL(register_die_notifier);
41 41
42int 42int
43unregister_die_notifier(struct notifier_block *nb) 43unregister_die_notifier(struct notifier_block *nb)
44{ 44{
45 return notifier_chain_unregister(&ia64die_chain, nb); 45 return atomic_notifier_chain_unregister(&ia64die_chain, nb);
46} 46}
47EXPORT_SYMBOL_GPL(unregister_die_notifier); 47EXPORT_SYMBOL_GPL(unregister_die_notifier);
48 48
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index ac64664a1807..d8536a2c22a9 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -6,7 +6,7 @@ obj-y := io.o
6 6
7lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ 7lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
8 __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ 8 __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
9 bitop.o checksum.o clear_page.o csum_partial_copy.o \ 9 checksum.o clear_page.o csum_partial_copy.o \
10 clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ 10 clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
11 flush.o ip_fast_csum.o do_csum.o \ 11 flush.o ip_fast_csum.o do_csum.o \
12 memset.o strlen.o 12 memset.o strlen.o
diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c
deleted file mode 100644
index 82e299c8464e..000000000000
--- a/arch/ia64/lib/bitop.c
+++ /dev/null
@@ -1,88 +0,0 @@
1#include <linux/compiler.h>
2#include <linux/types.h>
3#include <asm/intrinsics.h>
4#include <linux/module.h>
5#include <linux/bitops.h>
6
7/*
8 * Find next zero bit in a bitmap reasonably efficiently..
9 */
10
11int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset)
12{
13 unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
14 unsigned long result = offset & ~63UL;
15 unsigned long tmp;
16
17 if (offset >= size)
18 return size;
19 size -= result;
20 offset &= 63UL;
21 if (offset) {
22 tmp = *(p++);
23 tmp |= ~0UL >> (64-offset);
24 if (size < 64)
25 goto found_first;
26 if (~tmp)
27 goto found_middle;
28 size -= 64;
29 result += 64;
30 }
31 while (size & ~63UL) {
32 if (~(tmp = *(p++)))
33 goto found_middle;
34 result += 64;
35 size -= 64;
36 }
37 if (!size)
38 return result;
39 tmp = *p;
40found_first:
41 tmp |= ~0UL << size;
42 if (tmp == ~0UL) /* any bits zero? */
43 return result + size; /* nope */
44found_middle:
45 return result + ffz(tmp);
46}
47EXPORT_SYMBOL(__find_next_zero_bit);
48
49/*
50 * Find next bit in a bitmap reasonably efficiently..
51 */
52int __find_next_bit(const void *addr, unsigned long size, unsigned long offset)
53{
54 unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
55 unsigned long result = offset & ~63UL;
56 unsigned long tmp;
57
58 if (offset >= size)
59 return size;
60 size -= result;
61 offset &= 63UL;
62 if (offset) {
63 tmp = *(p++);
64 tmp &= ~0UL << offset;
65 if (size < 64)
66 goto found_first;
67 if (tmp)
68 goto found_middle;
69 size -= 64;
70 result += 64;
71 }
72 while (size & ~63UL) {
73 if ((tmp = *(p++)))
74 goto found_middle;
75 result += 64;
76 size -= 64;
77 }
78 if (!size)
79 return result;
80 tmp = *p;
81 found_first:
82 tmp &= ~0UL >> (64-size);
83 if (tmp == 0UL) /* Are any bits set? */
84 return result + size; /* Nope. */
85 found_middle:
86 return result + __ffs(tmp);
87}
88EXPORT_SYMBOL(__find_next_bit);
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index d78d20f0a0f0..bb0a01a81878 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the ia64-specific parts of the memory manager. 2# Makefile for the ia64-specific parts of the memory manager.
3# 3#
4 4
5obj-y := init.o fault.o tlb.o extable.o 5obj-y := init.o fault.o tlb.o extable.o ioremap.o
6 6
7obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 7obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
8obj-$(CONFIG_NUMA) += numa.o 8obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 2f5e44862e91..ec9eeb89975d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -379,31 +379,6 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
379} 379}
380 380
381/** 381/**
382 * pgdat_insert - insert the pgdat into global pgdat_list
383 * @pgdat: the pgdat for a node.
384 */
385static void __init pgdat_insert(pg_data_t *pgdat)
386{
387 pg_data_t *prev = NULL, *next;
388
389 for_each_pgdat(next)
390 if (pgdat->node_id < next->node_id)
391 break;
392 else
393 prev = next;
394
395 if (prev) {
396 prev->pgdat_next = pgdat;
397 pgdat->pgdat_next = next;
398 } else {
399 pgdat->pgdat_next = pgdat_list;
400 pgdat_list = pgdat;
401 }
402
403 return;
404}
405
406/**
407 * memory_less_nodes - allocate and initialize CPU only nodes pernode 382 * memory_less_nodes - allocate and initialize CPU only nodes pernode
408 * information. 383 * information.
409 */ 384 */
@@ -560,7 +535,7 @@ void show_mem(void)
560 printk("Mem-info:\n"); 535 printk("Mem-info:\n");
561 show_free_areas(); 536 show_free_areas();
562 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 537 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
563 for_each_pgdat(pgdat) { 538 for_each_online_pgdat(pgdat) {
564 unsigned long present; 539 unsigned long present;
565 unsigned long flags; 540 unsigned long flags;
566 int shared = 0, cached = 0, reserved = 0; 541 int shared = 0, cached = 0, reserved = 0;
@@ -745,11 +720,5 @@ void __init paging_init(void)
745 pfn_offset, zholes_size); 720 pfn_offset, zholes_size);
746 } 721 }
747 722
748 /*
749 * Make memory less nodes become a member of the known nodes.
750 */
751 for_each_node_mask(node, memory_less_mask)
752 pgdat_insert(mem_data[node].pgdat);
753
754 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); 723 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
755} 724}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff4f31fcd330..2ef1151cde90 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -600,7 +600,7 @@ mem_init (void)
600 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); 600 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
601 kclist_add(&kcore_kernel, _stext, _end - _stext); 601 kclist_add(&kcore_kernel, _stext, _end - _stext);
602 602
603 for_each_pgdat(pgdat) 603 for_each_online_pgdat(pgdat)
604 if (pgdat->bdata->node_bootmem_map) 604 if (pgdat->bdata->node_bootmem_map)
605 totalram_pages += free_all_bootmem_node(pgdat); 605 totalram_pages += free_all_bootmem_node(pgdat);
606 606
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
new file mode 100644
index 000000000000..62328621f99c
--- /dev/null
+++ b/arch/ia64/mm/ioremap.c
@@ -0,0 +1,43 @@
1/*
2 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
3 * Bjorn Helgaas <bjorn.helgaas@hp.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/compiler.h>
11#include <linux/module.h>
12#include <linux/efi.h>
13#include <asm/io.h>
14
15static inline void __iomem *
16__ioremap (unsigned long offset, unsigned long size)
17{
18 return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
19}
20
21void __iomem *
22ioremap (unsigned long offset, unsigned long size)
23{
24 if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
25 return __ioremap(offset, size);
26
27 if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
28 return phys_to_virt(offset);
29
30 /*
31 * Someday this should check ACPI resources so we
32 * can do the right thing for hot-plugged regions.
33 */
34 return __ioremap(offset, size);
35}
36EXPORT_SYMBOL(ioremap);
37
38void __iomem *
39ioremap_nocache (unsigned long offset, unsigned long size)
40{
41 return __ioremap(offset, size);
42}
43EXPORT_SYMBOL(ioremap_nocache);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 8b6d5c844708..30988dfbddff 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -327,10 +327,11 @@ sn_scan_pcdp(void)
327 struct pcdp_interface_pci if_pci; 327 struct pcdp_interface_pci if_pci;
328 extern struct efi efi; 328 extern struct efi efi;
329 329
330 pcdp = efi.hcdp; 330 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
331 if (! pcdp)
332 return; /* no hcdp/pcdp table */ 331 return; /* no hcdp/pcdp table */
333 332
333 pcdp = __va(efi.hcdp);
334
334 if (pcdp->rev < 3) 335 if (pcdp->rev < 3)
335 return; /* only support PCDP (rev >= 3) */ 336 return; /* only support PCDP (rev >= 3) */
336 337
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index c686d9c12f7b..5100261310f7 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -93,19 +93,22 @@ static int coherence_id_open(struct inode *inode, struct file *file)
93static struct proc_dir_entry 93static struct proc_dir_entry
94*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent, 94*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent,
95 int (*openfunc)(struct inode *, struct file *), 95 int (*openfunc)(struct inode *, struct file *),
96 int (*releasefunc)(struct inode *, struct file *)) 96 int (*releasefunc)(struct inode *, struct file *),
97 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *))
97{ 98{
98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); 99 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
99 100
100 if (e) { 101 if (e) {
101 e->proc_fops = (struct file_operations *)kmalloc( 102 struct file_operations *f;
102 sizeof(struct file_operations), GFP_KERNEL); 103
103 if (e->proc_fops) { 104 f = kzalloc(sizeof(*f), GFP_KERNEL);
104 memset(e->proc_fops, 0, sizeof(struct file_operations)); 105 if (f) {
105 e->proc_fops->open = openfunc; 106 f->open = openfunc;
106 e->proc_fops->read = seq_read; 107 f->read = seq_read;
107 e->proc_fops->llseek = seq_lseek; 108 f->llseek = seq_lseek;
108 e->proc_fops->release = releasefunc; 109 f->release = releasefunc;
110 f->write = write;
111 e->proc_fops = f;
109 } 112 }
110 } 113 }
111 114
@@ -119,31 +122,29 @@ extern int sn_topology_release(struct inode *, struct file *);
119void register_sn_procfs(void) 122void register_sn_procfs(void)
120{ 123{
121 static struct proc_dir_entry *sgi_proc_dir = NULL; 124 static struct proc_dir_entry *sgi_proc_dir = NULL;
122 struct proc_dir_entry *e;
123 125
124 BUG_ON(sgi_proc_dir != NULL); 126 BUG_ON(sgi_proc_dir != NULL);
125 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) 127 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
126 return; 128 return;
127 129
128 sn_procfs_create_entry("partition_id", sgi_proc_dir, 130 sn_procfs_create_entry("partition_id", sgi_proc_dir,
129 partition_id_open, single_release); 131 partition_id_open, single_release, NULL);
130 132
131 sn_procfs_create_entry("system_serial_number", sgi_proc_dir, 133 sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
132 system_serial_number_open, single_release); 134 system_serial_number_open, single_release, NULL);
133 135
134 sn_procfs_create_entry("licenseID", sgi_proc_dir, 136 sn_procfs_create_entry("licenseID", sgi_proc_dir,
135 licenseID_open, single_release); 137 licenseID_open, single_release, NULL);
136 138
137 e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, 139 sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
138 sn_force_interrupt_open, single_release); 140 sn_force_interrupt_open, single_release,
139 if (e) 141 sn_force_interrupt_write_proc);
140 e->proc_fops->write = sn_force_interrupt_write_proc;
141 142
142 sn_procfs_create_entry("coherence_id", sgi_proc_dir, 143 sn_procfs_create_entry("coherence_id", sgi_proc_dir,
143 coherence_id_open, single_release); 144 coherence_id_open, single_release, NULL);
144 145
145 sn_procfs_create_entry("sn_topology", sgi_proc_dir, 146 sn_procfs_create_entry("sn_topology", sgi_proc_dir,
146 sn_topology_open, sn_topology_release); 147 sn_topology_open, sn_topology_release, NULL);
147} 148}
148 149
149#endif /* CONFIG_PROC_FS */ 150#endif /* CONFIG_PROC_FS */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index a3dcc3fab4b7..05c864c6c2d9 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -214,6 +214,14 @@ config RWSEM_XCHGADD_ALGORITHM
214 bool 214 bool
215 default n 215 default n
216 216
217config GENERIC_FIND_NEXT_BIT
218 bool
219 default y
220
221config GENERIC_HWEIGHT
222 bool
223 default y
224
217config GENERIC_CALIBRATE_DELAY 225config GENERIC_CALIBRATE_DELAY
218 bool 226 bool
219 default y 227 default y
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index d742037a7ccb..0d78942b4c76 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -24,6 +24,7 @@
24#include <linux/tty.h> 24#include <linux/tty.h>
25#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/nodemask.h> 26#include <linux/nodemask.h>
27#include <linux/pfn.h>
27 28
28#include <asm/processor.h> 29#include <asm/processor.h>
29#include <asm/pgtable.h> 30#include <asm/pgtable.h>
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 08e727955555..cf610a7c5ff0 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -13,6 +13,7 @@
13#include <linux/initrd.h> 13#include <linux/initrd.h>
14#include <linux/nodemask.h> 14#include <linux/nodemask.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/pfn.h>
16 17
17#include <asm/setup.h> 18#include <asm/setup.h>
18 19
@@ -137,12 +138,6 @@ unsigned long __init zone_sizes_init(void)
137 int nid, i; 138 int nid, i;
138 mem_prof_t *mp; 139 mem_prof_t *mp;
139 140
140 pgdat_list = NULL;
141 for (nid = num_online_nodes() - 1 ; nid >= 0 ; nid--) {
142 NODE_DATA(nid)->pgdat_next = pgdat_list;
143 pgdat_list = NODE_DATA(nid);
144 }
145
146 for_each_online_node(nid) { 141 for_each_online_node(nid) {
147 mp = &mem_prof[nid]; 142 mp = &mem_prof[nid];
148 for (i = 0 ; i < MAX_NR_ZONES ; i++) { 143 for (i = 0 ; i < MAX_NR_ZONES ; i++) {
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index c9e7dad860b7..b71348fec1f4 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -18,6 +18,7 @@
18#include <linux/highmem.h> 18#include <linux/highmem.h>
19#include <linux/bitops.h> 19#include <linux/bitops.h>
20#include <linux/nodemask.h> 20#include <linux/nodemask.h>
21#include <linux/pfn.h>
21#include <asm/types.h> 22#include <asm/types.h>
22#include <asm/processor.h> 23#include <asm/processor.h>
23#include <asm/page.h> 24#include <asm/page.h>
@@ -47,7 +48,7 @@ void show_mem(void)
47 printk("Mem-info:\n"); 48 printk("Mem-info:\n");
48 show_free_areas(); 49 show_free_areas();
49 printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); 50 printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
50 for_each_pgdat(pgdat) { 51 for_each_online_pgdat(pgdat) {
51 unsigned long flags; 52 unsigned long flags;
52 pgdat_resize_lock(pgdat, &flags); 53 pgdat_resize_lock(pgdat, &flags);
53 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 54 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 8849439e88dd..805b81fedf80 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -17,6 +17,10 @@ config RWSEM_GENERIC_SPINLOCK
17config RWSEM_XCHGADD_ALGORITHM 17config RWSEM_XCHGADD_ALGORITHM
18 bool 18 bool
19 19
20config GENERIC_HWEIGHT
21 bool
22 default y
23
20config GENERIC_CALIBRATE_DELAY 24config GENERIC_CALIBRATE_DELAY
21 bool 25 bool
22 default y 26 default y
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 3ffc84f9c291..c90cb5fcc8ef 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -142,7 +142,7 @@ void __init config_bvme6000(void)
142 /* Now do the PIT configuration */ 142 /* Now do the PIT configuration */
143 143
144 pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */ 144 pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */
145 pit->psrr = 0x18; /* PIACK and PIRQ fucntions enabled */ 145 pit->psrr = 0x18; /* PIACK and PIRQ functions enabled */
146 pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */ 146 pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */
147 pit->padr = 0x00; /* Just to be tidy! */ 147 pit->padr = 0x00; /* Just to be tidy! */
148 pit->paddr = 0x00; /* All inputs for now (safest) */ 148 pit->paddr = 0x00; /* All inputs for now (safest) */
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index a69fe3048edc..b0e4c084df8a 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -17,6 +17,7 @@
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ 18#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
19#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
20#include <linux/bcd.h>
20#include <asm/mvme16xhw.h> 21#include <asm/mvme16xhw.h>
21 22
22#include <asm/io.h> 23#include <asm/io.h>
@@ -31,9 +32,6 @@
31 * ioctls. 32 * ioctls.
32 */ 33 */
33 34
34#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
35#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
36
37static const unsigned char days_in_mo[] = 35static const unsigned char days_in_mo[] =
38{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; 36{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
39 37
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index e50858dbc237..3cde6822ead1 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -25,6 +25,14 @@ config RWSEM_XCHGADD_ALGORITHM
25 bool 25 bool
26 default n 26 default n
27 27
28config GENERIC_FIND_NEXT_BIT
29 bool
30 default y
31
32config GENERIC_HWEIGHT
33 bool
34 default y
35
28config GENERIC_CALIBRATE_DELAY 36config GENERIC_CALIBRATE_DELAY
29 bool 37 bool
30 default y 38 default y
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac2012f033d6..5080ea1799a4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -801,6 +801,14 @@ config RWSEM_GENERIC_SPINLOCK
801config RWSEM_XCHGADD_ALGORITHM 801config RWSEM_XCHGADD_ALGORITHM
802 bool 802 bool
803 803
804config GENERIC_FIND_NEXT_BIT
805 bool
806 default y
807
808config GENERIC_HWEIGHT
809 bool
810 default y
811
804config GENERIC_CALIBRATE_DELAY 812config GENERIC_CALIBRATE_DELAY
805 bool 813 bool
806 default y 814 default y
diff --git a/arch/mips/ddb5xxx/common/rtc_ds1386.c b/arch/mips/ddb5xxx/common/rtc_ds1386.c
index 995896ac0e39..5dc34daa7150 100644
--- a/arch/mips/ddb5xxx/common/rtc_ds1386.c
+++ b/arch/mips/ddb5xxx/common/rtc_ds1386.c
@@ -165,6 +165,6 @@ rtc_ds1386_init(unsigned long base)
165 WRITE_RTC(0xB, byte); 165 WRITE_RTC(0xB, byte);
166 166
167 /* set the function pointers */ 167 /* set the function pointers */
168 rtc_get_time = rtc_ds1386_get_time; 168 rtc_mips_get_time = rtc_ds1386_get_time;
169 rtc_set_time = rtc_ds1386_set_time; 169 rtc_mips_set_time = rtc_ds1386_set_time;
170} 170}
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 174822344131..74cb055d4bf6 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -36,41 +36,13 @@
36#include <asm/dec/ioasic_addrs.h> 36#include <asm/dec/ioasic_addrs.h>
37#include <asm/dec/machtype.h> 37#include <asm/dec/machtype.h>
38 38
39
40/*
41 * Returns true if a clock update is in progress
42 */
43static inline unsigned char dec_rtc_is_updating(void)
44{
45 unsigned char uip;
46 unsigned long flags;
47
48 spin_lock_irqsave(&rtc_lock, flags);
49 uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
50 spin_unlock_irqrestore(&rtc_lock, flags);
51 return uip;
52}
53
54static unsigned long dec_rtc_get_time(void) 39static unsigned long dec_rtc_get_time(void)
55{ 40{
56 unsigned int year, mon, day, hour, min, sec, real_year; 41 unsigned int year, mon, day, hour, min, sec, real_year;
57 int i;
58 unsigned long flags; 42 unsigned long flags;
59 43
60 /* The Linux interpretation of the DS1287 clock register contents:
61 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
62 * RTC registers show the second which has precisely just started.
63 * Let's hope other operating systems interpret the RTC the same way.
64 */
65 /* read RTC exactly on falling edge of update flag */
66 for (i = 0; i < 1000000; i++) /* may take up to 1 second... */
67 if (dec_rtc_is_updating())
68 break;
69 for (i = 0; i < 1000000; i++) /* must try at least 2.228 ms */
70 if (!dec_rtc_is_updating())
71 break;
72 spin_lock_irqsave(&rtc_lock, flags); 44 spin_lock_irqsave(&rtc_lock, flags);
73 /* Isn't this overkill? UIP above should guarantee consistency */ 45
74 do { 46 do {
75 sec = CMOS_READ(RTC_SECONDS); 47 sec = CMOS_READ(RTC_SECONDS);
76 min = CMOS_READ(RTC_MINUTES); 48 min = CMOS_READ(RTC_MINUTES);
@@ -78,7 +50,16 @@ static unsigned long dec_rtc_get_time(void)
78 day = CMOS_READ(RTC_DAY_OF_MONTH); 50 day = CMOS_READ(RTC_DAY_OF_MONTH);
79 mon = CMOS_READ(RTC_MONTH); 51 mon = CMOS_READ(RTC_MONTH);
80 year = CMOS_READ(RTC_YEAR); 52 year = CMOS_READ(RTC_YEAR);
53 /*
54 * The PROM will reset the year to either '72 or '73.
55 * Therefore we store the real year separately, in one
56 * of unused BBU RAM locations.
57 */
58 real_year = CMOS_READ(RTC_DEC_YEAR);
81 } while (sec != CMOS_READ(RTC_SECONDS)); 59 } while (sec != CMOS_READ(RTC_SECONDS));
60
61 spin_unlock_irqrestore(&rtc_lock, flags);
62
82 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 63 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
83 sec = BCD2BIN(sec); 64 sec = BCD2BIN(sec);
84 min = BCD2BIN(min); 65 min = BCD2BIN(min);
@@ -87,13 +68,7 @@ static unsigned long dec_rtc_get_time(void)
87 mon = BCD2BIN(mon); 68 mon = BCD2BIN(mon);
88 year = BCD2BIN(year); 69 year = BCD2BIN(year);
89 } 70 }
90 /* 71
91 * The PROM will reset the year to either '72 or '73.
92 * Therefore we store the real year separately, in one
93 * of unused BBU RAM locations.
94 */
95 real_year = CMOS_READ(RTC_DEC_YEAR);
96 spin_unlock_irqrestore(&rtc_lock, flags);
97 year += real_year - 72 + 2000; 72 year += real_year - 72 + 2000;
98 73
99 return mktime(year, mon, day, hour, min, sec); 74 return mktime(year, mon, day, hour, min, sec);
@@ -193,8 +168,8 @@ static void dec_ioasic_hpt_init(unsigned int count)
193 168
194void __init dec_time_init(void) 169void __init dec_time_init(void)
195{ 170{
196 rtc_get_time = dec_rtc_get_time; 171 rtc_mips_get_time = dec_rtc_get_time;
197 rtc_set_mmss = dec_rtc_set_mmss; 172 rtc_mips_set_mmss = dec_rtc_set_mmss;
198 173
199 mips_timer_state = dec_timer_state; 174 mips_timer_state = dec_timer_state;
200 mips_timer_ack = dec_timer_ack; 175 mips_timer_ack = dec_timer_ack;
diff --git a/arch/mips/ite-boards/generic/time.c b/arch/mips/ite-boards/generic/time.c
index f5d67ee21ac6..b79817bb6cce 100644
--- a/arch/mips/ite-boards/generic/time.c
+++ b/arch/mips/ite-boards/generic/time.c
@@ -227,8 +227,8 @@ void __init it8172_time_init(void)
227 227
228 local_irq_restore(flags); 228 local_irq_restore(flags);
229 229
230 rtc_get_time = it8172_rtc_get_time; 230 rtc_mips_get_time = it8172_rtc_get_time;
231 rtc_set_time = it8172_rtc_set_time; 231 rtc_mips_set_time = it8172_rtc_set_time;
232} 232}
233 233
234#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) 234#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
diff --git a/arch/mips/ite-boards/ivr/init.c b/arch/mips/ite-boards/ivr/init.c
index ea4e1935fec5..b774db035b31 100644
--- a/arch/mips/ite-boards/ivr/init.c
+++ b/arch/mips/ite-boards/ivr/init.c
@@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void);
45extern unsigned long __init prom_get_memsize(void); 45extern unsigned long __init prom_get_memsize(void);
46extern void __init it8172_init_ram_resource(unsigned long memsize); 46extern void __init it8172_init_ram_resource(unsigned long memsize);
47 47
48#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
49#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
50
51const char *get_system_type(void) 48const char *get_system_type(void)
52{ 49{
53 return "Globespan IVR"; 50 return "Globespan IVR";
diff --git a/arch/mips/ite-boards/qed-4n-s01b/init.c b/arch/mips/ite-boards/qed-4n-s01b/init.c
index 56dca7e0c21d..e8ec8be66a80 100644
--- a/arch/mips/ite-boards/qed-4n-s01b/init.c
+++ b/arch/mips/ite-boards/qed-4n-s01b/init.c
@@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void);
45extern unsigned long __init prom_get_memsize(void); 45extern unsigned long __init prom_get_memsize(void);
46extern void __init it8172_init_ram_resource(unsigned long memsize); 46extern void __init it8172_init_ram_resource(unsigned long memsize);
47 47
48#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
49#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
50
51const char *get_system_type(void) 48const char *get_system_type(void)
52{ 49{
53 return "ITE QED-4N-S01B"; 50 return "ITE QED-4N-S01B";
diff --git a/arch/mips/jmr3927/common/rtc_ds1742.c b/arch/mips/jmr3927/common/rtc_ds1742.c
index 9a8bff153d80..a6bd3f4d3049 100644
--- a/arch/mips/jmr3927/common/rtc_ds1742.c
+++ b/arch/mips/jmr3927/common/rtc_ds1742.c
@@ -159,8 +159,8 @@ rtc_ds1742_init(unsigned long base)
159 db_assert((rtc_base & 0xe0000000) == KSEG1); 159 db_assert((rtc_base & 0xe0000000) == KSEG1);
160 160
161 /* set the function pointers */ 161 /* set the function pointers */
162 rtc_get_time = rtc_ds1742_get_time; 162 rtc_mips_get_time = rtc_ds1742_get_time;
163 rtc_set_time = rtc_ds1742_set_time; 163 rtc_mips_set_time = rtc_ds1742_set_time;
164 164
165 /* clear oscillator stop bit */ 165 /* clear oscillator stop bit */
166 CMOS_WRITE(RTC_READ, RTC_CONTROL); 166 CMOS_WRITE(RTC_READ, RTC_CONTROL);
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 013bc93688e8..3f40c37a9ee6 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -30,7 +30,6 @@
30#include <linux/utime.h> 30#include <linux/utime.h>
31#include <linux/utsname.h> 31#include <linux/utsname.h>
32#include <linux/personality.h> 32#include <linux/personality.h>
33#include <linux/timex.h>
34#include <linux/dnotify.h> 33#include <linux/dnotify.h>
35#include <linux/module.h> 34#include <linux/module.h>
36#include <linux/binfmts.h> 35#include <linux/binfmts.h>
@@ -1157,79 +1156,6 @@ out:
1157 return err; 1156 return err;
1158} 1157}
1159 1158
1160/* Handle adjtimex compatibility. */
1161
1162struct timex32 {
1163 u32 modes;
1164 s32 offset, freq, maxerror, esterror;
1165 s32 status, constant, precision, tolerance;
1166 struct compat_timeval time;
1167 s32 tick;
1168 s32 ppsfreq, jitter, shift, stabil;
1169 s32 jitcnt, calcnt, errcnt, stbcnt;
1170 s32 :32; s32 :32; s32 :32; s32 :32;
1171 s32 :32; s32 :32; s32 :32; s32 :32;
1172 s32 :32; s32 :32; s32 :32; s32 :32;
1173};
1174
1175extern int do_adjtimex(struct timex *);
1176
1177asmlinkage int sys32_adjtimex(struct timex32 __user *utp)
1178{
1179 struct timex txc;
1180 int ret;
1181
1182 memset(&txc, 0, sizeof(struct timex));
1183
1184 if (get_user(txc.modes, &utp->modes) ||
1185 __get_user(txc.offset, &utp->offset) ||
1186 __get_user(txc.freq, &utp->freq) ||
1187 __get_user(txc.maxerror, &utp->maxerror) ||
1188 __get_user(txc.esterror, &utp->esterror) ||
1189 __get_user(txc.status, &utp->status) ||
1190 __get_user(txc.constant, &utp->constant) ||
1191 __get_user(txc.precision, &utp->precision) ||
1192 __get_user(txc.tolerance, &utp->tolerance) ||
1193 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
1194 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
1195 __get_user(txc.tick, &utp->tick) ||
1196 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
1197 __get_user(txc.jitter, &utp->jitter) ||
1198 __get_user(txc.shift, &utp->shift) ||
1199 __get_user(txc.stabil, &utp->stabil) ||
1200 __get_user(txc.jitcnt, &utp->jitcnt) ||
1201 __get_user(txc.calcnt, &utp->calcnt) ||
1202 __get_user(txc.errcnt, &utp->errcnt) ||
1203 __get_user(txc.stbcnt, &utp->stbcnt))
1204 return -EFAULT;
1205
1206 ret = do_adjtimex(&txc);
1207
1208 if (put_user(txc.modes, &utp->modes) ||
1209 __put_user(txc.offset, &utp->offset) ||
1210 __put_user(txc.freq, &utp->freq) ||
1211 __put_user(txc.maxerror, &utp->maxerror) ||
1212 __put_user(txc.esterror, &utp->esterror) ||
1213 __put_user(txc.status, &utp->status) ||
1214 __put_user(txc.constant, &utp->constant) ||
1215 __put_user(txc.precision, &utp->precision) ||
1216 __put_user(txc.tolerance, &utp->tolerance) ||
1217 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
1218 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
1219 __put_user(txc.tick, &utp->tick) ||
1220 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
1221 __put_user(txc.jitter, &utp->jitter) ||
1222 __put_user(txc.shift, &utp->shift) ||
1223 __put_user(txc.stabil, &utp->stabil) ||
1224 __put_user(txc.jitcnt, &utp->jitcnt) ||
1225 __put_user(txc.calcnt, &utp->calcnt) ||
1226 __put_user(txc.errcnt, &utp->errcnt) ||
1227 __put_user(txc.stbcnt, &utp->stbcnt))
1228 ret = -EFAULT;
1229
1230 return ret;
1231}
1232
1233asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, 1159asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
1234 s32 count) 1160 s32 count)
1235{ 1161{
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 02c8267e45e7..05a2c0567dae 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -273,7 +273,7 @@ EXPORT(sysn32_call_table)
273 PTR sys_pivot_root 273 PTR sys_pivot_root
274 PTR sys32_sysctl 274 PTR sys32_sysctl
275 PTR sys_prctl 275 PTR sys_prctl
276 PTR sys32_adjtimex 276 PTR compat_sys_adjtimex
277 PTR compat_sys_setrlimit /* 6155 */ 277 PTR compat_sys_setrlimit /* 6155 */
278 PTR sys_chroot 278 PTR sys_chroot
279 PTR sys_sync 279 PTR sys_sync
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 797e0d874889..19c4ca481b02 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -328,7 +328,7 @@ sys_call_table:
328 PTR sys_setdomainname 328 PTR sys_setdomainname
329 PTR sys32_newuname 329 PTR sys32_newuname
330 PTR sys_ni_syscall /* sys_modify_ldt */ 330 PTR sys_ni_syscall /* sys_modify_ldt */
331 PTR sys32_adjtimex 331 PTR compat_sys_adjtimex
332 PTR sys_mprotect /* 4125 */ 332 PTR sys_mprotect /* 4125 */
333 PTR compat_sys_sigprocmask 333 PTR compat_sys_sigprocmask
334 PTR sys_ni_syscall /* was creat_module */ 334 PTR sys_ni_syscall /* was creat_module */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0cb3b6097e0e..dcbfd27071f0 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -34,6 +34,7 @@
34#include <linux/highmem.h> 34#include <linux/highmem.h>
35#include <linux/console.h> 35#include <linux/console.h>
36#include <linux/mmzone.h> 36#include <linux/mmzone.h>
37#include <linux/pfn.h>
37 38
38#include <asm/addrspace.h> 39#include <asm/addrspace.h>
39#include <asm/bootinfo.h> 40#include <asm/bootinfo.h>
@@ -257,10 +258,6 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en
257 return 0; 258 return 0;
258} 259}
259 260
260#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
261#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
262#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
263
264#define MAXMEM HIGHMEM_START 261#define MAXMEM HIGHMEM_START
265#define MAXMEM_PFN PFN_DOWN(MAXMEM) 262#define MAXMEM_PFN PFN_DOWN(MAXMEM)
266 263
@@ -493,10 +490,6 @@ static inline void resource_init(void)
493 } 490 }
494} 491}
495 492
496#undef PFN_UP
497#undef PFN_DOWN
498#undef PFN_PHYS
499
500#undef MAXMEM 493#undef MAXMEM
501#undef MAXMEM_PFN 494#undef MAXMEM_PFN
502 495
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 51273b7297a7..5e51a2d8f3f0 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -65,9 +65,9 @@ static int null_rtc_set_time(unsigned long sec)
65 return 0; 65 return 0;
66} 66}
67 67
68unsigned long (*rtc_get_time)(void) = null_rtc_get_time; 68unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
69int (*rtc_set_time)(unsigned long) = null_rtc_set_time; 69int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
70int (*rtc_set_mmss)(unsigned long); 70int (*rtc_mips_set_mmss)(unsigned long);
71 71
72 72
73/* usecs per counter cycle, shifted to left by 32 bits */ 73/* usecs per counter cycle, shifted to left by 32 bits */
@@ -440,14 +440,14 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
440 440
441 /* 441 /*
442 * If we have an externally synchronized Linux clock, then update 442 * If we have an externally synchronized Linux clock, then update
443 * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be 443 * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
444 * called as close as possible to 500 ms before the new second starts. 444 * called as close as possible to 500 ms before the new second starts.
445 */ 445 */
446 if (ntp_synced() && 446 if (ntp_synced() &&
447 xtime.tv_sec > last_rtc_update + 660 && 447 xtime.tv_sec > last_rtc_update + 660 &&
448 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 448 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
449 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 449 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
450 if (rtc_set_mmss(xtime.tv_sec) == 0) { 450 if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
451 last_rtc_update = xtime.tv_sec; 451 last_rtc_update = xtime.tv_sec;
452 } else { 452 } else {
453 /* do it again in 60 s */ 453 /* do it again in 60 s */
@@ -565,7 +565,7 @@ asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
565 * b) (optional) calibrate and set the mips_hpt_frequency 565 * b) (optional) calibrate and set the mips_hpt_frequency
566 * (only needed if you intended to use fixed_rate_gettimeoffset 566 * (only needed if you intended to use fixed_rate_gettimeoffset
567 * or use cpu counter as timer interrupt source) 567 * or use cpu counter as timer interrupt source)
568 * 2) setup xtime based on rtc_get_time(). 568 * 2) setup xtime based on rtc_mips_get_time().
569 * 3) choose a appropriate gettimeoffset routine. 569 * 3) choose a appropriate gettimeoffset routine.
570 * 4) calculate a couple of cached variables for later usage 570 * 4) calculate a couple of cached variables for later usage
571 * 5) board_timer_setup() - 571 * 5) board_timer_setup() -
@@ -633,10 +633,10 @@ void __init time_init(void)
633 if (board_time_init) 633 if (board_time_init)
634 board_time_init(); 634 board_time_init();
635 635
636 if (!rtc_set_mmss) 636 if (!rtc_mips_set_mmss)
637 rtc_set_mmss = rtc_set_time; 637 rtc_mips_set_mmss = rtc_mips_set_time;
638 638
639 xtime.tv_sec = rtc_get_time(); 639 xtime.tv_sec = rtc_mips_get_time();
640 xtime.tv_nsec = 0; 640 xtime.tv_nsec = 0;
641 641
642 set_normalized_timespec(&wall_to_monotonic, 642 set_normalized_timespec(&wall_to_monotonic,
@@ -772,8 +772,8 @@ void to_tm(unsigned long tim, struct rtc_time *tm)
772 772
773EXPORT_SYMBOL(rtc_lock); 773EXPORT_SYMBOL(rtc_lock);
774EXPORT_SYMBOL(to_tm); 774EXPORT_SYMBOL(to_tm);
775EXPORT_SYMBOL(rtc_set_time); 775EXPORT_SYMBOL(rtc_mips_set_time);
776EXPORT_SYMBOL(rtc_get_time); 776EXPORT_SYMBOL(rtc_mips_get_time);
777 777
778unsigned long long sched_clock(void) 778unsigned long long sched_clock(void)
779{ 779{
diff --git a/arch/mips/lasat/setup.c b/arch/mips/lasat/setup.c
index 83eb08b7a072..bb70a8240e61 100644
--- a/arch/mips/lasat/setup.c
+++ b/arch/mips/lasat/setup.c
@@ -165,7 +165,8 @@ void __init plat_setup(void)
165 165
166 /* Set up panic notifier */ 166 /* Set up panic notifier */
167 for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++) 167 for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++)
168 notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]); 168 atomic_notifier_chain_register(&panic_notifier_list,
169 &lasat_panic_block[i]);
169 170
170 lasat_reboot_setup(); 171 lasat_reboot_setup();
171 172
@@ -174,8 +175,8 @@ void __init plat_setup(void)
174 175
175#ifdef CONFIG_DS1603 176#ifdef CONFIG_DS1603
176 ds1603 = &ds_defs[mips_machtype]; 177 ds1603 = &ds_defs[mips_machtype];
177 rtc_get_time = ds1603_read; 178 rtc_mips_get_time = ds1603_read;
178 rtc_set_time = ds1603_set; 179 rtc_mips_set_time = ds1603_set;
179#endif 180#endif
180 181
181#ifdef DYNAMIC_SERIAL_INIT 182#ifdef DYNAMIC_SERIAL_INIT
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 8ff43a1c1e99..e3d5aaa90f0d 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -30,12 +30,13 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/net.h> 31#include <linux/net.h>
32#include <linux/inet.h> 32#include <linux/inet.h>
33#include <linux/mutex.h>
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34 35
35#include "sysctl.h" 36#include "sysctl.h"
36#include "ds1603.h" 37#include "ds1603.h"
37 38
38static DECLARE_MUTEX(lasat_info_sem); 39static DEFINE_MUTEX(lasat_info_mutex);
39 40
40/* Strategy function to write EEPROM after changing string entry */ 41/* Strategy function to write EEPROM after changing string entry */
41int sysctl_lasatstring(ctl_table *table, int *name, int nlen, 42int sysctl_lasatstring(ctl_table *table, int *name, int nlen,
@@ -43,17 +44,17 @@ int sysctl_lasatstring(ctl_table *table, int *name, int nlen,
43 void *newval, size_t newlen, void **context) 44 void *newval, size_t newlen, void **context)
44{ 45{
45 int r; 46 int r;
46 down(&lasat_info_sem); 47 mutex_lock(&lasat_info_mutex);
47 r = sysctl_string(table, name, 48 r = sysctl_string(table, name,
48 nlen, oldval, oldlenp, newval, newlen, context); 49 nlen, oldval, oldlenp, newval, newlen, context);
49 if (r < 0) { 50 if (r < 0) {
50 up(&lasat_info_sem); 51 mutex_unlock(&lasat_info_mutex);
51 return r; 52 return r;
52 } 53 }
53 if (newval && newlen) { 54 if (newval && newlen) {
54 lasat_write_eeprom_info(); 55 lasat_write_eeprom_info();
55 } 56 }
56 up(&lasat_info_sem); 57 mutex_unlock(&lasat_info_mutex);
57 return 1; 58 return 1;
58} 59}
59 60
@@ -63,14 +64,14 @@ int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
63 void *buffer, size_t *lenp, loff_t *ppos) 64 void *buffer, size_t *lenp, loff_t *ppos)
64{ 65{
65 int r; 66 int r;
66 down(&lasat_info_sem); 67 mutex_lock(&lasat_info_mutex);
67 r = proc_dostring(table, write, filp, buffer, lenp, ppos); 68 r = proc_dostring(table, write, filp, buffer, lenp, ppos);
68 if ( (!write) || r) { 69 if ( (!write) || r) {
69 up(&lasat_info_sem); 70 mutex_unlock(&lasat_info_mutex);
70 return r; 71 return r;
71 } 72 }
72 lasat_write_eeprom_info(); 73 lasat_write_eeprom_info();
73 up(&lasat_info_sem); 74 mutex_unlock(&lasat_info_mutex);
74 return 0; 75 return 0;
75} 76}
76 77
@@ -79,14 +80,14 @@ int proc_dolasatint(ctl_table *table, int write, struct file *filp,
79 void *buffer, size_t *lenp, loff_t *ppos) 80 void *buffer, size_t *lenp, loff_t *ppos)
80{ 81{
81 int r; 82 int r;
82 down(&lasat_info_sem); 83 mutex_lock(&lasat_info_mutex);
83 r = proc_dointvec(table, write, filp, buffer, lenp, ppos); 84 r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
84 if ( (!write) || r) { 85 if ( (!write) || r) {
85 up(&lasat_info_sem); 86 mutex_unlock(&lasat_info_mutex);
86 return r; 87 return r;
87 } 88 }
88 lasat_write_eeprom_info(); 89 lasat_write_eeprom_info();
89 up(&lasat_info_sem); 90 mutex_unlock(&lasat_info_mutex);
90 return 0; 91 return 0;
91} 92}
92 93
@@ -98,7 +99,7 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
98 void *buffer, size_t *lenp, loff_t *ppos) 99 void *buffer, size_t *lenp, loff_t *ppos)
99{ 100{
100 int r; 101 int r;
101 down(&lasat_info_sem); 102 mutex_lock(&lasat_info_mutex);
102 if (!write) { 103 if (!write) {
103 rtctmp = ds1603_read(); 104 rtctmp = ds1603_read();
104 /* check for time < 0 and set to 0 */ 105 /* check for time < 0 and set to 0 */
@@ -107,11 +108,11 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
107 } 108 }
108 r = proc_dointvec(table, write, filp, buffer, lenp, ppos); 109 r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
109 if ( (!write) || r) { 110 if ( (!write) || r) {
110 up(&lasat_info_sem); 111 mutex_unlock(&lasat_info_mutex);
111 return r; 112 return r;
112 } 113 }
113 ds1603_set(rtctmp); 114 ds1603_set(rtctmp);
114 up(&lasat_info_sem); 115 mutex_unlock(&lasat_info_mutex);
115 return 0; 116 return 0;
116} 117}
117#endif 118#endif
@@ -122,16 +123,16 @@ int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen,
122 void *newval, size_t newlen, void **context) 123 void *newval, size_t newlen, void **context)
123{ 124{
124 int r; 125 int r;
125 down(&lasat_info_sem); 126 mutex_lock(&lasat_info_mutex);
126 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 127 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
127 if (r < 0) { 128 if (r < 0) {
128 up(&lasat_info_sem); 129 mutex_unlock(&lasat_info_mutex);
129 return r; 130 return r;
130 } 131 }
131 if (newval && newlen) { 132 if (newval && newlen) {
132 lasat_write_eeprom_info(); 133 lasat_write_eeprom_info();
133 } 134 }
134 up(&lasat_info_sem); 135 mutex_unlock(&lasat_info_mutex);
135 return 1; 136 return 1;
136} 137}
137 138
@@ -142,19 +143,19 @@ int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen,
142 void *newval, size_t newlen, void **context) 143 void *newval, size_t newlen, void **context)
143{ 144{
144 int r; 145 int r;
145 down(&lasat_info_sem); 146 mutex_lock(&lasat_info_mutex);
146 rtctmp = ds1603_read(); 147 rtctmp = ds1603_read();
147 if (rtctmp < 0) 148 if (rtctmp < 0)
148 rtctmp = 0; 149 rtctmp = 0;
149 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 150 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
150 if (r < 0) { 151 if (r < 0) {
151 up(&lasat_info_sem); 152 mutex_unlock(&lasat_info_mutex);
152 return r; 153 return r;
153 } 154 }
154 if (newval && newlen) { 155 if (newval && newlen) {
155 ds1603_set(rtctmp); 156 ds1603_set(rtctmp);
156 } 157 }
157 up(&lasat_info_sem); 158 mutex_unlock(&lasat_info_mutex);
158 return 1; 159 return 1;
159} 160}
160#endif 161#endif
@@ -192,13 +193,13 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
192 return 0; 193 return 0;
193 } 194 }
194 195
195 down(&lasat_info_sem); 196 mutex_lock(&lasat_info_mutex);
196 if (write) { 197 if (write) {
197 len = 0; 198 len = 0;
198 p = buffer; 199 p = buffer;
199 while (len < *lenp) { 200 while (len < *lenp) {
200 if(get_user(c, p++)) { 201 if(get_user(c, p++)) {
201 up(&lasat_info_sem); 202 mutex_unlock(&lasat_info_mutex);
202 return -EFAULT; 203 return -EFAULT;
203 } 204 }
204 if (c == 0 || c == '\n') 205 if (c == 0 || c == '\n')
@@ -209,7 +210,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
209 len = sizeof(proc_lasat_ipbuf) - 1; 210 len = sizeof(proc_lasat_ipbuf) - 1;
210 if (copy_from_user(proc_lasat_ipbuf, buffer, len)) 211 if (copy_from_user(proc_lasat_ipbuf, buffer, len))
211 { 212 {
212 up(&lasat_info_sem); 213 mutex_unlock(&lasat_info_mutex);
213 return -EFAULT; 214 return -EFAULT;
214 } 215 }
215 proc_lasat_ipbuf[len] = 0; 216 proc_lasat_ipbuf[len] = 0;
@@ -230,12 +231,12 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
230 len = *lenp; 231 len = *lenp;
231 if (len) 232 if (len)
232 if(copy_to_user(buffer, proc_lasat_ipbuf, len)) { 233 if(copy_to_user(buffer, proc_lasat_ipbuf, len)) {
233 up(&lasat_info_sem); 234 mutex_unlock(&lasat_info_mutex);
234 return -EFAULT; 235 return -EFAULT;
235 } 236 }
236 if (len < *lenp) { 237 if (len < *lenp) {
237 if(put_user('\n', ((char *) buffer) + len)) { 238 if(put_user('\n', ((char *) buffer) + len)) {
238 up(&lasat_info_sem); 239 mutex_unlock(&lasat_info_mutex);
239 return -EFAULT; 240 return -EFAULT;
240 } 241 }
241 len++; 242 len++;
@@ -244,7 +245,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
244 *ppos += len; 245 *ppos += len;
245 } 246 }
246 update_bcastaddr(); 247 update_bcastaddr();
247 up(&lasat_info_sem); 248 mutex_unlock(&lasat_info_mutex);
248 return 0; 249 return 0;
249} 250}
250#endif /* defined(CONFIG_INET) */ 251#endif /* defined(CONFIG_INET) */
@@ -256,10 +257,10 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen,
256{ 257{
257 int r; 258 int r;
258 259
259 down(&lasat_info_sem); 260 mutex_lock(&lasat_info_mutex);
260 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 261 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context);
261 if (r < 0) { 262 if (r < 0) {
262 up(&lasat_info_sem); 263 mutex_unlock(&lasat_info_mutex);
263 return r; 264 return r;
264 } 265 }
265 266
@@ -271,7 +272,7 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen,
271 lasat_write_eeprom_info(); 272 lasat_write_eeprom_info();
272 lasat_init_board_info(); 273 lasat_init_board_info();
273 } 274 }
274 up(&lasat_info_sem); 275 mutex_unlock(&lasat_info_mutex);
275 276
276 return 0; 277 return 0;
277} 278}
@@ -280,10 +281,10 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
280 void *buffer, size_t *lenp, loff_t *ppos) 281 void *buffer, size_t *lenp, loff_t *ppos)
281{ 282{
282 int r; 283 int r;
283 down(&lasat_info_sem); 284 mutex_lock(&lasat_info_mutex);
284 r = proc_dointvec(table, write, filp, buffer, lenp, ppos); 285 r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
285 if ( (!write) || r) { 286 if ( (!write) || r) {
286 up(&lasat_info_sem); 287 mutex_unlock(&lasat_info_mutex);
287 return r; 288 return r;
288 } 289 }
289 if (filp && filp->f_dentry) 290 if (filp && filp->f_dentry)
@@ -294,7 +295,7 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
294 lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess; 295 lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess;
295 } 296 }
296 lasat_write_eeprom_info(); 297 lasat_write_eeprom_info();
297 up(&lasat_info_sem); 298 mutex_unlock(&lasat_info_mutex);
298 return 0; 299 return 0;
299} 300}
300 301
diff --git a/arch/mips/mips-boards/atlas/atlas_setup.c b/arch/mips/mips-boards/atlas/atlas_setup.c
index 873cf3141a31..c20d401ecf80 100644
--- a/arch/mips/mips-boards/atlas/atlas_setup.c
+++ b/arch/mips/mips-boards/atlas/atlas_setup.c
@@ -65,7 +65,7 @@ void __init plat_setup(void)
65 65
66 board_time_init = mips_time_init; 66 board_time_init = mips_time_init;
67 board_timer_setup = mips_timer_setup; 67 board_timer_setup = mips_timer_setup;
68 rtc_get_time = mips_rtc_get_time; 68 rtc_mips_get_time = mips_rtc_get_time;
69} 69}
70 70
71static void __init serial_init(void) 71static void __init serial_init(void)
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index ee5e70c95cf3..32c9210373ac 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -49,9 +49,6 @@ static char *mtypes[3] = {
49/* References to section boundaries */ 49/* References to section boundaries */
50extern char _end; 50extern char _end;
51 51
52#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
53
54
55struct prom_pmemblock * __init prom_getmdesc(void) 52struct prom_pmemblock * __init prom_getmdesc(void)
56{ 53{
57 char *memsize_str; 54 char *memsize_str;
@@ -109,10 +106,10 @@ struct prom_pmemblock * __init prom_getmdesc(void)
109 106
110 mdesc[3].type = yamon_dontuse; 107 mdesc[3].type = yamon_dontuse;
111 mdesc[3].base = 0x00100000; 108 mdesc[3].base = 0x00100000;
112 mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base; 109 mdesc[3].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[3].base;
113 110
114 mdesc[4].type = yamon_free; 111 mdesc[4].type = yamon_free;
115 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); 112 mdesc[4].base = CPHYSADDR(PAGE_ALIGN(&_end));
116 mdesc[4].size = memsize - mdesc[4].base; 113 mdesc[4].size = memsize - mdesc[4].base;
117 114
118 return &mdesc[0]; 115 return &mdesc[0];
diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c
index 2209e8a9de34..b8488aab6df1 100644
--- a/arch/mips/mips-boards/malta/malta_setup.c
+++ b/arch/mips/mips-boards/malta/malta_setup.c
@@ -225,5 +225,5 @@ void __init plat_setup(void)
225 225
226 board_time_init = mips_time_init; 226 board_time_init = mips_time_init;
227 board_timer_setup = mips_timer_setup; 227 board_timer_setup = mips_timer_setup;
228 rtc_get_time = mips_rtc_get_time; 228 rtc_mips_get_time = mips_rtc_get_time;
229} 229}
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index 1ec4e75656bd..e57f737bab10 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -42,9 +42,6 @@ static char *mtypes[3] = {
42/* References to section boundaries */ 42/* References to section boundaries */
43extern char _end; 43extern char _end;
44 44
45#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
46
47
48struct prom_pmemblock * __init prom_getmdesc(void) 45struct prom_pmemblock * __init prom_getmdesc(void)
49{ 46{
50 unsigned int memsize; 47 unsigned int memsize;
@@ -64,10 +61,10 @@ struct prom_pmemblock * __init prom_getmdesc(void)
64 61
65 mdesc[2].type = simmem_reserved; 62 mdesc[2].type = simmem_reserved;
66 mdesc[2].base = 0x00100000; 63 mdesc[2].base = 0x00100000;
67 mdesc[2].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[2].base; 64 mdesc[2].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[2].base;
68 65
69 mdesc[3].type = simmem_free; 66 mdesc[3].type = simmem_free;
70 mdesc[3].base = CPHYSADDR(PFN_ALIGN(&_end)); 67 mdesc[3].base = CPHYSADDR(PAGE_ALIGN(&_end));
71 mdesc[3].size = memsize - mdesc[3].base; 68 mdesc[3].size = memsize - mdesc[3].base;
72 69
73 return &mdesc[0]; 70 return &mdesc[0];
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 52f7d59fe612..ad89c442f299 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -25,6 +25,7 @@
25#include <linux/highmem.h> 25#include <linux/highmem.h>
26#include <linux/swap.h> 26#include <linux/swap.h>
27#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
28#include <linux/pfn.h>
28 29
29#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
30#include <asm/cachectl.h> 31#include <asm/cachectl.h>
@@ -177,9 +178,6 @@ void __init paging_init(void)
177 free_area_init(zones_size); 178 free_area_init(zones_size);
178} 179}
179 180
180#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
181#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
182
183static inline int page_is_ram(unsigned long pagenr) 181static inline int page_is_ram(unsigned long pagenr)
184{ 182{
185 int i; 183 int i;
diff --git a/arch/mips/momentum/jaguar_atx/setup.c b/arch/mips/momentum/jaguar_atx/setup.c
index 3784c898db1a..91d9637143d7 100644
--- a/arch/mips/momentum/jaguar_atx/setup.c
+++ b/arch/mips/momentum/jaguar_atx/setup.c
@@ -229,8 +229,8 @@ void momenco_time_init(void)
229 mips_hpt_frequency = cpu_clock / 2; 229 mips_hpt_frequency = cpu_clock / 2;
230 board_timer_setup = momenco_timer_setup; 230 board_timer_setup = momenco_timer_setup;
231 231
232 rtc_get_time = m48t37y_get_time; 232 rtc_mips_get_time = m48t37y_get_time;
233 rtc_set_time = m48t37y_set_time; 233 rtc_mips_set_time = m48t37y_set_time;
234} 234}
235 235
236static struct resource mv_pci_io_mem0_resource = { 236static struct resource mv_pci_io_mem0_resource = {
diff --git a/arch/mips/momentum/ocelot_3/setup.c b/arch/mips/momentum/ocelot_3/setup.c
index f95677f4f06f..370e75d0e75c 100644
--- a/arch/mips/momentum/ocelot_3/setup.c
+++ b/arch/mips/momentum/ocelot_3/setup.c
@@ -58,6 +58,7 @@
58#include <linux/bootmem.h> 58#include <linux/bootmem.h>
59#include <linux/mv643xx.h> 59#include <linux/mv643xx.h>
60#include <linux/pm.h> 60#include <linux/pm.h>
61#include <linux/bcd.h>
61 62
62#include <asm/time.h> 63#include <asm/time.h>
63#include <asm/page.h> 64#include <asm/page.h>
@@ -131,9 +132,6 @@ void setup_wired_tlb_entries(void)
131 add_wired_entry(ENTRYLO(0xfc000000), ENTRYLO(0xfd000000), (signed)0xfc000000, PM_16M); 132 add_wired_entry(ENTRYLO(0xfc000000), ENTRYLO(0xfd000000), (signed)0xfc000000, PM_16M);
132} 133}
133 134
134#define CONV_BCD_TO_BIN(val) (((val) & 0xf) + (((val) >> 4) * 10))
135#define CONV_BIN_TO_BCD(val) (((val) % 10) + (((val) / 10) << 4))
136
137unsigned long m48t37y_get_time(void) 135unsigned long m48t37y_get_time(void)
138{ 136{
139 unsigned int year, month, day, hour, min, sec; 137 unsigned int year, month, day, hour, min, sec;
@@ -143,16 +141,16 @@ unsigned long m48t37y_get_time(void)
143 /* stop the update */ 141 /* stop the update */
144 rtc_base[0x7ff8] = 0x40; 142 rtc_base[0x7ff8] = 0x40;
145 143
146 year = CONV_BCD_TO_BIN(rtc_base[0x7fff]); 144 year = BCD2BIN(rtc_base[0x7fff]);
147 year += CONV_BCD_TO_BIN(rtc_base[0x7ff1]) * 100; 145 year += BCD2BIN(rtc_base[0x7ff1]) * 100;
148 146
149 month = CONV_BCD_TO_BIN(rtc_base[0x7ffe]); 147 month = BCD2BIN(rtc_base[0x7ffe]);
150 148
151 day = CONV_BCD_TO_BIN(rtc_base[0x7ffd]); 149 day = BCD2BIN(rtc_base[0x7ffd]);
152 150
153 hour = CONV_BCD_TO_BIN(rtc_base[0x7ffb]); 151 hour = BCD2BIN(rtc_base[0x7ffb]);
154 min = CONV_BCD_TO_BIN(rtc_base[0x7ffa]); 152 min = BCD2BIN(rtc_base[0x7ffa]);
155 sec = CONV_BCD_TO_BIN(rtc_base[0x7ff9]); 153 sec = BCD2BIN(rtc_base[0x7ff9]);
156 154
157 /* start the update */ 155 /* start the update */
158 rtc_base[0x7ff8] = 0x00; 156 rtc_base[0x7ff8] = 0x00;
@@ -175,22 +173,22 @@ int m48t37y_set_time(unsigned long sec)
175 rtc_base[0x7ff8] = 0x80; 173 rtc_base[0x7ff8] = 0x80;
176 174
177 /* year */ 175 /* year */
178 rtc_base[0x7fff] = CONV_BIN_TO_BCD(tm.tm_year % 100); 176 rtc_base[0x7fff] = BIN2BCD(tm.tm_year % 100);
179 rtc_base[0x7ff1] = CONV_BIN_TO_BCD(tm.tm_year / 100); 177 rtc_base[0x7ff1] = BIN2BCD(tm.tm_year / 100);
180 178
181 /* month */ 179 /* month */
182 rtc_base[0x7ffe] = CONV_BIN_TO_BCD(tm.tm_mon); 180 rtc_base[0x7ffe] = BIN2BCD(tm.tm_mon);
183 181
184 /* day */ 182 /* day */
185 rtc_base[0x7ffd] = CONV_BIN_TO_BCD(tm.tm_mday); 183 rtc_base[0x7ffd] = BIN2BCD(tm.tm_mday);
186 184
187 /* hour/min/sec */ 185 /* hour/min/sec */
188 rtc_base[0x7ffb] = CONV_BIN_TO_BCD(tm.tm_hour); 186 rtc_base[0x7ffb] = BIN2BCD(tm.tm_hour);
189 rtc_base[0x7ffa] = CONV_BIN_TO_BCD(tm.tm_min); 187 rtc_base[0x7ffa] = BIN2BCD(tm.tm_min);
190 rtc_base[0x7ff9] = CONV_BIN_TO_BCD(tm.tm_sec); 188 rtc_base[0x7ff9] = BIN2BCD(tm.tm_sec);
191 189
192 /* day of week -- not really used, but let's keep it up-to-date */ 190 /* day of week -- not really used, but let's keep it up-to-date */
193 rtc_base[0x7ffc] = CONV_BIN_TO_BCD(tm.tm_wday + 1); 191 rtc_base[0x7ffc] = BIN2BCD(tm.tm_wday + 1);
194 192
195 /* disable writing */ 193 /* disable writing */
196 rtc_base[0x7ff8] = 0x00; 194 rtc_base[0x7ff8] = 0x00;
@@ -215,8 +213,8 @@ void momenco_time_init(void)
215 mips_hpt_frequency = cpu_clock / 2; 213 mips_hpt_frequency = cpu_clock / 2;
216 board_timer_setup = momenco_timer_setup; 214 board_timer_setup = momenco_timer_setup;
217 215
218 rtc_get_time = m48t37y_get_time; 216 rtc_mips_get_time = m48t37y_get_time;
219 rtc_set_time = m48t37y_set_time; 217 rtc_mips_set_time = m48t37y_set_time;
220} 218}
221 219
222/* 220/*
diff --git a/arch/mips/momentum/ocelot_c/setup.c b/arch/mips/momentum/ocelot_c/setup.c
index bd02e60d037a..a3e6f5575592 100644
--- a/arch/mips/momentum/ocelot_c/setup.c
+++ b/arch/mips/momentum/ocelot_c/setup.c
@@ -227,8 +227,8 @@ void momenco_time_init(void)
227 printk("momenco_time_init cpu_clock=%d\n", cpu_clock); 227 printk("momenco_time_init cpu_clock=%d\n", cpu_clock);
228 board_timer_setup = momenco_timer_setup; 228 board_timer_setup = momenco_timer_setup;
229 229
230 rtc_get_time = m48t37y_get_time; 230 rtc_mips_get_time = m48t37y_get_time;
231 rtc_set_time = m48t37y_set_time; 231 rtc_mips_set_time = m48t37y_set_time;
232} 232}
233 233
234void __init plat_setup(void) 234void __init plat_setup(void)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 8bce711575de..3f724d661bdb 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -198,8 +198,8 @@ static void __init py_rtc_setup(void)
198 if (!m48t37_base) 198 if (!m48t37_base)
199 printk(KERN_ERR "Mapping the RTC failed\n"); 199 printk(KERN_ERR "Mapping the RTC failed\n");
200 200
201 rtc_get_time = m48t37y_get_time; 201 rtc_mips_get_time = m48t37y_get_time;
202 rtc_set_time = m48t37y_set_time; 202 rtc_mips_set_time = m48t37y_set_time;
203 203
204 write_seqlock(&xtime_lock); 204 write_seqlock(&xtime_lock);
205 xtime.tv_sec = m48t37y_get_time(); 205 xtime.tv_sec = m48t37y_get_time();
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 92a3b3c15ed3..a9c58e067b53 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -238,7 +238,7 @@ static int __init reboot_setup(void)
238 request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL); 238 request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL);
239 init_timer(&blink_timer); 239 init_timer(&blink_timer);
240 blink_timer.function = blink_timeout; 240 blink_timer.function = blink_timeout;
241 notifier_chain_register(&panic_notifier_list, &panic_block); 241 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
242 242
243 return 0; 243 return 0;
244} 244}
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index b7300cc5c5ad..cca688ad64ad 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -212,8 +212,8 @@ static void indy_timer_setup(struct irqaction *irq)
212void __init ip22_time_init(void) 212void __init ip22_time_init(void)
213{ 213{
214 /* setup hookup functions */ 214 /* setup hookup functions */
215 rtc_get_time = indy_rtc_get_time; 215 rtc_mips_get_time = indy_rtc_get_time;
216 rtc_set_time = indy_rtc_set_time; 216 rtc_mips_set_time = indy_rtc_set_time;
217 217
218 board_time_init = indy_time_init; 218 board_time_init = indy_time_init;
219 board_timer_setup = indy_timer_setup; 219 board_timer_setup = indy_timer_setup;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index e0d095daa5ed..6c00dce9f73f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -19,6 +19,7 @@
19#include <linux/nodemask.h> 19#include <linux/nodemask.h>
20#include <linux/swap.h> 20#include <linux/swap.h>
21#include <linux/bootmem.h> 21#include <linux/bootmem.h>
22#include <linux/pfn.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/sections.h> 24#include <asm/sections.h>
24 25
@@ -28,8 +29,6 @@
28#include <asm/sn/sn_private.h> 29#include <asm/sn/sn_private.h>
29 30
30 31
31#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
32
33#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT) 32#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
34#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) 33#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
35 34
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 0c948008b023..ab9d9cef089e 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -193,7 +193,7 @@ static __init int ip32_reboot_setup(void)
193 193
194 init_timer(&blink_timer); 194 init_timer(&blink_timer);
195 blink_timer.function = blink_timeout; 195 blink_timer.function = blink_timeout;
196 notifier_chain_register(&panic_notifier_list, &panic_block); 196 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
197 197
198 request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL); 198 request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL);
199 199
diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c
index 2f50c79b7887..a2dd8ae1ea8f 100644
--- a/arch/mips/sgi-ip32/ip32-setup.c
+++ b/arch/mips/sgi-ip32/ip32-setup.c
@@ -91,8 +91,8 @@ void __init plat_setup(void)
91{ 91{
92 board_be_init = ip32_be_init; 92 board_be_init = ip32_be_init;
93 93
94 rtc_get_time = mc146818_get_cmos_time; 94 rtc_mips_get_time = mc146818_get_cmos_time;
95 rtc_set_mmss = mc146818_set_rtc_mmss; 95 rtc_mips_set_mmss = mc146818_set_rtc_mmss;
96 96
97 board_time_init = ip32_time_init; 97 board_time_init = ip32_time_init;
98 board_timer_setup = ip32_timer_setup; 98 board_timer_setup = ip32_timer_setup;
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index b661d2425a36..4b5f74ff3edd 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -121,14 +121,14 @@ void __init plat_setup(void)
121 121
122 if (xicor_probe()) { 122 if (xicor_probe()) {
123 printk("swarm setup: Xicor 1241 RTC detected.\n"); 123 printk("swarm setup: Xicor 1241 RTC detected.\n");
124 rtc_get_time = xicor_get_time; 124 rtc_mips_get_time = xicor_get_time;
125 rtc_set_time = xicor_set_time; 125 rtc_mips_set_time = xicor_set_time;
126 } 126 }
127 127
128 if (m41t81_probe()) { 128 if (m41t81_probe()) {
129 printk("swarm setup: M41T81 RTC detected.\n"); 129 printk("swarm setup: M41T81 RTC detected.\n");
130 rtc_get_time = m41t81_get_time; 130 rtc_mips_get_time = m41t81_get_time;
131 rtc_set_time = m41t81_set_time; 131 rtc_mips_set_time = m41t81_set_time;
132 } 132 }
133 133
134 printk("This kernel optimized for " 134 printk("This kernel optimized for "
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 1141fcd13a59..01ba6c581e3d 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -164,8 +164,8 @@ static struct pci_controller sni_controller = {
164 164
165static inline void sni_pcimt_time_init(void) 165static inline void sni_pcimt_time_init(void)
166{ 166{
167 rtc_get_time = mc146818_get_cmos_time; 167 rtc_mips_get_time = mc146818_get_cmos_time;
168 rtc_set_time = mc146818_set_rtc_mmss; 168 rtc_mips_set_time = mc146818_set_rtc_mmss;
169} 169}
170 170
171void __init plat_setup(void) 171void __init plat_setup(void)
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
index 2ad6401d2af4..6dcf077f61a0 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
@@ -1036,8 +1036,8 @@ toshiba_rbtx4927_time_init(void)
1036 1036
1037#ifdef CONFIG_RTC_DS1742 1037#ifdef CONFIG_RTC_DS1742
1038 1038
1039 rtc_get_time = rtc_ds1742_get_time; 1039 rtc_mips_get_time = rtc_ds1742_get_time;
1040 rtc_set_time = rtc_ds1742_set_time; 1040 rtc_mips_set_time = rtc_ds1742_set_time;
1041 1041
1042 TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT, 1042 TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT,
1043 ":rtc_ds1742_init()-\n"); 1043 ":rtc_ds1742_init()-\n");
diff --git a/arch/mips/tx4938/common/rtc_rx5c348.c b/arch/mips/tx4938/common/rtc_rx5c348.c
index d249edbb6af4..07f782fc0725 100644
--- a/arch/mips/tx4938/common/rtc_rx5c348.c
+++ b/arch/mips/tx4938/common/rtc_rx5c348.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/rtc.h> 15#include <linux/rtc.h>
16#include <linux/time.h> 16#include <linux/time.h>
17#include <linux/bcd.h>
17#include <asm/time.h> 18#include <asm/time.h>
18#include <asm/tx4938/spi.h> 19#include <asm/tx4938/spi.h>
19 20
@@ -77,17 +78,6 @@ spi_rtc_io(unsigned char *inbuf, unsigned char *outbuf, unsigned int count)
77 inbufs, incounts, outbufs, outcounts, 0); 78 inbufs, incounts, outbufs, outcounts, 0);
78} 79}
79 80
80/*
81 * Conversion between binary and BCD.
82 */
83#ifndef BCD_TO_BIN
84#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
85#endif
86
87#ifndef BIN_TO_BCD
88#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
89#endif
90
91/* RTC-dependent code for time.c */ 81/* RTC-dependent code for time.c */
92 82
93static int 83static int
@@ -197,6 +187,6 @@ rtc_rx5c348_init(int chipid)
197 srtc_24h = 1; 187 srtc_24h = 1;
198 188
199 /* set the function pointers */ 189 /* set the function pointers */
200 rtc_get_time = rtc_rx5c348_get_time; 190 rtc_mips_get_time = rtc_rx5c348_get_time;
201 rtc_set_time = rtc_rx5c348_set_time; 191 rtc_mips_set_time = rtc_rx5c348_set_time;
202} 192}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index eca33cfa8a4c..6b3c50964ca9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -25,6 +25,14 @@ config RWSEM_GENERIC_SPINLOCK
25config RWSEM_XCHGADD_ALGORITHM 25config RWSEM_XCHGADD_ALGORITHM
26 bool 26 bool
27 27
28config GENERIC_FIND_NEXT_BIT
29 bool
30 default y
31
32config GENERIC_HWEIGHT
33 bool
34 default y
35
28config GENERIC_CALIBRATE_DELAY 36config GENERIC_CALIBRATE_DELAY
29 bool 37 bool
30 default y 38 default y
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 2a01fe1bdc98..0cea6958f427 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -150,7 +150,8 @@ void __init parisc_pdc_chassis_init(void)
150 150
151 if (handle) { 151 if (handle) {
152 /* initialize panic notifier chain */ 152 /* initialize panic notifier chain */
153 notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block); 153 atomic_notifier_chain_register(&panic_notifier_list,
154 &pdc_chassis_panic_block);
154 155
155 /* initialize reboot notifier chain */ 156 /* initialize reboot notifier chain */
156 register_reboot_notifier(&pdc_chassis_reboot_block); 157 register_reboot_notifier(&pdc_chassis_reboot_block);
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 613569018410..d286f68a3d3a 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -21,7 +21,6 @@
21#include <linux/times.h> 21#include <linux/times.h>
22#include <linux/utsname.h> 22#include <linux/utsname.h>
23#include <linux/time.h> 23#include <linux/time.h>
24#include <linux/timex.h>
25#include <linux/smp.h> 24#include <linux/smp.h>
26#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
27#include <linux/sem.h> 26#include <linux/sem.h>
@@ -567,63 +566,6 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
567} 566}
568 567
569 568
570struct timex32 {
571 unsigned int modes; /* mode selector */
572 int offset; /* time offset (usec) */
573 int freq; /* frequency offset (scaled ppm) */
574 int maxerror; /* maximum error (usec) */
575 int esterror; /* estimated error (usec) */
576 int status; /* clock command/status */
577 int constant; /* pll time constant */
578 int precision; /* clock precision (usec) (read only) */
579 int tolerance; /* clock frequency tolerance (ppm)
580 * (read only)
581 */
582 struct compat_timeval time; /* (read only) */
583 int tick; /* (modified) usecs between clock ticks */
584
585 int ppsfreq; /* pps frequency (scaled ppm) (ro) */
586 int jitter; /* pps jitter (us) (ro) */
587 int shift; /* interval duration (s) (shift) (ro) */
588 int stabil; /* pps stability (scaled ppm) (ro) */
589 int jitcnt; /* jitter limit exceeded (ro) */
590 int calcnt; /* calibration intervals (ro) */
591 int errcnt; /* calibration errors (ro) */
592 int stbcnt; /* stability limit exceeded (ro) */
593
594 int :32; int :32; int :32; int :32;
595 int :32; int :32; int :32; int :32;
596 int :32; int :32; int :32; int :32;
597};
598
599asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32)
600{
601 struct timex txc;
602 struct timex32 t32;
603 int ret;
604 extern int do_adjtimex(struct timex *txc);
605
606 if(copy_from_user(&t32, txc_p32, sizeof(struct timex32)))
607 return -EFAULT;
608#undef CP
609#define CP(x) txc.x = t32.x
610 CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
611 CP(status); CP(constant); CP(precision); CP(tolerance);
612 CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
613 CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
614 CP(stbcnt);
615 ret = do_adjtimex(&txc);
616#undef CP
617#define CP(x) t32.x = txc.x
618 CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
619 CP(status); CP(constant); CP(precision); CP(tolerance);
620 CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
621 CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
622 CP(stbcnt);
623 return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret;
624}
625
626
627struct sysinfo32 { 569struct sysinfo32 {
628 s32 uptime; 570 s32 uptime;
629 u32 loads[3]; 571 u32 loads[3];
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 71011eadb872..89b6c56ea0a8 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -207,7 +207,7 @@
207 /* struct sockaddr... */ 207 /* struct sockaddr... */
208 ENTRY_SAME(recvfrom) 208 ENTRY_SAME(recvfrom)
209 /* struct timex contains longs */ 209 /* struct timex contains longs */
210 ENTRY_DIFF(adjtimex) 210 ENTRY_COMP(adjtimex)
211 ENTRY_SAME(mprotect) /* 125 */ 211 ENTRY_SAME(mprotect) /* 125 */
212 /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */ 212 /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
213 ENTRY_COMP(sigprocmask) 213 ENTRY_COMP(sigprocmask)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fae42da7468d..a433b7126d33 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -37,6 +37,10 @@ config RWSEM_XCHGADD_ALGORITHM
37 bool 37 bool
38 default y 38 default y
39 39
40config GENERIC_HWEIGHT
41 bool
42 default y
43
40config GENERIC_CALIBRATE_DELAY 44config GENERIC_CALIBRATE_DELAY
41 bool 45 bool
42 default y 46 default y
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index cb1fe5878e8b..ad7a90212204 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -30,9 +30,11 @@
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/ptrace.h> 31#include <linux/ptrace.h>
32#include <linux/preempt.h> 32#include <linux/preempt.h>
33#include <linux/module.h>
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
34#include <asm/kdebug.h> 35#include <asm/kdebug.h>
35#include <asm/sstep.h> 36#include <asm/sstep.h>
37#include <asm/uaccess.h>
36 38
37DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 39DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
38DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 40DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -372,17 +374,62 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
372{ 374{
373 struct kprobe *cur = kprobe_running(); 375 struct kprobe *cur = kprobe_running();
374 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 376 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
375 377 const struct exception_table_entry *entry;
376 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 378
377 return 1; 379 switch(kcb->kprobe_status) {
378 380 case KPROBE_HIT_SS:
379 if (kcb->kprobe_status & KPROBE_HIT_SS) { 381 case KPROBE_REENTER:
380 resume_execution(cur, regs); 382 /*
383 * We are here because the instruction being single
384 * stepped caused a page fault. We reset the current
385 * kprobe and the nip points back to the probe address
386 * and allow the page fault handler to continue as a
387 * normal page fault.
388 */
389 regs->nip = (unsigned long)cur->addr;
381 regs->msr &= ~MSR_SE; 390 regs->msr &= ~MSR_SE;
382 regs->msr |= kcb->kprobe_saved_msr; 391 regs->msr |= kcb->kprobe_saved_msr;
383 392 if (kcb->kprobe_status == KPROBE_REENTER)
384 reset_current_kprobe(); 393 restore_previous_kprobe(kcb);
394 else
395 reset_current_kprobe();
385 preempt_enable_no_resched(); 396 preempt_enable_no_resched();
397 break;
398 case KPROBE_HIT_ACTIVE:
399 case KPROBE_HIT_SSDONE:
400 /*
401 * We increment the nmissed count for accounting,
402 * we can also use npre/npostfault count for accouting
403 * these specific fault cases.
404 */
405 kprobes_inc_nmissed_count(cur);
406
407 /*
408 * We come here because instructions in the pre/post
409 * handler caused the page_fault, this could happen
410 * if handler tries to access user space by
411 * copy_from_user(), get_user() etc. Let the
412 * user-specified handler try to fix it first.
413 */
414 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
415 return 1;
416
417 /*
418 * In case the user-specified fault handler returned
419 * zero, try to fix up.
420 */
421 if ((entry = search_exception_tables(regs->nip)) != NULL) {
422 regs->nip = entry->fixup;
423 return 1;
424 }
425
426 /*
427 * fixup_exception() could not handle it,
428 * Let do_page_fault() fix it.
429 */
430 break;
431 default:
432 break;
386 } 433 }
387 return 0; 434 return 0;
388} 435}
@@ -396,6 +443,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
396 struct die_args *args = (struct die_args *)data; 443 struct die_args *args = (struct die_args *)data;
397 int ret = NOTIFY_DONE; 444 int ret = NOTIFY_DONE;
398 445
446 if (args->regs && user_mode(args->regs))
447 return ret;
448
399 switch (val) { 449 switch (val) {
400 case DIE_BPT: 450 case DIE_BPT:
401 if (kprobe_handler(args->regs)) 451 if (kprobe_handler(args->regs))
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1770a066c217..f698aa77127e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -35,7 +35,6 @@
35#include <linux/mqueue.h> 35#include <linux/mqueue.h>
36#include <linux/hardirq.h> 36#include <linux/hardirq.h>
37#include <linux/utsname.h> 37#include <linux/utsname.h>
38#include <linux/kprobes.h>
39 38
40#include <asm/pgtable.h> 39#include <asm/pgtable.h>
41#include <asm/uaccess.h> 40#include <asm/uaccess.h>
@@ -460,7 +459,6 @@ void show_regs(struct pt_regs * regs)
460 459
461void exit_thread(void) 460void exit_thread(void)
462{ 461{
463 kprobe_flush_task(current);
464 discard_lazy_cpu_state(); 462 discard_lazy_cpu_state();
465} 463}
466 464
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2f3fdad35594..e20c1fae3423 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -579,7 +579,8 @@ void __init setup_arch(char **cmdline_p)
579 panic_timeout = 180; 579 panic_timeout = 180;
580 580
581 if (ppc_md.panic) 581 if (ppc_md.panic)
582 notifier_chain_register(&panic_notifier_list, &ppc64_panic_block); 582 atomic_notifier_chain_register(&panic_notifier_list,
583 &ppc64_panic_block);
583 584
584 init_mm.start_code = PAGE_OFFSET; 585 init_mm.start_code = PAGE_OFFSET;
585 init_mm.end_code = (unsigned long) _etext; 586 init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index cd75ab2908fa..ec274e688816 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -24,7 +24,6 @@
24#include <linux/resource.h> 24#include <linux/resource.h>
25#include <linux/times.h> 25#include <linux/times.h>
26#include <linux/utsname.h> 26#include <linux/utsname.h>
27#include <linux/timex.h>
28#include <linux/smp.h> 27#include <linux/smp.h>
29#include <linux/smp_lock.h> 28#include <linux/smp_lock.h>
30#include <linux/sem.h> 29#include <linux/sem.h>
@@ -161,78 +160,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
161 return sys_sysfs((int)option, arg1, arg2); 160 return sys_sysfs((int)option, arg1, arg2);
162} 161}
163 162
164/* Handle adjtimex compatibility. */
165struct timex32 {
166 u32 modes;
167 s32 offset, freq, maxerror, esterror;
168 s32 status, constant, precision, tolerance;
169 struct compat_timeval time;
170 s32 tick;
171 s32 ppsfreq, jitter, shift, stabil;
172 s32 jitcnt, calcnt, errcnt, stbcnt;
173 s32 :32; s32 :32; s32 :32; s32 :32;
174 s32 :32; s32 :32; s32 :32; s32 :32;
175 s32 :32; s32 :32; s32 :32; s32 :32;
176};
177
178extern int do_adjtimex(struct timex *);
179
180asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
181{
182 struct timex txc;
183 int ret;
184
185 memset(&txc, 0, sizeof(struct timex));
186
187 if(get_user(txc.modes, &utp->modes) ||
188 __get_user(txc.offset, &utp->offset) ||
189 __get_user(txc.freq, &utp->freq) ||
190 __get_user(txc.maxerror, &utp->maxerror) ||
191 __get_user(txc.esterror, &utp->esterror) ||
192 __get_user(txc.status, &utp->status) ||
193 __get_user(txc.constant, &utp->constant) ||
194 __get_user(txc.precision, &utp->precision) ||
195 __get_user(txc.tolerance, &utp->tolerance) ||
196 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
197 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
198 __get_user(txc.tick, &utp->tick) ||
199 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
200 __get_user(txc.jitter, &utp->jitter) ||
201 __get_user(txc.shift, &utp->shift) ||
202 __get_user(txc.stabil, &utp->stabil) ||
203 __get_user(txc.jitcnt, &utp->jitcnt) ||
204 __get_user(txc.calcnt, &utp->calcnt) ||
205 __get_user(txc.errcnt, &utp->errcnt) ||
206 __get_user(txc.stbcnt, &utp->stbcnt))
207 return -EFAULT;
208
209 ret = do_adjtimex(&txc);
210
211 if(put_user(txc.modes, &utp->modes) ||
212 __put_user(txc.offset, &utp->offset) ||
213 __put_user(txc.freq, &utp->freq) ||
214 __put_user(txc.maxerror, &utp->maxerror) ||
215 __put_user(txc.esterror, &utp->esterror) ||
216 __put_user(txc.status, &utp->status) ||
217 __put_user(txc.constant, &utp->constant) ||
218 __put_user(txc.precision, &utp->precision) ||
219 __put_user(txc.tolerance, &utp->tolerance) ||
220 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
221 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
222 __put_user(txc.tick, &utp->tick) ||
223 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
224 __put_user(txc.jitter, &utp->jitter) ||
225 __put_user(txc.shift, &utp->shift) ||
226 __put_user(txc.stabil, &utp->stabil) ||
227 __put_user(txc.jitcnt, &utp->jitcnt) ||
228 __put_user(txc.calcnt, &utp->calcnt) ||
229 __put_user(txc.errcnt, &utp->errcnt) ||
230 __put_user(txc.stbcnt, &utp->stbcnt))
231 ret = -EFAULT;
232
233 return ret;
234}
235
236asmlinkage long compat_sys_pause(void) 163asmlinkage long compat_sys_pause(void)
237{ 164{
238 current->state = TASK_INTERRUPTIBLE; 165 current->state = TASK_INTERRUPTIBLE;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 98660aedeeb7..9763faab6739 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -74,19 +74,19 @@ EXPORT_SYMBOL(__debugger_dabr_match);
74EXPORT_SYMBOL(__debugger_fault_handler); 74EXPORT_SYMBOL(__debugger_fault_handler);
75#endif 75#endif
76 76
77struct notifier_block *powerpc_die_chain; 77ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
78static DEFINE_SPINLOCK(die_notifier_lock);
79 78
80int register_die_notifier(struct notifier_block *nb) 79int register_die_notifier(struct notifier_block *nb)
81{ 80{
82 int err = 0; 81 return atomic_notifier_chain_register(&powerpc_die_chain, nb);
83 unsigned long flags; 82}
83EXPORT_SYMBOL(register_die_notifier);
84 84
85 spin_lock_irqsave(&die_notifier_lock, flags); 85int unregister_die_notifier(struct notifier_block *nb)
86 err = notifier_chain_register(&powerpc_die_chain, nb); 86{
87 spin_unlock_irqrestore(&die_notifier_lock, flags); 87 return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
88 return err;
89} 88}
89EXPORT_SYMBOL(unregister_die_notifier);
90 90
91/* 91/*
92 * Trap & Exception support 92 * Trap & Exception support
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index 8b0c132bc163..add8c1a9af68 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -13,12 +13,12 @@
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/semaphore.h> 16#include <linux/mutex.h>
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18 18
19#include "mmu_decl.h" 19#include "mmu_decl.h"
20 20
21static DECLARE_MUTEX(imlist_sem); 21static DEFINE_MUTEX(imlist_mutex);
22struct vm_struct * imlist = NULL; 22struct vm_struct * imlist = NULL;
23 23
24static int get_free_im_addr(unsigned long size, unsigned long *im_addr) 24static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
@@ -257,7 +257,7 @@ struct vm_struct * im_get_free_area(unsigned long size)
257 struct vm_struct *area; 257 struct vm_struct *area;
258 unsigned long addr; 258 unsigned long addr;
259 259
260 down(&imlist_sem); 260 mutex_lock(&imlist_mutex);
261 if (get_free_im_addr(size, &addr)) { 261 if (get_free_im_addr(size, &addr)) {
262 printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n", 262 printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
263 __FUNCTION__, size); 263 __FUNCTION__, size);
@@ -272,7 +272,7 @@ struct vm_struct * im_get_free_area(unsigned long size)
272 __FUNCTION__, addr, size); 272 __FUNCTION__, addr, size);
273 } 273 }
274next_im_done: 274next_im_done:
275 up(&imlist_sem); 275 mutex_unlock(&imlist_mutex);
276 return area; 276 return area;
277} 277}
278 278
@@ -281,9 +281,9 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
281{ 281{
282 struct vm_struct *area; 282 struct vm_struct *area;
283 283
284 down(&imlist_sem); 284 mutex_lock(&imlist_mutex);
285 area = __im_get_area(v_addr, size, criteria); 285 area = __im_get_area(v_addr, size, criteria);
286 up(&imlist_sem); 286 mutex_unlock(&imlist_mutex);
287 return area; 287 return area;
288} 288}
289 289
@@ -297,17 +297,17 @@ void im_free(void * addr)
297 printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); 297 printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
298 return; 298 return;
299 } 299 }
300 down(&imlist_sem); 300 mutex_lock(&imlist_mutex);
301 for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { 301 for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
302 if (tmp->addr == addr) { 302 if (tmp->addr == addr) {
303 *p = tmp->next; 303 *p = tmp->next;
304 unmap_vm_area(tmp); 304 unmap_vm_area(tmp);
305 kfree(tmp); 305 kfree(tmp);
306 up(&imlist_sem); 306 mutex_unlock(&imlist_mutex);
307 return; 307 return;
308 } 308 }
309 } 309 }
310 up(&imlist_sem); 310 mutex_unlock(&imlist_mutex);
311 printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, 311 printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
312 addr); 312 addr);
313} 313}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index badac10d700c..5e435a9c3431 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -195,7 +195,7 @@ void show_mem(void)
195 printk("Mem-info:\n"); 195 printk("Mem-info:\n");
196 show_free_areas(); 196 show_free_areas();
197 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 197 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
198 for_each_pgdat(pgdat) { 198 for_each_online_pgdat(pgdat) {
199 unsigned long flags; 199 unsigned long flags;
200 pgdat_resize_lock(pgdat, &flags); 200 pgdat_resize_lock(pgdat, &flags);
201 for (i = 0; i < pgdat->node_spanned_pages; i++) { 201 for (i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -351,7 +351,7 @@ void __init mem_init(void)
351 max_mapnr = max_pfn; 351 max_mapnr = max_pfn;
352 totalram_pages += free_all_bootmem(); 352 totalram_pages += free_all_bootmem();
353#endif 353#endif
354 for_each_pgdat(pgdat) { 354 for_each_online_pgdat(pgdat) {
355 for (i = 0; i < pgdat->node_spanned_pages; i++) { 355 for (i = 0; i < pgdat->node_spanned_pages; i++) {
356 if (!pfn_valid(pgdat->node_start_pfn + i)) 356 if (!pfn_valid(pgdat->node_start_pfn + i))
357 continue; 357 continue;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d75ae03df686..a8fa1eeeb174 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -32,7 +32,7 @@
32 32
33#include <asm/io.h> 33#include <asm/io.h>
34#include <asm/prom.h> 34#include <asm/prom.h>
35#include <asm/semaphore.h> 35#include <linux/mutex.h>
36#include <asm/spu.h> 36#include <asm/spu.h>
37#include <asm/mmu_context.h> 37#include <asm/mmu_context.h>
38 38
@@ -342,7 +342,7 @@ spu_free_irqs(struct spu *spu)
342} 342}
343 343
344static LIST_HEAD(spu_list); 344static LIST_HEAD(spu_list);
345static DECLARE_MUTEX(spu_mutex); 345static DEFINE_MUTEX(spu_mutex);
346 346
347static void spu_init_channels(struct spu *spu) 347static void spu_init_channels(struct spu *spu)
348{ 348{
@@ -382,7 +382,7 @@ struct spu *spu_alloc(void)
382{ 382{
383 struct spu *spu; 383 struct spu *spu;
384 384
385 down(&spu_mutex); 385 mutex_lock(&spu_mutex);
386 if (!list_empty(&spu_list)) { 386 if (!list_empty(&spu_list)) {
387 spu = list_entry(spu_list.next, struct spu, list); 387 spu = list_entry(spu_list.next, struct spu, list);
388 list_del_init(&spu->list); 388 list_del_init(&spu->list);
@@ -391,7 +391,7 @@ struct spu *spu_alloc(void)
391 pr_debug("No SPU left\n"); 391 pr_debug("No SPU left\n");
392 spu = NULL; 392 spu = NULL;
393 } 393 }
394 up(&spu_mutex); 394 mutex_unlock(&spu_mutex);
395 395
396 if (spu) 396 if (spu)
397 spu_init_channels(spu); 397 spu_init_channels(spu);
@@ -402,9 +402,9 @@ EXPORT_SYMBOL_GPL(spu_alloc);
402 402
403void spu_free(struct spu *spu) 403void spu_free(struct spu *spu)
404{ 404{
405 down(&spu_mutex); 405 mutex_lock(&spu_mutex);
406 list_add_tail(&spu->list, &spu_list); 406 list_add_tail(&spu->list, &spu_list);
407 up(&spu_mutex); 407 mutex_unlock(&spu_mutex);
408} 408}
409EXPORT_SYMBOL_GPL(spu_free); 409EXPORT_SYMBOL_GPL(spu_free);
410 410
@@ -633,14 +633,14 @@ static int __init create_spu(struct device_node *spe)
633 spu->wbox_callback = NULL; 633 spu->wbox_callback = NULL;
634 spu->stop_callback = NULL; 634 spu->stop_callback = NULL;
635 635
636 down(&spu_mutex); 636 mutex_lock(&spu_mutex);
637 spu->number = number++; 637 spu->number = number++;
638 ret = spu_request_irqs(spu); 638 ret = spu_request_irqs(spu);
639 if (ret) 639 if (ret)
640 goto out_unmap; 640 goto out_unmap;
641 641
642 list_add(&spu->list, &spu_list); 642 list_add(&spu->list, &spu_list);
643 up(&spu_mutex); 643 mutex_unlock(&spu_mutex);
644 644
645 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n", 645 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
646 spu->name, spu->isrc, spu->local_store, 646 spu->name, spu->isrc, spu->local_store,
@@ -648,7 +648,7 @@ static int __init create_spu(struct device_node *spe)
648 goto out; 648 goto out;
649 649
650out_unmap: 650out_unmap:
651 up(&spu_mutex); 651 mutex_unlock(&spu_mutex);
652 spu_unmap(spu); 652 spu_unmap(spu);
653out_free: 653out_free:
654 kfree(spu); 654 kfree(spu);
@@ -668,10 +668,10 @@ static void destroy_spu(struct spu *spu)
668static void cleanup_spu_base(void) 668static void cleanup_spu_base(void)
669{ 669{
670 struct spu *spu, *tmp; 670 struct spu *spu, *tmp;
671 down(&spu_mutex); 671 mutex_lock(&spu_mutex);
672 list_for_each_entry_safe(spu, tmp, &spu_list, list) 672 list_for_each_entry_safe(spu, tmp, &spu_list, list)
673 destroy_spu(spu); 673 destroy_spu(spu);
674 up(&spu_mutex); 674 mutex_unlock(&spu_mutex);
675} 675}
676module_exit(cleanup_spu_base); 676module_exit(cleanup_spu_base);
677 677
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index b3962c3a0348..5be40aa483fd 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -103,7 +103,7 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr)
103 103
104static int 104static int
105spufs_new_file(struct super_block *sb, struct dentry *dentry, 105spufs_new_file(struct super_block *sb, struct dentry *dentry,
106 struct file_operations *fops, int mode, 106 const struct file_operations *fops, int mode,
107 struct spu_context *ctx) 107 struct spu_context *ctx)
108{ 108{
109 static struct inode_operations spufs_file_iops = { 109 static struct inode_operations spufs_file_iops = {
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 12c6f689b1aa..7d7889026936 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -120,33 +120,15 @@ int chrp_set_rtc_time(struct rtc_time *tmarg)
120void chrp_get_rtc_time(struct rtc_time *tm) 120void chrp_get_rtc_time(struct rtc_time *tm)
121{ 121{
122 unsigned int year, mon, day, hour, min, sec; 122 unsigned int year, mon, day, hour, min, sec;
123 int uip, i;
124 123
125 /* The Linux interpretation of the CMOS clock register contents: 124 do {
126 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
127 * RTC registers show the second which has precisely just started.
128 * Let's hope other operating systems interpret the RTC the same way.
129 */
130
131 /* Since the UIP flag is set for about 2.2 ms and the clock
132 * is typically written with a precision of 1 jiffy, trying
133 * to obtain a precision better than a few milliseconds is
134 * an illusion. Only consistency is interesting, this also
135 * allows to use the routine for /dev/rtc without a potential
136 * 1 second kernel busy loop triggered by any reader of /dev/rtc.
137 */
138
139 for ( i = 0; i<1000000; i++) {
140 uip = chrp_cmos_clock_read(RTC_FREQ_SELECT);
141 sec = chrp_cmos_clock_read(RTC_SECONDS); 125 sec = chrp_cmos_clock_read(RTC_SECONDS);
142 min = chrp_cmos_clock_read(RTC_MINUTES); 126 min = chrp_cmos_clock_read(RTC_MINUTES);
143 hour = chrp_cmos_clock_read(RTC_HOURS); 127 hour = chrp_cmos_clock_read(RTC_HOURS);
144 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH); 128 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH);
145 mon = chrp_cmos_clock_read(RTC_MONTH); 129 mon = chrp_cmos_clock_read(RTC_MONTH);
146 year = chrp_cmos_clock_read(RTC_YEAR); 130 year = chrp_cmos_clock_read(RTC_YEAR);
147 uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT); 131 } while (sec != chrp_cmos_clock_read(RTC_SECONDS));
148 if ((uip & RTC_UIP)==0) break;
149 }
150 132
151 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 133 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
152 BCD_TO_BIN(sec); 134 BCD_TO_BIN(sec);
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 5e6981d17379..b9a2b3d4bf33 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -60,34 +60,14 @@ static void maple_clock_write(unsigned long val, int addr)
60 60
61void maple_get_rtc_time(struct rtc_time *tm) 61void maple_get_rtc_time(struct rtc_time *tm)
62{ 62{
63 int uip, i; 63 do {
64
65 /* The Linux interpretation of the CMOS clock register contents:
66 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
67 * RTC registers show the second which has precisely just started.
68 * Let's hope other operating systems interpret the RTC the same way.
69 */
70
71 /* Since the UIP flag is set for about 2.2 ms and the clock
72 * is typically written with a precision of 1 jiffy, trying
73 * to obtain a precision better than a few milliseconds is
74 * an illusion. Only consistency is interesting, this also
75 * allows to use the routine for /dev/rtc without a potential
76 * 1 second kernel busy loop triggered by any reader of /dev/rtc.
77 */
78
79 for (i = 0; i<1000000; i++) {
80 uip = maple_clock_read(RTC_FREQ_SELECT);
81 tm->tm_sec = maple_clock_read(RTC_SECONDS); 64 tm->tm_sec = maple_clock_read(RTC_SECONDS);
82 tm->tm_min = maple_clock_read(RTC_MINUTES); 65 tm->tm_min = maple_clock_read(RTC_MINUTES);
83 tm->tm_hour = maple_clock_read(RTC_HOURS); 66 tm->tm_hour = maple_clock_read(RTC_HOURS);
84 tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH); 67 tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH);
85 tm->tm_mon = maple_clock_read(RTC_MONTH); 68 tm->tm_mon = maple_clock_read(RTC_MONTH);
86 tm->tm_year = maple_clock_read(RTC_YEAR); 69 tm->tm_year = maple_clock_read(RTC_YEAR);
87 uip |= maple_clock_read(RTC_FREQ_SELECT); 70 } while (tm->tm_sec != maple_clock_read(RTC_SECONDS));
88 if ((uip & RTC_UIP)==0)
89 break;
90 }
91 71
92 if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY) 72 if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY)
93 || RTC_ALWAYS_BCD) { 73 || RTC_ALWAYS_BCD) {
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index a415e8d2f7af..b57e465a1b71 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -21,6 +21,7 @@
21#include <linux/cpufreq.h> 21#include <linux/cpufreq.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/completion.h> 23#include <linux/completion.h>
24#include <linux/mutex.h>
24#include <asm/prom.h> 25#include <asm/prom.h>
25#include <asm/machdep.h> 26#include <asm/machdep.h>
26#include <asm/irq.h> 27#include <asm/irq.h>
@@ -90,7 +91,7 @@ static void (*g5_switch_volt)(int speed_mode);
90static int (*g5_switch_freq)(int speed_mode); 91static int (*g5_switch_freq)(int speed_mode);
91static int (*g5_query_freq)(void); 92static int (*g5_query_freq)(void);
92 93
93static DECLARE_MUTEX(g5_switch_mutex); 94static DEFINE_MUTEX(g5_switch_mutex);
94 95
95 96
96static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ 97static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
@@ -327,7 +328,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
327 if (g5_pmode_cur == newstate) 328 if (g5_pmode_cur == newstate)
328 return 0; 329 return 0;
329 330
330 down(&g5_switch_mutex); 331 mutex_lock(&g5_switch_mutex);
331 332
332 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; 333 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
333 freqs.new = g5_cpu_freqs[newstate].frequency; 334 freqs.new = g5_cpu_freqs[newstate].frequency;
@@ -337,7 +338,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
337 rc = g5_switch_freq(newstate); 338 rc = g5_switch_freq(newstate);
338 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 339 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
339 340
340 up(&g5_switch_mutex); 341 mutex_unlock(&g5_switch_mutex);
341 342
342 return rc; 343 return rc;
343} 344}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 86cfa6ecdcf3..5ad90676567a 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -94,16 +94,16 @@ static struct device_node *derive_parent(const char *path)
94 return parent; 94 return parent;
95} 95}
96 96
97static struct notifier_block *pSeries_reconfig_chain; 97static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
98 98
99int pSeries_reconfig_notifier_register(struct notifier_block *nb) 99int pSeries_reconfig_notifier_register(struct notifier_block *nb)
100{ 100{
101 return notifier_chain_register(&pSeries_reconfig_chain, nb); 101 return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
102} 102}
103 103
104void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) 104void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
105{ 105{
106 notifier_chain_unregister(&pSeries_reconfig_chain, nb); 106 blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
107} 107}
108 108
109static int pSeries_reconfig_add_node(const char *path, struct property *proplist) 109static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
@@ -131,7 +131,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
131 goto out_err; 131 goto out_err;
132 } 132 }
133 133
134 err = notifier_call_chain(&pSeries_reconfig_chain, 134 err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
135 PSERIES_RECONFIG_ADD, np); 135 PSERIES_RECONFIG_ADD, np);
136 if (err == NOTIFY_BAD) { 136 if (err == NOTIFY_BAD) {
137 printk(KERN_ERR "Failed to add device node %s\n", path); 137 printk(KERN_ERR "Failed to add device node %s\n", path);
@@ -171,7 +171,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
171 171
172 remove_node_proc_entries(np); 172 remove_node_proc_entries(np);
173 173
174 notifier_call_chain(&pSeries_reconfig_chain, 174 blocking_notifier_call_chain(&pSeries_reconfig_chain,
175 PSERIES_RECONFIG_REMOVE, np); 175 PSERIES_RECONFIG_REMOVE, np);
176 of_detach_node(np); 176 of_detach_node(np);
177 177
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 54a0a9bb12dd..3a3e302b4ea2 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -19,6 +19,10 @@ config RWSEM_XCHGADD_ALGORITHM
19 bool 19 bool
20 default y 20 default y
21 21
22config GENERIC_HWEIGHT
23 bool
24 default y
25
22config GENERIC_CALIBRATE_DELAY 26config GENERIC_CALIBRATE_DELAY
23 bool 27 bool
24 default y 28 default y
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index 2f5c7650274f..9b84bffdefce 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -52,7 +52,7 @@ static int ppc_htab_open(struct inode *inode, struct file *file)
52 return single_open(file, ppc_htab_show, NULL); 52 return single_open(file, ppc_htab_show, NULL);
53} 53}
54 54
55struct file_operations ppc_htab_operations = { 55const struct file_operations ppc_htab_operations = {
56 .open = ppc_htab_open, 56 .open = ppc_htab_open,
57 .read = seq_read, 57 .read = seq_read,
58 .llseek = seq_lseek, 58 .llseek = seq_lseek,
diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c
index c8627770af13..51e06ad66168 100644
--- a/arch/ppc/platforms/chrp_time.c
+++ b/arch/ppc/platforms/chrp_time.c
@@ -119,44 +119,28 @@ int chrp_set_rtc_time(unsigned long nowtime)
119unsigned long chrp_get_rtc_time(void) 119unsigned long chrp_get_rtc_time(void)
120{ 120{
121 unsigned int year, mon, day, hour, min, sec; 121 unsigned int year, mon, day, hour, min, sec;
122 int uip, i;
123 122
124 /* The Linux interpretation of the CMOS clock register contents: 123 do {
125 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
126 * RTC registers show the second which has precisely just started.
127 * Let's hope other operating systems interpret the RTC the same way.
128 */
129
130 /* Since the UIP flag is set for about 2.2 ms and the clock
131 * is typically written with a precision of 1 jiffy, trying
132 * to obtain a precision better than a few milliseconds is
133 * an illusion. Only consistency is interesting, this also
134 * allows to use the routine for /dev/rtc without a potential
135 * 1 second kernel busy loop triggered by any reader of /dev/rtc.
136 */
137
138 for ( i = 0; i<1000000; i++) {
139 uip = chrp_cmos_clock_read(RTC_FREQ_SELECT);
140 sec = chrp_cmos_clock_read(RTC_SECONDS); 124 sec = chrp_cmos_clock_read(RTC_SECONDS);
141 min = chrp_cmos_clock_read(RTC_MINUTES); 125 min = chrp_cmos_clock_read(RTC_MINUTES);
142 hour = chrp_cmos_clock_read(RTC_HOURS); 126 hour = chrp_cmos_clock_read(RTC_HOURS);
143 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH); 127 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH);
144 mon = chrp_cmos_clock_read(RTC_MONTH); 128 mon = chrp_cmos_clock_read(RTC_MONTH);
145 year = chrp_cmos_clock_read(RTC_YEAR); 129 year = chrp_cmos_clock_read(RTC_YEAR);
146 uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT); 130 } while (sec != chrp_cmos_clock_read(RTC_SECONDS));
147 if ((uip & RTC_UIP)==0) break; 131
132 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY)
133 || RTC_ALWAYS_BCD) {
134 BCD_TO_BIN(sec);
135 BCD_TO_BIN(min);
136 BCD_TO_BIN(hour);
137 BCD_TO_BIN(day);
138 BCD_TO_BIN(mon);
139 BCD_TO_BIN(year);
148 } 140 }
149 141
150 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 142 year += 1900;
151 { 143 if (year < 1970)
152 BCD_TO_BIN(sec);
153 BCD_TO_BIN(min);
154 BCD_TO_BIN(hour);
155 BCD_TO_BIN(day);
156 BCD_TO_BIN(mon);
157 BCD_TO_BIN(year);
158 }
159 if ((year += 1900) < 1970)
160 year += 100; 144 year += 100;
161 return mktime(year, mon, day, hour, min, sec); 145 return mktime(year, mon, day, hour, min, sec);
162} 146}
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index a0fc628ffb1e..d95c05d9824d 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -736,7 +736,7 @@ ibm_statusled_progress(char *s, unsigned short hex)
736 hex = 0xfff; 736 hex = 0xfff;
737 if (!notifier_installed) { 737 if (!notifier_installed) {
738 ++notifier_installed; 738 ++notifier_installed;
739 notifier_chain_register(&panic_notifier_list, 739 atomic_notifier_chain_register(&panic_notifier_list,
740 &ibm_statusled_block); 740 &ibm_statusled_block);
741 } 741 }
742 } 742 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2b7364ed23bc..01c5c082f970 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -14,6 +14,10 @@ config RWSEM_XCHGADD_ALGORITHM
14 bool 14 bool
15 default y 15 default y
16 16
17config GENERIC_HWEIGHT
18 bool
19 default y
20
17config GENERIC_CALIBRATE_DELAY 21config GENERIC_CALIBRATE_DELAY
18 bool 22 bool
19 default y 23 default y
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
index def02bdc44a4..54fb11d7fadd 100644
--- a/arch/s390/crypto/crypt_s390_query.c
+++ b/arch/s390/crypto/crypt_s390_query.c
@@ -55,7 +55,7 @@ static void query_available_functions(void)
55 printk(KERN_INFO "KMC_AES_256: %d\n", 55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT)); 56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57 57
58 /* query available KIMD fucntions */ 58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n", 59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY)); 60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n", 61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index cc058dc3bc8b..5e14de37c17b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -26,7 +26,6 @@
26#include <linux/resource.h> 26#include <linux/resource.h>
27#include <linux/times.h> 27#include <linux/times.h>
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29#include <linux/timex.h>
30#include <linux/smp.h> 29#include <linux/smp.h>
31#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
32#include <linux/sem.h> 31#include <linux/sem.h>
@@ -705,79 +704,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
705 return ret; 704 return ret;
706} 705}
707 706
708/* Handle adjtimex compatibility. */
709
710struct timex32 {
711 u32 modes;
712 s32 offset, freq, maxerror, esterror;
713 s32 status, constant, precision, tolerance;
714 struct compat_timeval time;
715 s32 tick;
716 s32 ppsfreq, jitter, shift, stabil;
717 s32 jitcnt, calcnt, errcnt, stbcnt;
718 s32 :32; s32 :32; s32 :32; s32 :32;
719 s32 :32; s32 :32; s32 :32; s32 :32;
720 s32 :32; s32 :32; s32 :32; s32 :32;
721};
722
723extern int do_adjtimex(struct timex *);
724
725asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
726{
727 struct timex txc;
728 int ret;
729
730 memset(&txc, 0, sizeof(struct timex));
731
732 if(get_user(txc.modes, &utp->modes) ||
733 __get_user(txc.offset, &utp->offset) ||
734 __get_user(txc.freq, &utp->freq) ||
735 __get_user(txc.maxerror, &utp->maxerror) ||
736 __get_user(txc.esterror, &utp->esterror) ||
737 __get_user(txc.status, &utp->status) ||
738 __get_user(txc.constant, &utp->constant) ||
739 __get_user(txc.precision, &utp->precision) ||
740 __get_user(txc.tolerance, &utp->tolerance) ||
741 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
742 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
743 __get_user(txc.tick, &utp->tick) ||
744 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
745 __get_user(txc.jitter, &utp->jitter) ||
746 __get_user(txc.shift, &utp->shift) ||
747 __get_user(txc.stabil, &utp->stabil) ||
748 __get_user(txc.jitcnt, &utp->jitcnt) ||
749 __get_user(txc.calcnt, &utp->calcnt) ||
750 __get_user(txc.errcnt, &utp->errcnt) ||
751 __get_user(txc.stbcnt, &utp->stbcnt))
752 return -EFAULT;
753
754 ret = do_adjtimex(&txc);
755
756 if(put_user(txc.modes, &utp->modes) ||
757 __put_user(txc.offset, &utp->offset) ||
758 __put_user(txc.freq, &utp->freq) ||
759 __put_user(txc.maxerror, &utp->maxerror) ||
760 __put_user(txc.esterror, &utp->esterror) ||
761 __put_user(txc.status, &utp->status) ||
762 __put_user(txc.constant, &utp->constant) ||
763 __put_user(txc.precision, &utp->precision) ||
764 __put_user(txc.tolerance, &utp->tolerance) ||
765 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
766 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
767 __put_user(txc.tick, &utp->tick) ||
768 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
769 __put_user(txc.jitter, &utp->jitter) ||
770 __put_user(txc.shift, &utp->shift) ||
771 __put_user(txc.stabil, &utp->stabil) ||
772 __put_user(txc.jitcnt, &utp->jitcnt) ||
773 __put_user(txc.calcnt, &utp->calcnt) ||
774 __put_user(txc.errcnt, &utp->errcnt) ||
775 __put_user(txc.stbcnt, &utp->stbcnt))
776 ret = -EFAULT;
777
778 return ret;
779}
780
781#ifdef CONFIG_SYSCTL 707#ifdef CONFIG_SYSCTL
782struct __sysctl_args32 { 708struct __sysctl_args32 {
783 u32 name; 709 u32 name;
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 50e80138e7ad..199da68bd7be 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -551,10 +551,10 @@ sys32_newuname_wrapper:
551 llgtr %r2,%r2 # struct new_utsname * 551 llgtr %r2,%r2 # struct new_utsname *
552 jg s390x_newuname # branch to system call 552 jg s390x_newuname # branch to system call
553 553
554 .globl sys32_adjtimex_wrapper 554 .globl compat_sys_adjtimex_wrapper
555sys32_adjtimex_wrapper: 555compat_sys_adjtimex_wrapper:
556 llgtr %r2,%r2 # struct timex_emu31 * 556 llgtr %r2,%r2 # struct compat_timex *
557 jg sys32_adjtimex # branch to system call 557 jg compat_sys_adjtimex # branch to system call
558 558
559 .globl sys32_mprotect_wrapper 559 .globl sys32_mprotect_wrapper
560sys32_mprotect_wrapper: 560sys32_mprotect_wrapper:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 99182a415fe7..4a0f5a1551ea 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
76/* 76/*
77 * Need to know about CPUs going idle? 77 * Need to know about CPUs going idle?
78 */ 78 */
79static struct notifier_block *idle_chain; 79static ATOMIC_NOTIFIER_HEAD(idle_chain);
80 80
81int register_idle_notifier(struct notifier_block *nb) 81int register_idle_notifier(struct notifier_block *nb)
82{ 82{
83 return notifier_chain_register(&idle_chain, nb); 83 return atomic_notifier_chain_register(&idle_chain, nb);
84} 84}
85EXPORT_SYMBOL(register_idle_notifier); 85EXPORT_SYMBOL(register_idle_notifier);
86 86
87int unregister_idle_notifier(struct notifier_block *nb) 87int unregister_idle_notifier(struct notifier_block *nb)
88{ 88{
89 return notifier_chain_unregister(&idle_chain, nb); 89 return atomic_notifier_chain_unregister(&idle_chain, nb);
90} 90}
91EXPORT_SYMBOL(unregister_idle_notifier); 91EXPORT_SYMBOL(unregister_idle_notifier);
92 92
@@ -95,7 +95,7 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
95 /* disable monitor call class 0 */ 95 /* disable monitor call class 0 */
96 __ctl_clear_bit(8, 15); 96 __ctl_clear_bit(8, 15);
97 97
98 notifier_call_chain(&idle_chain, CPU_NOT_IDLE, 98 atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
99 (void *)(long) smp_processor_id()); 99 (void *)(long) smp_processor_id());
100} 100}
101 101
@@ -116,7 +116,8 @@ static void default_idle(void)
116 return; 116 return;
117 } 117 }
118 118
119 rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu); 119 rc = atomic_notifier_call_chain(&idle_chain,
120 CPU_IDLE, (void *)(long) cpu);
120 if (rc != NOTIFY_OK && rc != NOTIFY_DONE) 121 if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
121 BUG(); 122 BUG();
122 if (rc != NOTIFY_OK) { 123 if (rc != NOTIFY_OK) {
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 7c88d85c3597..2f56654da821 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -132,7 +132,7 @@ SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) 132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) 133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */ 134NI_SYSCALL /* modify_ldt for i386 */
135SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper) 135SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ 136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
137SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) 137SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
138NI_SYSCALL /* old "create module" */ 138NI_SYSCALL /* old "create module" */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e9b275d90737..58583f459471 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK
21config RWSEM_XCHGADD_ALGORITHM 21config RWSEM_XCHGADD_ALGORITHM
22 bool 22 bool
23 23
24config GENERIC_FIND_NEXT_BIT
25 bool
26 default y
27
28config GENERIC_HWEIGHT
29 bool
30 default y
31
24config GENERIC_HARDIRQS 32config GENERIC_HARDIRQS
25 bool 33 bool
26 default y 34 default y
diff --git a/arch/sh/boards/mpc1211/rtc.c b/arch/sh/boards/mpc1211/rtc.c
index 4d100f048072..a76c655dceee 100644
--- a/arch/sh/boards/mpc1211/rtc.c
+++ b/arch/sh/boards/mpc1211/rtc.c
@@ -9,36 +9,16 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/bcd.h>
12#include <linux/mc146818rtc.h> 13#include <linux/mc146818rtc.h>
13 14
14#ifndef BCD_TO_BIN
15#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
16#endif
17
18#ifndef BIN_TO_BCD
19#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
20#endif
21
22/* arc/i386/kernel/time.c */
23unsigned long get_cmos_time(void) 15unsigned long get_cmos_time(void)
24{ 16{
25 unsigned int year, mon, day, hour, min, sec; 17 unsigned int year, mon, day, hour, min, sec;
26 int i;
27 18
28 spin_lock(&rtc_lock); 19 spin_lock(&rtc_lock);
29 /* The Linux interpretation of the CMOS clock register contents: 20
30 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the 21 do {
31 * RTC registers show the second which has precisely just started.
32 * Let's hope other operating systems interpret the RTC the same way.
33 */
34 /* read RTC exactly on falling edge of update flag */
35 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
36 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
37 break;
38 for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
39 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
40 break;
41 do { /* Isn't this overkill ? UIP above should guarantee consistency */
42 sec = CMOS_READ(RTC_SECONDS); 22 sec = CMOS_READ(RTC_SECONDS);
43 min = CMOS_READ(RTC_MINUTES); 23 min = CMOS_READ(RTC_MINUTES);
44 hour = CMOS_READ(RTC_HOURS); 24 hour = CMOS_READ(RTC_HOURS);
@@ -46,18 +26,22 @@ unsigned long get_cmos_time(void)
46 mon = CMOS_READ(RTC_MONTH); 26 mon = CMOS_READ(RTC_MONTH);
47 year = CMOS_READ(RTC_YEAR); 27 year = CMOS_READ(RTC_YEAR);
48 } while (sec != CMOS_READ(RTC_SECONDS)); 28 } while (sec != CMOS_READ(RTC_SECONDS));
49 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 29
50 { 30 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
51 BCD_TO_BIN(sec); 31 BCD_TO_BIN(sec);
52 BCD_TO_BIN(min); 32 BCD_TO_BIN(min);
53 BCD_TO_BIN(hour); 33 BCD_TO_BIN(hour);
54 BCD_TO_BIN(day); 34 BCD_TO_BIN(day);
55 BCD_TO_BIN(mon); 35 BCD_TO_BIN(mon);
56 BCD_TO_BIN(year); 36 BCD_TO_BIN(year);
57 } 37 }
38
58 spin_unlock(&rtc_lock); 39 spin_unlock(&rtc_lock);
59 if ((year += 1900) < 1970) 40
41 year += 1900;
42 if (year < 1970)
60 year += 100; 43 year += 100;
44
61 return mktime(year, mon, day, hour, min, sec); 45 return mktime(year, mon, day, hour, min, sec);
62} 46}
63 47
diff --git a/arch/sh/boards/sh03/rtc.c b/arch/sh/boards/sh03/rtc.c
index cbeca7037ba5..d609863cfe53 100644
--- a/arch/sh/boards/sh03/rtc.c
+++ b/arch/sh/boards/sh03/rtc.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/bcd.h>
12#include <asm/io.h> 13#include <asm/io.h>
13#include <linux/rtc.h> 14#include <linux/rtc.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
@@ -33,14 +34,6 @@
33#define RTC_BUSY 1 34#define RTC_BUSY 1
34#define RTC_STOP 2 35#define RTC_STOP 2
35 36
36#ifndef BCD_TO_BIN
37#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
38#endif
39
40#ifndef BIN_TO_BCD
41#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
42#endif
43
44extern void (*rtc_get_time)(struct timespec *); 37extern void (*rtc_get_time)(struct timespec *);
45extern int (*rtc_set_time)(const time_t); 38extern int (*rtc_set_time)(const time_t);
46extern spinlock_t rtc_lock; 39extern spinlock_t rtc_lock;
@@ -48,13 +41,9 @@ extern spinlock_t rtc_lock;
48unsigned long get_cmos_time(void) 41unsigned long get_cmos_time(void)
49{ 42{
50 unsigned int year, mon, day, hour, min, sec; 43 unsigned int year, mon, day, hour, min, sec;
51 int i;
52 44
53 spin_lock(&rtc_lock); 45 spin_lock(&rtc_lock);
54 again: 46 again:
55 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
56 if (!(ctrl_inb(RTC_CTL) & RTC_BUSY))
57 break;
58 do { 47 do {
59 sec = (ctrl_inb(RTC_SEC1) & 0xf) + (ctrl_inb(RTC_SEC10) & 0x7) * 10; 48 sec = (ctrl_inb(RTC_SEC1) & 0xf) + (ctrl_inb(RTC_SEC10) & 0x7) * 10;
60 min = (ctrl_inb(RTC_MIN1) & 0xf) + (ctrl_inb(RTC_MIN10) & 0xf) * 10; 49 min = (ctrl_inb(RTC_MIN1) & 0xf) + (ctrl_inb(RTC_MIN10) & 0xf) * 10;
diff --git a/arch/sh/kernel/cpu/rtc.c b/arch/sh/kernel/cpu/rtc.c
index f8361f5e788b..4304cf75cfa2 100644
--- a/arch/sh/kernel/cpu/rtc.c
+++ b/arch/sh/kernel/cpu/rtc.c
@@ -9,18 +9,10 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/time.h> 11#include <linux/time.h>
12 12#include <linux/bcd.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/rtc.h> 14#include <asm/rtc.h>
15 15
16#ifndef BCD_TO_BIN
17#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
18#endif
19
20#ifndef BIN_TO_BCD
21#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
22#endif
23
24void sh_rtc_gettimeofday(struct timespec *ts) 16void sh_rtc_gettimeofday(struct timespec *ts)
25{ 17{
26 unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit; 18 unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index c0e79843f580..7ee4ca203616 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -20,6 +20,7 @@
20#include <linux/root_dev.h> 20#include <linux/root_dev.h>
21#include <linux/utsname.h> 21#include <linux/utsname.h>
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/pfn.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/sections.h> 26#include <asm/sections.h>
@@ -275,10 +276,6 @@ void __init setup_arch(char **cmdline_p)
275 276
276 sh_mv_setup(cmdline_p); 277 sh_mv_setup(cmdline_p);
277 278
278#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
279#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
280#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
281
282 /* 279 /*
283 * Find the highest page frame number we have available 280 * Find the highest page frame number we have available
284 */ 281 */
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index 07b172deb872..58c678e06667 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK
21 bool 21 bool
22 default y 22 default y
23 23
24config GENERIC_FIND_NEXT_BIT
25 bool
26 default y
27
28config GENERIC_HWEIGHT
29 bool
30 default y
31
24config GENERIC_CALIBRATE_DELAY 32config GENERIC_CALIBRATE_DELAY
25 bool 33 bool
26 default y 34 default y
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index c7a7b816a30f..d2711c9c9d13 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -48,6 +48,7 @@
48#include <linux/root_dev.h> 48#include <linux/root_dev.h>
49#include <linux/cpu.h> 49#include <linux/cpu.h>
50#include <linux/initrd.h> 50#include <linux/initrd.h>
51#include <linux/pfn.h>
51#include <asm/processor.h> 52#include <asm/processor.h>
52#include <asm/page.h> 53#include <asm/page.h>
53#include <asm/pgtable.h> 54#include <asm/pgtable.h>
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 0773c9f389f3..6b8f4d22abc6 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -30,6 +30,7 @@
30#include <linux/profile.h> 30#include <linux/profile.h>
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/bcd.h>
33 34
34#include <asm/registers.h> /* required by inline __asm__ stmt. */ 35#include <asm/registers.h> /* required by inline __asm__ stmt. */
35 36
@@ -105,14 +106,6 @@
105#define RCR1 rtc_base+0x38 106#define RCR1 rtc_base+0x38
106#define RCR2 rtc_base+0x3c 107#define RCR2 rtc_base+0x3c
107 108
108#ifndef BCD_TO_BIN
109#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
110#endif
111
112#ifndef BIN_TO_BCD
113#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
114#endif
115
116#define TICK_SIZE (tick_nsec / 1000) 109#define TICK_SIZE (tick_nsec / 1000)
117 110
118extern unsigned long wall_jiffies; 111extern unsigned long wall_jiffies;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7c58fc1a39c4..9431e967aa45 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -150,6 +150,14 @@ config RWSEM_GENERIC_SPINLOCK
150config RWSEM_XCHGADD_ALGORITHM 150config RWSEM_XCHGADD_ALGORITHM
151 bool 151 bool
152 152
153config GENERIC_FIND_NEXT_BIT
154 bool
155 default y
156
157config GENERIC_HWEIGHT
158 bool
159 default y
160
153config GENERIC_CALIBRATE_DELAY 161config GENERIC_CALIBRATE_DELAY
154 bool 162 bool
155 default y 163 default y
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 267afddf63cf..d1e2fc566486 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -162,6 +162,14 @@ config RWSEM_XCHGADD_ALGORITHM
162 bool 162 bool
163 default y 163 default y
164 164
165config GENERIC_FIND_NEXT_BIT
166 bool
167 default y
168
169config GENERIC_HWEIGHT
170 bool
171 default y if !ULTRA_HAS_POPULATION_COUNT
172
165config GENERIC_CALIBRATE_DELAY 173config GENERIC_CALIBRATE_DELAY
166 bool 174 bool
167 default y 175 default y
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index f819a9663a8d..900fb0b940d8 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16 3# Linux kernel version: 2.6.16
4# Mon Mar 20 01:23:21 2006 4# Sun Mar 26 14:58:11 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -38,6 +38,7 @@ CONFIG_POSIX_MQUEUE=y
38CONFIG_SYSCTL=y 38CONFIG_SYSCTL=y
39# CONFIG_AUDIT is not set 39# CONFIG_AUDIT is not set
40# CONFIG_IKCONFIG is not set 40# CONFIG_IKCONFIG is not set
41CONFIG_RELAY=y
41CONFIG_INITRAMFS_SOURCE="" 42CONFIG_INITRAMFS_SOURCE=""
42CONFIG_UID16=y 43CONFIG_UID16=y
43CONFIG_CC_OPTIMIZE_FOR_SIZE=y 44CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -53,10 +54,6 @@ CONFIG_BASE_FULL=y
53CONFIG_FUTEX=y 54CONFIG_FUTEX=y
54CONFIG_EPOLL=y 55CONFIG_EPOLL=y
55CONFIG_SHMEM=y 56CONFIG_SHMEM=y
56CONFIG_CC_ALIGN_FUNCTIONS=0
57CONFIG_CC_ALIGN_LABELS=0
58CONFIG_CC_ALIGN_LOOPS=0
59CONFIG_CC_ALIGN_JUMPS=0
60CONFIG_SLAB=y 57CONFIG_SLAB=y
61# CONFIG_TINY_SHMEM is not set 58# CONFIG_TINY_SHMEM is not set
62CONFIG_BASE_SMALL=0 59CONFIG_BASE_SMALL=0
@@ -68,7 +65,6 @@ CONFIG_BASE_SMALL=0
68CONFIG_MODULES=y 65CONFIG_MODULES=y
69CONFIG_MODULE_UNLOAD=y 66CONFIG_MODULE_UNLOAD=y
70CONFIG_MODULE_FORCE_UNLOAD=y 67CONFIG_MODULE_FORCE_UNLOAD=y
71CONFIG_OBSOLETE_MODPARM=y
72CONFIG_MODVERSIONS=y 68CONFIG_MODVERSIONS=y
73CONFIG_MODULE_SRCVERSION_ALL=y 69CONFIG_MODULE_SRCVERSION_ALL=y
74CONFIG_KMOD=y 70CONFIG_KMOD=y
@@ -76,6 +72,7 @@ CONFIG_KMOD=y
76# 72#
77# Block layer 73# Block layer
78# 74#
75CONFIG_BLK_DEV_IO_TRACE=y
79 76
80# 77#
81# IO Schedulers 78# IO Schedulers
@@ -111,6 +108,8 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
111CONFIG_US3_FREQ=m 108CONFIG_US3_FREQ=m
112CONFIG_US2E_FREQ=m 109CONFIG_US2E_FREQ=m
113CONFIG_RWSEM_XCHGADD_ALGORITHM=y 110CONFIG_RWSEM_XCHGADD_ALGORITHM=y
111CONFIG_GENERIC_FIND_NEXT_BIT=y
112CONFIG_GENERIC_HWEIGHT=y
114CONFIG_GENERIC_CALIBRATE_DELAY=y 113CONFIG_GENERIC_CALIBRATE_DELAY=y
115CONFIG_HUGETLB_PAGE_SIZE_4MB=y 114CONFIG_HUGETLB_PAGE_SIZE_4MB=y
116# CONFIG_HUGETLB_PAGE_SIZE_512K is not set 115# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
@@ -128,7 +127,6 @@ CONFIG_HAVE_MEMORY_PRESENT=y
128CONFIG_SPARSEMEM_EXTREME=y 127CONFIG_SPARSEMEM_EXTREME=y
129CONFIG_MEMORY_HOTPLUG=y 128CONFIG_MEMORY_HOTPLUG=y
130CONFIG_SPLIT_PTLOCK_CPUS=4 129CONFIG_SPLIT_PTLOCK_CPUS=4
131CONFIG_MIGRATION=y
132CONFIG_GENERIC_ISA_DMA=y 130CONFIG_GENERIC_ISA_DMA=y
133CONFIG_SBUS=y 131CONFIG_SBUS=y
134CONFIG_SBUSCHAR=y 132CONFIG_SBUSCHAR=y
@@ -136,7 +134,6 @@ CONFIG_SUN_AUXIO=y
136CONFIG_SUN_IO=y 134CONFIG_SUN_IO=y
137CONFIG_PCI=y 135CONFIG_PCI=y
138CONFIG_PCI_DOMAINS=y 136CONFIG_PCI_DOMAINS=y
139# CONFIG_PCI_LEGACY_PROC is not set
140# CONFIG_PCI_DEBUG is not set 137# CONFIG_PCI_DEBUG is not set
141CONFIG_SUN_OPENPROMFS=m 138CONFIG_SUN_OPENPROMFS=m
142CONFIG_SPARC32_COMPAT=y 139CONFIG_SPARC32_COMPAT=y
@@ -201,6 +198,8 @@ CONFIG_TCP_CONG_VEGAS=m
201CONFIG_TCP_CONG_SCALABLE=m 198CONFIG_TCP_CONG_SCALABLE=m
202CONFIG_IPV6=m 199CONFIG_IPV6=m
203CONFIG_IPV6_PRIVACY=y 200CONFIG_IPV6_PRIVACY=y
201CONFIG_IPV6_ROUTER_PREF=y
202CONFIG_IPV6_ROUTE_INFO=y
204CONFIG_INET6_AH=m 203CONFIG_INET6_AH=m
205CONFIG_INET6_ESP=m 204CONFIG_INET6_ESP=m
206CONFIG_INET6_IPCOMP=m 205CONFIG_INET6_IPCOMP=m
@@ -213,10 +212,12 @@ CONFIG_IPV6_TUNNEL=m
213# 212#
214CONFIG_IP_DCCP=m 213CONFIG_IP_DCCP=m
215CONFIG_INET_DCCP_DIAG=m 214CONFIG_INET_DCCP_DIAG=m
215CONFIG_IP_DCCP_ACKVEC=y
216 216
217# 217#
218# DCCP CCIDs Configuration (EXPERIMENTAL) 218# DCCP CCIDs Configuration (EXPERIMENTAL)
219# 219#
220CONFIG_IP_DCCP_CCID2=m
220CONFIG_IP_DCCP_CCID3=m 221CONFIG_IP_DCCP_CCID3=m
221CONFIG_IP_DCCP_TFRC_LIB=m 222CONFIG_IP_DCCP_TFRC_LIB=m
222 223
@@ -224,7 +225,6 @@ CONFIG_IP_DCCP_TFRC_LIB=m
224# DCCP Kernel Hacking 225# DCCP Kernel Hacking
225# 226#
226# CONFIG_IP_DCCP_DEBUG is not set 227# CONFIG_IP_DCCP_DEBUG is not set
227# CONFIG_IP_DCCP_UNLOAD_HACK is not set
228 228
229# 229#
230# SCTP Configuration (EXPERIMENTAL) 230# SCTP Configuration (EXPERIMENTAL)
@@ -309,6 +309,7 @@ CONFIG_BLK_DEV_NBD=m
309CONFIG_BLK_DEV_UB=m 309CONFIG_BLK_DEV_UB=m
310# CONFIG_BLK_DEV_RAM is not set 310# CONFIG_BLK_DEV_RAM is not set
311CONFIG_BLK_DEV_RAM_COUNT=16 311CONFIG_BLK_DEV_RAM_COUNT=16
312# CONFIG_BLK_DEV_INITRD is not set
312CONFIG_CDROM_PKTCDVD=m 313CONFIG_CDROM_PKTCDVD=m
313CONFIG_CDROM_PKTCDVD_BUFFERS=8 314CONFIG_CDROM_PKTCDVD_BUFFERS=8
314CONFIG_CDROM_PKTCDVD_WCACHE=y 315CONFIG_CDROM_PKTCDVD_WCACHE=y
@@ -722,7 +723,6 @@ CONFIG_I2C_ALGOBIT=y
722# CONFIG_I2C_PARPORT_LIGHT is not set 723# CONFIG_I2C_PARPORT_LIGHT is not set
723# CONFIG_I2C_PROSAVAGE is not set 724# CONFIG_I2C_PROSAVAGE is not set
724# CONFIG_I2C_SAVAGE4 is not set 725# CONFIG_I2C_SAVAGE4 is not set
725# CONFIG_SCx200_ACB is not set
726# CONFIG_I2C_SIS5595 is not set 726# CONFIG_I2C_SIS5595 is not set
727# CONFIG_I2C_SIS630 is not set 727# CONFIG_I2C_SIS630 is not set
728# CONFIG_I2C_SIS96X is not set 728# CONFIG_I2C_SIS96X is not set
@@ -808,10 +808,6 @@ CONFIG_HWMON=y
808# 808#
809 809
810# 810#
811# Multimedia Capabilities Port drivers
812#
813
814#
815# Multimedia devices 811# Multimedia devices
816# 812#
817# CONFIG_VIDEO_DEV is not set 813# CONFIG_VIDEO_DEV is not set
@@ -820,6 +816,7 @@ CONFIG_HWMON=y
820# Digital Video Broadcasting Devices 816# Digital Video Broadcasting Devices
821# 817#
822# CONFIG_DVB is not set 818# CONFIG_DVB is not set
819# CONFIG_USB_DABUSB is not set
823 820
824# 821#
825# Graphics support 822# Graphics support
@@ -901,10 +898,12 @@ CONFIG_SND_SEQ_DUMMY=m
901CONFIG_SND_OSSEMUL=y 898CONFIG_SND_OSSEMUL=y
902CONFIG_SND_MIXER_OSS=m 899CONFIG_SND_MIXER_OSS=m
903CONFIG_SND_PCM_OSS=m 900CONFIG_SND_PCM_OSS=m
901CONFIG_SND_PCM_OSS_PLUGINS=y
904CONFIG_SND_SEQUENCER_OSS=y 902CONFIG_SND_SEQUENCER_OSS=y
905# CONFIG_SND_RTCTIMER is not set 903# CONFIG_SND_RTCTIMER is not set
906# CONFIG_SND_DYNAMIC_MINORS is not set 904# CONFIG_SND_DYNAMIC_MINORS is not set
907CONFIG_SND_SUPPORT_OLD_API=y 905CONFIG_SND_SUPPORT_OLD_API=y
906CONFIG_SND_VERBOSE_PROCFS=y
908# CONFIG_SND_VERBOSE_PRINTK is not set 907# CONFIG_SND_VERBOSE_PRINTK is not set
909# CONFIG_SND_DEBUG is not set 908# CONFIG_SND_DEBUG is not set
910 909
@@ -987,6 +986,7 @@ CONFIG_SND_SUN_CS4231=m
987# 986#
988CONFIG_USB_ARCH_HAS_HCD=y 987CONFIG_USB_ARCH_HAS_HCD=y
989CONFIG_USB_ARCH_HAS_OHCI=y 988CONFIG_USB_ARCH_HAS_OHCI=y
989CONFIG_USB_ARCH_HAS_EHCI=y
990CONFIG_USB=y 990CONFIG_USB=y
991# CONFIG_USB_DEBUG is not set 991# CONFIG_USB_DEBUG is not set
992 992
@@ -1014,7 +1014,6 @@ CONFIG_USB_UHCI_HCD=m
1014# 1014#
1015# USB Device Class drivers 1015# USB Device Class drivers
1016# 1016#
1017# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
1018# CONFIG_USB_ACM is not set 1017# CONFIG_USB_ACM is not set
1019# CONFIG_USB_PRINTER is not set 1018# CONFIG_USB_PRINTER is not set
1020 1019
@@ -1058,15 +1057,6 @@ CONFIG_USB_HIDDEV=y
1058# CONFIG_USB_MICROTEK is not set 1057# CONFIG_USB_MICROTEK is not set
1059 1058
1060# 1059#
1061# USB Multimedia devices
1062#
1063# CONFIG_USB_DABUSB is not set
1064
1065#
1066# Video4Linux support is needed for USB Multimedia device support
1067#
1068
1069#
1070# USB Network Adapters 1060# USB Network Adapters
1071# 1061#
1072# CONFIG_USB_CATC is not set 1062# CONFIG_USB_CATC is not set
@@ -1194,7 +1184,6 @@ CONFIG_TMPFS=y
1194CONFIG_HUGETLBFS=y 1184CONFIG_HUGETLBFS=y
1195CONFIG_HUGETLB_PAGE=y 1185CONFIG_HUGETLB_PAGE=y
1196CONFIG_RAMFS=y 1186CONFIG_RAMFS=y
1197CONFIG_RELAYFS_FS=m
1198# CONFIG_CONFIGFS_FS is not set 1187# CONFIG_CONFIGFS_FS is not set
1199 1188
1200# 1189#
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index b9a9ce70e55c..ffc7309e9f22 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -6,9 +6,11 @@
6#include <linux/config.h> 6#include <linux/config.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/kprobes.h> 8#include <linux/kprobes.h>
9#include <linux/module.h>
9#include <asm/kdebug.h> 10#include <asm/kdebug.h>
10#include <asm/signal.h> 11#include <asm/signal.h>
11#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/uaccess.h>
12 14
13/* We do not have hardware single-stepping on sparc64. 15/* We do not have hardware single-stepping on sparc64.
14 * So we implement software single-stepping with breakpoint 16 * So we implement software single-stepping with breakpoint
@@ -302,16 +304,68 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
302{ 304{
303 struct kprobe *cur = kprobe_running(); 305 struct kprobe *cur = kprobe_running();
304 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 306 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
307 const struct exception_table_entry *entry;
308
309 switch(kcb->kprobe_status) {
310 case KPROBE_HIT_SS:
311 case KPROBE_REENTER:
312 /*
313 * We are here because the instruction being single
314 * stepped caused a page fault. We reset the current
315 * kprobe and the tpc points back to the probe address
316 * and allow the page fault handler to continue as a
317 * normal page fault.
318 */
319 regs->tpc = (unsigned long)cur->addr;
320 regs->tnpc = kcb->kprobe_orig_tnpc;
321 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
322 kcb->kprobe_orig_tstate_pil);
323 if (kcb->kprobe_status == KPROBE_REENTER)
324 restore_previous_kprobe(kcb);
325 else
326 reset_current_kprobe();
327 preempt_enable_no_resched();
328 break;
329 case KPROBE_HIT_ACTIVE:
330 case KPROBE_HIT_SSDONE:
331 /*
332 * We increment the nmissed count for accounting,
333 * we can also use npre/npostfault count for accouting
334 * these specific fault cases.
335 */
336 kprobes_inc_nmissed_count(cur);
337
338 /*
339 * We come here because instructions in the pre/post
340 * handler caused the page_fault, this could happen
341 * if handler tries to access user space by
342 * copy_from_user(), get_user() etc. Let the
343 * user-specified handler try to fix it first.
344 */
345 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
346 return 1;
305 347
306 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 348 /*
307 return 1; 349 * In case the user-specified fault handler returned
350 * zero, try to fix up.
351 */
308 352
309 if (kcb->kprobe_status & KPROBE_HIT_SS) { 353 entry = search_exception_tables(regs->tpc);
310 resume_execution(cur, regs, kcb); 354 if (entry) {
355 regs->tpc = entry->fixup;
356 regs->tnpc = regs->tpc + 4;
357 return 1;
358 }
311 359
312 reset_current_kprobe(); 360 /*
313 preempt_enable_no_resched(); 361 * fixup_exception() could not handle it,
362 * Let do_page_fault() fix it.
363 */
364 break;
365 default:
366 break;
314 } 367 }
368
315 return 0; 369 return 0;
316} 370}
317 371
@@ -324,6 +378,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
324 struct die_args *args = (struct die_args *)data; 378 struct die_args *args = (struct die_args *)data;
325 int ret = NOTIFY_DONE; 379 int ret = NOTIFY_DONE;
326 380
381 if (args->regs && user_mode(args->regs))
382 return ret;
383
327 switch (val) { 384 switch (val) {
328 case DIE_DEBUG: 385 case DIE_DEBUG:
329 if (kprobe_handler(args->regs)) 386 if (kprobe_handler(args->regs))
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9914a17651b4..f5e8db1de76b 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -175,11 +175,6 @@ EXPORT_SYMBOL(set_bit);
175EXPORT_SYMBOL(clear_bit); 175EXPORT_SYMBOL(clear_bit);
176EXPORT_SYMBOL(change_bit); 176EXPORT_SYMBOL(change_bit);
177 177
178/* Bit searching */
179EXPORT_SYMBOL(find_next_bit);
180EXPORT_SYMBOL(find_next_zero_bit);
181EXPORT_SYMBOL(find_next_zero_le_bit);
182
183EXPORT_SYMBOL(ivector_table); 178EXPORT_SYMBOL(ivector_table);
184EXPORT_SYMBOL(enable_irq); 179EXPORT_SYMBOL(enable_irq);
185EXPORT_SYMBOL(disable_irq); 180EXPORT_SYMBOL(disable_irq);
@@ -279,18 +274,9 @@ EXPORT_SYMBOL(__prom_getsibling);
279 274
280/* sparc library symbols */ 275/* sparc library symbols */
281EXPORT_SYMBOL(strlen); 276EXPORT_SYMBOL(strlen);
282EXPORT_SYMBOL(strnlen);
283EXPORT_SYMBOL(__strlen_user); 277EXPORT_SYMBOL(__strlen_user);
284EXPORT_SYMBOL(__strnlen_user); 278EXPORT_SYMBOL(__strnlen_user);
285EXPORT_SYMBOL(strcpy);
286EXPORT_SYMBOL(strncpy);
287EXPORT_SYMBOL(strcat);
288EXPORT_SYMBOL(strncat);
289EXPORT_SYMBOL(strcmp);
290EXPORT_SYMBOL(strchr);
291EXPORT_SYMBOL(strrchr);
292EXPORT_SYMBOL(strpbrk); 279EXPORT_SYMBOL(strpbrk);
293EXPORT_SYMBOL(strstr);
294 280
295#ifdef CONFIG_SOLARIS_EMUL_MODULE 281#ifdef CONFIG_SOLARIS_EMUL_MODULE
296EXPORT_SYMBOL(linux_sparc_syscall); 282EXPORT_SYMBOL(linux_sparc_syscall);
@@ -324,7 +310,6 @@ EXPORT_SYMBOL(__memscan_zero);
324EXPORT_SYMBOL(__memscan_generic); 310EXPORT_SYMBOL(__memscan_generic);
325EXPORT_SYMBOL(__memcmp); 311EXPORT_SYMBOL(__memcmp);
326EXPORT_SYMBOL(__memset); 312EXPORT_SYMBOL(__memset);
327EXPORT_SYMBOL(memchr);
328 313
329EXPORT_SYMBOL(csum_partial); 314EXPORT_SYMBOL(csum_partial);
330EXPORT_SYMBOL(csum_partial_copy_nocheck); 315EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 0e41df024489..2e906bad56fa 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -19,7 +19,6 @@
19#include <linux/resource.h> 19#include <linux/resource.h>
20#include <linux/times.h> 20#include <linux/times.h>
21#include <linux/utsname.h> 21#include <linux/utsname.h>
22#include <linux/timex.h>
23#include <linux/smp.h> 22#include <linux/smp.h>
24#include <linux/smp_lock.h> 23#include <linux/smp_lock.h>
25#include <linux/sem.h> 24#include <linux/sem.h>
@@ -945,79 +944,6 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
945 return ret; 944 return ret;
946} 945}
947 946
948/* Handle adjtimex compatibility. */
949
950struct timex32 {
951 u32 modes;
952 s32 offset, freq, maxerror, esterror;
953 s32 status, constant, precision, tolerance;
954 struct compat_timeval time;
955 s32 tick;
956 s32 ppsfreq, jitter, shift, stabil;
957 s32 jitcnt, calcnt, errcnt, stbcnt;
958 s32 :32; s32 :32; s32 :32; s32 :32;
959 s32 :32; s32 :32; s32 :32; s32 :32;
960 s32 :32; s32 :32; s32 :32; s32 :32;
961};
962
963extern int do_adjtimex(struct timex *);
964
965asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
966{
967 struct timex txc;
968 int ret;
969
970 memset(&txc, 0, sizeof(struct timex));
971
972 if (get_user(txc.modes, &utp->modes) ||
973 __get_user(txc.offset, &utp->offset) ||
974 __get_user(txc.freq, &utp->freq) ||
975 __get_user(txc.maxerror, &utp->maxerror) ||
976 __get_user(txc.esterror, &utp->esterror) ||
977 __get_user(txc.status, &utp->status) ||
978 __get_user(txc.constant, &utp->constant) ||
979 __get_user(txc.precision, &utp->precision) ||
980 __get_user(txc.tolerance, &utp->tolerance) ||
981 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
982 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
983 __get_user(txc.tick, &utp->tick) ||
984 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
985 __get_user(txc.jitter, &utp->jitter) ||
986 __get_user(txc.shift, &utp->shift) ||
987 __get_user(txc.stabil, &utp->stabil) ||
988 __get_user(txc.jitcnt, &utp->jitcnt) ||
989 __get_user(txc.calcnt, &utp->calcnt) ||
990 __get_user(txc.errcnt, &utp->errcnt) ||
991 __get_user(txc.stbcnt, &utp->stbcnt))
992 return -EFAULT;
993
994 ret = do_adjtimex(&txc);
995
996 if (put_user(txc.modes, &utp->modes) ||
997 __put_user(txc.offset, &utp->offset) ||
998 __put_user(txc.freq, &utp->freq) ||
999 __put_user(txc.maxerror, &utp->maxerror) ||
1000 __put_user(txc.esterror, &utp->esterror) ||
1001 __put_user(txc.status, &utp->status) ||
1002 __put_user(txc.constant, &utp->constant) ||
1003 __put_user(txc.precision, &utp->precision) ||
1004 __put_user(txc.tolerance, &utp->tolerance) ||
1005 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
1006 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
1007 __put_user(txc.tick, &utp->tick) ||
1008 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
1009 __put_user(txc.jitter, &utp->jitter) ||
1010 __put_user(txc.shift, &utp->shift) ||
1011 __put_user(txc.stabil, &utp->stabil) ||
1012 __put_user(txc.jitcnt, &utp->jitcnt) ||
1013 __put_user(txc.calcnt, &utp->calcnt) ||
1014 __put_user(txc.errcnt, &utp->errcnt) ||
1015 __put_user(txc.stbcnt, &utp->stbcnt))
1016 ret = -EFAULT;
1017
1018 return ret;
1019}
1020
1021/* This is just a version for 32-bit applications which does 947/* This is just a version for 32-bit applications which does
1022 * not force O_LARGEFILE on. 948 * not force O_LARGEFILE on.
1023 */ 949 */
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index c3adb7ac167d..3b250f2318fd 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -63,7 +63,7 @@ sys_call_table32:
63/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir 63/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
64 .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 64 .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
65/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo 65/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
66 .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex 66 .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
67/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid 67/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
68 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 68 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
69/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 69/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 7d61f1bfd3d3..e55b5c6ece02 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -641,23 +641,8 @@ static void __init set_system_time(void)
641 mon = MSTK_REG_MONTH(mregs); 641 mon = MSTK_REG_MONTH(mregs);
642 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); 642 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
643 } else { 643 } else {
644 int i;
645
646 /* Dallas 12887 RTC chip. */ 644 /* Dallas 12887 RTC chip. */
647 645
648 /* Stolen from arch/i386/kernel/time.c, see there for
649 * credits and descriptive comments.
650 */
651 for (i = 0; i < 1000000; i++) {
652 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
653 break;
654 udelay(10);
655 }
656 for (i = 0; i < 1000000; i++) {
657 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
658 break;
659 udelay(10);
660 }
661 do { 646 do {
662 sec = CMOS_READ(RTC_SECONDS); 647 sec = CMOS_READ(RTC_SECONDS);
663 min = CMOS_READ(RTC_MINUTES); 648 min = CMOS_READ(RTC_MINUTES);
@@ -666,6 +651,7 @@ static void __init set_system_time(void)
666 mon = CMOS_READ(RTC_MONTH); 651 mon = CMOS_READ(RTC_MONTH);
667 year = CMOS_READ(RTC_YEAR); 652 year = CMOS_READ(RTC_YEAR);
668 } while (sec != CMOS_READ(RTC_SECONDS)); 653 } while (sec != CMOS_READ(RTC_SECONDS));
654
669 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 655 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
670 BCD_TO_BIN(sec); 656 BCD_TO_BIN(sec);
671 BCD_TO_BIN(min); 657 BCD_TO_BIN(min);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index df612e4f75f9..ff090bb9734b 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -43,18 +43,19 @@
43#include <linux/kmod.h> 43#include <linux/kmod.h>
44#endif 44#endif
45 45
46struct notifier_block *sparc64die_chain; 46ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
47static DEFINE_SPINLOCK(die_notifier_lock);
48 47
49int register_die_notifier(struct notifier_block *nb) 48int register_die_notifier(struct notifier_block *nb)
50{ 49{
51 int err = 0; 50 return atomic_notifier_chain_register(&sparc64die_chain, nb);
52 unsigned long flags;
53 spin_lock_irqsave(&die_notifier_lock, flags);
54 err = notifier_chain_register(&sparc64die_chain, nb);
55 spin_unlock_irqrestore(&die_notifier_lock, flags);
56 return err;
57} 51}
52EXPORT_SYMBOL(register_die_notifier);
53
54int unregister_die_notifier(struct notifier_block *nb)
55{
56 return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
57}
58EXPORT_SYMBOL(unregister_die_notifier);
58 59
59/* When an irrecoverable trap occurs at tl > 0, the trap entry 60/* When an irrecoverable trap occurs at tl > 0, the trap entry
60 * code logs the trap state registers at every level in the trap 61 * code logs the trap state registers at every level in the trap
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 8812ded19f01..4a725d8985f1 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
14 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ 14 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
15 NGpage.o NGbzero.o \ 15 NGpage.o NGbzero.o \
16 copy_in_user.o user_fixup.o memmove.o \ 16 copy_in_user.o user_fixup.o memmove.o \
17 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 17 mcount.o ipcsum.o rwsem.o xor.o delay.o
18 18
19obj-y += iomap.o 19obj-y += iomap.o
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
deleted file mode 100644
index 6059557067b4..000000000000
--- a/arch/sparc64/lib/find_bit.c
+++ /dev/null
@@ -1,127 +0,0 @@
1#include <linux/bitops.h>
2
3/**
4 * find_next_bit - find the next set bit in a memory region
5 * @addr: The address to base the search on
6 * @offset: The bitnumber to start searching at
7 * @size: The maximum size to search
8 */
9unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
10 unsigned long offset)
11{
12 const unsigned long *p = addr + (offset >> 6);
13 unsigned long result = offset & ~63UL;
14 unsigned long tmp;
15
16 if (offset >= size)
17 return size;
18 size -= result;
19 offset &= 63UL;
20 if (offset) {
21 tmp = *(p++);
22 tmp &= (~0UL << offset);
23 if (size < 64)
24 goto found_first;
25 if (tmp)
26 goto found_middle;
27 size -= 64;
28 result += 64;
29 }
30 while (size & ~63UL) {
31 if ((tmp = *(p++)))
32 goto found_middle;
33 result += 64;
34 size -= 64;
35 }
36 if (!size)
37 return result;
38 tmp = *p;
39
40found_first:
41 tmp &= (~0UL >> (64 - size));
42 if (tmp == 0UL) /* Are any bits set? */
43 return result + size; /* Nope. */
44found_middle:
45 return result + __ffs(tmp);
46}
47
48/* find_next_zero_bit() finds the first zero bit in a bit string of length
49 * 'size' bits, starting the search at bit 'offset'. This is largely based
50 * on Linus's ALPHA routines, which are pretty portable BTW.
51 */
52
53unsigned long find_next_zero_bit(const unsigned long *addr,
54 unsigned long size, unsigned long offset)
55{
56 const unsigned long *p = addr + (offset >> 6);
57 unsigned long result = offset & ~63UL;
58 unsigned long tmp;
59
60 if (offset >= size)
61 return size;
62 size -= result;
63 offset &= 63UL;
64 if (offset) {
65 tmp = *(p++);
66 tmp |= ~0UL >> (64-offset);
67 if (size < 64)
68 goto found_first;
69 if (~tmp)
70 goto found_middle;
71 size -= 64;
72 result += 64;
73 }
74 while (size & ~63UL) {
75 if (~(tmp = *(p++)))
76 goto found_middle;
77 result += 64;
78 size -= 64;
79 }
80 if (!size)
81 return result;
82 tmp = *p;
83
84found_first:
85 tmp |= ~0UL << size;
86 if (tmp == ~0UL) /* Are any bits zero? */
87 return result + size; /* Nope. */
88found_middle:
89 return result + ffz(tmp);
90}
91
92unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
93{
94 unsigned long *p = addr + (offset >> 6);
95 unsigned long result = offset & ~63UL;
96 unsigned long tmp;
97
98 if (offset >= size)
99 return size;
100 size -= result;
101 offset &= 63UL;
102 if(offset) {
103 tmp = __swab64p(p++);
104 tmp |= (~0UL >> (64-offset));
105 if(size < 64)
106 goto found_first;
107 if(~tmp)
108 goto found_middle;
109 size -= 64;
110 result += 64;
111 }
112 while(size & ~63) {
113 if(~(tmp = __swab64p(p++)))
114 goto found_middle;
115 result += 64;
116 size -= 64;
117 }
118 if(!size)
119 return result;
120 tmp = __swab64p(p);
121found_first:
122 tmp |= (~0UL << size);
123 if (tmp == ~0UL) /* Are any bits zero? */
124 return result + size; /* Nope. */
125found_middle:
126 return result + ffz(tmp);
127}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index d21ff3230c02..0db2f7d9fab5 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -413,12 +413,12 @@ good_area:
413#ifdef CONFIG_HUGETLB_PAGE 413#ifdef CONFIG_HUGETLB_PAGE
414 mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); 414 mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
415#endif 415#endif
416 if (unlikely(mm_rss >= 416 if (unlikely(mm_rss >
417 mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) 417 mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
418 tsb_grow(mm, MM_TSB_BASE, mm_rss); 418 tsb_grow(mm, MM_TSB_BASE, mm_rss);
419#ifdef CONFIG_HUGETLB_PAGE 419#ifdef CONFIG_HUGETLB_PAGE
420 mm_rss = mm->context.huge_pte_count; 420 mm_rss = mm->context.huge_pte_count;
421 if (unlikely(mm_rss >= 421 if (unlikely(mm_rss >
422 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) 422 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
423 tsb_grow(mm, MM_TSB_HUGE, mm_rss); 423 tsb_grow(mm, MM_TSB_HUGE, mm_rss);
424#endif 424#endif
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index ef79ed25aecd..85e6a55b3b59 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -52,3 +52,8 @@ config ARCH_HAS_SC_SIGNALS
52config ARCH_REUSE_HOST_VSYSCALL_AREA 52config ARCH_REUSE_HOST_VSYSCALL_AREA
53 bool 53 bool
54 default y 54 default y
55
56config GENERIC_HWEIGHT
57 bool
58 default y
59
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index aae19bc4b06a..f60e9e506424 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -46,3 +46,8 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
46config SMP_BROKEN 46config SMP_BROKEN
47 bool 47 bool
48 default y 48 default y
49
50config GENERIC_HWEIGHT
51 bool
52 default y
53
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 54388d10bcf9..1488816588ea 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -762,7 +762,8 @@ static struct notifier_block panic_exit_notifier = {
762 762
763static int add_notifier(void) 763static int add_notifier(void)
764{ 764{
765 notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); 765 atomic_notifier_chain_register(&panic_notifier_list,
766 &panic_exit_notifier);
766 return(0); 767 return(0);
767} 768}
768 769
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index fa617e0719ab..0336575d2448 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -71,7 +71,7 @@ struct io_thread_req {
71 int error; 71 int error;
72}; 72};
73 73
74extern int open_ubd_file(char *file, struct openflags *openflags, 74extern int open_ubd_file(char *file, struct openflags *openflags, int shared,
75 char **backing_file_out, int *bitmap_offset_out, 75 char **backing_file_out, int *bitmap_offset_out,
76 unsigned long *bitmap_len_out, int *data_offset_out, 76 unsigned long *bitmap_len_out, int *data_offset_out,
77 int *create_cow_out); 77 int *create_cow_out);
@@ -137,7 +137,7 @@ static int fake_major = MAJOR_NR;
137 137
138static struct gendisk *ubd_gendisk[MAX_DEV]; 138static struct gendisk *ubd_gendisk[MAX_DEV];
139static struct gendisk *fake_gendisk[MAX_DEV]; 139static struct gendisk *fake_gendisk[MAX_DEV];
140 140
141#ifdef CONFIG_BLK_DEV_UBD_SYNC 141#ifdef CONFIG_BLK_DEV_UBD_SYNC
142#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ 142#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
143 .cl = 1 }) 143 .cl = 1 })
@@ -168,6 +168,7 @@ struct ubd {
168 __u64 size; 168 __u64 size;
169 struct openflags boot_openflags; 169 struct openflags boot_openflags;
170 struct openflags openflags; 170 struct openflags openflags;
171 int shared;
171 int no_cow; 172 int no_cow;
172 struct cow cow; 173 struct cow cow;
173 struct platform_device pdev; 174 struct platform_device pdev;
@@ -189,6 +190,7 @@ struct ubd {
189 .boot_openflags = OPEN_FLAGS, \ 190 .boot_openflags = OPEN_FLAGS, \
190 .openflags = OPEN_FLAGS, \ 191 .openflags = OPEN_FLAGS, \
191 .no_cow = 0, \ 192 .no_cow = 0, \
193 .shared = 0, \
192 .cow = DEFAULT_COW, \ 194 .cow = DEFAULT_COW, \
193} 195}
194 196
@@ -305,7 +307,7 @@ static int ubd_setup_common(char *str, int *index_out)
305 } 307 }
306 major = simple_strtoul(str, &end, 0); 308 major = simple_strtoul(str, &end, 0);
307 if((*end != '\0') || (end == str)){ 309 if((*end != '\0') || (end == str)){
308 printk(KERN_ERR 310 printk(KERN_ERR
309 "ubd_setup : didn't parse major number\n"); 311 "ubd_setup : didn't parse major number\n");
310 return(1); 312 return(1);
311 } 313 }
@@ -316,7 +318,7 @@ static int ubd_setup_common(char *str, int *index_out)
316 printk(KERN_ERR "Can't assign a fake major twice\n"); 318 printk(KERN_ERR "Can't assign a fake major twice\n");
317 goto out1; 319 goto out1;
318 } 320 }
319 321
320 fake_major = major; 322 fake_major = major;
321 323
322 printk(KERN_INFO "Setting extra ubd major number to %d\n", 324 printk(KERN_INFO "Setting extra ubd major number to %d\n",
@@ -351,7 +353,7 @@ static int ubd_setup_common(char *str, int *index_out)
351 if (index_out) 353 if (index_out)
352 *index_out = n; 354 *index_out = n;
353 355
354 for (i = 0; i < 4; i++) { 356 for (i = 0; i < sizeof("rscd="); i++) {
355 switch (*str) { 357 switch (*str) {
356 case 'r': 358 case 'r':
357 flags.w = 0; 359 flags.w = 0;
@@ -362,11 +364,14 @@ static int ubd_setup_common(char *str, int *index_out)
362 case 'd': 364 case 'd':
363 dev->no_cow = 1; 365 dev->no_cow = 1;
364 break; 366 break;
367 case 'c':
368 dev->shared = 1;
369 break;
365 case '=': 370 case '=':
366 str++; 371 str++;
367 goto break_loop; 372 goto break_loop;
368 default: 373 default:
369 printk(KERN_ERR "ubd_setup : Expected '=' or flag letter (r,s or d)\n"); 374 printk(KERN_ERR "ubd_setup : Expected '=' or flag letter (r, s, c, or d)\n");
370 goto out; 375 goto out;
371 } 376 }
372 str++; 377 str++;
@@ -515,7 +520,7 @@ static void ubd_handler(void)
515 spin_unlock(&ubd_io_lock); 520 spin_unlock(&ubd_io_lock);
516 return; 521 return;
517 } 522 }
518 523
519 ubd_finish(rq, req.error); 524 ubd_finish(rq, req.error);
520 reactivate_fd(thread_fd, UBD_IRQ); 525 reactivate_fd(thread_fd, UBD_IRQ);
521 do_ubd_request(ubd_queue); 526 do_ubd_request(ubd_queue);
@@ -532,7 +537,7 @@ static int io_pid = -1;
532 537
533void kill_io_thread(void) 538void kill_io_thread(void)
534{ 539{
535 if(io_pid != -1) 540 if(io_pid != -1)
536 os_kill_process(io_pid, 1); 541 os_kill_process(io_pid, 1);
537} 542}
538 543
@@ -567,14 +572,15 @@ static int ubd_open_dev(struct ubd *dev)
567 create_cow = 0; 572 create_cow = 0;
568 create_ptr = (dev->cow.file != NULL) ? &create_cow : NULL; 573 create_ptr = (dev->cow.file != NULL) ? &create_cow : NULL;
569 back_ptr = dev->no_cow ? NULL : &dev->cow.file; 574 back_ptr = dev->no_cow ? NULL : &dev->cow.file;
570 dev->fd = open_ubd_file(dev->file, &dev->openflags, back_ptr, 575 dev->fd = open_ubd_file(dev->file, &dev->openflags, dev->shared,
571 &dev->cow.bitmap_offset, &dev->cow.bitmap_len, 576 back_ptr, &dev->cow.bitmap_offset,
572 &dev->cow.data_offset, create_ptr); 577 &dev->cow.bitmap_len, &dev->cow.data_offset,
578 create_ptr);
573 579
574 if((dev->fd == -ENOENT) && create_cow){ 580 if((dev->fd == -ENOENT) && create_cow){
575 dev->fd = create_cow_file(dev->file, dev->cow.file, 581 dev->fd = create_cow_file(dev->file, dev->cow.file,
576 dev->openflags, 1 << 9, PAGE_SIZE, 582 dev->openflags, 1 << 9, PAGE_SIZE,
577 &dev->cow.bitmap_offset, 583 &dev->cow.bitmap_offset,
578 &dev->cow.bitmap_len, 584 &dev->cow.bitmap_len,
579 &dev->cow.data_offset); 585 &dev->cow.data_offset);
580 if(dev->fd >= 0){ 586 if(dev->fd >= 0){
@@ -598,16 +604,16 @@ static int ubd_open_dev(struct ubd *dev)
598 } 604 }
599 flush_tlb_kernel_vm(); 605 flush_tlb_kernel_vm();
600 606
601 err = read_cow_bitmap(dev->fd, dev->cow.bitmap, 607 err = read_cow_bitmap(dev->fd, dev->cow.bitmap,
602 dev->cow.bitmap_offset, 608 dev->cow.bitmap_offset,
603 dev->cow.bitmap_len); 609 dev->cow.bitmap_len);
604 if(err < 0) 610 if(err < 0)
605 goto error; 611 goto error;
606 612
607 flags = dev->openflags; 613 flags = dev->openflags;
608 flags.w = 0; 614 flags.w = 0;
609 err = open_ubd_file(dev->cow.file, &flags, NULL, NULL, NULL, 615 err = open_ubd_file(dev->cow.file, &flags, dev->shared, NULL,
610 NULL, NULL); 616 NULL, NULL, NULL, NULL);
611 if(err < 0) goto error; 617 if(err < 0) goto error;
612 dev->cow.fd = err; 618 dev->cow.fd = err;
613 } 619 }
@@ -685,11 +691,11 @@ static int ubd_add(int n)
685 dev->size = ROUND_BLOCK(dev->size); 691 dev->size = ROUND_BLOCK(dev->size);
686 692
687 err = ubd_new_disk(MAJOR_NR, dev->size, n, &ubd_gendisk[n]); 693 err = ubd_new_disk(MAJOR_NR, dev->size, n, &ubd_gendisk[n]);
688 if(err) 694 if(err)
689 goto out_close; 695 goto out_close;
690 696
691 if(fake_major != MAJOR_NR) 697 if(fake_major != MAJOR_NR)
692 ubd_new_disk(fake_major, dev->size, n, 698 ubd_new_disk(fake_major, dev->size, n,
693 &fake_gendisk[n]); 699 &fake_gendisk[n]);
694 700
695 /* perhaps this should also be under the "if (fake_major)" above */ 701 /* perhaps this should also be under the "if (fake_major)" above */
@@ -854,7 +860,7 @@ int ubd_init(void)
854 return -1; 860 return -1;
855 } 861 }
856 platform_driver_register(&ubd_driver); 862 platform_driver_register(&ubd_driver);
857 for (i = 0; i < MAX_DEV; i++) 863 for (i = 0; i < MAX_DEV; i++)
858 ubd_add(i); 864 ubd_add(i);
859 return 0; 865 return 0;
860} 866}
@@ -872,16 +878,16 @@ int ubd_driver_init(void){
872 * enough. So use anyway the io thread. */ 878 * enough. So use anyway the io thread. */
873 } 879 }
874 stack = alloc_stack(0, 0); 880 stack = alloc_stack(0, 0);
875 io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), 881 io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
876 &thread_fd); 882 &thread_fd);
877 if(io_pid < 0){ 883 if(io_pid < 0){
878 printk(KERN_ERR 884 printk(KERN_ERR
879 "ubd : Failed to start I/O thread (errno = %d) - " 885 "ubd : Failed to start I/O thread (errno = %d) - "
880 "falling back to synchronous I/O\n", -io_pid); 886 "falling back to synchronous I/O\n", -io_pid);
881 io_pid = -1; 887 io_pid = -1;
882 return(0); 888 return(0);
883 } 889 }
884 err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, 890 err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
885 SA_INTERRUPT, "ubd", ubd_dev); 891 SA_INTERRUPT, "ubd", ubd_dev);
886 if(err != 0) 892 if(err != 0)
887 printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); 893 printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
@@ -978,7 +984,7 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
978 if(req->op == UBD_READ) { 984 if(req->op == UBD_READ) {
979 for(i = 0; i < req->length >> 9; i++){ 985 for(i = 0; i < req->length >> 9; i++){
980 if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) 986 if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
981 ubd_set_bit(i, (unsigned char *) 987 ubd_set_bit(i, (unsigned char *)
982 &req->sector_mask); 988 &req->sector_mask);
983 } 989 }
984 } 990 }
@@ -999,7 +1005,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
999 1005
1000 /* This should be impossible now */ 1006 /* This should be impossible now */
1001 if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ 1007 if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
1002 printk("Write attempted on readonly ubd device %s\n", 1008 printk("Write attempted on readonly ubd device %s\n",
1003 disk->disk_name); 1009 disk->disk_name);
1004 end_request(req, 0); 1010 end_request(req, 0);
1005 return(1); 1011 return(1);
@@ -1182,7 +1188,7 @@ int read_cow_bitmap(int fd, void *buf, int offset, int len)
1182 return(0); 1188 return(0);
1183} 1189}
1184 1190
1185int open_ubd_file(char *file, struct openflags *openflags, 1191int open_ubd_file(char *file, struct openflags *openflags, int shared,
1186 char **backing_file_out, int *bitmap_offset_out, 1192 char **backing_file_out, int *bitmap_offset_out,
1187 unsigned long *bitmap_len_out, int *data_offset_out, 1193 unsigned long *bitmap_len_out, int *data_offset_out,
1188 int *create_cow_out) 1194 int *create_cow_out)
@@ -1206,10 +1212,14 @@ int open_ubd_file(char *file, struct openflags *openflags,
1206 return fd; 1212 return fd;
1207 } 1213 }
1208 1214
1209 err = os_lock_file(fd, openflags->w); 1215 if(shared)
1210 if(err < 0){ 1216 printk("Not locking \"%s\" on the host\n", file);
1211 printk("Failed to lock '%s', err = %d\n", file, -err); 1217 else {
1212 goto out_close; 1218 err = os_lock_file(fd, openflags->w);
1219 if(err < 0){
1220 printk("Failed to lock '%s', err = %d\n", file, -err);
1221 goto out_close;
1222 }
1213 } 1223 }
1214 1224
1215 /* Succesful return case! */ 1225 /* Succesful return case! */
@@ -1260,7 +1270,7 @@ int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
1260 int err, fd; 1270 int err, fd;
1261 1271
1262 flags.c = 1; 1272 flags.c = 1;
1263 fd = open_ubd_file(cow_file, &flags, NULL, NULL, NULL, NULL, NULL); 1273 fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
1264 if(fd < 0){ 1274 if(fd < 0){
1265 err = fd; 1275 err = fd;
1266 printk("Open of COW file '%s' failed, errno = %d\n", cow_file, 1276 printk("Open of COW file '%s' failed, errno = %d\n", cow_file,
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index b61deb8b362a..69a93c804f0e 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -6,6 +6,17 @@
6#ifndef __IRQ_USER_H__ 6#ifndef __IRQ_USER_H__
7#define __IRQ_USER_H__ 7#define __IRQ_USER_H__
8 8
9struct irq_fd {
10 struct irq_fd *next;
11 void *id;
12 int fd;
13 int type;
14 int irq;
15 int pid;
16 int events;
17 int current_events;
18};
19
9enum { IRQ_READ, IRQ_WRITE }; 20enum { IRQ_READ, IRQ_WRITE };
10 21
11extern void sigio_handler(int sig, union uml_pt_regs *regs); 22extern void sigio_handler(int sig, union uml_pt_regs *regs);
@@ -16,8 +27,6 @@ extern void reactivate_fd(int fd, int irqnum);
16extern void deactivate_fd(int fd, int irqnum); 27extern void deactivate_fd(int fd, int irqnum);
17extern int deactivate_all_fds(void); 28extern int deactivate_all_fds(void);
18extern void forward_interrupts(int pid); 29extern void forward_interrupts(int pid);
19extern void init_irq_signals(int on_sigstack);
20extern void forward_ipi(int fd, int pid);
21extern int activate_ipi(int fd, int pid); 30extern int activate_ipi(int fd, int pid);
22extern unsigned long irq_lock(void); 31extern unsigned long irq_lock(void);
23extern void irq_unlock(unsigned long flags); 32extern void irq_unlock(unsigned long flags);
diff --git a/arch/um/include/kern.h b/arch/um/include/kern.h
index 7d223beccbc0..4ce3fc650e57 100644
--- a/arch/um/include/kern.h
+++ b/arch/um/include/kern.h
@@ -29,7 +29,7 @@ extern int getuid(void);
29extern int getgid(void); 29extern int getgid(void);
30extern int pause(void); 30extern int pause(void);
31extern int write(int, const void *, int); 31extern int write(int, const void *, int);
32extern int exit(int); 32extern void exit(int);
33extern int close(int); 33extern int close(int);
34extern int read(unsigned int, char *, int); 34extern int read(unsigned int, char *, int);
35extern int pipe(int *); 35extern int pipe(int *);
diff --git a/arch/um/include/misc_constants.h b/arch/um/include/misc_constants.h
new file mode 100644
index 000000000000..989bc08de36e
--- /dev/null
+++ b/arch/um/include/misc_constants.h
@@ -0,0 +1,6 @@
1#ifndef __MISC_CONSTANT_H_
2#define __MISC_CONSTANT_H_
3
4#include <user_constants.h>
5
6#endif
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 2a1c64d8d0bf..d3d1bc6074ef 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -12,6 +12,7 @@
12#include "sysdep/ptrace.h" 12#include "sysdep/ptrace.h"
13#include "kern_util.h" 13#include "kern_util.h"
14#include "skas/mm_id.h" 14#include "skas/mm_id.h"
15#include "irq_user.h"
15 16
16#define OS_TYPE_FILE 1 17#define OS_TYPE_FILE 1
17#define OS_TYPE_DIR 2 18#define OS_TYPE_DIR 2
@@ -121,6 +122,7 @@ static inline struct openflags of_cloexec(struct openflags flags)
121 return(flags); 122 return(flags);
122} 123}
123 124
125/* file.c */
124extern int os_stat_file(const char *file_name, struct uml_stat *buf); 126extern int os_stat_file(const char *file_name, struct uml_stat *buf);
125extern int os_stat_fd(const int fd, struct uml_stat *buf); 127extern int os_stat_fd(const int fd, struct uml_stat *buf);
126extern int os_access(const char *file, int mode); 128extern int os_access(const char *file, int mode);
@@ -156,10 +158,20 @@ extern int os_connect_socket(char *name);
156extern int os_file_type(char *file); 158extern int os_file_type(char *file);
157extern int os_file_mode(char *file, struct openflags *mode_out); 159extern int os_file_mode(char *file, struct openflags *mode_out);
158extern int os_lock_file(int fd, int excl); 160extern int os_lock_file(int fd, int excl);
161extern void os_flush_stdout(void);
162extern int os_stat_filesystem(char *path, long *bsize_out,
163 long long *blocks_out, long long *bfree_out,
164 long long *bavail_out, long long *files_out,
165 long long *ffree_out, void *fsid_out,
166 int fsid_size, long *namelen_out,
167 long *spare_out);
168extern int os_change_dir(char *dir);
169extern int os_fchange_dir(int fd);
159 170
160/* start_up.c */ 171/* start_up.c */
161extern void os_early_checks(void); 172extern void os_early_checks(void);
162extern int can_do_skas(void); 173extern int can_do_skas(void);
174extern void os_check_bugs(void);
163 175
164/* Make sure they are clear when running in TT mode. Required by 176/* Make sure they are clear when running in TT mode. Required by
165 * SEGV_MAYBE_FIXABLE */ 177 * SEGV_MAYBE_FIXABLE */
@@ -198,6 +210,8 @@ extern void os_flush_stdout(void);
198/* tt.c 210/* tt.c
199 * for tt mode only (will be deleted in future...) 211 * for tt mode only (will be deleted in future...)
200 */ 212 */
213extern void forward_ipi(int fd, int pid);
214extern void kill_child_dead(int pid);
201extern void stop(void); 215extern void stop(void);
202extern int wait_for_stop(int pid, int sig, int cont_type, void *relay); 216extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
203extern int protect_memory(unsigned long addr, unsigned long len, 217extern int protect_memory(unsigned long addr, unsigned long len,
@@ -294,4 +308,26 @@ extern void initial_thread_cb_skas(void (*proc)(void *),
294extern void halt_skas(void); 308extern void halt_skas(void);
295extern void reboot_skas(void); 309extern void reboot_skas(void);
296 310
311/* irq.c */
312extern int os_waiting_for_events(struct irq_fd *active_fds);
313extern int os_isatty(int fd);
314extern int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds);
315extern void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
316 struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2);
317extern void os_free_irq_later(struct irq_fd *active_fds,
318 int irq, void *dev_id);
319extern int os_get_pollfd(int i);
320extern void os_set_pollfd(int i, int fd);
321extern void os_set_ioignore(void);
322extern void init_irq_signals(int on_sigstack);
323
324/* sigio.c */
325extern void write_sigio_workaround(void);
326extern int add_sigio_fd(int fd, int read);
327extern int ignore_sigio_fd(int fd);
328
329/* skas/trap */
330extern void sig_handler_common_skas(int sig, void *sc_ptr);
331extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
332
297#endif 333#endif
diff --git a/arch/um/include/sigio.h b/arch/um/include/sigio.h
index 37d76e29a147..fe99ea163c2e 100644
--- a/arch/um/include/sigio.h
+++ b/arch/um/include/sigio.h
@@ -8,9 +8,6 @@
8 8
9extern int write_sigio_irq(int fd); 9extern int write_sigio_irq(int fd);
10extern int register_sigio_fd(int fd); 10extern int register_sigio_fd(int fd);
11extern int read_sigio_fd(int fd);
12extern int add_sigio_fd(int fd, int read);
13extern int ignore_sigio_fd(int fd);
14extern void sigio_lock(void); 11extern void sigio_lock(void);
15extern void sigio_unlock(void); 12extern void sigio_unlock(void);
16 13
diff --git a/arch/um/include/skas/mode-skas.h b/arch/um/include/skas/mode-skas.h
index 260065cfeef1..8bc6916bbbb1 100644
--- a/arch/um/include/skas/mode-skas.h
+++ b/arch/um/include/skas/mode-skas.h
@@ -13,7 +13,6 @@ extern unsigned long exec_fp_regs[];
13extern unsigned long exec_fpx_regs[]; 13extern unsigned long exec_fpx_regs[];
14extern int have_fpx_regs; 14extern int have_fpx_regs;
15 15
16extern void sig_handler_common_skas(int sig, void *sc_ptr);
17extern void kill_off_processes_skas(void); 16extern void kill_off_processes_skas(void);
18 17
19#endif 18#endif
diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h
index 86357282d681..853b26f148c5 100644
--- a/arch/um/include/skas/skas.h
+++ b/arch/um/include/skas/skas.h
@@ -17,7 +17,6 @@ extern int user_thread(unsigned long stack, int flags);
17extern void new_thread_proc(void *stack, void (*handler)(int sig)); 17extern void new_thread_proc(void *stack, void (*handler)(int sig));
18extern void new_thread_handler(int sig); 18extern void new_thread_handler(int sig);
19extern void handle_syscall(union uml_pt_regs *regs); 19extern void handle_syscall(union uml_pt_regs *regs);
20extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
21extern int new_mm(unsigned long stack); 20extern int new_mm(unsigned long stack);
22extern void get_skas_faultinfo(int pid, struct faultinfo * fi); 21extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
23extern long execute_syscall_skas(void *r); 22extern long execute_syscall_skas(void *r);
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index a6f1f176cf84..992a7e1e0fca 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -58,7 +58,6 @@ extern int attach(int pid);
58extern void kill_child_dead(int pid); 58extern void kill_child_dead(int pid);
59extern int cont(int pid); 59extern int cont(int pid);
60extern void check_sigio(void); 60extern void check_sigio(void);
61extern void write_sigio_workaround(void);
62extern void arch_check_bugs(void); 61extern void arch_check_bugs(void);
63extern int cpu_feature(char *what, char *buf, int len); 62extern int cpu_feature(char *what, char *buf, int len);
64extern int arch_handle_signal(int sig, union uml_pt_regs *regs); 63extern int arch_handle_signal(int sig, union uml_pt_regs *regs);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 693018ba80f1..fe08971b64cf 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,23 +7,20 @@ extra-y := vmlinux.lds
7clean-files := 7clean-files :=
8 8
9obj-y = config.o exec_kern.o exitcode.o \ 9obj-y = config.o exec_kern.o exitcode.o \
10 init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \ 10 init_task.o irq.o ksyms.o mem.o physmem.o \
11 process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \ 11 process_kern.o ptrace.o reboot.o resource.o sigio_kern.o \
12 signal_kern.o smp.o syscall_kern.o sysrq.o \ 12 signal_kern.o smp.o syscall_kern.o sysrq.o \
13 time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o 13 time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o
14 14
15obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o 15obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
16obj-$(CONFIG_GPROF) += gprof_syms.o 16obj-$(CONFIG_GPROF) += gprof_syms.o
17obj-$(CONFIG_GCOV) += gmon_syms.o 17obj-$(CONFIG_GCOV) += gmon_syms.o
18obj-$(CONFIG_TTY_LOG) += tty_log.o
19obj-$(CONFIG_SYSCALL_DEBUG) += syscall.o 18obj-$(CONFIG_SYSCALL_DEBUG) += syscall.o
20 19
21obj-$(CONFIG_MODE_TT) += tt/ 20obj-$(CONFIG_MODE_TT) += tt/
22obj-$(CONFIG_MODE_SKAS) += skas/ 21obj-$(CONFIG_MODE_SKAS) += skas/
23 22
24user-objs-$(CONFIG_TTY_LOG) += tty_log.o 23USER_OBJS := config.o
25
26USER_OBJS := $(user-objs-y) config.o tty_log.o
27 24
28include arch/um/scripts/Makefile.rules 25include arch/um/scripts/Makefile.rules
29 26
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index c264e1c05ab3..1ca84319317d 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -30,8 +30,6 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
30 CHOOSE_MODE_PROC(start_thread_tt, start_thread_skas, regs, eip, esp); 30 CHOOSE_MODE_PROC(start_thread_tt, start_thread_skas, regs, eip, esp);
31} 31}
32 32
33extern void log_exec(char **argv, void *tty);
34
35static long execve1(char *file, char __user * __user *argv, 33static long execve1(char *file, char __user * __user *argv,
36 char __user *__user *env) 34 char __user *__user *env)
37{ 35{
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index bbf94bf2921e..c39ea3abeda4 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -31,6 +31,8 @@
31#include "irq_user.h" 31#include "irq_user.h"
32#include "irq_kern.h" 32#include "irq_kern.h"
33#include "os.h" 33#include "os.h"
34#include "sigio.h"
35#include "misc_constants.h"
34 36
35/* 37/*
36 * Generic, controller-independent functions: 38 * Generic, controller-independent functions:
@@ -77,6 +79,298 @@ skip:
77 return 0; 79 return 0;
78} 80}
79 81
82struct irq_fd *active_fds = NULL;
83static struct irq_fd **last_irq_ptr = &active_fds;
84
85extern void free_irqs(void);
86
87void sigio_handler(int sig, union uml_pt_regs *regs)
88{
89 struct irq_fd *irq_fd;
90 int n;
91
92 if(smp_sigio_handler()) return;
93 while(1){
94 n = os_waiting_for_events(active_fds);
95 if (n <= 0) {
96 if(n == -EINTR) continue;
97 else break;
98 }
99
100 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
101 if(irq_fd->current_events != 0){
102 irq_fd->current_events = 0;
103 do_IRQ(irq_fd->irq, regs);
104 }
105 }
106 }
107
108 free_irqs();
109}
110
111static void maybe_sigio_broken(int fd, int type)
112{
113 if(os_isatty(fd)){
114 if((type == IRQ_WRITE) && !pty_output_sigio){
115 write_sigio_workaround();
116 add_sigio_fd(fd, 0);
117 }
118 else if((type == IRQ_READ) && !pty_close_sigio){
119 write_sigio_workaround();
120 add_sigio_fd(fd, 1);
121 }
122 }
123}
124
125
126int activate_fd(int irq, int fd, int type, void *dev_id)
127{
128 struct pollfd *tmp_pfd;
129 struct irq_fd *new_fd, *irq_fd;
130 unsigned long flags;
131 int pid, events, err, n;
132
133 pid = os_getpid();
134 err = os_set_fd_async(fd, pid);
135 if(err < 0)
136 goto out;
137
138 new_fd = um_kmalloc(sizeof(*new_fd));
139 err = -ENOMEM;
140 if(new_fd == NULL)
141 goto out;
142
143 if(type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI;
144 else events = UM_POLLOUT;
145 *new_fd = ((struct irq_fd) { .next = NULL,
146 .id = dev_id,
147 .fd = fd,
148 .type = type,
149 .irq = irq,
150 .pid = pid,
151 .events = events,
152 .current_events = 0 } );
153
154 /* Critical section - locked by a spinlock because this stuff can
155 * be changed from interrupt handlers. The stuff above is done
156 * outside the lock because it allocates memory.
157 */
158
159 /* Actually, it only looks like it can be called from interrupt
160 * context. The culprit is reactivate_fd, which calls
161 * maybe_sigio_broken, which calls write_sigio_workaround,
162 * which calls activate_fd. However, write_sigio_workaround should
163 * only be called once, at boot time. That would make it clear that
164 * this is called only from process context, and can be locked with
165 * a semaphore.
166 */
167 flags = irq_lock();
168 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
169 if((irq_fd->fd == fd) && (irq_fd->type == type)){
170 printk("Registering fd %d twice\n", fd);
171 printk("Irqs : %d, %d\n", irq_fd->irq, irq);
172 printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
173 goto out_unlock;
174 }
175 }
176
177 /*-------------*/
178 if(type == IRQ_WRITE)
179 fd = -1;
180
181 tmp_pfd = NULL;
182 n = 0;
183
184 while(1){
185 n = os_create_pollfd(fd, events, tmp_pfd, n);
186 if (n == 0)
187 break;
188
189 /* n > 0
190 * It means we couldn't put new pollfd to current pollfds
191 * and tmp_fds is NULL or too small for new pollfds array.
192 * Needed size is equal to n as minimum.
193 *
194 * Here we have to drop the lock in order to call
195 * kmalloc, which might sleep.
196 * If something else came in and changed the pollfds array
197 * so we will not be able to put new pollfd struct to pollfds
198 * then we free the buffer tmp_fds and try again.
199 */
200 irq_unlock(flags);
201 if (tmp_pfd != NULL) {
202 kfree(tmp_pfd);
203 tmp_pfd = NULL;
204 }
205
206 tmp_pfd = um_kmalloc(n);
207 if (tmp_pfd == NULL)
208 goto out_kfree;
209
210 flags = irq_lock();
211 }
212 /*-------------*/
213
214 *last_irq_ptr = new_fd;
215 last_irq_ptr = &new_fd->next;
216
217 irq_unlock(flags);
218
219 /* This calls activate_fd, so it has to be outside the critical
220 * section.
221 */
222 maybe_sigio_broken(fd, type);
223
224 return(0);
225
226 out_unlock:
227 irq_unlock(flags);
228 out_kfree:
229 kfree(new_fd);
230 out:
231 return(err);
232}
233
234static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
235{
236 unsigned long flags;
237
238 flags = irq_lock();
239 os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
240 irq_unlock(flags);
241}
242
243struct irq_and_dev {
244 int irq;
245 void *dev;
246};
247
248static int same_irq_and_dev(struct irq_fd *irq, void *d)
249{
250 struct irq_and_dev *data = d;
251
252 return((irq->irq == data->irq) && (irq->id == data->dev));
253}
254
255void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
256{
257 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
258 .dev = dev });
259
260 free_irq_by_cb(same_irq_and_dev, &data);
261}
262
263static int same_fd(struct irq_fd *irq, void *fd)
264{
265 return(irq->fd == *((int *) fd));
266}
267
268void free_irq_by_fd(int fd)
269{
270 free_irq_by_cb(same_fd, &fd);
271}
272
273static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
274{
275 struct irq_fd *irq;
276 int i = 0;
277 int fdi;
278
279 for(irq=active_fds; irq != NULL; irq = irq->next){
280 if((irq->fd == fd) && (irq->irq == irqnum)) break;
281 i++;
282 }
283 if(irq == NULL){
284 printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
285 goto out;
286 }
287 fdi = os_get_pollfd(i);
288 if((fdi != -1) && (fdi != fd)){
289 printk("find_irq_by_fd - mismatch between active_fds and "
290 "pollfds, fd %d vs %d, need %d\n", irq->fd,
291 fdi, fd);
292 irq = NULL;
293 goto out;
294 }
295 *index_out = i;
296 out:
297 return(irq);
298}
299
300void reactivate_fd(int fd, int irqnum)
301{
302 struct irq_fd *irq;
303 unsigned long flags;
304 int i;
305
306 flags = irq_lock();
307 irq = find_irq_by_fd(fd, irqnum, &i);
308 if(irq == NULL){
309 irq_unlock(flags);
310 return;
311 }
312 os_set_pollfd(i, irq->fd);
313 irq_unlock(flags);
314
315 /* This calls activate_fd, so it has to be outside the critical
316 * section.
317 */
318 maybe_sigio_broken(fd, irq->type);
319}
320
321void deactivate_fd(int fd, int irqnum)
322{
323 struct irq_fd *irq;
324 unsigned long flags;
325 int i;
326
327 flags = irq_lock();
328 irq = find_irq_by_fd(fd, irqnum, &i);
329 if(irq == NULL)
330 goto out;
331 os_set_pollfd(i, -1);
332 out:
333 irq_unlock(flags);
334}
335
336int deactivate_all_fds(void)
337{
338 struct irq_fd *irq;
339 int err;
340
341 for(irq=active_fds;irq != NULL;irq = irq->next){
342 err = os_clear_fd_async(irq->fd);
343 if(err)
344 return(err);
345 }
346 /* If there is a signal already queued, after unblocking ignore it */
347 os_set_ioignore();
348
349 return(0);
350}
351
352void forward_interrupts(int pid)
353{
354 struct irq_fd *irq;
355 unsigned long flags;
356 int err;
357
358 flags = irq_lock();
359 for(irq=active_fds;irq != NULL;irq = irq->next){
360 err = os_set_owner(irq->fd, pid);
361 if(err < 0){
362 /* XXX Just remove the irq rather than
363 * print out an infinite stream of these
364 */
365 printk("Failed to forward %d to pid %d, err = %d\n",
366 irq->fd, pid, -err);
367 }
368
369 irq->pid = pid;
370 }
371 irq_unlock(flags);
372}
373
80/* 374/*
81 * do_IRQ handles all normal device IRQ's (the special 375 * do_IRQ handles all normal device IRQ's (the special
82 * SMP cross-CPU interrupts have their own specific 376 * SMP cross-CPU interrupts have their own specific
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
deleted file mode 100644
index 0e32f5f4a887..000000000000
--- a/arch/um/kernel/irq_user.c
+++ /dev/null
@@ -1,412 +0,0 @@
1/*
2 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <signal.h>
10#include <string.h>
11#include <sys/poll.h>
12#include <sys/types.h>
13#include <sys/time.h>
14#include "user_util.h"
15#include "kern_util.h"
16#include "user.h"
17#include "process.h"
18#include "sigio.h"
19#include "irq_user.h"
20#include "os.h"
21
22struct irq_fd {
23 struct irq_fd *next;
24 void *id;
25 int fd;
26 int type;
27 int irq;
28 int pid;
29 int events;
30 int current_events;
31};
32
33static struct irq_fd *active_fds = NULL;
34static struct irq_fd **last_irq_ptr = &active_fds;
35
36static struct pollfd *pollfds = NULL;
37static int pollfds_num = 0;
38static int pollfds_size = 0;
39
40extern int io_count, intr_count;
41
42extern void free_irqs(void);
43
44void sigio_handler(int sig, union uml_pt_regs *regs)
45{
46 struct irq_fd *irq_fd;
47 int i, n;
48
49 if(smp_sigio_handler()) return;
50 while(1){
51 n = poll(pollfds, pollfds_num, 0);
52 if(n < 0){
53 if(errno == EINTR) continue;
54 printk("sigio_handler : poll returned %d, "
55 "errno = %d\n", n, errno);
56 break;
57 }
58 if(n == 0) break;
59
60 irq_fd = active_fds;
61 for(i = 0; i < pollfds_num; i++){
62 if(pollfds[i].revents != 0){
63 irq_fd->current_events = pollfds[i].revents;
64 pollfds[i].fd = -1;
65 }
66 irq_fd = irq_fd->next;
67 }
68
69 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
70 if(irq_fd->current_events != 0){
71 irq_fd->current_events = 0;
72 do_IRQ(irq_fd->irq, regs);
73 }
74 }
75 }
76
77 free_irqs();
78}
79
80int activate_ipi(int fd, int pid)
81{
82 return(os_set_fd_async(fd, pid));
83}
84
85static void maybe_sigio_broken(int fd, int type)
86{
87 if(isatty(fd)){
88 if((type == IRQ_WRITE) && !pty_output_sigio){
89 write_sigio_workaround();
90 add_sigio_fd(fd, 0);
91 }
92 else if((type == IRQ_READ) && !pty_close_sigio){
93 write_sigio_workaround();
94 add_sigio_fd(fd, 1);
95 }
96 }
97}
98
99int activate_fd(int irq, int fd, int type, void *dev_id)
100{
101 struct pollfd *tmp_pfd;
102 struct irq_fd *new_fd, *irq_fd;
103 unsigned long flags;
104 int pid, events, err, n, size;
105
106 pid = os_getpid();
107 err = os_set_fd_async(fd, pid);
108 if(err < 0)
109 goto out;
110
111 new_fd = um_kmalloc(sizeof(*new_fd));
112 err = -ENOMEM;
113 if(new_fd == NULL)
114 goto out;
115
116 if(type == IRQ_READ) events = POLLIN | POLLPRI;
117 else events = POLLOUT;
118 *new_fd = ((struct irq_fd) { .next = NULL,
119 .id = dev_id,
120 .fd = fd,
121 .type = type,
122 .irq = irq,
123 .pid = pid,
124 .events = events,
125 .current_events = 0 } );
126
127 /* Critical section - locked by a spinlock because this stuff can
128 * be changed from interrupt handlers. The stuff above is done
129 * outside the lock because it allocates memory.
130 */
131
132 /* Actually, it only looks like it can be called from interrupt
133 * context. The culprit is reactivate_fd, which calls
134 * maybe_sigio_broken, which calls write_sigio_workaround,
135 * which calls activate_fd. However, write_sigio_workaround should
136 * only be called once, at boot time. That would make it clear that
137 * this is called only from process context, and can be locked with
138 * a semaphore.
139 */
140 flags = irq_lock();
141 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
142 if((irq_fd->fd == fd) && (irq_fd->type == type)){
143 printk("Registering fd %d twice\n", fd);
144 printk("Irqs : %d, %d\n", irq_fd->irq, irq);
145 printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id);
146 goto out_unlock;
147 }
148 }
149
150 n = pollfds_num;
151 if(n == pollfds_size){
152 while(1){
153 /* Here we have to drop the lock in order to call
154 * kmalloc, which might sleep. If something else
155 * came in and changed the pollfds array, we free
156 * the buffer and try again.
157 */
158 irq_unlock(flags);
159 size = (pollfds_num + 1) * sizeof(pollfds[0]);
160 tmp_pfd = um_kmalloc(size);
161 flags = irq_lock();
162 if(tmp_pfd == NULL)
163 goto out_unlock;
164 if(n == pollfds_size)
165 break;
166 kfree(tmp_pfd);
167 }
168 if(pollfds != NULL){
169 memcpy(tmp_pfd, pollfds,
170 sizeof(pollfds[0]) * pollfds_size);
171 kfree(pollfds);
172 }
173 pollfds = tmp_pfd;
174 pollfds_size++;
175 }
176
177 if(type == IRQ_WRITE)
178 fd = -1;
179
180 pollfds[pollfds_num] = ((struct pollfd) { .fd = fd,
181 .events = events,
182 .revents = 0 });
183 pollfds_num++;
184
185 *last_irq_ptr = new_fd;
186 last_irq_ptr = &new_fd->next;
187
188 irq_unlock(flags);
189
190 /* This calls activate_fd, so it has to be outside the critical
191 * section.
192 */
193 maybe_sigio_broken(fd, type);
194
195 return(0);
196
197 out_unlock:
198 irq_unlock(flags);
199 kfree(new_fd);
200 out:
201 return(err);
202}
203
204static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
205{
206 struct irq_fd **prev;
207 unsigned long flags;
208 int i = 0;
209
210 flags = irq_lock();
211 prev = &active_fds;
212 while(*prev != NULL){
213 if((*test)(*prev, arg)){
214 struct irq_fd *old_fd = *prev;
215 if((pollfds[i].fd != -1) &&
216 (pollfds[i].fd != (*prev)->fd)){
217 printk("free_irq_by_cb - mismatch between "
218 "active_fds and pollfds, fd %d vs %d\n",
219 (*prev)->fd, pollfds[i].fd);
220 goto out;
221 }
222
223 pollfds_num--;
224
225 /* This moves the *whole* array after pollfds[i] (though
226 * it doesn't spot as such)! */
227
228 memmove(&pollfds[i], &pollfds[i + 1],
229 (pollfds_num - i) * sizeof(pollfds[0]));
230
231 if(last_irq_ptr == &old_fd->next)
232 last_irq_ptr = prev;
233 *prev = (*prev)->next;
234 if(old_fd->type == IRQ_WRITE)
235 ignore_sigio_fd(old_fd->fd);
236 kfree(old_fd);
237 continue;
238 }
239 prev = &(*prev)->next;
240 i++;
241 }
242 out:
243 irq_unlock(flags);
244}
245
246struct irq_and_dev {
247 int irq;
248 void *dev;
249};
250
251static int same_irq_and_dev(struct irq_fd *irq, void *d)
252{
253 struct irq_and_dev *data = d;
254
255 return((irq->irq == data->irq) && (irq->id == data->dev));
256}
257
258void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
259{
260 struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
261 .dev = dev });
262
263 free_irq_by_cb(same_irq_and_dev, &data);
264}
265
266static int same_fd(struct irq_fd *irq, void *fd)
267{
268 return(irq->fd == *((int *) fd));
269}
270
271void free_irq_by_fd(int fd)
272{
273 free_irq_by_cb(same_fd, &fd);
274}
275
276static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
277{
278 struct irq_fd *irq;
279 int i = 0;
280
281 for(irq=active_fds; irq != NULL; irq = irq->next){
282 if((irq->fd == fd) && (irq->irq == irqnum)) break;
283 i++;
284 }
285 if(irq == NULL){
286 printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
287 goto out;
288 }
289 if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
290 printk("find_irq_by_fd - mismatch between active_fds and "
291 "pollfds, fd %d vs %d, need %d\n", irq->fd,
292 pollfds[i].fd, fd);
293 irq = NULL;
294 goto out;
295 }
296 *index_out = i;
297 out:
298 return(irq);
299}
300
301void reactivate_fd(int fd, int irqnum)
302{
303 struct irq_fd *irq;
304 unsigned long flags;
305 int i;
306
307 flags = irq_lock();
308 irq = find_irq_by_fd(fd, irqnum, &i);
309 if(irq == NULL){
310 irq_unlock(flags);
311 return;
312 }
313
314 pollfds[i].fd = irq->fd;
315
316 irq_unlock(flags);
317
318 /* This calls activate_fd, so it has to be outside the critical
319 * section.
320 */
321 maybe_sigio_broken(fd, irq->type);
322}
323
324void deactivate_fd(int fd, int irqnum)
325{
326 struct irq_fd *irq;
327 unsigned long flags;
328 int i;
329
330 flags = irq_lock();
331 irq = find_irq_by_fd(fd, irqnum, &i);
332 if(irq == NULL)
333 goto out;
334 pollfds[i].fd = -1;
335 out:
336 irq_unlock(flags);
337}
338
339int deactivate_all_fds(void)
340{
341 struct irq_fd *irq;
342 int err;
343
344 for(irq=active_fds;irq != NULL;irq = irq->next){
345 err = os_clear_fd_async(irq->fd);
346 if(err)
347 return(err);
348 }
349 /* If there is a signal already queued, after unblocking ignore it */
350 set_handler(SIGIO, SIG_IGN, 0, -1);
351
352 return(0);
353}
354
355void forward_ipi(int fd, int pid)
356{
357 int err;
358
359 err = os_set_owner(fd, pid);
360 if(err < 0)
361 printk("forward_ipi: set_owner failed, fd = %d, me = %d, "
362 "target = %d, err = %d\n", fd, os_getpid(), pid, -err);
363}
364
365void forward_interrupts(int pid)
366{
367 struct irq_fd *irq;
368 unsigned long flags;
369 int err;
370
371 flags = irq_lock();
372 for(irq=active_fds;irq != NULL;irq = irq->next){
373 err = os_set_owner(irq->fd, pid);
374 if(err < 0){
375 /* XXX Just remove the irq rather than
376 * print out an infinite stream of these
377 */
378 printk("Failed to forward %d to pid %d, err = %d\n",
379 irq->fd, pid, -err);
380 }
381
382 irq->pid = pid;
383 }
384 irq_unlock(flags);
385}
386
387void init_irq_signals(int on_sigstack)
388{
389 __sighandler_t h;
390 int flags;
391
392 flags = on_sigstack ? SA_ONSTACK : 0;
393 if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
394 else h = boot_timer_handler;
395
396 set_handler(SIGVTALRM, h, flags | SA_RESTART,
397 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
398 set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
399 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
400 signal(SIGWINCH, SIG_IGN);
401}
402
403/*
404 * Overrides for Emacs so that we follow Linus's tabbing style.
405 * Emacs will notice this stuff at the end of the file and automatically
406 * adjust the settings for this buffer only. This must remain at the end
407 * of the file.
408 * ---------------------------------------------------------------------------
409 * Local variables:
410 * c-file-style: "linux"
411 * End:
412 */
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 0e65340eee33..0500800df1c1 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -9,6 +9,7 @@
9#include "linux/vmalloc.h" 9#include "linux/vmalloc.h"
10#include "linux/bootmem.h" 10#include "linux/bootmem.h"
11#include "linux/module.h" 11#include "linux/module.h"
12#include "linux/pfn.h"
12#include "asm/types.h" 13#include "asm/types.h"
13#include "asm/pgtable.h" 14#include "asm/pgtable.h"
14#include "kern_util.h" 15#include "kern_util.h"
@@ -316,8 +317,6 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
316 } 317 }
317} 318}
318 319
319#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
320
321extern int __syscall_stub_start, __binary_start; 320extern int __syscall_stub_start, __binary_start;
322 321
323void setup_physmem(unsigned long start, unsigned long reserve_end, 322void setup_physmem(unsigned long start, unsigned long reserve_end,
diff --git a/arch/um/kernel/sigio_kern.c b/arch/um/kernel/sigio_kern.c
index 229988463c4c..1c1300fb1e95 100644
--- a/arch/um/kernel/sigio_kern.c
+++ b/arch/um/kernel/sigio_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -12,13 +12,16 @@
12#include "sigio.h" 12#include "sigio.h"
13#include "irq_user.h" 13#include "irq_user.h"
14#include "irq_kern.h" 14#include "irq_kern.h"
15#include "os.h"
15 16
16/* Protected by sigio_lock() called from write_sigio_workaround */ 17/* Protected by sigio_lock() called from write_sigio_workaround */
17static int sigio_irq_fd = -1; 18static int sigio_irq_fd = -1;
18 19
19static irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused) 20static irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused)
20{ 21{
21 read_sigio_fd(sigio_irq_fd); 22 char c;
23
24 os_read_file(sigio_irq_fd, &c, sizeof(c));
22 reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ); 25 reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ);
23 return(IRQ_HANDLED); 26 return(IRQ_HANDLED);
24} 27}
@@ -51,6 +54,9 @@ void sigio_unlock(void)
51 spin_unlock(&sigio_spinlock); 54 spin_unlock(&sigio_spinlock);
52} 55}
53 56
57extern void sigio_cleanup(void);
58__uml_exitcall(sigio_cleanup);
59
54/* 60/*
55 * Overrides for Emacs so that we follow Linus's tabbing style. 61 * Overrides for Emacs so that we follow Linus's tabbing style.
56 * Emacs will notice this stuff at the end of the file and automatically 62 * Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 72113b0a96e7..c8d8d0ac1a7f 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) 2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -77,9 +77,9 @@ static int idle_proc(void *cpup)
77 if(err < 0) 77 if(err < 0)
78 panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err); 78 panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
79 79
80 activate_ipi(cpu_data[cpu].ipi_pipe[0], 80 os_set_fd_async(cpu_data[cpu].ipi_pipe[0],
81 current->thread.mode.tt.extern_pid); 81 current->thread.mode.tt.extern_pid);
82 82
83 wmb(); 83 wmb();
84 if (cpu_test_and_set(cpu, cpu_callin_map)) { 84 if (cpu_test_and_set(cpu, cpu_callin_map)) {
85 printk("huh, CPU#%d already present??\n", cpu); 85 printk("huh, CPU#%d already present??\n", cpu);
@@ -106,7 +106,7 @@ static struct task_struct *idle_thread(int cpu)
106 panic("copy_process failed in idle_thread, error = %ld", 106 panic("copy_process failed in idle_thread, error = %ld",
107 PTR_ERR(new_task)); 107 PTR_ERR(new_task));
108 108
109 cpu_tasks[cpu] = ((struct cpu_task) 109 cpu_tasks[cpu] = ((struct cpu_task)
110 { .pid = new_task->thread.mode.tt.extern_pid, 110 { .pid = new_task->thread.mode.tt.extern_pid,
111 .task = new_task } ); 111 .task = new_task } );
112 idle_threads[cpu] = new_task; 112 idle_threads[cpu] = new_task;
@@ -134,12 +134,12 @@ void smp_prepare_cpus(unsigned int maxcpus)
134 if(err < 0) 134 if(err < 0)
135 panic("CPU#0 failed to create IPI pipe, errno = %d", -err); 135 panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
136 136
137 activate_ipi(cpu_data[me].ipi_pipe[0], 137 os_set_fd_async(cpu_data[me].ipi_pipe[0],
138 current->thread.mode.tt.extern_pid); 138 current->thread.mode.tt.extern_pid);
139 139
140 for(cpu = 1; cpu < ncpus; cpu++){ 140 for(cpu = 1; cpu < ncpus; cpu++){
141 printk("Booting processor %d...\n", cpu); 141 printk("Booting processor %d...\n", cpu);
142 142
143 idle = idle_thread(cpu); 143 idle = idle_thread(cpu);
144 144
145 init_idle(idle, cpu); 145 init_idle(idle, cpu);
@@ -223,7 +223,7 @@ void smp_call_function_slave(int cpu)
223 atomic_inc(&scf_finished); 223 atomic_inc(&scf_finished);
224} 224}
225 225
226int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, 226int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
227 int wait) 227 int wait)
228{ 228{
229 int cpus = num_online_cpus() - 1; 229 int cpus = num_online_cpus() - 1;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 80c9c18aae94..7d51dd7201c3 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -421,7 +421,7 @@ int linux_main(int argc, char **argv)
421#ifndef CONFIG_HIGHMEM 421#ifndef CONFIG_HIGHMEM
422 highmem = 0; 422 highmem = 0;
423 printf("CONFIG_HIGHMEM not enabled - physical memory shrunk " 423 printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
424 "to %lu bytes\n", physmem_size); 424 "to %Lu bytes\n", physmem_size);
425#endif 425#endif
426 } 426 }
427 427
@@ -433,8 +433,8 @@ int linux_main(int argc, char **argv)
433 433
434 setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); 434 setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
435 if(init_maps(physmem_size, iomem_size, highmem)){ 435 if(init_maps(physmem_size, iomem_size, highmem)){
436 printf("Failed to allocate mem_map for %lu bytes of physical " 436 printf("Failed to allocate mem_map for %Lu bytes of physical "
437 "memory and %lu bytes of highmem\n", physmem_size, 437 "memory and %Lu bytes of highmem\n", physmem_size,
438 highmem); 438 highmem);
439 exit(1); 439 exit(1);
440 } 440 }
@@ -477,7 +477,8 @@ static struct notifier_block panic_exit_notifier = {
477 477
478void __init setup_arch(char **cmdline_p) 478void __init setup_arch(char **cmdline_p)
479{ 479{
480 notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); 480 atomic_notifier_chain_register(&panic_notifier_list,
481 &panic_exit_notifier);
481 paging_init(); 482 paging_init();
482 strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); 483 strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
483 *cmdline_p = command_line; 484 *cmdline_p = command_line;
@@ -487,8 +488,7 @@ void __init setup_arch(char **cmdline_p)
487void __init check_bugs(void) 488void __init check_bugs(void)
488{ 489{
489 arch_check_bugs(); 490 arch_check_bugs();
490 check_sigio(); 491 os_check_bugs();
491 check_devanon();
492} 492}
493 493
494void apply_alternatives(struct alt_instr *start, struct alt_instr *end) 494void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 08a4e628b24c..1659386b42bb 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -3,14 +3,17 @@
3# Licensed under the GPL 3# Licensed under the GPL
4# 4#
5 5
6obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 6obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \
7 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \ 7 signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \
8 util.o drivers/ sys-$(SUBARCH)/ 8 user_syms.o util.o drivers/ sys-$(SUBARCH)/
9 9
10obj-$(CONFIG_MODE_SKAS) += skas/ 10obj-$(CONFIG_MODE_SKAS) += skas/
11obj-$(CONFIG_TTY_LOG) += tty_log.o
12user-objs-$(CONFIG_TTY_LOG) += tty_log.o
11 13
12USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 14USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \
13 start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o util.o 15 process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \
16 uaccess.o umid.o util.o
14 17
15elf_aux.o: $(ARCH_DIR)/kernel-offsets.h 18elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
16CFLAGS_elf_aux.o += -I$(objtree)/arch/um 19CFLAGS_elf_aux.o += -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
new file mode 100644
index 000000000000..e599be423da1
--- /dev/null
+++ b/arch/um/os-Linux/irq.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <signal.h>
10#include <string.h>
11#include <sys/poll.h>
12#include <sys/types.h>
13#include <sys/time.h>
14#include "user_util.h"
15#include "kern_util.h"
16#include "user.h"
17#include "process.h"
18#include "sigio.h"
19#include "irq_user.h"
20#include "os.h"
21
22static struct pollfd *pollfds = NULL;
23static int pollfds_num = 0;
24static int pollfds_size = 0;
25
26int os_waiting_for_events(struct irq_fd *active_fds)
27{
28 struct irq_fd *irq_fd;
29 int i, n, err;
30
31 n = poll(pollfds, pollfds_num, 0);
32 if(n < 0){
33 err = -errno;
34 if(errno != EINTR)
35 printk("sigio_handler: os_waiting_for_events:"
36 " poll returned %d, errno = %d\n", n, errno);
37 return err;
38 }
39
40 if(n == 0)
41 return 0;
42
43 irq_fd = active_fds;
44
45 for(i = 0; i < pollfds_num; i++){
46 if(pollfds[i].revents != 0){
47 irq_fd->current_events = pollfds[i].revents;
48 pollfds[i].fd = -1;
49 }
50 irq_fd = irq_fd->next;
51 }
52 return n;
53}
54
55int os_isatty(int fd)
56{
57 return(isatty(fd));
58}
59
60int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
61{
62 if (pollfds_num == pollfds_size) {
63 if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) {
64 /* return min size needed for new pollfds area */
65 return((pollfds_size + 1) * sizeof(pollfds[0]));
66 }
67
68 if(pollfds != NULL){
69 memcpy(tmp_pfd, pollfds,
70 sizeof(pollfds[0]) * pollfds_size);
71 /* remove old pollfds */
72 kfree(pollfds);
73 }
74 pollfds = tmp_pfd;
75 pollfds_size++;
76 } else {
77 /* remove not used tmp_pfd */
78 if (tmp_pfd != NULL)
79 kfree(tmp_pfd);
80 }
81
82 pollfds[pollfds_num] = ((struct pollfd) { .fd = fd,
83 .events = events,
84 .revents = 0 });
85 pollfds_num++;
86
87 return(0);
88}
89
90void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
91 struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2)
92{
93 struct irq_fd **prev;
94 int i = 0;
95
96 prev = &active_fds;
97 while(*prev != NULL){
98 if((*test)(*prev, arg)){
99 struct irq_fd *old_fd = *prev;
100 if((pollfds[i].fd != -1) &&
101 (pollfds[i].fd != (*prev)->fd)){
102 printk("os_free_irq_by_cb - mismatch between "
103 "active_fds and pollfds, fd %d vs %d\n",
104 (*prev)->fd, pollfds[i].fd);
105 goto out;
106 }
107
108 pollfds_num--;
109
110 /* This moves the *whole* array after pollfds[i]
111 * (though it doesn't spot as such)!
112 */
113
114 memmove(&pollfds[i], &pollfds[i + 1],
115 (pollfds_num - i) * sizeof(pollfds[0]));
116 if(*last_irq_ptr2 == &old_fd->next)
117 *last_irq_ptr2 = prev;
118
119 *prev = (*prev)->next;
120 if(old_fd->type == IRQ_WRITE)
121 ignore_sigio_fd(old_fd->fd);
122 kfree(old_fd);
123 continue;
124 }
125 prev = &(*prev)->next;
126 i++;
127 }
128 out:
129 return;
130}
131
132
133int os_get_pollfd(int i)
134{
135 return(pollfds[i].fd);
136}
137
138void os_set_pollfd(int i, int fd)
139{
140 pollfds[i].fd = fd;
141}
142
143void os_set_ioignore(void)
144{
145 set_handler(SIGIO, SIG_IGN, 0, -1);
146}
147
148void init_irq_signals(int on_sigstack)
149{
150 __sighandler_t h;
151 int flags;
152
153 flags = on_sigstack ? SA_ONSTACK : 0;
154 if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
155 else h = boot_timer_handler;
156
157 set_handler(SIGVTALRM, h, flags | SA_RESTART,
158 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
159 set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
160 SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
161 signal(SIGWINCH, SIG_IGN);
162}
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/os-Linux/sigio.c
index f7b18e157d35..9ba942947146 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/os-Linux/sigio.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -20,128 +20,7 @@
20#include "sigio.h" 20#include "sigio.h"
21#include "os.h" 21#include "os.h"
22 22
23/* Changed during early boot */ 23/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
24int pty_output_sigio = 0;
25int pty_close_sigio = 0;
26
27/* Used as a flag during SIGIO testing early in boot */
28static volatile int got_sigio = 0;
29
30void __init handler(int sig)
31{
32 got_sigio = 1;
33}
34
35struct openpty_arg {
36 int master;
37 int slave;
38 int err;
39};
40
41static void openpty_cb(void *arg)
42{
43 struct openpty_arg *info = arg;
44
45 info->err = 0;
46 if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
47 info->err = -errno;
48}
49
50void __init check_one_sigio(void (*proc)(int, int))
51{
52 struct sigaction old, new;
53 struct openpty_arg pty = { .master = -1, .slave = -1 };
54 int master, slave, err;
55
56 initial_thread_cb(openpty_cb, &pty);
57 if(pty.err){
58 printk("openpty failed, errno = %d\n", -pty.err);
59 return;
60 }
61
62 master = pty.master;
63 slave = pty.slave;
64
65 if((master == -1) || (slave == -1)){
66 printk("openpty failed to allocate a pty\n");
67 return;
68 }
69
70 /* Not now, but complain so we now where we failed. */
71 err = raw(master);
72 if (err < 0)
73 panic("check_sigio : __raw failed, errno = %d\n", -err);
74
75 err = os_sigio_async(master, slave);
76 if(err < 0)
77 panic("tty_fds : sigio_async failed, err = %d\n", -err);
78
79 if(sigaction(SIGIO, NULL, &old) < 0)
80 panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
81 new = old;
82 new.sa_handler = handler;
83 if(sigaction(SIGIO, &new, NULL) < 0)
84 panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);
85
86 got_sigio = 0;
87 (*proc)(master, slave);
88
89 os_close_file(master);
90 os_close_file(slave);
91
92 if(sigaction(SIGIO, &old, NULL) < 0)
93 panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
94}
95
96static void tty_output(int master, int slave)
97{
98 int n;
99 char buf[512];
100
101 printk("Checking that host ptys support output SIGIO...");
102
103 memset(buf, 0, sizeof(buf));
104
105 while(os_write_file(master, buf, sizeof(buf)) > 0) ;
106 if(errno != EAGAIN)
107 panic("check_sigio : write failed, errno = %d\n", errno);
108 while(((n = os_read_file(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;
109
110 if (got_sigio) {
111 printk("Yes\n");
112 pty_output_sigio = 1;
113 } else if (n == -EAGAIN) {
114 printk("No, enabling workaround\n");
115 } else {
116 panic("check_sigio : read failed, err = %d\n", n);
117 }
118}
119
120static void tty_close(int master, int slave)
121{
122 printk("Checking that host ptys support SIGIO on close...");
123
124 os_close_file(slave);
125 if(got_sigio){
126 printk("Yes\n");
127 pty_close_sigio = 1;
128 }
129 else printk("No, enabling workaround\n");
130}
131
132void __init check_sigio(void)
133{
134 if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
135 (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
136 printk("No pseudo-terminals available - skipping pty SIGIO "
137 "check\n");
138 return;
139 }
140 check_one_sigio(tty_output);
141 check_one_sigio(tty_close);
142}
143
144/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
145 * exitcall. 24 * exitcall.
146 */ 25 */
147static int write_sigio_pid = -1; 26static int write_sigio_pid = -1;
@@ -150,8 +29,10 @@ static int write_sigio_pid = -1;
150 * the descriptors closed after it is killed. So, it can't see them change. 29 * the descriptors closed after it is killed. So, it can't see them change.
151 * On the UML side, they are changed under the sigio_lock. 30 * On the UML side, they are changed under the sigio_lock.
152 */ 31 */
153static int write_sigio_fds[2] = { -1, -1 }; 32#define SIGIO_FDS_INIT {-1, -1}
154static int sigio_private[2] = { -1, -1 }; 33
34static int write_sigio_fds[2] = SIGIO_FDS_INIT;
35static int sigio_private[2] = SIGIO_FDS_INIT;
155 36
156struct pollfds { 37struct pollfds {
157 struct pollfd *poll; 38 struct pollfd *poll;
@@ -264,13 +145,13 @@ static void update_thread(void)
264 return; 145 return;
265 fail: 146 fail:
266 /* Critical section start */ 147 /* Critical section start */
267 if(write_sigio_pid != -1) 148 if(write_sigio_pid != -1)
268 os_kill_process(write_sigio_pid, 1); 149 os_kill_process(write_sigio_pid, 1);
269 write_sigio_pid = -1; 150 write_sigio_pid = -1;
270 os_close_file(sigio_private[0]); 151 close(sigio_private[0]);
271 os_close_file(sigio_private[1]); 152 close(sigio_private[1]);
272 os_close_file(write_sigio_fds[0]); 153 close(write_sigio_fds[0]);
273 os_close_file(write_sigio_fds[1]); 154 close(write_sigio_fds[1]);
274 /* Critical section end */ 155 /* Critical section end */
275 set_signals(flags); 156 set_signals(flags);
276} 157}
@@ -281,13 +162,13 @@ int add_sigio_fd(int fd, int read)
281 162
282 sigio_lock(); 163 sigio_lock();
283 for(i = 0; i < current_poll.used; i++){ 164 for(i = 0; i < current_poll.used; i++){
284 if(current_poll.poll[i].fd == fd) 165 if(current_poll.poll[i].fd == fd)
285 goto out; 166 goto out;
286 } 167 }
287 168
288 n = current_poll.used + 1; 169 n = current_poll.used + 1;
289 err = need_poll(n); 170 err = need_poll(n);
290 if(err) 171 if(err)
291 goto out; 172 goto out;
292 173
293 for(i = 0; i < current_poll.used; i++) 174 for(i = 0; i < current_poll.used; i++)
@@ -316,7 +197,7 @@ int ignore_sigio_fd(int fd)
316 } 197 }
317 if(i == current_poll.used) 198 if(i == current_poll.used)
318 goto out; 199 goto out;
319 200
320 err = need_poll(current_poll.used - 1); 201 err = need_poll(current_poll.used - 1);
321 if(err) 202 if(err)
322 goto out; 203 goto out;
@@ -337,7 +218,7 @@ int ignore_sigio_fd(int fd)
337 return(err); 218 return(err);
338} 219}
339 220
340static struct pollfd* setup_initial_poll(int fd) 221static struct pollfd *setup_initial_poll(int fd)
341{ 222{
342 struct pollfd *p; 223 struct pollfd *p;
343 224
@@ -377,7 +258,7 @@ void write_sigio_workaround(void)
377 } 258 }
378 err = os_pipe(l_sigio_private, 1, 1); 259 err = os_pipe(l_sigio_private, 1, 1);
379 if(err < 0){ 260 if(err < 0){
380 printk("write_sigio_workaround - os_pipe 1 failed, " 261 printk("write_sigio_workaround - os_pipe 2 failed, "
381 "err = %d\n", -err); 262 "err = %d\n", -err);
382 goto out_close1; 263 goto out_close1;
383 } 264 }
@@ -391,76 +272,52 @@ void write_sigio_workaround(void)
391 /* Did we race? Don't try to optimize this, please, it's not so likely 272 /* Did we race? Don't try to optimize this, please, it's not so likely
392 * to happen, and no more than once at the boot. */ 273 * to happen, and no more than once at the boot. */
393 if(write_sigio_pid != -1) 274 if(write_sigio_pid != -1)
394 goto out_unlock; 275 goto out_free;
395 276
396 write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, 277 current_poll = ((struct pollfds) { .poll = p,
397 CLONE_FILES | CLONE_VM, &stack, 0); 278 .used = 1,
398 279 .size = 1 });
399 if (write_sigio_pid < 0)
400 goto out_clear;
401 280
402 if (write_sigio_irq(l_write_sigio_fds[0])) 281 if (write_sigio_irq(l_write_sigio_fds[0]))
403 goto out_kill; 282 goto out_clear_poll;
404 283
405 /* Success, finally. */
406 memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds)); 284 memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
407 memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private)); 285 memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
408 286
409 current_poll = ((struct pollfds) { .poll = p, 287 write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
410 .used = 1, 288 CLONE_FILES | CLONE_VM, &stack, 0);
411 .size = 1 });
412 289
413 sigio_unlock(); 290 if (write_sigio_pid < 0)
414 return; 291 goto out_clear;
415 292
416 out_kill:
417 l_write_sigio_pid = write_sigio_pid;
418 write_sigio_pid = -1;
419 sigio_unlock(); 293 sigio_unlock();
420 /* Going to call waitpid, avoid holding the lock. */ 294 return;
421 os_kill_process(l_write_sigio_pid, 1);
422 goto out_free;
423 295
424 out_clear: 296out_clear:
425 write_sigio_pid = -1; 297 write_sigio_pid = -1;
426 out_unlock: 298 write_sigio_fds[0] = -1;
427 sigio_unlock(); 299 write_sigio_fds[1] = -1;
428 out_free: 300 sigio_private[0] = -1;
301 sigio_private[1] = -1;
302out_clear_poll:
303 current_poll = ((struct pollfds) { .poll = NULL,
304 .size = 0,
305 .used = 0 });
306out_free:
429 kfree(p); 307 kfree(p);
430 out_close2: 308 sigio_unlock();
431 os_close_file(l_sigio_private[0]); 309out_close2:
432 os_close_file(l_sigio_private[1]); 310 close(l_sigio_private[0]);
433 out_close1: 311 close(l_sigio_private[1]);
434 os_close_file(l_write_sigio_fds[0]); 312out_close1:
435 os_close_file(l_write_sigio_fds[1]); 313 close(l_write_sigio_fds[0]);
436 return; 314 close(l_write_sigio_fds[1]);
437}
438
439int read_sigio_fd(int fd)
440{
441 int n;
442 char c;
443
444 n = os_read_file(fd, &c, sizeof(c));
445 if(n != sizeof(c)){
446 if(n < 0) {
447 printk("read_sigio_fd - read failed, err = %d\n", -n);
448 return(n);
449 }
450 else {
451 printk("read_sigio_fd - short read, bytes = %d\n", n);
452 return(-EIO);
453 }
454 }
455 return(n);
456} 315}
457 316
458static void sigio_cleanup(void) 317void sigio_cleanup(void)
459{ 318{
460 if (write_sigio_pid != -1) { 319 if(write_sigio_pid != -1){
461 os_kill_process(write_sigio_pid, 1); 320 os_kill_process(write_sigio_pid, 1);
462 write_sigio_pid = -1; 321 write_sigio_pid = -1;
463 } 322 }
464} 323}
465
466__uml_exitcall(sigio_cleanup);
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 829d6b0d8b02..32753131f8d8 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -3,6 +3,7 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include <pty.h>
6#include <stdio.h> 7#include <stdio.h>
7#include <stddef.h> 8#include <stddef.h>
8#include <stdarg.h> 9#include <stdarg.h>
@@ -539,3 +540,130 @@ int __init parse_iomem(char *str, int *add)
539 return(1); 540 return(1);
540} 541}
541 542
543
544/* Changed during early boot */
545int pty_output_sigio = 0;
546int pty_close_sigio = 0;
547
548/* Used as a flag during SIGIO testing early in boot */
549static volatile int got_sigio = 0;
550
551static void __init handler(int sig)
552{
553 got_sigio = 1;
554}
555
556struct openpty_arg {
557 int master;
558 int slave;
559 int err;
560};
561
562static void openpty_cb(void *arg)
563{
564 struct openpty_arg *info = arg;
565
566 info->err = 0;
567 if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
568 info->err = -errno;
569}
570
571static void __init check_one_sigio(void (*proc)(int, int))
572{
573 struct sigaction old, new;
574 struct openpty_arg pty = { .master = -1, .slave = -1 };
575 int master, slave, err;
576
577 initial_thread_cb(openpty_cb, &pty);
578 if(pty.err){
579 printk("openpty failed, errno = %d\n", -pty.err);
580 return;
581 }
582
583 master = pty.master;
584 slave = pty.slave;
585
586 if((master == -1) || (slave == -1)){
587 printk("openpty failed to allocate a pty\n");
588 return;
589 }
590
591 /* Not now, but complain so we now where we failed. */
592 err = raw(master);
593 if (err < 0)
594 panic("check_sigio : __raw failed, errno = %d\n", -err);
595
596 err = os_sigio_async(master, slave);
597 if(err < 0)
598 panic("tty_fds : sigio_async failed, err = %d\n", -err);
599
600 if(sigaction(SIGIO, NULL, &old) < 0)
601 panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
602 new = old;
603 new.sa_handler = handler;
604 if(sigaction(SIGIO, &new, NULL) < 0)
605 panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);
606
607 got_sigio = 0;
608 (*proc)(master, slave);
609
610 close(master);
611 close(slave);
612
613 if(sigaction(SIGIO, &old, NULL) < 0)
614 panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
615}
616
617static void tty_output(int master, int slave)
618{
619 int n;
620 char buf[512];
621
622 printk("Checking that host ptys support output SIGIO...");
623
624 memset(buf, 0, sizeof(buf));
625
626 while(os_write_file(master, buf, sizeof(buf)) > 0) ;
627 if(errno != EAGAIN)
628 panic("check_sigio : write failed, errno = %d\n", errno);
629 while(((n = os_read_file(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;
630
631 if(got_sigio){
632 printk("Yes\n");
633 pty_output_sigio = 1;
634 }
635 else if(n == -EAGAIN) printk("No, enabling workaround\n");
636 else panic("check_sigio : read failed, err = %d\n", n);
637}
638
639static void tty_close(int master, int slave)
640{
641 printk("Checking that host ptys support SIGIO on close...");
642
643 close(slave);
644 if(got_sigio){
645 printk("Yes\n");
646 pty_close_sigio = 1;
647 }
648 else printk("No, enabling workaround\n");
649}
650
651void __init check_sigio(void)
652{
653 if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
654 (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
655 printk("No pseudo-terminals available - skipping pty SIGIO "
656 "check\n");
657 return;
658 }
659 check_one_sigio(tty_output);
660 check_one_sigio(tty_close);
661}
662
663void os_check_bugs(void)
664{
665 check_ptrace();
666 check_sigio();
667 check_devanon();
668}
669
diff --git a/arch/um/os-Linux/tt.c b/arch/um/os-Linux/tt.c
index 919d19f11537..5461a065bbb9 100644
--- a/arch/um/os-Linux/tt.c
+++ b/arch/um/os-Linux/tt.c
@@ -110,6 +110,16 @@ int wait_for_stop(int pid, int sig, int cont_type, void *relay)
110 } 110 }
111} 111}
112 112
113void forward_ipi(int fd, int pid)
114{
115 int err;
116
117 err = os_set_owner(fd, pid);
118 if(err < 0)
119 printk("forward_ipi: set_owner failed, fd = %d, me = %d, "
120 "target = %d, err = %d\n", fd, os_getpid(), pid, -err);
121}
122
113/* 123/*
114 *------------------------- 124 *-------------------------
115 * only for tt mode (will be deleted in future...) 125 * only for tt mode (will be deleted in future...)
diff --git a/arch/um/kernel/tty_log.c b/arch/um/os-Linux/tty_log.c
index 9ada656f68ce..c6ba56c1560f 100644
--- a/arch/um/kernel/tty_log.c
+++ b/arch/um/os-Linux/tty_log.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) and 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) and
3 * geoffrey hing <ghing@net.ohio-state.edu> 3 * geoffrey hing <ghing@net.ohio-state.edu>
4 * Licensed under the GPL 4 * Licensed under the GPL
5 */ 5 */
@@ -58,7 +58,7 @@ int open_tty_log(void *tty, void *current_tty)
58 return(tty_log_fd); 58 return(tty_log_fd);
59 } 59 }
60 60
61 sprintf(buf, "%s/%0u-%0u", tty_log_dir, (unsigned int) tv.tv_sec, 61 sprintf(buf, "%s/%0u-%0u", tty_log_dir, (unsigned int) tv.tv_sec,
62 (unsigned int) tv.tv_usec); 62 (unsigned int) tv.tv_usec);
63 63
64 fd = os_open_file(buf, of_append(of_create(of_rdwr(OPENFLAGS()))), 64 fd = os_open_file(buf, of_append(of_create(of_rdwr(OPENFLAGS()))),
@@ -216,15 +216,3 @@ __uml_setup("tty_log_fd=", set_tty_log_fd,
216" tty data will be written. Preconfigure the descriptor with something\n" 216" tty data will be written. Preconfigure the descriptor with something\n"
217" like '10>tty_log tty_log_fd=10'.\n\n" 217" like '10>tty_log tty_log_fd=10'.\n\n"
218); 218);
219
220
221/*
222 * Overrides for Emacs so that we follow Linus's tabbing style.
223 * Emacs will notice this stuff at the end of the file and automatically
224 * adjust the settings for this buffer only. This must remain at the end
225 * of the file.
226 * ---------------------------------------------------------------------------
227 * Local variables:
228 * c-file-style: "linux"
229 * End:
230 */
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index ecf107ae5ac8..198e59163288 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -143,8 +143,10 @@ static int not_dead_yet(char *dir)
143 goto out_close; 143 goto out_close;
144 } 144 }
145 145
146 if((kill(p, 0) == 0) || (errno != ESRCH)) 146 if((kill(p, 0) == 0) || (errno != ESRCH)){
147 printk("umid \"%s\" is already in use by pid %d\n", umid, p);
147 return 1; 148 return 1;
149 }
148 150
149 err = actually_do_remove(dir); 151 err = actually_do_remove(dir);
150 if(err) 152 if(err)
@@ -234,33 +236,44 @@ int __init make_umid(void)
234 err = mkdir(tmp, 0777); 236 err = mkdir(tmp, 0777);
235 if(err < 0){ 237 if(err < 0){
236 err = -errno; 238 err = -errno;
237 if(errno != EEXIST) 239 if(err != -EEXIST)
238 goto err; 240 goto err;
239 241
240 if(not_dead_yet(tmp) < 0) 242 /* 1 -> this umid is already in use
243 * < 0 -> we couldn't remove the umid directory
244 * In either case, we can't use this umid, so return -EEXIST.
245 */
246 if(not_dead_yet(tmp) != 0)
241 goto err; 247 goto err;
242 248
243 err = mkdir(tmp, 0777); 249 err = mkdir(tmp, 0777);
244 } 250 }
245 if(err < 0){ 251 if(err){
246 printk("Failed to create '%s' - err = %d\n", umid, err); 252 err = -errno;
247 goto err_rmdir; 253 printk("Failed to create '%s' - err = %d\n", umid, -errno);
254 goto err;
248 } 255 }
249 256
250 umid_setup = 1; 257 umid_setup = 1;
251 258
252 create_pid_file(); 259 create_pid_file();
253 260
254 return 0; 261 err = 0;
255
256 err_rmdir:
257 rmdir(tmp);
258 err: 262 err:
259 return err; 263 return err;
260} 264}
261 265
262static int __init make_umid_init(void) 266static int __init make_umid_init(void)
263{ 267{
268 if(!make_umid())
269 return 0;
270
271 /* If initializing with the given umid failed, then try again with
272 * a random one.
273 */
274 printk("Failed to initialize umid \"%s\", trying with a random umid\n",
275 umid);
276 *umid = '\0';
264 make_umid(); 277 make_umid();
265 278
266 return 0; 279 return 0;
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index e839ce65ad28..8032a105949a 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -6,6 +6,7 @@
6#include <linux/config.h> 6#include <linux/config.h>
7#include <linux/compiler.h> 7#include <linux/compiler.h>
8#include "linux/sched.h" 8#include "linux/sched.h"
9#include "linux/mm.h"
9#include "asm/elf.h" 10#include "asm/elf.h"
10#include "asm/ptrace.h" 11#include "asm/ptrace.h"
11#include "asm/uaccess.h" 12#include "asm/uaccess.h"
@@ -26,9 +27,17 @@ int is_syscall(unsigned long addr)
26 27
27 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); 28 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
28 if(n){ 29 if(n){
29 printk("is_syscall : failed to read instruction from 0x%lx\n", 30 /* access_process_vm() grants access to vsyscall and stub,
30 addr); 31 * while copy_from_user doesn't. Maybe access_process_vm is
31 return(0); 32 * slow, but that doesn't matter, since it will be called only
33 * in case of singlestepping, if copy_from_user failed.
34 */
35 n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
36 if(n != sizeof(instr)) {
37 printk("is_syscall : failed to read instruction from "
38 "0x%lx\n", addr);
39 return(1);
40 }
32 } 41 }
33 /* int 0x80 or sysenter */ 42 /* int 0x80 or sysenter */
34 return((instr == 0x80cd) || (instr == 0x340f)); 43 return((instr == 0x80cd) || (instr == 0x340f));
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index 7cd1a82dc8c2..33a40f5ef0d2 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -58,7 +58,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
58} 58}
59 59
60int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 60int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
61 struct pt_regs *regs) 61 struct pt_regs *regs, unsigned long sp)
62{ 62{
63 struct sigcontext sc; 63 struct sigcontext sc;
64 unsigned long fpregs[HOST_FP_SIZE]; 64 unsigned long fpregs[HOST_FP_SIZE];
@@ -72,7 +72,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
72 sc.edi = REGS_EDI(regs->regs.skas.regs); 72 sc.edi = REGS_EDI(regs->regs.skas.regs);
73 sc.esi = REGS_ESI(regs->regs.skas.regs); 73 sc.esi = REGS_ESI(regs->regs.skas.regs);
74 sc.ebp = REGS_EBP(regs->regs.skas.regs); 74 sc.ebp = REGS_EBP(regs->regs.skas.regs);
75 sc.esp = REGS_SP(regs->regs.skas.regs); 75 sc.esp = sp;
76 sc.ebx = REGS_EBX(regs->regs.skas.regs); 76 sc.ebx = REGS_EBX(regs->regs.skas.regs);
77 sc.edx = REGS_EDX(regs->regs.skas.regs); 77 sc.edx = REGS_EDX(regs->regs.skas.regs);
78 sc.ecx = REGS_ECX(regs->regs.skas.regs); 78 sc.ecx = REGS_ECX(regs->regs.skas.regs);
@@ -132,7 +132,7 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
132} 132}
133 133
134int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 134int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
135 struct sigcontext *from, int fpsize) 135 struct sigcontext *from, int fpsize, unsigned long sp)
136{ 136{
137 struct _fpstate *to_fp, *from_fp; 137 struct _fpstate *to_fp, *from_fp;
138 int err; 138 int err;
@@ -140,11 +140,18 @@ int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
140 to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 140 to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
141 from_fp = from->fpstate; 141 from_fp = from->fpstate;
142 err = copy_to_user(to, from, sizeof(*to)); 142 err = copy_to_user(to, from, sizeof(*to));
143
144 /* The SP in the sigcontext is the updated one for the signal
145 * delivery. The sp passed in is the original, and this needs
146 * to be restored, so we stick it in separately.
147 */
148 err |= copy_to_user(&SC_SP(to), sp, sizeof(sp));
149
143 if(from_fp != NULL){ 150 if(from_fp != NULL){
144 err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate)); 151 err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate));
145 err |= copy_to_user(to_fp, from_fp, fpsize); 152 err |= copy_to_user(to_fp, from_fp, fpsize);
146 } 153 }
147 return(err); 154 return err;
148} 155}
149#endif 156#endif
150 157
@@ -159,11 +166,11 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
159} 166}
160 167
161static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 168static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
162 struct pt_regs *from) 169 struct pt_regs *from, unsigned long sp)
163{ 170{
164 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), 171 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs),
165 sizeof(*fp)), 172 sizeof(*fp), sp),
166 copy_sc_to_user_skas(to, fp, from))); 173 copy_sc_to_user_skas(to, fp, from, sp)));
167} 174}
168 175
169static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, 176static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
@@ -174,7 +181,7 @@ static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
174 err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp); 181 err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
175 err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags); 182 err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
176 err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size); 183 err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
177 err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs); 184 err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, sp);
178 err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set)); 185 err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
179 return(err); 186 return(err);
180} 187}
@@ -207,6 +214,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
207{ 214{
208 struct sigframe __user *frame; 215 struct sigframe __user *frame;
209 void *restorer; 216 void *restorer;
217 unsigned long save_sp = PT_REGS_SP(regs);
210 int err = 0; 218 int err = 0;
211 219
212 stack_top &= -8UL; 220 stack_top &= -8UL;
@@ -218,9 +226,19 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
218 if(ka->sa.sa_flags & SA_RESTORER) 226 if(ka->sa.sa_flags & SA_RESTORER)
219 restorer = ka->sa.sa_restorer; 227 restorer = ka->sa.sa_restorer;
220 228
229 /* Update SP now because the page fault handler refuses to extend
230 * the stack if the faulting address is too far below the current
231 * SP, which frame now certainly is. If there's an error, the original
232 * value is restored on the way out.
233 * When writing the sigcontext to the stack, we have to write the
234 * original value, so that's passed to copy_sc_to_user, which does
235 * the right thing with it.
236 */
237 PT_REGS_SP(regs) = (unsigned long) frame;
238
221 err |= __put_user(restorer, &frame->pretcode); 239 err |= __put_user(restorer, &frame->pretcode);
222 err |= __put_user(sig, &frame->sig); 240 err |= __put_user(sig, &frame->sig);
223 err |= copy_sc_to_user(&frame->sc, NULL, regs); 241 err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp);
224 err |= __put_user(mask->sig[0], &frame->sc.oldmask); 242 err |= __put_user(mask->sig[0], &frame->sc.oldmask);
225 if (_NSIG_WORDS > 1) 243 if (_NSIG_WORDS > 1)
226 err |= __copy_to_user(&frame->extramask, &mask->sig[1], 244 err |= __copy_to_user(&frame->extramask, &mask->sig[1],
@@ -238,7 +256,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
238 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); 256 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
239 257
240 if(err) 258 if(err)
241 return(err); 259 goto err;
242 260
243 PT_REGS_SP(regs) = (unsigned long) frame; 261 PT_REGS_SP(regs) = (unsigned long) frame;
244 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; 262 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
@@ -248,7 +266,11 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
248 266
249 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) 267 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
250 ptrace_notify(SIGTRAP); 268 ptrace_notify(SIGTRAP);
251 return(0); 269 return 0;
270
271err:
272 PT_REGS_SP(regs) = save_sp;
273 return err;
252} 274}
253 275
254int setup_signal_stack_si(unsigned long stack_top, int sig, 276int setup_signal_stack_si(unsigned long stack_top, int sig,
@@ -257,6 +279,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
257{ 279{
258 struct rt_sigframe __user *frame; 280 struct rt_sigframe __user *frame;
259 void *restorer; 281 void *restorer;
282 unsigned long save_sp = PT_REGS_SP(regs);
260 int err = 0; 283 int err = 0;
261 284
262 stack_top &= -8UL; 285 stack_top &= -8UL;
@@ -268,13 +291,16 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
268 if(ka->sa.sa_flags & SA_RESTORER) 291 if(ka->sa.sa_flags & SA_RESTORER)
269 restorer = ka->sa.sa_restorer; 292 restorer = ka->sa.sa_restorer;
270 293
294 /* See comment above about why this is here */
295 PT_REGS_SP(regs) = (unsigned long) frame;
296
271 err |= __put_user(restorer, &frame->pretcode); 297 err |= __put_user(restorer, &frame->pretcode);
272 err |= __put_user(sig, &frame->sig); 298 err |= __put_user(sig, &frame->sig);
273 err |= __put_user(&frame->info, &frame->pinfo); 299 err |= __put_user(&frame->info, &frame->pinfo);
274 err |= __put_user(&frame->uc, &frame->puc); 300 err |= __put_user(&frame->uc, &frame->puc);
275 err |= copy_siginfo_to_user(&frame->info, info); 301 err |= copy_siginfo_to_user(&frame->info, info);
276 err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, 302 err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
277 PT_REGS_SP(regs)); 303 save_sp);
278 304
279 /* 305 /*
280 * This is movl $,%eax ; int $0x80 306 * This is movl $,%eax ; int $0x80
@@ -288,9 +314,8 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
288 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); 314 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
289 315
290 if(err) 316 if(err)
291 return(err); 317 goto err;
292 318
293 PT_REGS_SP(regs) = (unsigned long) frame;
294 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; 319 PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler;
295 PT_REGS_EAX(regs) = (unsigned long) sig; 320 PT_REGS_EAX(regs) = (unsigned long) sig;
296 PT_REGS_EDX(regs) = (unsigned long) &frame->info; 321 PT_REGS_EDX(regs) = (unsigned long) &frame->info;
@@ -298,7 +323,11 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
298 323
299 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) 324 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
300 ptrace_notify(SIGTRAP); 325 ptrace_notify(SIGTRAP);
301 return(0); 326 return 0;
327
328err:
329 PT_REGS_SP(regs) = save_sp;
330 return err;
302} 331}
303 332
304long sys_sigreturn(struct pt_regs regs) 333long sys_sigreturn(struct pt_regs regs)
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 26b68675053d..6f4ef2b7fa4a 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -3,12 +3,13 @@
3#include <asm/ptrace.h> 3#include <asm/ptrace.h>
4#include <asm/user.h> 4#include <asm/user.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <sys/poll.h>
6 7
7#define DEFINE(sym, val) \ 8#define DEFINE(sym, val) \
8 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 9 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
9 10
10#define DEFINE_LONGS(sym, val) \ 11#define DEFINE_LONGS(sym, val) \
11 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) 12 asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
12 13
13#define OFFSET(sym, str, mem) \ 14#define OFFSET(sym, str, mem) \
14 DEFINE(sym, offsetof(struct str, mem)); 15 DEFINE(sym, offsetof(struct str, mem));
@@ -67,4 +68,9 @@ void foo(void)
67 DEFINE(HOST_ES, ES); 68 DEFINE(HOST_ES, ES);
68 DEFINE(HOST_GS, GS); 69 DEFINE(HOST_GS, GS);
69 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); 70 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
71
72 /* XXX Duplicated between i386 and x86_64 */
73 DEFINE(UM_POLLIN, POLLIN);
74 DEFINE(UM_POLLPRI, POLLPRI);
75 DEFINE(UM_POLLOUT, POLLOUT);
70} 76}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index 74eee5c7c6dd..147bbf05cbc2 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -8,6 +8,7 @@
8#include <asm/ptrace.h> 8#include <asm/ptrace.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/mm.h>
11#include <asm/uaccess.h> 12#include <asm/uaccess.h>
12#include <asm/elf.h> 13#include <asm/elf.h>
13 14
@@ -136,9 +137,28 @@ void arch_switch(void)
136*/ 137*/
137} 138}
138 139
140/* XXX Mostly copied from sys-i386 */
139int is_syscall(unsigned long addr) 141int is_syscall(unsigned long addr)
140{ 142{
141 panic("is_syscall"); 143 unsigned short instr;
144 int n;
145
146 n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
147 if(n){
148 /* access_process_vm() grants access to vsyscall and stub,
149 * while copy_from_user doesn't. Maybe access_process_vm is
150 * slow, but that doesn't matter, since it will be called only
151 * in case of singlestepping, if copy_from_user failed.
152 */
153 n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
154 if(n != sizeof(instr)) {
155 printk("is_syscall : failed to read instruction from "
156 "0x%lx\n", addr);
157 return(1);
158 }
159 }
160 /* sysenter */
161 return(instr == 0x050f);
142} 162}
143 163
144int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu ) 164int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu )
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index fe1d065332b1..e75c4e1838b0 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -55,7 +55,8 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
55} 55}
56 56
57int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 57int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
58 struct pt_regs *regs, unsigned long mask) 58 struct pt_regs *regs, unsigned long mask,
59 unsigned long sp)
59{ 60{
60 struct faultinfo * fi = &current->thread.arch.faultinfo; 61 struct faultinfo * fi = &current->thread.arch.faultinfo;
61 int err = 0; 62 int err = 0;
@@ -70,7 +71,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
70 err |= PUTREG(regs, RDI, to, rdi); 71 err |= PUTREG(regs, RDI, to, rdi);
71 err |= PUTREG(regs, RSI, to, rsi); 72 err |= PUTREG(regs, RSI, to, rsi);
72 err |= PUTREG(regs, RBP, to, rbp); 73 err |= PUTREG(regs, RBP, to, rbp);
73 err |= PUTREG(regs, RSP, to, rsp); 74 /* Must use orignal RSP, which is passed in, rather than what's in
75 * the pt_regs, because that's already been updated to point at the
76 * signal frame.
77 */
78 err |= __put_user(sp, &to->rsp);
74 err |= PUTREG(regs, RBX, to, rbx); 79 err |= PUTREG(regs, RBX, to, rbx);
75 err |= PUTREG(regs, RDX, to, rdx); 80 err |= PUTREG(regs, RDX, to, rdx);
76 err |= PUTREG(regs, RCX, to, rcx); 81 err |= PUTREG(regs, RCX, to, rcx);
@@ -102,7 +107,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
102 107
103#ifdef CONFIG_MODE_TT 108#ifdef CONFIG_MODE_TT
104int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, 109int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
105 int fpsize) 110 int fpsize)
106{ 111{
107 struct _fpstate *to_fp, *from_fp; 112 struct _fpstate *to_fp, *from_fp;
108 unsigned long sigs; 113 unsigned long sigs;
@@ -120,7 +125,7 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
120} 125}
121 126
122int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 127int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
123 struct sigcontext *from, int fpsize) 128 struct sigcontext *from, int fpsize, unsigned long sp)
124{ 129{
125 struct _fpstate *to_fp, *from_fp; 130 struct _fpstate *to_fp, *from_fp;
126 int err; 131 int err;
@@ -128,11 +133,17 @@ int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
128 to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 133 to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
129 from_fp = from->fpstate; 134 from_fp = from->fpstate;
130 err = copy_to_user(to, from, sizeof(*to)); 135 err = copy_to_user(to, from, sizeof(*to));
136 /* The SP in the sigcontext is the updated one for the signal
137 * delivery. The sp passed in is the original, and this needs
138 * to be restored, so we stick it in separately.
139 */
140 err |= copy_to_user(&SC_SP(to), sp, sizeof(sp));
141
131 if(from_fp != NULL){ 142 if(from_fp != NULL){
132 err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate)); 143 err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate));
133 err |= copy_to_user(to_fp, from_fp, fpsize); 144 err |= copy_to_user(to_fp, from_fp, fpsize);
134 } 145 }
135 return(err); 146 return err;
136} 147}
137 148
138#endif 149#endif
@@ -148,11 +159,12 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
148} 159}
149 160
150static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 161static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
151 struct pt_regs *from, unsigned long mask) 162 struct pt_regs *from, unsigned long mask,
163 unsigned long sp)
152{ 164{
153 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), 165 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs),
154 sizeof(*fp)), 166 sizeof(*fp), sp),
155 copy_sc_to_user_skas(to, fp, from, mask))); 167 copy_sc_to_user_skas(to, fp, from, mask, sp)));
156} 168}
157 169
158struct rt_sigframe 170struct rt_sigframe
@@ -170,6 +182,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
170{ 182{
171 struct rt_sigframe __user *frame; 183 struct rt_sigframe __user *frame;
172 struct _fpstate __user *fp = NULL; 184 struct _fpstate __user *fp = NULL;
185 unsigned long save_sp = PT_REGS_RSP(regs);
173 int err = 0; 186 int err = 0;
174 struct task_struct *me = current; 187 struct task_struct *me = current;
175 188
@@ -193,14 +206,25 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
193 goto out; 206 goto out;
194 } 207 }
195 208
209 /* Update SP now because the page fault handler refuses to extend
210 * the stack if the faulting address is too far below the current
211 * SP, which frame now certainly is. If there's an error, the original
212 * value is restored on the way out.
213 * When writing the sigcontext to the stack, we have to write the
214 * original value, so that's passed to copy_sc_to_user, which does
215 * the right thing with it.
216 */
217 PT_REGS_RSP(regs) = (unsigned long) frame;
218
196 /* Create the ucontext. */ 219 /* Create the ucontext. */
197 err |= __put_user(0, &frame->uc.uc_flags); 220 err |= __put_user(0, &frame->uc.uc_flags);
198 err |= __put_user(0, &frame->uc.uc_link); 221 err |= __put_user(0, &frame->uc.uc_link);
199 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); 222 err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
200 err |= __put_user(sas_ss_flags(PT_REGS_SP(regs)), 223 err |= __put_user(sas_ss_flags(save_sp),
201 &frame->uc.uc_stack.ss_flags); 224 &frame->uc.uc_stack.ss_flags);
202 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 225 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
203 err |= copy_sc_to_user(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); 226 err |= copy_sc_to_user(&frame->uc.uc_mcontext, fp, regs, set->sig[0],
227 save_sp);
204 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 228 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
205 if (sizeof(*set) == 16) { 229 if (sizeof(*set) == 16) {
206 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 230 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
@@ -217,10 +241,10 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
217 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 241 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
218 else 242 else
219 /* could use a vstub here */ 243 /* could use a vstub here */
220 goto out; 244 goto restore_sp;
221 245
222 if (err) 246 if (err)
223 goto out; 247 goto restore_sp;
224 248
225 /* Set up registers for signal handler */ 249 /* Set up registers for signal handler */
226 { 250 {
@@ -238,10 +262,12 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
238 PT_REGS_RSI(regs) = (unsigned long) &frame->info; 262 PT_REGS_RSI(regs) = (unsigned long) &frame->info;
239 PT_REGS_RDX(regs) = (unsigned long) &frame->uc; 263 PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
240 PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler; 264 PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
241
242 PT_REGS_RSP(regs) = (unsigned long) frame;
243 out: 265 out:
244 return(err); 266 return err;
267
268restore_sp:
269 PT_REGS_RSP(regs) = save_sp;
270 return err;
245} 271}
246 272
247long sys_rt_sigreturn(struct pt_regs *regs) 273long sys_rt_sigreturn(struct pt_regs *regs)
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
index 7bd54a921cf7..899cebb57c3f 100644
--- a/arch/um/sys-x86_64/user-offsets.c
+++ b/arch/um/sys-x86_64/user-offsets.c
@@ -1,6 +1,7 @@
1#include <stdio.h> 1#include <stdio.h>
2#include <stddef.h> 2#include <stddef.h>
3#include <signal.h> 3#include <signal.h>
4#include <sys/poll.h>
4#define __FRAME_OFFSETS 5#define __FRAME_OFFSETS
5#include <asm/ptrace.h> 6#include <asm/ptrace.h>
6#include <asm/types.h> 7#include <asm/types.h>
@@ -88,4 +89,9 @@ void foo(void)
88 DEFINE_LONGS(HOST_IP, RIP); 89 DEFINE_LONGS(HOST_IP, RIP);
89 DEFINE_LONGS(HOST_SP, RSP); 90 DEFINE_LONGS(HOST_SP, RSP);
90 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); 91 DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
92
93 /* XXX Duplicated between i386 and x86_64 */
94 DEFINE(UM_POLLIN, POLLIN);
95 DEFINE(UM_POLLPRI, POLLPRI);
96 DEFINE(UM_POLLOUT, POLLOUT);
91} 97}
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index e7fc3e500342..37ec644603ab 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -16,6 +16,12 @@ config RWSEM_GENERIC_SPINLOCK
16config RWSEM_XCHGADD_ALGORITHM 16config RWSEM_XCHGADD_ALGORITHM
17 bool 17 bool
18 default n 18 default n
19config GENERIC_FIND_NEXT_BIT
20 bool
21 default y
22config GENERIC_HWEIGHT
23 bool
24 default y
19config GENERIC_CALIBRATE_DELAY 25config GENERIC_CALIBRATE_DELAY
20 bool 26 bool
21 default y 27 default y
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6420baeb8c1f..4310b4a311a5 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -45,6 +45,10 @@ config RWSEM_GENERIC_SPINLOCK
45config RWSEM_XCHGADD_ALGORITHM 45config RWSEM_XCHGADD_ALGORITHM
46 bool 46 bool
47 47
48config GENERIC_HWEIGHT
49 bool
50 default y
51
48config GENERIC_CALIBRATE_DELAY 52config GENERIC_CALIBRATE_DELAY
49 bool 53 bool
50 default y 54 default y
@@ -246,6 +250,15 @@ config SCHED_SMT
246 cost of slightly increased overhead in some places. If unsure say 250 cost of slightly increased overhead in some places. If unsure say
247 N here. 251 N here.
248 252
253config SCHED_MC
254 bool "Multi-core scheduler support"
255 depends on SMP
256 default y
257 help
258 Multi-core scheduler support improves the CPU scheduler's decision
259 making when dealing with multi-core CPU chips at a cost of slightly
260 increased overhead in some places. If unsure say N here.
261
249source "kernel/Kconfig.preempt" 262source "kernel/Kconfig.preempt"
250 263
251config NUMA 264config NUMA
@@ -321,6 +334,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
321 def_bool y 334 def_bool y
322 depends on NUMA 335 depends on NUMA
323 336
337config OUT_OF_LINE_PFN_TO_PAGE
338 def_bool y
339 depends on DISCONTIGMEM
340
324config NR_CPUS 341config NR_CPUS
325 int "Maximum number of CPUs (2-256)" 342 int "Maximum number of CPUs (2-256)"
326 range 2 255 343 range 2 255
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 0fbc0283609c..585fd4a559c8 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -70,7 +70,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
70boot := arch/x86_64/boot 70boot := arch/x86_64/boot
71 71
72PHONY += bzImage bzlilo install archmrproper \ 72PHONY += bzImage bzlilo install archmrproper \
73 fdimage fdimage144 fdimage288 archclean 73 fdimage fdimage144 fdimage288 isoimage archclean
74 74
75#Default target when executing "make" 75#Default target when executing "make"
76all: bzImage 76all: bzImage
@@ -87,7 +87,7 @@ bzlilo: vmlinux
87bzdisk: vmlinux 87bzdisk: vmlinux
88 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk 88 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
89 89
90fdimage fdimage144 fdimage288: vmlinux 90fdimage fdimage144 fdimage288 isoimage: vmlinux
91 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ 91 $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
92 92
93install: 93install:
@@ -99,11 +99,16 @@ archclean:
99define archhelp 99define archhelp
100 echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)' 100 echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)'
101 echo ' install - Install kernel using' 101 echo ' install - Install kernel using'
102 echo ' (your) ~/bin/installkernel or' 102 echo ' (your) ~/bin/installkernel or'
103 echo ' (distribution) /sbin/installkernel or' 103 echo ' (distribution) /sbin/installkernel or'
104 echo ' install to $$(INSTALL_PATH) and run lilo' 104 echo ' install to $$(INSTALL_PATH) and run lilo'
105 echo ' bzdisk - Create a boot floppy in /dev/fd0'
106 echo ' fdimage - Create a boot floppy image'
107 echo ' isoimage - Create a boot CD-ROM image'
105endef 108endef
106 109
107CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf 110CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
111 arch/$(ARCH)/boot/image.iso \
112 arch/$(ARCH)/boot/mtools.conf
108 113
109 114
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 29f8396ed151..43ee6c50c277 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -60,8 +60,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE
60$(obj)/compressed/vmlinux: FORCE 60$(obj)/compressed/vmlinux: FORCE
61 $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ 61 $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@
62 62
63# Set this if you want to pass append arguments to the zdisk/fdimage kernel 63# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
64FDARGS = 64FDARGS =
65# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
66FDINITRD =
67
68image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
65 69
66$(obj)/mtools.conf: $(src)/mtools.conf.in 70$(obj)/mtools.conf: $(src)/mtools.conf.in
67 sed -e 's|@OBJ@|$(obj)|g' < $< > $@ 71 sed -e 's|@OBJ@|$(obj)|g' < $< > $@
@@ -70,8 +74,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
70zdisk: $(BOOTIMAGE) $(obj)/mtools.conf 74zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
71 MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync 75 MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
72 syslinux /dev/fd0 ; sync 76 syslinux /dev/fd0 ; sync
73 echo 'default linux $(FDARGS)' | \ 77 echo '$(image_cmdline)' | \
74 MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg 78 MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg
79 if [ -f '$(FDINITRD)' ] ; then \
80 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
81 fi
75 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync 82 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync
76 83
77# These require being root or having syslinux 2.02 or higher installed 84# These require being root or having syslinux 2.02 or higher installed
@@ -79,18 +86,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
79 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 86 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
80 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync 87 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
81 syslinux $(obj)/fdimage ; sync 88 syslinux $(obj)/fdimage ; sync
82 echo 'default linux $(FDARGS)' | \ 89 echo '$(image_cmdline)' | \
83 MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg 90 MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg
91 if [ -f '$(FDINITRD)' ] ; then \
92 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
93 fi
84 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync 94 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync
85 95
86fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf 96fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
87 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 97 dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
88 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync 98 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
89 syslinux $(obj)/fdimage ; sync 99 syslinux $(obj)/fdimage ; sync
90 echo 'default linux $(FDARGS)' | \ 100 echo '$(image_cmdline)' | \
91 MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg 101 MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg
102 if [ -f '$(FDINITRD)' ] ; then \
103 MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
104 fi
92 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync 105 MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync
93 106
107isoimage: $(BOOTIMAGE)
108 -rm -rf $(obj)/isoimage
109 mkdir $(obj)/isoimage
110 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
111 $(obj)/isoimage
112 cp $(BOOTIMAGE) $(obj)/isoimage/linux
113 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
114 if [ -f '$(FDINITRD)' ] ; then \
115 cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
116 fi
117 mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
118 -no-emul-boot -boot-load-size 4 -boot-info-table \
119 $(obj)/isoimage
120 rm -rf $(obj)/isoimage
121
94zlilo: $(BOOTIMAGE) 122zlilo: $(BOOTIMAGE)
95 if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi 123 if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
96 if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi 124 if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 00dee176c08e..35b2faccdc6c 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -501,7 +501,7 @@ ia32_sys_call_table:
501 .quad sys_setdomainname 501 .quad sys_setdomainname
502 .quad sys_uname 502 .quad sys_uname
503 .quad sys_modify_ldt 503 .quad sys_modify_ldt
504 .quad sys32_adjtimex 504 .quad compat_sys_adjtimex
505 .quad sys32_mprotect /* 125 */ 505 .quad sys32_mprotect /* 125 */
506 .quad compat_sys_sigprocmask 506 .quad compat_sys_sigprocmask
507 .quad quiet_ni_syscall /* create_module */ 507 .quad quiet_ni_syscall /* create_module */
@@ -688,6 +688,8 @@ ia32_sys_call_table:
688 .quad sys_ni_syscall /* pselect6 for now */ 688 .quad sys_ni_syscall /* pselect6 for now */
689 .quad sys_ni_syscall /* ppoll for now */ 689 .quad sys_ni_syscall /* ppoll for now */
690 .quad sys_unshare /* 310 */ 690 .quad sys_unshare /* 310 */
691 .quad compat_sys_set_robust_list
692 .quad compat_sys_get_robust_list
691ia32_syscall_end: 693ia32_syscall_end:
692 .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8 694 .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
693 .quad ni_syscall 695 .quad ni_syscall
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 2b2d029f477c..f182b20858e2 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -30,7 +30,6 @@
30#include <linux/resource.h> 30#include <linux/resource.h>
31#include <linux/times.h> 31#include <linux/times.h>
32#include <linux/utsname.h> 32#include <linux/utsname.h>
33#include <linux/timex.h>
34#include <linux/smp.h> 33#include <linux/smp.h>
35#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
36#include <linux/sem.h> 35#include <linux/sem.h>
@@ -767,82 +766,6 @@ sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
767 return ret; 766 return ret;
768} 767}
769 768
770/* Handle adjtimex compatibility. */
771
772struct timex32 {
773 u32 modes;
774 s32 offset, freq, maxerror, esterror;
775 s32 status, constant, precision, tolerance;
776 struct compat_timeval time;
777 s32 tick;
778 s32 ppsfreq, jitter, shift, stabil;
779 s32 jitcnt, calcnt, errcnt, stbcnt;
780 s32 :32; s32 :32; s32 :32; s32 :32;
781 s32 :32; s32 :32; s32 :32; s32 :32;
782 s32 :32; s32 :32; s32 :32; s32 :32;
783};
784
785extern int do_adjtimex(struct timex *);
786
787asmlinkage long
788sys32_adjtimex(struct timex32 __user *utp)
789{
790 struct timex txc;
791 int ret;
792
793 memset(&txc, 0, sizeof(struct timex));
794
795 if (!access_ok(VERIFY_READ, utp, sizeof(struct timex32)) ||
796 __get_user(txc.modes, &utp->modes) ||
797 __get_user(txc.offset, &utp->offset) ||
798 __get_user(txc.freq, &utp->freq) ||
799 __get_user(txc.maxerror, &utp->maxerror) ||
800 __get_user(txc.esterror, &utp->esterror) ||
801 __get_user(txc.status, &utp->status) ||
802 __get_user(txc.constant, &utp->constant) ||
803 __get_user(txc.precision, &utp->precision) ||
804 __get_user(txc.tolerance, &utp->tolerance) ||
805 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
806 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
807 __get_user(txc.tick, &utp->tick) ||
808 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
809 __get_user(txc.jitter, &utp->jitter) ||
810 __get_user(txc.shift, &utp->shift) ||
811 __get_user(txc.stabil, &utp->stabil) ||
812 __get_user(txc.jitcnt, &utp->jitcnt) ||
813 __get_user(txc.calcnt, &utp->calcnt) ||
814 __get_user(txc.errcnt, &utp->errcnt) ||
815 __get_user(txc.stbcnt, &utp->stbcnt))
816 return -EFAULT;
817
818 ret = do_adjtimex(&txc);
819
820 if (!access_ok(VERIFY_WRITE, utp, sizeof(struct timex32)) ||
821 __put_user(txc.modes, &utp->modes) ||
822 __put_user(txc.offset, &utp->offset) ||
823 __put_user(txc.freq, &utp->freq) ||
824 __put_user(txc.maxerror, &utp->maxerror) ||
825 __put_user(txc.esterror, &utp->esterror) ||
826 __put_user(txc.status, &utp->status) ||
827 __put_user(txc.constant, &utp->constant) ||
828 __put_user(txc.precision, &utp->precision) ||
829 __put_user(txc.tolerance, &utp->tolerance) ||
830 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
831 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
832 __put_user(txc.tick, &utp->tick) ||
833 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
834 __put_user(txc.jitter, &utp->jitter) ||
835 __put_user(txc.shift, &utp->shift) ||
836 __put_user(txc.stabil, &utp->stabil) ||
837 __put_user(txc.jitcnt, &utp->jitcnt) ||
838 __put_user(txc.calcnt, &utp->calcnt) ||
839 __put_user(txc.errcnt, &utp->errcnt) ||
840 __put_user(txc.stbcnt, &utp->stbcnt))
841 ret = -EFAULT;
842
843 return ret;
844}
845
846asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, 769asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
847 unsigned long prot, unsigned long flags, 770 unsigned long prot, unsigned long flags,
848 unsigned long fd, unsigned long pgoff) 771 unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 14f0ced613b6..accbff3fec49 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -37,10 +37,12 @@
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/preempt.h> 39#include <linux/preempt.h>
40#include <linux/module.h>
40 41
41#include <asm/cacheflush.h> 42#include <asm/cacheflush.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
43#include <asm/kdebug.h> 44#include <asm/kdebug.h>
45#include <asm/uaccess.h>
44 46
45void jprobe_return_end(void); 47void jprobe_return_end(void);
46static void __kprobes arch_copy_kprobe(struct kprobe *p); 48static void __kprobes arch_copy_kprobe(struct kprobe *p);
@@ -578,16 +580,62 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
578{ 580{
579 struct kprobe *cur = kprobe_running(); 581 struct kprobe *cur = kprobe_running();
580 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 582 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
583 const struct exception_table_entry *fixup;
581 584
582 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 585 switch(kcb->kprobe_status) {
583 return 1; 586 case KPROBE_HIT_SS:
584 587 case KPROBE_REENTER:
585 if (kcb->kprobe_status & KPROBE_HIT_SS) { 588 /*
586 resume_execution(cur, regs, kcb); 589 * We are here because the instruction being single
590 * stepped caused a page fault. We reset the current
591 * kprobe and the rip points back to the probe address
592 * and allow the page fault handler to continue as a
593 * normal page fault.
594 */
595 regs->rip = (unsigned long)cur->addr;
587 regs->eflags |= kcb->kprobe_old_rflags; 596 regs->eflags |= kcb->kprobe_old_rflags;
588 597 if (kcb->kprobe_status == KPROBE_REENTER)
589 reset_current_kprobe(); 598 restore_previous_kprobe(kcb);
599 else
600 reset_current_kprobe();
590 preempt_enable_no_resched(); 601 preempt_enable_no_resched();
602 break;
603 case KPROBE_HIT_ACTIVE:
604 case KPROBE_HIT_SSDONE:
605 /*
606 * We increment the nmissed count for accounting,
607 * we can also use npre/npostfault count for accouting
608 * these specific fault cases.
609 */
610 kprobes_inc_nmissed_count(cur);
611
612 /*
613 * We come here because instructions in the pre/post
614 * handler caused the page_fault, this could happen
615 * if handler tries to access user space by
616 * copy_from_user(), get_user() etc. Let the
617 * user-specified handler try to fix it first.
618 */
619 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
620 return 1;
621
622 /*
623 * In case the user-specified fault handler returned
624 * zero, try to fix up.
625 */
626 fixup = search_exception_tables(regs->rip);
627 if (fixup) {
628 regs->rip = fixup->fixup;
629 return 1;
630 }
631
632 /*
633 * fixup() could not handle it,
634 * Let do_page_fault() fix it.
635 */
636 break;
637 default:
638 break;
591 } 639 }
592 return 0; 640 return 0;
593} 641}
@@ -601,6 +649,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
601 struct die_args *args = (struct die_args *)data; 649 struct die_args *args = (struct die_args *)data;
602 int ret = NOTIFY_DONE; 650 int ret = NOTIFY_DONE;
603 651
652 if (args->regs && user_mode(args->regs))
653 return ret;
654
604 switch (val) { 655 switch (val) {
605 case DIE_INT3: 656 case DIE_INT3:
606 if (kprobe_handler(args->regs)) 657 if (kprobe_handler(args->regs))
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 81111835722d..70dd8e5c6889 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -35,8 +35,8 @@
35#include <linux/ptrace.h> 35#include <linux/ptrace.h>
36#include <linux/utsname.h> 36#include <linux/utsname.h>
37#include <linux/random.h> 37#include <linux/random.h>
38#include <linux/kprobes.h>
39#include <linux/notifier.h> 38#include <linux/notifier.h>
39#include <linux/kprobes.h>
40 40
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
@@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override);
66void (*pm_idle)(void); 66void (*pm_idle)(void);
67static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 67static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
68 68
69static struct notifier_block *idle_notifier; 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
70static DEFINE_SPINLOCK(idle_notifier_lock);
71 70
72void idle_notifier_register(struct notifier_block *n) 71void idle_notifier_register(struct notifier_block *n)
73{ 72{
74 unsigned long flags; 73 atomic_notifier_chain_register(&idle_notifier, n);
75 spin_lock_irqsave(&idle_notifier_lock, flags);
76 notifier_chain_register(&idle_notifier, n);
77 spin_unlock_irqrestore(&idle_notifier_lock, flags);
78} 74}
79EXPORT_SYMBOL_GPL(idle_notifier_register); 75EXPORT_SYMBOL_GPL(idle_notifier_register);
80 76
81void idle_notifier_unregister(struct notifier_block *n) 77void idle_notifier_unregister(struct notifier_block *n)
82{ 78{
83 unsigned long flags; 79 atomic_notifier_chain_unregister(&idle_notifier, n);
84 spin_lock_irqsave(&idle_notifier_lock, flags);
85 notifier_chain_unregister(&idle_notifier, n);
86 spin_unlock_irqrestore(&idle_notifier_lock, flags);
87} 80}
88EXPORT_SYMBOL(idle_notifier_unregister); 81EXPORT_SYMBOL(idle_notifier_unregister);
89 82
@@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
93void enter_idle(void) 86void enter_idle(void)
94{ 87{
95 __get_cpu_var(idle_state) = CPU_IDLE; 88 __get_cpu_var(idle_state) = CPU_IDLE;
96 notifier_call_chain(&idle_notifier, IDLE_START, NULL); 89 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
97} 90}
98 91
99static void __exit_idle(void) 92static void __exit_idle(void)
100{ 93{
101 __get_cpu_var(idle_state) = CPU_NOT_IDLE; 94 __get_cpu_var(idle_state) = CPU_NOT_IDLE;
102 notifier_call_chain(&idle_notifier, IDLE_END, NULL); 95 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
103} 96}
104 97
105/* Called from interrupts to signify idle end */ 98/* Called from interrupts to signify idle end */
@@ -353,13 +346,6 @@ void exit_thread(void)
353 struct task_struct *me = current; 346 struct task_struct *me = current;
354 struct thread_struct *t = &me->thread; 347 struct thread_struct *t = &me->thread;
355 348
356 /*
357 * Remove function-return probe instances associated with this task
358 * and put them back on the free list. Do not insert an exit probe for
359 * this function, it will be disabled by kprobe_flush_task if you do.
360 */
361 kprobe_flush_task(me);
362
363 if (me->thread.io_bitmap_ptr) { 349 if (me->thread.io_bitmap_ptr) {
364 struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); 350 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
365 351
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index a57eec8311a7..d1f3e9272c05 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -962,7 +962,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
962 962
963 cpuid(1, &eax, &ebx, &ecx, &edx); 963 cpuid(1, &eax, &ebx, &ecx, &edx);
964 964
965 c->apicid = phys_pkg_id(0);
966 965
967 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 966 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
968 return; 967 return;
@@ -1171,6 +1170,8 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1171 c->x86_capability[2] = cpuid_edx(0x80860001); 1170 c->x86_capability[2] = cpuid_edx(0x80860001);
1172 } 1171 }
1173 1172
1173 c->apicid = phys_pkg_id(0);
1174
1174 /* 1175 /*
1175 * Vendor-specific initialization. In this section we 1176 * Vendor-specific initialization. In this section we
1176 * canonicalize the feature flags, meaning if there are 1177 * canonicalize the feature flags, meaning if there are
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 66e98659d077..ea48fa638070 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -68,6 +68,9 @@ u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
68/* core ID of each logical CPU */ 68/* core ID of each logical CPU */
69u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 69u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
70 70
71/* Last level cache ID of each logical CPU */
72u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
73
71/* Bitmask of currently online CPUs */ 74/* Bitmask of currently online CPUs */
72cpumask_t cpu_online_map __read_mostly; 75cpumask_t cpu_online_map __read_mostly;
73 76
@@ -445,6 +448,18 @@ void __cpuinit smp_callin(void)
445 cpu_set(cpuid, cpu_callin_map); 448 cpu_set(cpuid, cpu_callin_map);
446} 449}
447 450
451/* maps the cpu to the sched domain representing multi-core */
452cpumask_t cpu_coregroup_map(int cpu)
453{
454 struct cpuinfo_x86 *c = cpu_data + cpu;
455 /*
456 * For perf, we return last level cache shared map.
457 * TBD: when power saving sched policy is added, we will return
458 * cpu_core_map when power saving policy is enabled
459 */
460 return c->llc_shared_map;
461}
462
448/* representing cpus for which sibling maps can be computed */ 463/* representing cpus for which sibling maps can be computed */
449static cpumask_t cpu_sibling_setup_map; 464static cpumask_t cpu_sibling_setup_map;
450 465
@@ -463,12 +478,16 @@ static inline void set_cpu_sibling_map(int cpu)
463 cpu_set(cpu, cpu_sibling_map[i]); 478 cpu_set(cpu, cpu_sibling_map[i]);
464 cpu_set(i, cpu_core_map[cpu]); 479 cpu_set(i, cpu_core_map[cpu]);
465 cpu_set(cpu, cpu_core_map[i]); 480 cpu_set(cpu, cpu_core_map[i]);
481 cpu_set(i, c[cpu].llc_shared_map);
482 cpu_set(cpu, c[i].llc_shared_map);
466 } 483 }
467 } 484 }
468 } else { 485 } else {
469 cpu_set(cpu, cpu_sibling_map[cpu]); 486 cpu_set(cpu, cpu_sibling_map[cpu]);
470 } 487 }
471 488
489 cpu_set(cpu, c[cpu].llc_shared_map);
490
472 if (current_cpu_data.x86_max_cores == 1) { 491 if (current_cpu_data.x86_max_cores == 1) {
473 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 492 cpu_core_map[cpu] = cpu_sibling_map[cpu];
474 c[cpu].booted_cores = 1; 493 c[cpu].booted_cores = 1;
@@ -476,6 +495,11 @@ static inline void set_cpu_sibling_map(int cpu)
476 } 495 }
477 496
478 for_each_cpu_mask(i, cpu_sibling_setup_map) { 497 for_each_cpu_mask(i, cpu_sibling_setup_map) {
498 if (cpu_llc_id[cpu] != BAD_APICID &&
499 cpu_llc_id[cpu] == cpu_llc_id[i]) {
500 cpu_set(i, c[cpu].llc_shared_map);
501 cpu_set(cpu, c[i].llc_shared_map);
502 }
479 if (phys_proc_id[cpu] == phys_proc_id[i]) { 503 if (phys_proc_id[cpu] == phys_proc_id[i]) {
480 cpu_set(i, cpu_core_map[cpu]); 504 cpu_set(i, cpu_core_map[cpu]);
481 cpu_set(cpu, cpu_core_map[i]); 505 cpu_set(cpu, cpu_core_map[i]);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7f58fa682491..473b514b66e4 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -504,42 +504,25 @@ unsigned long long sched_clock(void)
504 504
505static unsigned long get_cmos_time(void) 505static unsigned long get_cmos_time(void)
506{ 506{
507 unsigned int timeout = 1000000, year, mon, day, hour, min, sec; 507 unsigned int year, mon, day, hour, min, sec;
508 unsigned char uip = 0, this = 0;
509 unsigned long flags; 508 unsigned long flags;
510 unsigned extyear = 0; 509 unsigned extyear = 0;
511 510
512/*
513 * The Linux interpretation of the CMOS clock register contents: When the
514 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
515 * second which has precisely just started. Waiting for this can take up to 1
516 * second, we timeout approximately after 2.4 seconds on a machine with
517 * standard 8.3 MHz ISA bus.
518 */
519
520 spin_lock_irqsave(&rtc_lock, flags); 511 spin_lock_irqsave(&rtc_lock, flags);
521 512
522 while (timeout && (!uip || this)) { 513 do {
523 uip |= this; 514 sec = CMOS_READ(RTC_SECONDS);
524 this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP; 515 min = CMOS_READ(RTC_MINUTES);
525 timeout--; 516 hour = CMOS_READ(RTC_HOURS);
526 } 517 day = CMOS_READ(RTC_DAY_OF_MONTH);
527 518 mon = CMOS_READ(RTC_MONTH);
528 /* 519 year = CMOS_READ(RTC_YEAR);
529 * Here we are safe to assume the registers won't change for a whole
530 * second, so we just go ahead and read them.
531 */
532 sec = CMOS_READ(RTC_SECONDS);
533 min = CMOS_READ(RTC_MINUTES);
534 hour = CMOS_READ(RTC_HOURS);
535 day = CMOS_READ(RTC_DAY_OF_MONTH);
536 mon = CMOS_READ(RTC_MONTH);
537 year = CMOS_READ(RTC_YEAR);
538
539#ifdef CONFIG_ACPI 520#ifdef CONFIG_ACPI
540 if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century) 521 if (acpi_fadt.revision >= FADT2_REVISION_ID &&
541 extyear = CMOS_READ(acpi_fadt.century); 522 acpi_fadt.century)
523 extyear = CMOS_READ(acpi_fadt.century);
542#endif 524#endif
525 } while (sec != CMOS_READ(RTC_SECONDS));
543 526
544 spin_unlock_irqrestore(&rtc_lock, flags); 527 spin_unlock_irqrestore(&rtc_lock, flags);
545 528
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7b148309c529..edaa9fe654dc 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -69,20 +69,20 @@ asmlinkage void alignment_check(void);
69asmlinkage void machine_check(void); 69asmlinkage void machine_check(void);
70asmlinkage void spurious_interrupt_bug(void); 70asmlinkage void spurious_interrupt_bug(void);
71 71
72struct notifier_block *die_chain; 72ATOMIC_NOTIFIER_HEAD(die_chain);
73static DEFINE_SPINLOCK(die_notifier_lock);
74 73
75int register_die_notifier(struct notifier_block *nb) 74int register_die_notifier(struct notifier_block *nb)
76{ 75{
77 int err = 0;
78 unsigned long flags;
79
80 vmalloc_sync_all(); 76 vmalloc_sync_all();
81 spin_lock_irqsave(&die_notifier_lock, flags); 77 return atomic_notifier_chain_register(&die_chain, nb);
82 err = notifier_chain_register(&die_chain, nb); 78}
83 spin_unlock_irqrestore(&die_notifier_lock, flags); 79EXPORT_SYMBOL(register_die_notifier);
84 return err; 80
81int unregister_die_notifier(struct notifier_block *nb)
82{
83 return atomic_notifier_chain_unregister(&die_chain, nb);
85} 84}
85EXPORT_SYMBOL(unregister_die_notifier);
86 86
87static inline void conditional_sti(struct pt_regs *regs) 87static inline void conditional_sti(struct pt_regs *regs)
88{ 88{
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index b04415625442..e5f7f1c34462 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -72,7 +72,7 @@ void show_mem(void)
72 show_free_areas(); 72 show_free_areas();
73 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 73 printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
74 74
75 for_each_pgdat(pgdat) { 75 for_each_online_pgdat(pgdat) {
76 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 76 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
77 page = pfn_to_page(pgdat->node_start_pfn + i); 77 page = pfn_to_page(pgdat->node_start_pfn + i);
78 total++; 78 total++;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 63c72641b737..4be82d6e2b48 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -377,21 +377,6 @@ EXPORT_SYMBOL(node_data);
377 * Should do that. 377 * Should do that.
378 */ 378 */
379 379
380/* Requires pfn_valid(pfn) to be true */
381struct page *pfn_to_page(unsigned long pfn)
382{
383 int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT);
384 return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;
385}
386EXPORT_SYMBOL(pfn_to_page);
387
388unsigned long page_to_pfn(struct page *page)
389{
390 return (long)(((page) - page_zone(page)->zone_mem_map) +
391 page_zone(page)->zone_start_pfn);
392}
393EXPORT_SYMBOL(page_to_pfn);
394
395int pfn_valid(unsigned long pfn) 380int pfn_valid(unsigned long pfn)
396{ 381{
397 unsigned nid; 382 unsigned nid;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e90ef5db8913..dbeb3504c3c8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM
22 bool 22 bool
23 default y 23 default y
24 24
25config GENERIC_FIND_NEXT_BIT
26 bool
27 default y
28
29config GENERIC_HWEIGHT
30 bool
31 default y
32
25config GENERIC_HARDIRQS 33config GENERIC_HARDIRQS
26 bool 34 bool
27 default y 35 default y
diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c
index 2e6dcbf0cc04..23790a5610e2 100644
--- a/arch/xtensa/platform-iss/setup.c
+++ b/arch/xtensa/platform-iss/setup.c
@@ -108,5 +108,5 @@ static struct notifier_block iss_panic_block = {
108 108
109void __init platform_setup(char **p_cmdline) 109void __init platform_setup(char **p_cmdline)
110{ 110{
111 notifier_chain_register(&panic_notifier_list, &iss_panic_block); 111 atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
112} 112}
diff --git a/block/Kconfig b/block/Kconfig
index 96783645092d..5536839886ff 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -13,6 +13,7 @@ config LBD
13 13
14config BLK_DEV_IO_TRACE 14config BLK_DEV_IO_TRACE
15 bool "Support for tracing block io actions" 15 bool "Support for tracing block io actions"
16 depends on SYSFS
16 select RELAY 17 select RELAY
17 select DEBUG_FS 18 select DEBUG_FS
18 help 19 help
@@ -23,4 +24,13 @@ config BLK_DEV_IO_TRACE
23 24
24 git://brick.kernel.dk/data/git/blktrace.git 25 git://brick.kernel.dk/data/git/blktrace.git
25 26
27config LSF
28 bool "Support for Large Single Files"
29 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
30 default n
31 help
32 When CONFIG_LBD is disabled, say Y here if you want to
33 handle large file(bigger than 2TB), otherwise say N.
34 When CONFIG_LBD is enabled, Y is set automatically.
35
26source block/Kconfig.iosched 36source block/Kconfig.iosched
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c4a0d5d8d7f0..67d446de0227 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,18 +26,12 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
26static const int cfq_slice_sync = HZ / 10; 26static const int cfq_slice_sync = HZ / 10;
27static int cfq_slice_async = HZ / 25; 27static int cfq_slice_async = HZ / 25;
28static const int cfq_slice_async_rq = 2; 28static const int cfq_slice_async_rq = 2;
29static int cfq_slice_idle = HZ / 100; 29static int cfq_slice_idle = HZ / 70;
30 30
31#define CFQ_IDLE_GRACE (HZ / 10) 31#define CFQ_IDLE_GRACE (HZ / 10)
32#define CFQ_SLICE_SCALE (5) 32#define CFQ_SLICE_SCALE (5)
33 33
34#define CFQ_KEY_ASYNC (0) 34#define CFQ_KEY_ASYNC (0)
35#define CFQ_KEY_ANY (0xffff)
36
37/*
38 * disable queueing at the driver/hardware level
39 */
40static const int cfq_max_depth = 2;
41 35
42static DEFINE_RWLOCK(cfq_exit_lock); 36static DEFINE_RWLOCK(cfq_exit_lock);
43 37
@@ -102,6 +96,8 @@ static struct completion *ioc_gone;
102#define cfq_cfqq_sync(cfqq) \ 96#define cfq_cfqq_sync(cfqq) \
103 (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) 97 (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
104 98
99#define sample_valid(samples) ((samples) > 80)
100
105/* 101/*
106 * Per block device queue structure 102 * Per block device queue structure
107 */ 103 */
@@ -170,7 +166,6 @@ struct cfq_data {
170 unsigned int cfq_slice[2]; 166 unsigned int cfq_slice[2];
171 unsigned int cfq_slice_async_rq; 167 unsigned int cfq_slice_async_rq;
172 unsigned int cfq_slice_idle; 168 unsigned int cfq_slice_idle;
173 unsigned int cfq_max_depth;
174 169
175 struct list_head cic_list; 170 struct list_head cic_list;
176}; 171};
@@ -343,17 +338,27 @@ static int cfq_queue_empty(request_queue_t *q)
343 return !cfqd->busy_queues; 338 return !cfqd->busy_queues;
344} 339}
345 340
341static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
342{
343 if (rw == READ || process_sync(task))
344 return task->pid;
345
346 return CFQ_KEY_ASYNC;
347}
348
346/* 349/*
347 * Lifted from AS - choose which of crq1 and crq2 that is best served now. 350 * Lifted from AS - choose which of crq1 and crq2 that is best served now.
348 * We choose the request that is closest to the head right now. Distance 351 * We choose the request that is closest to the head right now. Distance
349 * behind the head are penalized and only allowed to a certain extent. 352 * behind the head is penalized and only allowed to a certain extent.
350 */ 353 */
351static struct cfq_rq * 354static struct cfq_rq *
352cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) 355cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
353{ 356{
354 sector_t last, s1, s2, d1 = 0, d2 = 0; 357 sector_t last, s1, s2, d1 = 0, d2 = 0;
355 int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
356 unsigned long back_max; 358 unsigned long back_max;
359#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
360#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
361 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
357 362
358 if (crq1 == NULL || crq1 == crq2) 363 if (crq1 == NULL || crq1 == crq2)
359 return crq2; 364 return crq2;
@@ -385,35 +390,47 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
385 else if (s1 + back_max >= last) 390 else if (s1 + back_max >= last)
386 d1 = (last - s1) * cfqd->cfq_back_penalty; 391 d1 = (last - s1) * cfqd->cfq_back_penalty;
387 else 392 else
388 r1_wrap = 1; 393 wrap |= CFQ_RQ1_WRAP;
389 394
390 if (s2 >= last) 395 if (s2 >= last)
391 d2 = s2 - last; 396 d2 = s2 - last;
392 else if (s2 + back_max >= last) 397 else if (s2 + back_max >= last)
393 d2 = (last - s2) * cfqd->cfq_back_penalty; 398 d2 = (last - s2) * cfqd->cfq_back_penalty;
394 else 399 else
395 r2_wrap = 1; 400 wrap |= CFQ_RQ2_WRAP;
396 401
397 /* Found required data */ 402 /* Found required data */
398 if (!r1_wrap && r2_wrap) 403
399 return crq1; 404 /*
400 else if (!r2_wrap && r1_wrap) 405 * By doing switch() on the bit mask "wrap" we avoid having to
401 return crq2; 406 * check two variables for all permutations: --> faster!
402 else if (r1_wrap && r2_wrap) { 407 */
403 /* both behind the head */ 408 switch (wrap) {
404 if (s1 <= s2) 409 case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
410 if (d1 < d2)
405 return crq1; 411 return crq1;
406 else 412 else if (d2 < d1)
407 return crq2; 413 return crq2;
408 } 414 else {
415 if (s1 >= s2)
416 return crq1;
417 else
418 return crq2;
419 }
409 420
410 /* Both requests in front of the head */ 421 case CFQ_RQ2_WRAP:
411 if (d1 < d2)
412 return crq1; 422 return crq1;
413 else if (d2 < d1) 423 case CFQ_RQ1_WRAP:
414 return crq2; 424 return crq2;
415 else { 425 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
416 if (s1 >= s2) 426 default:
427 /*
428 * Since both rqs are wrapped,
429 * start with the one that's further behind head
430 * (--> only *one* back seek required),
431 * since back seek takes more time than forward.
432 */
433 if (s1 <= s2)
417 return crq1; 434 return crq1;
418 else 435 else
419 return crq2; 436 return crq2;
@@ -612,15 +629,20 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
612 cfq_add_crq_rb(crq); 629 cfq_add_crq_rb(crq);
613} 630}
614 631
615static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) 632static struct request *
616 633cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
617{ 634{
618 struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); 635 struct task_struct *tsk = current;
636 pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
637 struct cfq_queue *cfqq;
619 struct rb_node *n; 638 struct rb_node *n;
639 sector_t sector;
620 640
641 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
621 if (!cfqq) 642 if (!cfqq)
622 goto out; 643 goto out;
623 644
645 sector = bio->bi_sector + bio_sectors(bio);
624 n = cfqq->sort_list.rb_node; 646 n = cfqq->sort_list.rb_node;
625 while (n) { 647 while (n) {
626 struct cfq_rq *crq = rb_entry_crq(n); 648 struct cfq_rq *crq = rb_entry_crq(n);
@@ -674,7 +696,7 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
674 goto out; 696 goto out;
675 } 697 }
676 698
677 __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); 699 __rq = cfq_find_rq_fmerge(cfqd, bio);
678 if (__rq && elv_rq_merge_ok(__rq, bio)) { 700 if (__rq && elv_rq_merge_ok(__rq, bio)) {
679 ret = ELEVATOR_FRONT_MERGE; 701 ret = ELEVATOR_FRONT_MERGE;
680 goto out; 702 goto out;
@@ -877,6 +899,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
877static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) 899static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
878 900
879{ 901{
902 struct cfq_io_context *cic;
880 unsigned long sl; 903 unsigned long sl;
881 904
882 WARN_ON(!RB_EMPTY(&cfqq->sort_list)); 905 WARN_ON(!RB_EMPTY(&cfqq->sort_list));
@@ -892,13 +915,23 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
892 /* 915 /*
893 * task has exited, don't wait 916 * task has exited, don't wait
894 */ 917 */
895 if (cfqd->active_cic && !cfqd->active_cic->ioc->task) 918 cic = cfqd->active_cic;
919 if (!cic || !cic->ioc->task)
896 return 0; 920 return 0;
897 921
898 cfq_mark_cfqq_must_dispatch(cfqq); 922 cfq_mark_cfqq_must_dispatch(cfqq);
899 cfq_mark_cfqq_wait_request(cfqq); 923 cfq_mark_cfqq_wait_request(cfqq);
900 924
901 sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); 925 sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
926
927 /*
928 * we don't want to idle for seeks, but we do want to allow
929 * fair distribution of slice time for a process doing back-to-back
930 * seeks. so allow a little bit of time for him to submit a new rq
931 */
932 if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
933 sl = 2;
934
902 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 935 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
903 return 1; 936 return 1;
904} 937}
@@ -1115,13 +1148,6 @@ cfq_dispatch_requests(request_queue_t *q, int force)
1115 if (cfqq) { 1148 if (cfqq) {
1116 int max_dispatch; 1149 int max_dispatch;
1117 1150
1118 /*
1119 * if idle window is disabled, allow queue buildup
1120 */
1121 if (!cfq_cfqq_idle_window(cfqq) &&
1122 cfqd->rq_in_driver >= cfqd->cfq_max_depth)
1123 return 0;
1124
1125 cfq_clear_cfqq_must_dispatch(cfqq); 1151 cfq_clear_cfqq_must_dispatch(cfqq);
1126 cfq_clear_cfqq_wait_request(cfqq); 1152 cfq_clear_cfqq_wait_request(cfqq);
1127 del_timer(&cfqd->idle_slice_timer); 1153 del_timer(&cfqd->idle_slice_timer);
@@ -1171,13 +1197,13 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1171 const int hashval) 1197 const int hashval)
1172{ 1198{
1173 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; 1199 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1174 struct hlist_node *entry, *next; 1200 struct hlist_node *entry;
1201 struct cfq_queue *__cfqq;
1175 1202
1176 hlist_for_each_safe(entry, next, hash_list) { 1203 hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
1177 struct cfq_queue *__cfqq = list_entry_qhash(entry);
1178 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); 1204 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
1179 1205
1180 if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) 1206 if (__cfqq->key == key && (__p == prio || !prio))
1181 return __cfqq; 1207 return __cfqq;
1182 } 1208 }
1183 1209
@@ -1190,19 +1216,19 @@ cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1190 return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); 1216 return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1191} 1217}
1192 1218
1193static void cfq_free_io_context(struct cfq_io_context *cic) 1219static void cfq_free_io_context(struct io_context *ioc)
1194{ 1220{
1195 struct cfq_io_context *__cic; 1221 struct cfq_io_context *__cic;
1196 struct list_head *entry, *next; 1222 struct rb_node *n;
1197 int freed = 1; 1223 int freed = 0;
1198 1224
1199 list_for_each_safe(entry, next, &cic->list) { 1225 while ((n = rb_first(&ioc->cic_root)) != NULL) {
1200 __cic = list_entry(entry, struct cfq_io_context, list); 1226 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1227 rb_erase(&__cic->rb_node, &ioc->cic_root);
1201 kmem_cache_free(cfq_ioc_pool, __cic); 1228 kmem_cache_free(cfq_ioc_pool, __cic);
1202 freed++; 1229 freed++;
1203 } 1230 }
1204 1231
1205 kmem_cache_free(cfq_ioc_pool, cic);
1206 if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone) 1232 if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
1207 complete(ioc_gone); 1233 complete(ioc_gone);
1208} 1234}
@@ -1210,8 +1236,7 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
1210static void cfq_trim(struct io_context *ioc) 1236static void cfq_trim(struct io_context *ioc)
1211{ 1237{
1212 ioc->set_ioprio = NULL; 1238 ioc->set_ioprio = NULL;
1213 if (ioc->cic) 1239 cfq_free_io_context(ioc);
1214 cfq_free_io_context(ioc->cic);
1215} 1240}
1216 1241
1217/* 1242/*
@@ -1250,26 +1275,26 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1250 spin_unlock(q->queue_lock); 1275 spin_unlock(q->queue_lock);
1251} 1276}
1252 1277
1253static void cfq_exit_io_context(struct cfq_io_context *cic) 1278static void cfq_exit_io_context(struct io_context *ioc)
1254{ 1279{
1255 struct cfq_io_context *__cic; 1280 struct cfq_io_context *__cic;
1256 struct list_head *entry;
1257 unsigned long flags; 1281 unsigned long flags;
1258 1282 struct rb_node *n;
1259 local_irq_save(flags);
1260 1283
1261 /* 1284 /*
1262 * put the reference this task is holding to the various queues 1285 * put the reference this task is holding to the various queues
1263 */ 1286 */
1264 read_lock(&cfq_exit_lock); 1287 read_lock_irqsave(&cfq_exit_lock, flags);
1265 list_for_each(entry, &cic->list) { 1288
1266 __cic = list_entry(entry, struct cfq_io_context, list); 1289 n = rb_first(&ioc->cic_root);
1290 while (n != NULL) {
1291 __cic = rb_entry(n, struct cfq_io_context, rb_node);
1292
1267 cfq_exit_single_io_context(__cic); 1293 cfq_exit_single_io_context(__cic);
1294 n = rb_next(n);
1268 } 1295 }
1269 1296
1270 cfq_exit_single_io_context(cic); 1297 read_unlock_irqrestore(&cfq_exit_lock, flags);
1271 read_unlock(&cfq_exit_lock);
1272 local_irq_restore(flags);
1273} 1298}
1274 1299
1275static struct cfq_io_context * 1300static struct cfq_io_context *
@@ -1278,10 +1303,10 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1278 struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); 1303 struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
1279 1304
1280 if (cic) { 1305 if (cic) {
1281 INIT_LIST_HEAD(&cic->list); 1306 RB_CLEAR(&cic->rb_node);
1307 cic->key = NULL;
1282 cic->cfqq[ASYNC] = NULL; 1308 cic->cfqq[ASYNC] = NULL;
1283 cic->cfqq[SYNC] = NULL; 1309 cic->cfqq[SYNC] = NULL;
1284 cic->key = NULL;
1285 cic->last_end_request = jiffies; 1310 cic->last_end_request = jiffies;
1286 cic->ttime_total = 0; 1311 cic->ttime_total = 0;
1287 cic->ttime_samples = 0; 1312 cic->ttime_samples = 0;
@@ -1373,15 +1398,17 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
1373static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) 1398static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
1374{ 1399{
1375 struct cfq_io_context *cic; 1400 struct cfq_io_context *cic;
1401 struct rb_node *n;
1376 1402
1377 write_lock(&cfq_exit_lock); 1403 write_lock(&cfq_exit_lock);
1378 1404
1379 cic = ioc->cic; 1405 n = rb_first(&ioc->cic_root);
1380 1406 while (n != NULL) {
1381 changed_ioprio(cic); 1407 cic = rb_entry(n, struct cfq_io_context, rb_node);
1382 1408
1383 list_for_each_entry(cic, &cic->list, list)
1384 changed_ioprio(cic); 1409 changed_ioprio(cic);
1410 n = rb_next(n);
1411 }
1385 1412
1386 write_unlock(&cfq_exit_lock); 1413 write_unlock(&cfq_exit_lock);
1387 1414
@@ -1445,14 +1472,67 @@ out:
1445 return cfqq; 1472 return cfqq;
1446} 1473}
1447 1474
1475static struct cfq_io_context *
1476cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1477{
1478 struct rb_node *n = ioc->cic_root.rb_node;
1479 struct cfq_io_context *cic;
1480 void *key = cfqd;
1481
1482 while (n) {
1483 cic = rb_entry(n, struct cfq_io_context, rb_node);
1484
1485 if (key < cic->key)
1486 n = n->rb_left;
1487 else if (key > cic->key)
1488 n = n->rb_right;
1489 else
1490 return cic;
1491 }
1492
1493 return NULL;
1494}
1495
1496static inline void
1497cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1498 struct cfq_io_context *cic)
1499{
1500 struct rb_node **p = &ioc->cic_root.rb_node;
1501 struct rb_node *parent = NULL;
1502 struct cfq_io_context *__cic;
1503
1504 read_lock(&cfq_exit_lock);
1505
1506 cic->ioc = ioc;
1507 cic->key = cfqd;
1508
1509 ioc->set_ioprio = cfq_ioc_set_ioprio;
1510
1511 while (*p) {
1512 parent = *p;
1513 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1514
1515 if (cic->key < __cic->key)
1516 p = &(*p)->rb_left;
1517 else if (cic->key > __cic->key)
1518 p = &(*p)->rb_right;
1519 else
1520 BUG();
1521 }
1522
1523 rb_link_node(&cic->rb_node, parent, p);
1524 rb_insert_color(&cic->rb_node, &ioc->cic_root);
1525 list_add(&cic->queue_list, &cfqd->cic_list);
1526 read_unlock(&cfq_exit_lock);
1527}
1528
1448/* 1529/*
1449 * Setup general io context and cfq io context. There can be several cfq 1530 * Setup general io context and cfq io context. There can be several cfq
1450 * io contexts per general io context, if this process is doing io to more 1531 * io contexts per general io context, if this process is doing io to more
1451 * than one device managed by cfq. Note that caller is holding a reference to 1532 * than one device managed by cfq.
1452 * cfqq, so we don't need to worry about it disappearing
1453 */ 1533 */
1454static struct cfq_io_context * 1534static struct cfq_io_context *
1455cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) 1535cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1456{ 1536{
1457 struct io_context *ioc = NULL; 1537 struct io_context *ioc = NULL;
1458 struct cfq_io_context *cic; 1538 struct cfq_io_context *cic;
@@ -1463,88 +1543,15 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
1463 if (!ioc) 1543 if (!ioc)
1464 return NULL; 1544 return NULL;
1465 1545
1466restart: 1546 cic = cfq_cic_rb_lookup(cfqd, ioc);
1467 if ((cic = ioc->cic) == NULL) { 1547 if (cic)
1468 cic = cfq_alloc_io_context(cfqd, gfp_mask); 1548 goto out;
1469
1470 if (cic == NULL)
1471 goto err;
1472
1473 /*
1474 * manually increment generic io_context usage count, it
1475 * cannot go away since we are already holding one ref to it
1476 */
1477 cic->ioc = ioc;
1478 cic->key = cfqd;
1479 read_lock(&cfq_exit_lock);
1480 ioc->set_ioprio = cfq_ioc_set_ioprio;
1481 ioc->cic = cic;
1482 list_add(&cic->queue_list, &cfqd->cic_list);
1483 read_unlock(&cfq_exit_lock);
1484 } else {
1485 struct cfq_io_context *__cic;
1486
1487 /*
1488 * the first cic on the list is actually the head itself
1489 */
1490 if (cic->key == cfqd)
1491 goto out;
1492
1493 if (unlikely(!cic->key)) {
1494 read_lock(&cfq_exit_lock);
1495 if (list_empty(&cic->list))
1496 ioc->cic = NULL;
1497 else
1498 ioc->cic = list_entry(cic->list.next,
1499 struct cfq_io_context,
1500 list);
1501 read_unlock(&cfq_exit_lock);
1502 kmem_cache_free(cfq_ioc_pool, cic);
1503 atomic_dec(&ioc_count);
1504 goto restart;
1505 }
1506
1507 /*
1508 * cic exists, check if we already are there. linear search
1509 * should be ok here, the list will usually not be more than
1510 * 1 or a few entries long
1511 */
1512 list_for_each_entry(__cic, &cic->list, list) {
1513 /*
1514 * this process is already holding a reference to
1515 * this queue, so no need to get one more
1516 */
1517 if (__cic->key == cfqd) {
1518 cic = __cic;
1519 goto out;
1520 }
1521 if (unlikely(!__cic->key)) {
1522 read_lock(&cfq_exit_lock);
1523 list_del(&__cic->list);
1524 read_unlock(&cfq_exit_lock);
1525 kmem_cache_free(cfq_ioc_pool, __cic);
1526 atomic_dec(&ioc_count);
1527 goto restart;
1528 }
1529 }
1530 1549
1531 /* 1550 cic = cfq_alloc_io_context(cfqd, gfp_mask);
1532 * nope, process doesn't have a cic assoicated with this 1551 if (cic == NULL)
1533 * cfqq yet. get a new one and add to list 1552 goto err;
1534 */
1535 __cic = cfq_alloc_io_context(cfqd, gfp_mask);
1536 if (__cic == NULL)
1537 goto err;
1538
1539 __cic->ioc = ioc;
1540 __cic->key = cfqd;
1541 read_lock(&cfq_exit_lock);
1542 list_add(&__cic->list, &cic->list);
1543 list_add(&__cic->queue_list, &cfqd->cic_list);
1544 read_unlock(&cfq_exit_lock);
1545 cic = __cic;
1546 }
1547 1553
1554 cfq_cic_link(cfqd, ioc, cic);
1548out: 1555out:
1549 return cic; 1556 return cic;
1550err: 1557err:
@@ -1577,7 +1584,33 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1577 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; 1584 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1578} 1585}
1579 1586
1580#define sample_valid(samples) ((samples) > 80) 1587static void
1588cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1589 struct cfq_rq *crq)
1590{
1591 sector_t sdist;
1592 u64 total;
1593
1594 if (cic->last_request_pos < crq->request->sector)
1595 sdist = crq->request->sector - cic->last_request_pos;
1596 else
1597 sdist = cic->last_request_pos - crq->request->sector;
1598
1599 /*
1600 * Don't allow the seek distance to get too large from the
1601 * odd fragment, pagein, etc
1602 */
1603 if (cic->seek_samples <= 60) /* second&third seek */
1604 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1605 else
1606 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1607
1608 cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1609 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1610 total = cic->seek_total + (cic->seek_samples/2);
1611 do_div(total, cic->seek_samples);
1612 cic->seek_mean = (sector_t)total;
1613}
1581 1614
1582/* 1615/*
1583 * Disable idle window if the process thinks too long or seeks so much that 1616 * Disable idle window if the process thinks too long or seeks so much that
@@ -1690,9 +1723,11 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1690 cic = crq->io_context; 1723 cic = crq->io_context;
1691 1724
1692 cfq_update_io_thinktime(cfqd, cic); 1725 cfq_update_io_thinktime(cfqd, cic);
1726 cfq_update_io_seektime(cfqd, cic, crq);
1693 cfq_update_idle_window(cfqd, cfqq, cic); 1727 cfq_update_idle_window(cfqd, cfqq, cic);
1694 1728
1695 cic->last_queue = jiffies; 1729 cic->last_queue = jiffies;
1730 cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;
1696 1731
1697 if (cfqq == cfqd->active_queue) { 1732 if (cfqq == cfqd->active_queue) {
1698 /* 1733 /*
@@ -1825,14 +1860,6 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
1825 cfq_resort_rr_list(cfqq, 0); 1860 cfq_resort_rr_list(cfqq, 0);
1826} 1861}
1827 1862
1828static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
1829{
1830 if (rw == READ || process_sync(task))
1831 return task->pid;
1832
1833 return CFQ_KEY_ASYNC;
1834}
1835
1836static inline int 1863static inline int
1837__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1864__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1838 struct task_struct *task, int rw) 1865 struct task_struct *task, int rw)
@@ -1965,7 +1992,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1965 1992
1966 might_sleep_if(gfp_mask & __GFP_WAIT); 1993 might_sleep_if(gfp_mask & __GFP_WAIT);
1967 1994
1968 cic = cfq_get_io_context(cfqd, key, gfp_mask); 1995 cic = cfq_get_io_context(cfqd, gfp_mask);
1969 1996
1970 spin_lock_irqsave(q->queue_lock, flags); 1997 spin_lock_irqsave(q->queue_lock, flags);
1971 1998
@@ -2133,11 +2160,14 @@ static void cfq_exit_queue(elevator_t *e)
2133 request_queue_t *q = cfqd->queue; 2160 request_queue_t *q = cfqd->queue;
2134 2161
2135 cfq_shutdown_timer_wq(cfqd); 2162 cfq_shutdown_timer_wq(cfqd);
2163
2136 write_lock(&cfq_exit_lock); 2164 write_lock(&cfq_exit_lock);
2137 spin_lock_irq(q->queue_lock); 2165 spin_lock_irq(q->queue_lock);
2166
2138 if (cfqd->active_queue) 2167 if (cfqd->active_queue)
2139 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); 2168 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
2140 while(!list_empty(&cfqd->cic_list)) { 2169
2170 while (!list_empty(&cfqd->cic_list)) {
2141 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, 2171 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2142 struct cfq_io_context, 2172 struct cfq_io_context,
2143 queue_list); 2173 queue_list);
@@ -2152,6 +2182,7 @@ static void cfq_exit_queue(elevator_t *e)
2152 cic->key = NULL; 2182 cic->key = NULL;
2153 list_del_init(&cic->queue_list); 2183 list_del_init(&cic->queue_list);
2154 } 2184 }
2185
2155 spin_unlock_irq(q->queue_lock); 2186 spin_unlock_irq(q->queue_lock);
2156 write_unlock(&cfq_exit_lock); 2187 write_unlock(&cfq_exit_lock);
2157 2188
@@ -2191,7 +2222,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2191 if (!cfqd->cfq_hash) 2222 if (!cfqd->cfq_hash)
2192 goto out_cfqhash; 2223 goto out_cfqhash;
2193 2224
2194 cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool); 2225 cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
2195 if (!cfqd->crq_pool) 2226 if (!cfqd->crq_pool)
2196 goto out_crqpool; 2227 goto out_crqpool;
2197 2228
@@ -2227,7 +2258,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2227 cfqd->cfq_slice[1] = cfq_slice_sync; 2258 cfqd->cfq_slice[1] = cfq_slice_sync;
2228 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2259 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2229 cfqd->cfq_slice_idle = cfq_slice_idle; 2260 cfqd->cfq_slice_idle = cfq_slice_idle;
2230 cfqd->cfq_max_depth = cfq_max_depth;
2231 2261
2232 return 0; 2262 return 0;
2233out_crqpool: 2263out_crqpool:
@@ -2310,7 +2340,6 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2310SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2340SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2311SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2341SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2312SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 2342SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2313SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
2314#undef SHOW_FUNCTION 2343#undef SHOW_FUNCTION
2315 2344
2316#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2345#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2339,7 +2368,6 @@ STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2339STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2368STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2340STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2369STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2341STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); 2370STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2342STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
2343#undef STORE_FUNCTION 2371#undef STORE_FUNCTION
2344 2372
2345#define CFQ_ATTR(name) \ 2373#define CFQ_ATTR(name) \
@@ -2356,7 +2384,6 @@ static struct elv_fs_entry cfq_attrs[] = {
2356 CFQ_ATTR(slice_async), 2384 CFQ_ATTR(slice_async),
2357 CFQ_ATTR(slice_async_rq), 2385 CFQ_ATTR(slice_async_rq),
2358 CFQ_ATTR(slice_idle), 2386 CFQ_ATTR(slice_idle),
2359 CFQ_ATTR(max_depth),
2360 __ATTR_NULL 2387 __ATTR_NULL
2361}; 2388};
2362 2389
diff --git a/block/genhd.c b/block/genhd.c
index 64510fd88621..db4c60c802d6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -454,8 +454,8 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
454 disk_round_stats(disk); 454 disk_round_stats(disk);
455 preempt_enable(); 455 preempt_enable();
456 return sprintf(page, 456 return sprintf(page,
457 "%8u %8u %8llu %8u " 457 "%8lu %8lu %8llu %8u "
458 "%8u %8u %8llu %8u " 458 "%8lu %8lu %8llu %8u "
459 "%8u %8u %8u" 459 "%8u %8u %8u"
460 "\n", 460 "\n",
461 disk_stat_read(disk, ios[READ]), 461 disk_stat_read(disk, ios[READ]),
@@ -649,7 +649,7 @@ static int diskstats_show(struct seq_file *s, void *v)
649 preempt_disable(); 649 preempt_disable();
650 disk_round_stats(gp); 650 disk_round_stats(gp);
651 preempt_enable(); 651 preempt_enable();
652 seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n", 652 seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n",
653 gp->major, n + gp->first_minor, disk_name(gp, n, buf), 653 gp->major, n + gp->first_minor, disk_name(gp, n, buf),
654 disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]), 654 disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
655 (unsigned long long)disk_stat_read(gp, sectors[0]), 655 (unsigned long long)disk_stat_read(gp, sectors[0]),
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 062067fa7ead..5b26af8597f3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -785,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
785 t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); 785 t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
786 t->max_segment_size = min(t->max_segment_size,b->max_segment_size); 786 t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
787 t->hardsect_size = max(t->hardsect_size,b->hardsect_size); 787 t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
788 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
789 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
788} 790}
789 791
790EXPORT_SYMBOL(blk_queue_stack_limits); 792EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -906,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
906 __FUNCTION__, depth); 908 __FUNCTION__, depth);
907 } 909 }
908 910
909 tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC); 911 tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
910 if (!tag_index) 912 if (!tag_index)
911 goto fail; 913 goto fail;
912 914
913 nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; 915 nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
914 tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); 916 tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
915 if (!tag_map) 917 if (!tag_map)
916 goto fail; 918 goto fail;
917 919
918 memset(tag_index, 0, depth * sizeof(struct request *));
919 memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
920 tags->real_max_depth = depth; 920 tags->real_max_depth = depth;
921 tags->max_depth = depth; 921 tags->max_depth = depth;
922 tags->tag_index = tag_index; 922 tags->tag_index = tag_index;
@@ -2479,10 +2479,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
2479 rq->rq_disk = bd_disk; 2479 rq->rq_disk = bd_disk;
2480 rq->flags |= REQ_NOMERGE; 2480 rq->flags |= REQ_NOMERGE;
2481 rq->end_io = done; 2481 rq->end_io = done;
2482 elv_add_request(q, rq, where, 1); 2482 WARN_ON(irqs_disabled());
2483 generic_unplug_device(q); 2483 spin_lock_irq(q->queue_lock);
2484 __elv_add_request(q, rq, where, 1);
2485 __generic_unplug_device(q);
2486 spin_unlock_irq(q->queue_lock);
2484} 2487}
2485
2486EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); 2488EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
2487 2489
2488/** 2490/**
@@ -3512,7 +3514,7 @@ int __init blk_dev_init(void)
3512 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3514 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3513 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); 3515 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3514 3516
3515 for_each_cpu(i) 3517 for_each_possible_cpu(i)
3516 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); 3518 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3517 3519
3518 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); 3520 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
@@ -3537,11 +3539,17 @@ void put_io_context(struct io_context *ioc)
3537 BUG_ON(atomic_read(&ioc->refcount) == 0); 3539 BUG_ON(atomic_read(&ioc->refcount) == 0);
3538 3540
3539 if (atomic_dec_and_test(&ioc->refcount)) { 3541 if (atomic_dec_and_test(&ioc->refcount)) {
3542 struct cfq_io_context *cic;
3543
3540 rcu_read_lock(); 3544 rcu_read_lock();
3541 if (ioc->aic && ioc->aic->dtor) 3545 if (ioc->aic && ioc->aic->dtor)
3542 ioc->aic->dtor(ioc->aic); 3546 ioc->aic->dtor(ioc->aic);
3543 if (ioc->cic && ioc->cic->dtor) 3547 if (ioc->cic_root.rb_node != NULL) {
3544 ioc->cic->dtor(ioc->cic); 3548 struct rb_node *n = rb_first(&ioc->cic_root);
3549
3550 cic = rb_entry(n, struct cfq_io_context, rb_node);
3551 cic->dtor(ioc);
3552 }
3545 rcu_read_unlock(); 3553 rcu_read_unlock();
3546 3554
3547 kmem_cache_free(iocontext_cachep, ioc); 3555 kmem_cache_free(iocontext_cachep, ioc);
@@ -3554,6 +3562,7 @@ void exit_io_context(void)
3554{ 3562{
3555 unsigned long flags; 3563 unsigned long flags;
3556 struct io_context *ioc; 3564 struct io_context *ioc;
3565 struct cfq_io_context *cic;
3557 3566
3558 local_irq_save(flags); 3567 local_irq_save(flags);
3559 task_lock(current); 3568 task_lock(current);
@@ -3565,9 +3574,11 @@ void exit_io_context(void)
3565 3574
3566 if (ioc->aic && ioc->aic->exit) 3575 if (ioc->aic && ioc->aic->exit)
3567 ioc->aic->exit(ioc->aic); 3576 ioc->aic->exit(ioc->aic);
3568 if (ioc->cic && ioc->cic->exit) 3577 if (ioc->cic_root.rb_node != NULL) {
3569 ioc->cic->exit(ioc->cic); 3578 cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
3570 3579 cic->exit(ioc);
3580 }
3581
3571 put_io_context(ioc); 3582 put_io_context(ioc);
3572} 3583}
3573 3584
@@ -3596,7 +3607,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
3596 ret->last_waited = jiffies; /* doesn't matter... */ 3607 ret->last_waited = jiffies; /* doesn't matter... */
3597 ret->nr_batch_requests = 0; /* because this is 0 */ 3608 ret->nr_batch_requests = 0; /* because this is 0 */
3598 ret->aic = NULL; 3609 ret->aic = NULL;
3599 ret->cic = NULL; 3610 ret->cic_root.rb_node = NULL;
3600 tsk->io_context = ret; 3611 tsk->io_context = ret;
3601 } 3612 }
3602 3613
diff --git a/drivers/Kconfig b/drivers/Kconfig
index bddf431bbb72..9f5c0da57c90 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -70,4 +70,6 @@ source "drivers/sn/Kconfig"
70 70
71source "drivers/edac/Kconfig" 71source "drivers/edac/Kconfig"
72 72
73source "drivers/rtc/Kconfig"
74
73endmenu 75endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 5c69b86db624..424955274e60 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_USB_GADGET) += usb/gadget/
56obj-$(CONFIG_GAMEPORT) += input/gameport/ 56obj-$(CONFIG_GAMEPORT) += input/gameport/
57obj-$(CONFIG_INPUT) += input/ 57obj-$(CONFIG_INPUT) += input/
58obj-$(CONFIG_I2O) += message/ 58obj-$(CONFIG_I2O) += message/
59obj-$(CONFIG_RTC_LIB) += rtc/
59obj-$(CONFIG_I2C) += i2c/ 60obj-$(CONFIG_I2C) += i2c/
60obj-$(CONFIG_W1) += w1/ 61obj-$(CONFIG_W1) += w1/
61obj-$(CONFIG_HWMON) += hwmon/ 62obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index ac5bbaedac1b..13b5fd5854a8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -156,12 +156,10 @@ acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
156{ 156{
157 if (efi_enabled) { 157 if (efi_enabled) {
158 addr->pointer_type = ACPI_PHYSICAL_POINTER; 158 addr->pointer_type = ACPI_PHYSICAL_POINTER;
159 if (efi.acpi20) 159 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
160 addr->pointer.physical = 160 addr->pointer.physical = efi.acpi20;
161 (acpi_physical_address) virt_to_phys(efi.acpi20); 161 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
162 else if (efi.acpi) 162 addr->pointer.physical = efi.acpi;
163 addr->pointer.physical =
164 (acpi_physical_address) virt_to_phys(efi.acpi);
165 else { 163 else {
166 printk(KERN_ERR PREFIX 164 printk(KERN_ERR PREFIX
167 "System description tables not found\n"); 165 "System description tables not found\n");
@@ -182,22 +180,14 @@ acpi_status
182acpi_os_map_memory(acpi_physical_address phys, acpi_size size, 180acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
183 void __iomem ** virt) 181 void __iomem ** virt)
184{ 182{
185 if (efi_enabled) { 183 if (phys > ULONG_MAX) {
186 if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { 184 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
187 *virt = (void __iomem *)phys_to_virt(phys); 185 return AE_BAD_PARAMETER;
188 } else {
189 *virt = ioremap(phys, size);
190 }
191 } else {
192 if (phys > ULONG_MAX) {
193 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
194 return AE_BAD_PARAMETER;
195 }
196 /*
197 * ioremap checks to ensure this is in reserved space
198 */
199 *virt = ioremap((unsigned long)phys, size);
200 } 186 }
187 /*
188 * ioremap checks to ensure this is in reserved space
189 */
190 *virt = ioremap((unsigned long)phys, size);
201 191
202 if (!*virt) 192 if (!*virt)
203 return AE_NO_MEMORY; 193 return AE_NO_MEMORY;
@@ -409,18 +399,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
409{ 399{
410 u32 dummy; 400 u32 dummy;
411 void __iomem *virt_addr; 401 void __iomem *virt_addr;
412 int iomem = 0;
413 402
414 if (efi_enabled) { 403 virt_addr = ioremap(phys_addr, width);
415 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
416 /* HACK ALERT! We can use readb/w/l on real memory too.. */
417 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
418 } else {
419 iomem = 1;
420 virt_addr = ioremap(phys_addr, width);
421 }
422 } else
423 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
424 if (!value) 404 if (!value)
425 value = &dummy; 405 value = &dummy;
426 406
@@ -438,10 +418,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
438 BUG(); 418 BUG();
439 } 419 }
440 420
441 if (efi_enabled) { 421 iounmap(virt_addr);
442 if (iomem)
443 iounmap(virt_addr);
444 }
445 422
446 return AE_OK; 423 return AE_OK;
447} 424}
@@ -450,18 +427,8 @@ acpi_status
450acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 427acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
451{ 428{
452 void __iomem *virt_addr; 429 void __iomem *virt_addr;
453 int iomem = 0;
454 430
455 if (efi_enabled) { 431 virt_addr = ioremap(phys_addr, width);
456 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
457 /* HACK ALERT! We can use writeb/w/l on real memory too */
458 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
459 } else {
460 iomem = 1;
461 virt_addr = ioremap(phys_addr, width);
462 }
463 } else
464 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
465 432
466 switch (width) { 433 switch (width) {
467 case 8: 434 case 8:
@@ -477,8 +444,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
477 BUG(); 444 BUG();
478 } 445 }
479 446
480 if (iomem) 447 iounmap(virt_addr);
481 iounmap(virt_addr);
482 448
483 return AE_OK; 449 return AE_OK;
484} 450}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 99a3a28594da..713b763884a9 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -246,7 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
246} 246}
247 247
248/* -------------------------------------------------------------------------- 248/* --------------------------------------------------------------------------
249 Common ACPI processor fucntions 249 Common ACPI processor functions
250 -------------------------------------------------------------------------- */ 250 -------------------------------------------------------------------------- */
251 251
252/* 252/*
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 31d4f3ffc265..7f37c7cc5ef1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -587,7 +587,8 @@ int __init acpi_table_init(void)
587 return -ENODEV; 587 return -ENODEV;
588 } 588 }
589 589
590 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); 590 rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
591 sizeof(struct acpi_table_rsdp));
591 if (!rsdp) { 592 if (!rsdp) {
592 printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); 593 printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
593 return -ENODEV; 594 return -ENODEV;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 69f4c7ce9a63..cac09e353be8 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1972,7 +1972,7 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
1972 "(itf %d): No suitable DMA available.\n", lanai->number); 1972 "(itf %d): No suitable DMA available.\n", lanai->number);
1973 return -EBUSY; 1973 return -EBUSY;
1974 } 1974 }
1975 if (pci_set_consistent_dma_mask(pci, 0xFFFFFFFF) != 0) { 1975 if (pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK) != 0) {
1976 printk(KERN_WARNING DEV_LABEL 1976 printk(KERN_WARNING DEV_LABEL
1977 "(itf %d): No suitable DMA available.\n", lanai->number); 1977 "(itf %d): No suitable DMA available.\n", lanai->number);
1978 return -EBUSY; 1978 return -EBUSY;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 105a0d61eb1f..dd547af4681a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -47,16 +47,16 @@ static struct kset_uevent_ops memory_uevent_ops = {
47 .uevent = memory_uevent, 47 .uevent = memory_uevent,
48}; 48};
49 49
50static struct notifier_block *memory_chain; 50static BLOCKING_NOTIFIER_HEAD(memory_chain);
51 51
52int register_memory_notifier(struct notifier_block *nb) 52int register_memory_notifier(struct notifier_block *nb)
53{ 53{
54 return notifier_chain_register(&memory_chain, nb); 54 return blocking_notifier_chain_register(&memory_chain, nb);
55} 55}
56 56
57void unregister_memory_notifier(struct notifier_block *nb) 57void unregister_memory_notifier(struct notifier_block *nb)
58{ 58{
59 notifier_chain_unregister(&memory_chain, nb); 59 blocking_notifier_chain_unregister(&memory_chain, nb);
60} 60}
61 61
62/* 62/*
@@ -140,7 +140,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf)
140 140
141static inline int memory_notify(unsigned long val, void *v) 141static inline int memory_notify(unsigned long val, void *v)
142{ 142{
143 return notifier_call_chain(&memory_chain, val, v); 143 return blocking_notifier_call_chain(&memory_chain, val, v);
144} 144}
145 145
146/* 146/*
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 9bdea2a5cf0e..45bcda544880 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -41,6 +41,7 @@
41#include <linux/timer.h> 41#include <linux/timer.h>
42#include <linux/pci.h> 42#include <linux/pci.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/jiffies.h>
44#include <linux/random.h> 45#include <linux/random.h>
45#include <asm/io.h> 46#include <asm/io.h>
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
@@ -311,11 +312,10 @@ static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
311 CommandsRemaining = CommandAllocationGroupSize; 312 CommandsRemaining = CommandAllocationGroupSize;
312 CommandGroupByteCount = 313 CommandGroupByteCount =
313 CommandsRemaining * CommandAllocationLength; 314 CommandsRemaining * CommandAllocationLength;
314 AllocationPointer = kmalloc(CommandGroupByteCount, GFP_ATOMIC); 315 AllocationPointer = kzalloc(CommandGroupByteCount, GFP_ATOMIC);
315 if (AllocationPointer == NULL) 316 if (AllocationPointer == NULL)
316 return DAC960_Failure(Controller, 317 return DAC960_Failure(Controller,
317 "AUXILIARY STRUCTURE CREATION"); 318 "AUXILIARY STRUCTURE CREATION");
318 memset(AllocationPointer, 0, CommandGroupByteCount);
319 } 319 }
320 Command = (DAC960_Command_T *) AllocationPointer; 320 Command = (DAC960_Command_T *) AllocationPointer;
321 AllocationPointer += CommandAllocationLength; 321 AllocationPointer += CommandAllocationLength;
@@ -2709,14 +2709,12 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
2709 void __iomem *BaseAddress; 2709 void __iomem *BaseAddress;
2710 int i; 2710 int i;
2711 2711
2712 Controller = (DAC960_Controller_T *) 2712 Controller = kzalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
2713 kmalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
2714 if (Controller == NULL) { 2713 if (Controller == NULL) {
2715 DAC960_Error("Unable to allocate Controller structure for " 2714 DAC960_Error("Unable to allocate Controller structure for "
2716 "Controller at\n", NULL); 2715 "Controller at\n", NULL);
2717 return NULL; 2716 return NULL;
2718 } 2717 }
2719 memset(Controller, 0, sizeof(DAC960_Controller_T));
2720 Controller->ControllerNumber = DAC960_ControllerCount; 2718 Controller->ControllerNumber = DAC960_ControllerCount;
2721 DAC960_Controllers[DAC960_ControllerCount++] = Controller; 2719 DAC960_Controllers[DAC960_ControllerCount++] = Controller;
2722 Controller->Bus = PCI_Device->bus->number; 2720 Controller->Bus = PCI_Device->bus->number;
@@ -3657,8 +3655,8 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
3657 (NewEnquiry->EventLogSequenceNumber != 3655 (NewEnquiry->EventLogSequenceNumber !=
3658 OldEnquiry->EventLogSequenceNumber) || 3656 OldEnquiry->EventLogSequenceNumber) ||
3659 Controller->MonitoringTimerCount == 0 || 3657 Controller->MonitoringTimerCount == 0 ||
3660 (jiffies - Controller->SecondaryMonitoringTime 3658 time_after_eq(jiffies, Controller->SecondaryMonitoringTime
3661 >= DAC960_SecondaryMonitoringInterval)) 3659 + DAC960_SecondaryMonitoringInterval))
3662 { 3660 {
3663 Controller->V1.NeedLogicalDriveInformation = true; 3661 Controller->V1.NeedLogicalDriveInformation = true;
3664 Controller->V1.NewEventLogSequenceNumber = 3662 Controller->V1.NewEventLogSequenceNumber =
@@ -5643,8 +5641,8 @@ static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
5643 unsigned int StatusChangeCounter = 5641 unsigned int StatusChangeCounter =
5644 Controller->V2.HealthStatusBuffer->StatusChangeCounter; 5642 Controller->V2.HealthStatusBuffer->StatusChangeCounter;
5645 boolean ForceMonitoringCommand = false; 5643 boolean ForceMonitoringCommand = false;
5646 if (jiffies - Controller->SecondaryMonitoringTime 5644 if (time_after(jiffies, Controller->SecondaryMonitoringTime
5647 > DAC960_SecondaryMonitoringInterval) 5645 + DAC960_SecondaryMonitoringInterval))
5648 { 5646 {
5649 int LogicalDriveNumber; 5647 int LogicalDriveNumber;
5650 for (LogicalDriveNumber = 0; 5648 for (LogicalDriveNumber = 0;
@@ -5672,8 +5670,8 @@ static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
5672 ControllerInfo->ConsistencyChecksActive + 5670 ControllerInfo->ConsistencyChecksActive +
5673 ControllerInfo->RebuildsActive + 5671 ControllerInfo->RebuildsActive +
5674 ControllerInfo->OnlineExpansionsActive == 0 || 5672 ControllerInfo->OnlineExpansionsActive == 0 ||
5675 jiffies - Controller->PrimaryMonitoringTime 5673 time_before(jiffies, Controller->PrimaryMonitoringTime
5676 < DAC960_MonitoringTimerInterval) && 5674 + DAC960_MonitoringTimerInterval)) &&
5677 !ForceMonitoringCommand) 5675 !ForceMonitoringCommand)
5678 { 5676 {
5679 Controller->MonitoringTimer.expires = 5677 Controller->MonitoringTimer.expires =
@@ -5810,8 +5808,8 @@ static void DAC960_Message(DAC960_MessageLevel_T MessageLevel,
5810 Controller->ProgressBufferLength = Length; 5808 Controller->ProgressBufferLength = Length;
5811 if (Controller->EphemeralProgressMessage) 5809 if (Controller->EphemeralProgressMessage)
5812 { 5810 {
5813 if (jiffies - Controller->LastProgressReportTime 5811 if (time_after_eq(jiffies, Controller->LastProgressReportTime
5814 >= DAC960_ProgressReportingInterval) 5812 + DAC960_ProgressReportingInterval))
5815 { 5813 {
5816 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], 5814 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5817 Controller->ControllerNumber, Buffer); 5815 Controller->ControllerNumber, Buffer);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e57ac5a43246..ae0949b3394f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -383,8 +383,9 @@ config BLK_DEV_RAM
383 thus say N here. 383 thus say N here.
384 384
385config BLK_DEV_RAM_COUNT 385config BLK_DEV_RAM_COUNT
386 int "Default number of RAM disks" if BLK_DEV_RAM 386 int "Default number of RAM disks"
387 default "16" 387 default "16"
388 depends on BLK_DEV_RAM
388 help 389 help
389 The default value is 16 RAM disks. Change this if you know what 390 The default value is 16 RAM disks. Change this if you know what
390 are doing. If you boot from a filesystem that needs to be extracted 391 are doing. If you boot from a filesystem that needs to be extracted
@@ -400,13 +401,16 @@ config BLK_DEV_RAM_SIZE
400 8192. 401 8192.
401 402
402config BLK_DEV_INITRD 403config BLK_DEV_INITRD
403 bool "Initial RAM disk (initrd) support" 404 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
404 help 405 help
405 The initial RAM disk is a RAM disk that is loaded by the boot loader 406 The initial RAM filesystem is a ramfs which is loaded by the
406 (loadlin or lilo) and that is mounted as root before the normal boot 407 boot loader (loadlin or lilo) and that is mounted as root
407 procedure. It is typically used to load modules needed to mount the 408 before the normal boot procedure. It is typically used to
408 "real" root file system, etc. See <file:Documentation/initrd.txt> 409 load modules needed to mount the "real" root file system,
409 for details. 410 etc. See <file:Documentation/initrd.txt> for details.
411
412 If RAM disk support (BLK_DEV_RAM) is also included, this
413 also enables initial RAM disk (initrd) support.
410 414
411 415
412config CDROM_PKTCDVD 416config CDROM_PKTCDVD
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
index a5c1c8e871ec..4cb9c1336287 100644
--- a/drivers/block/acsi_slm.c
+++ b/drivers/block/acsi_slm.c
@@ -369,8 +369,6 @@ static ssize_t slm_read( struct file *file, char *buf, size_t count,
369 int length; 369 int length;
370 int end; 370 int end;
371 371
372 if (count < 0)
373 return( -EINVAL );
374 if (!(page = __get_free_page( GFP_KERNEL ))) 372 if (!(page = __get_free_page( GFP_KERNEL )))
375 return( -ENOMEM ); 373 return( -ENOMEM );
376 374
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 32fea55fac48..393b86a3dbf8 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp)
211 return; 211 return;
212 } 212 }
213 213
214 d->bufpool = mempool_create(MIN_BUFS, 214 d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
215 mempool_alloc_slab, mempool_free_slab,
216 buf_pool_cache);
217 if (d->bufpool == NULL) { 215 if (d->bufpool == NULL) {
218 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool " 216 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
219 "for %ld.%ld\n", d->aoemajor, d->aoeminor); 217 "for %ld.%ld\n", d->aoemajor, d->aoeminor);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 71ec9e664383..1b0fd31c57c3 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -996,13 +996,11 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
996 status = -EINVAL; 996 status = -EINVAL;
997 goto cleanup1; 997 goto cleanup1;
998 } 998 }
999 buff = (unsigned char **) kmalloc(MAXSGENTRIES * 999 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1000 sizeof(char *), GFP_KERNEL);
1001 if (!buff) { 1000 if (!buff) {
1002 status = -ENOMEM; 1001 status = -ENOMEM;
1003 goto cleanup1; 1002 goto cleanup1;
1004 } 1003 }
1005 memset(buff, 0, MAXSGENTRIES);
1006 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int), 1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1007 GFP_KERNEL); 1005 GFP_KERNEL);
1008 if (!buff_size) { 1006 if (!buff_size) {
@@ -2729,9 +2727,9 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev,
2729 return; 2727 return;
2730 } 2728 }
2731 } 2729 }
2730default_int_mode:
2732#endif /* CONFIG_PCI_MSI */ 2731#endif /* CONFIG_PCI_MSI */
2733 /* if we get here we're going to use the default interrupt mode */ 2732 /* if we get here we're going to use the default interrupt mode */
2734default_int_mode:
2735 c->intr[SIMPLE_MODE_INT] = pdev->irq; 2733 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2736 return; 2734 return;
2737} 2735}
@@ -2940,13 +2938,12 @@ static void cciss_getgeometry(int cntl_num)
2940 int block_size; 2938 int block_size;
2941 int total_size; 2939 int total_size;
2942 2940
2943 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2941 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2944 if (ld_buff == NULL) 2942 if (ld_buff == NULL)
2945 { 2943 {
2946 printk(KERN_ERR "cciss: out of memory\n"); 2944 printk(KERN_ERR "cciss: out of memory\n");
2947 return; 2945 return;
2948 } 2946 }
2949 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2950 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); 2947 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2951 if (size_buff == NULL) 2948 if (size_buff == NULL)
2952 { 2949 {
@@ -3060,10 +3057,9 @@ static int alloc_cciss_hba(void)
3060 for(i=0; i< MAX_CTLR; i++) { 3057 for(i=0; i< MAX_CTLR; i++) {
3061 if (!hba[i]) { 3058 if (!hba[i]) {
3062 ctlr_info_t *p; 3059 ctlr_info_t *p;
3063 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL); 3060 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3064 if (!p) 3061 if (!p)
3065 goto Enomem; 3062 goto Enomem;
3066 memset(p, 0, sizeof(ctlr_info_t));
3067 for (n = 0; n < NWD; n++) 3063 for (n = 0; n < NWD; n++)
3068 p->gendisk[n] = disk[n]; 3064 p->gendisk[n] = disk[n];
3069 hba[i] = p; 3065 hba[i] = p;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 0e66e904bd8c..597c007fe81b 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1027,12 +1027,11 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1027 int i; 1027 int i;
1028 1028
1029 c = (ctlr_info_t *) hba[cntl_num]; 1029 c = (ctlr_info_t *) hba[cntl_num];
1030 ld_buff = kmalloc(reportlunsize, GFP_KERNEL); 1030 ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
1031 if (ld_buff == NULL) { 1031 if (ld_buff == NULL) {
1032 printk(KERN_ERR "cciss: out of memory\n"); 1032 printk(KERN_ERR "cciss: out of memory\n");
1033 return; 1033 return;
1034 } 1034 }
1035 memset(ld_buff, 0, reportlunsize);
1036 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1035 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1037 if (inq_buff == NULL) { 1036 if (inq_buff == NULL) {
1038 printk(KERN_ERR "cciss: out of memory\n"); 1037 printk(KERN_ERR "cciss: out of memory\n");
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 840919bba76c..bedb689b051f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -170,6 +170,7 @@ static int print_unex = 1;
170#include <linux/mm.h> 170#include <linux/mm.h>
171#include <linux/bio.h> 171#include <linux/bio.h>
172#include <linux/string.h> 172#include <linux/string.h>
173#include <linux/jiffies.h>
173#include <linux/fcntl.h> 174#include <linux/fcntl.h>
174#include <linux/delay.h> 175#include <linux/delay.h>
175#include <linux/mc146818rtc.h> /* CMOS defines */ 176#include <linux/mc146818rtc.h> /* CMOS defines */
@@ -250,6 +251,18 @@ static int irqdma_allocated;
250#include <linux/cdrom.h> /* for the compatibility eject ioctl */ 251#include <linux/cdrom.h> /* for the compatibility eject ioctl */
251#include <linux/completion.h> 252#include <linux/completion.h>
252 253
254/*
255 * Interrupt freeing also means /proc VFS work - dont do it
256 * from interrupt context. We push this work into keventd:
257 */
258static void fd_free_irq_fn(void *data)
259{
260 fd_free_irq();
261}
262
263static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL);
264
265
253static struct request *current_req; 266static struct request *current_req;
254static struct request_queue *floppy_queue; 267static struct request_queue *floppy_queue;
255static void do_fd_request(request_queue_t * q); 268static void do_fd_request(request_queue_t * q);
@@ -735,7 +748,7 @@ static int disk_change(int drive)
735{ 748{
736 int fdc = FDC(drive); 749 int fdc = FDC(drive);
737#ifdef FLOPPY_SANITY_CHECK 750#ifdef FLOPPY_SANITY_CHECK
738 if (jiffies - UDRS->select_date < UDP->select_delay) 751 if (time_before(jiffies, UDRS->select_date + UDP->select_delay))
739 DPRINT("WARNING disk change called early\n"); 752 DPRINT("WARNING disk change called early\n");
740 if (!(FDCS->dor & (0x10 << UNIT(drive))) || 753 if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
741 (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) { 754 (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
@@ -1063,7 +1076,7 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
1063 return 1; 1076 return 1;
1064 } 1077 }
1065 1078
1066 if ((signed)(jiffies - delay) < 0) { 1079 if (time_before(jiffies, delay)) {
1067 del_timer(&fd_timer); 1080 del_timer(&fd_timer);
1068 fd_timer.function = function; 1081 fd_timer.function = function;
1069 fd_timer.expires = delay; 1082 fd_timer.expires = delay;
@@ -1523,7 +1536,7 @@ static void setup_rw_floppy(void)
1523 * again just before spinup completion. Beware that 1536 * again just before spinup completion. Beware that
1524 * after scandrives, we must again wait for selection. 1537 * after scandrives, we must again wait for selection.
1525 */ 1538 */
1526 if ((signed)(ready_date - jiffies) > DP->select_delay) { 1539 if (time_after(ready_date, jiffies + DP->select_delay)) {
1527 ready_date -= DP->select_delay; 1540 ready_date -= DP->select_delay;
1528 function = (timeout_fn) floppy_start; 1541 function = (timeout_fn) floppy_start;
1529 } else 1542 } else
@@ -3811,7 +3824,7 @@ static int check_floppy_change(struct gendisk *disk)
3811 if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY)) 3824 if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
3812 return 1; 3825 return 1;
3813 3826
3814 if (UDP->checkfreq < (int)(jiffies - UDRS->last_checked)) { 3827 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3815 if (floppy_grab_irq_and_dma()) { 3828 if (floppy_grab_irq_and_dma()) {
3816 return 1; 3829 return 1;
3817 } 3830 }
@@ -4433,6 +4446,13 @@ static int floppy_grab_irq_and_dma(void)
4433 return 0; 4446 return 0;
4434 } 4447 }
4435 spin_unlock_irqrestore(&floppy_usage_lock, flags); 4448 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4449
4450 /*
4451 * We might have scheduled a free_irq(), wait it to
4452 * drain first:
4453 */
4454 flush_scheduled_work();
4455
4436 if (fd_request_irq()) { 4456 if (fd_request_irq()) {
4437 DPRINT("Unable to grab IRQ%d for the floppy driver\n", 4457 DPRINT("Unable to grab IRQ%d for the floppy driver\n",
4438 FLOPPY_IRQ); 4458 FLOPPY_IRQ);
@@ -4522,7 +4542,7 @@ static void floppy_release_irq_and_dma(void)
4522 if (irqdma_allocated) { 4542 if (irqdma_allocated) {
4523 fd_disable_dma(); 4543 fd_disable_dma();
4524 fd_free_dma(); 4544 fd_free_dma();
4525 fd_free_irq(); 4545 schedule_work(&fd_free_irq_work);
4526 irqdma_allocated = 0; 4546 irqdma_allocated = 0;
4527 } 4547 }
4528 set_dor(0, ~0, 8); 4548 set_dor(0, ~0, 8);
@@ -4633,6 +4653,8 @@ void cleanup_module(void)
4633 /* eject disk, if any */ 4653 /* eject disk, if any */
4634 fd_eject(0); 4654 fd_eject(0);
4635 4655
4656 flush_scheduled_work(); /* fd_free_irq() might be pending */
4657
4636 wait_for_completion(&device_release); 4658 wait_for_completion(&device_release);
4637} 4659}
4638 4660
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 74bf0255e98f..9c3b94e8f03b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -839,7 +839,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
839 839
840 set_blocksize(bdev, lo_blocksize); 840 set_blocksize(bdev, lo_blocksize);
841 841
842 kernel_thread(loop_thread, lo, CLONE_KERNEL); 842 error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
843 if (error < 0)
844 goto out_putf;
843 wait_for_completion(&lo->lo_done); 845 wait_for_completion(&lo->lo_done);
844 return 0; 846 return 0;
845 847
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
index 08d858ad64db..41a237c5957d 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/block/paride/bpck6.c
@@ -224,10 +224,9 @@ static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose )
224 224
225static int bpck6_init_proto(PIA *pi) 225static int bpck6_init_proto(PIA *pi)
226{ 226{
227 Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL); 227 Interface *p = kzalloc(sizeof(Interface), GFP_KERNEL);
228 228
229 if (p) { 229 if (p) {
230 memset(p, 0, sizeof(Interface));
231 pi->private = (unsigned long)p; 230 pi->private = (unsigned long)p;
232 return 0; 231 return 0;
233 } 232 }
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 62d2464c12f2..2403721f9db1 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -151,6 +151,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
151#include <linux/cdrom.h> /* for the eject ioctl */ 151#include <linux/cdrom.h> /* for the eject ioctl */
152#include <linux/blkdev.h> 152#include <linux/blkdev.h>
153#include <linux/blkpg.h> 153#include <linux/blkpg.h>
154#include <linux/kernel.h>
154#include <asm/uaccess.h> 155#include <asm/uaccess.h>
155#include <linux/sched.h> 156#include <linux/sched.h>
156#include <linux/workqueue.h> 157#include <linux/workqueue.h>
@@ -275,7 +276,7 @@ static void pd_print_error(struct pd_unit *disk, char *msg, int status)
275 int i; 276 int i;
276 277
277 printk("%s: %s: status = 0x%x =", disk->name, msg, status); 278 printk("%s: %s: status = 0x%x =", disk->name, msg, status);
278 for (i = 0; i < 18; i++) 279 for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
279 if (status & (1 << i)) 280 if (status & (1 << i))
280 printk(" %s", pd_errs[i]); 281 printk(" %s", pd_errs[i]);
281 printk("\n"); 282 printk("\n");
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6f5df0fad703..79b868254032 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -643,7 +643,8 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
643 643
644static int __init pg_init(void) 644static int __init pg_init(void)
645{ 645{
646 int unit, err = 0; 646 int unit;
647 int err;
647 648
648 if (disable){ 649 if (disable){
649 err = -1; 650 err = -1;
@@ -657,16 +658,17 @@ static int __init pg_init(void)
657 goto out; 658 goto out;
658 } 659 }
659 660
660 if (register_chrdev(major, name, &pg_fops)) { 661 err = register_chrdev(major, name, &pg_fops);
662 if (err < 0) {
661 printk("pg_init: unable to get major number %d\n", major); 663 printk("pg_init: unable to get major number %d\n", major);
662 for (unit = 0; unit < PG_UNITS; unit++) { 664 for (unit = 0; unit < PG_UNITS; unit++) {
663 struct pg *dev = &devices[unit]; 665 struct pg *dev = &devices[unit];
664 if (dev->present) 666 if (dev->present)
665 pi_release(dev->pi); 667 pi_release(dev->pi);
666 } 668 }
667 err = -1;
668 goto out; 669 goto out;
669 } 670 }
671 major = err; /* In case the user specified `major=0' (dynamic) */
670 pg_class = class_create(THIS_MODULE, "pg"); 672 pg_class = class_create(THIS_MODULE, "pg");
671 if (IS_ERR(pg_class)) { 673 if (IS_ERR(pg_class)) {
672 err = PTR_ERR(pg_class); 674 err = PTR_ERR(pg_class);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 715ae5dc88fb..d2013d362403 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -943,7 +943,8 @@ static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count,
943 943
944static int __init pt_init(void) 944static int __init pt_init(void)
945{ 945{
946 int unit, err = 0; 946 int unit;
947 int err;
947 948
948 if (disable) { 949 if (disable) {
949 err = -1; 950 err = -1;
@@ -955,14 +956,15 @@ static int __init pt_init(void)
955 goto out; 956 goto out;
956 } 957 }
957 958
958 if (register_chrdev(major, name, &pt_fops)) { 959 err = register_chrdev(major, name, &pt_fops);
960 if (err < 0) {
959 printk("pt_init: unable to get major number %d\n", major); 961 printk("pt_init: unable to get major number %d\n", major);
960 for (unit = 0; unit < PT_UNITS; unit++) 962 for (unit = 0; unit < PT_UNITS; unit++)
961 if (pt[unit].present) 963 if (pt[unit].present)
962 pi_release(pt[unit].pi); 964 pi_release(pt[unit].pi);
963 err = -1;
964 goto out; 965 goto out;
965 } 966 }
967 major = err;
966 pt_class = class_create(THIS_MODULE, "pt"); 968 pt_class = class_create(THIS_MODULE, "pt");
967 if (IS_ERR(pt_class)) { 969 if (IS_ERR(pt_class)) {
968 err = PTR_ERR(pt_class); 970 err = PTR_ERR(pt_class);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1d261f985f31..a04f60693c39 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
230 return 1; 230 return 1;
231} 231}
232 232
233static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
234{
235 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
236}
237
238static void pkt_rb_free(void *ptr, void *data)
239{
240 kfree(ptr);
241}
242
243static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) 233static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
244{ 234{
245 struct rb_node *n = rb_next(&node->rb_node); 235 struct rb_node *n = rb_next(&node->rb_node);
@@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file)
2073} 2063}
2074 2064
2075 2065
2076static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
2077{
2078 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
2079}
2080
2081static void psd_pool_free(void *ptr, void *data)
2082{
2083 kfree(ptr);
2084}
2085
2086static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) 2066static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
2087{ 2067{
2088 struct packet_stacked_data *psd = bio->bi_private; 2068 struct packet_stacked_data *psd = bio->bi_private;
@@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2475 if (!pd) 2455 if (!pd)
2476 return ret; 2456 return ret;
2477 2457
2478 pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); 2458 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
2459 sizeof(struct pkt_rb_node));
2479 if (!pd->rb_pool) 2460 if (!pd->rb_pool)
2480 goto out_mem; 2461 goto out_mem;
2481 2462
@@ -2639,7 +2620,8 @@ static int __init pkt_init(void)
2639{ 2620{
2640 int ret; 2621 int ret;
2641 2622
2642 psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL); 2623 psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
2624 sizeof(struct packet_stacked_data));
2643 if (!psd_pool) 2625 if (!psd_pool)
2644 return -ENOMEM; 2626 return -ENOMEM;
2645 2627
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index c16e66b9c7a7..f7d4c65a7b8c 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -50,6 +50,7 @@
50#include <linux/timer.h> 50#include <linux/timer.h>
51#include <linux/pci.h> 51#include <linux/pci.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/dma-mapping.h>
53 54
54#include <linux/fcntl.h> /* O_ACCMODE */ 55#include <linux/fcntl.h> /* O_ACCMODE */
55#include <linux/hdreg.h> /* HDIO_GETGEO */ 56#include <linux/hdreg.h> /* HDIO_GETGEO */
@@ -881,8 +882,8 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
881 printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n", 882 printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
882 card->card_number, dev->bus->number, dev->devfn); 883 card->card_number, dev->bus->number, dev->devfn);
883 884
884 if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) && 885 if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
885 pci_set_dma_mask(dev, 0xffffffffLL)) { 886 pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
886 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); 887 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards);
887 return -ENOMEM; 888 return -ENOMEM;
888 } 889 }
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 5980f3e886fc..73d30bf01582 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -187,6 +187,7 @@ config MOXA_SMARTIO
187config ISI 187config ISI
188 tristate "Multi-Tech multiport card support (EXPERIMENTAL)" 188 tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
189 depends on SERIAL_NONSTANDARD 189 depends on SERIAL_NONSTANDARD
190 select FW_LOADER
190 help 191 help
191 This is a driver for the Multi-Tech cards which provide several 192 This is a driver for the Multi-Tech cards which provide several
192 serial ports. The driver is experimental and can currently only be 193 serial ports. The driver is experimental and can currently only be
@@ -695,7 +696,7 @@ config NVRAM
695 696
696config RTC 697config RTC
697 tristate "Enhanced Real Time Clock Support" 698 tristate "Enhanced Real Time Clock Support"
698 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV 699 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM
699 ---help--- 700 ---help---
700 If you say Y here and create a character special file /dev/rtc with 701 If you say Y here and create a character special file /dev/rtc with
701 major number 10 and minor number 135 using mknod ("man mknod"), you 702 major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 70b8ed9cd172..4c67135c12d8 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12#include <linux/page-flags.h> 12#include <linux/page-flags.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/jiffies.h>
14#include "agp.h" 15#include "agp.h"
15 16
16/* NVIDIA registers */ 17/* NVIDIA registers */
@@ -256,7 +257,7 @@ static void nvidia_tlbflush(struct agp_memory *mem)
256 do { 257 do {
257 pci_read_config_dword(nvidia_private.dev_1, 258 pci_read_config_dword(nvidia_private.dev_1,
258 NVIDIA_1_WBC, &wbc_reg); 259 NVIDIA_1_WBC, &wbc_reg);
259 if ((signed)(end - jiffies) <= 0) { 260 if (time_before_eq(end, jiffies)) {
260 printk(KERN_ERR PFX 261 printk(KERN_ERR PFX
261 "TLB flush took more than 3 seconds.\n"); 262 "TLB flush took more than 3 seconds.\n");
262 } 263 }
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 641f7633878c..b7f7951c4587 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -175,7 +175,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
175 drm_device_t *dev = NULL; 175 drm_device_t *dev = NULL;
176 int minor = iminor(inode); 176 int minor = iminor(inode);
177 int err = -ENODEV; 177 int err = -ENODEV;
178 struct file_operations *old_fops; 178 const struct file_operations *old_fops;
179 179
180 DRM_DEBUG("\n"); 180 DRM_DEBUG("\n");
181 181
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index ae0aa6d7e0bb..c658dde3633b 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -126,7 +126,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
126 drm_device_t *dev = priv->head->dev; 126 drm_device_t *dev = priv->head->dev;
127 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 127 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
128 drm_i810_private_t *dev_priv = dev->dev_private; 128 drm_i810_private_t *dev_priv = dev->dev_private;
129 struct file_operations *old_fops; 129 const struct file_operations *old_fops;
130 int retcode = 0; 130 int retcode = 0;
131 131
132 if (buf_priv->currently_mapped == I810_BUF_MAPPED) 132 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 163f2cbfe60d..b0f815d8cea8 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -128,7 +128,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
128 drm_device_t *dev = priv->head->dev; 128 drm_device_t *dev = priv->head->dev;
129 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 129 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
130 drm_i830_private_t *dev_priv = dev->dev_private; 130 drm_i830_private_t *dev_priv = dev->dev_private;
131 struct file_operations *old_fops; 131 const struct file_operations *old_fops;
132 unsigned long virtual; 132 unsigned long virtual;
133 int retcode = 0; 133 int retcode = 0;
134 134
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 765c5c108bf4..9cad8501d62c 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -486,8 +486,7 @@ static void pc_close(struct tty_struct * tty, struct file * filp)
486 } /* End channel is open more than once */ 486 } /* End channel is open more than once */
487 487
488 /* Port open only once go ahead with shutdown & reset */ 488 /* Port open only once go ahead with shutdown & reset */
489 if (ch->count < 0) 489 BUG_ON(ch->count < 0);
490 BUG();
491 490
492 /* --------------------------------------------------------------- 491 /* ---------------------------------------------------------------
493 Let the rest of the driver know the channel is being closed. 492 Let the rest of the driver know the channel is being closed.
diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c
index b2e0928e8428..093fdf98b19a 100644
--- a/drivers/char/ftape/lowlevel/fdc-io.c
+++ b/drivers/char/ftape/lowlevel/fdc-io.c
@@ -607,7 +607,7 @@ void fdc_reset(void)
607 607
608 fdc_mode = fdc_idle; 608 fdc_mode = fdc_idle;
609 609
610 /* maybe the cli()/sti() pair is not necessary, BUT: 610 /* maybe the spin_lock_irq* pair is not necessary, BUT:
611 * the following line MUST be here. Otherwise fdc_interrupt_wait() 611 * the following line MUST be here. Otherwise fdc_interrupt_wait()
612 * won't wait. Note that fdc_reset() is called from 612 * won't wait. Note that fdc_reset() is called from
613 * ftape_dumb_stop() when the fdc is busy transferring data. In this 613 * ftape_dumb_stop() when the fdc is busy transferring data. In this
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 7c0684deea06..932feedda262 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -90,7 +90,7 @@ static unsigned int ipmi_poll(struct file *file, poll_table *wait)
90 90
91 spin_lock_irqsave(&priv->recv_msg_lock, flags); 91 spin_lock_irqsave(&priv->recv_msg_lock, flags);
92 92
93 if (! list_empty(&(priv->recv_msgs))) 93 if (!list_empty(&(priv->recv_msgs)))
94 mask |= (POLLIN | POLLRDNORM); 94 mask |= (POLLIN | POLLRDNORM);
95 95
96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags); 96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
@@ -789,21 +789,53 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
789 " interface. Other values will set the major device number" 789 " interface. Other values will set the major device number"
790 " to that value."); 790 " to that value.");
791 791
792/* Keep track of the devices that are registered. */
793struct ipmi_reg_list {
794 dev_t dev;
795 struct list_head link;
796};
797static LIST_HEAD(reg_list);
798static DEFINE_MUTEX(reg_list_mutex);
799
792static struct class *ipmi_class; 800static struct class *ipmi_class;
793 801
794static void ipmi_new_smi(int if_num) 802static void ipmi_new_smi(int if_num, struct device *device)
795{ 803{
796 dev_t dev = MKDEV(ipmi_major, if_num); 804 dev_t dev = MKDEV(ipmi_major, if_num);
805 struct ipmi_reg_list *entry;
797 806
798 devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, 807 devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
799 "ipmidev/%d", if_num); 808 "ipmidev/%d", if_num);
800 809
801 class_device_create(ipmi_class, NULL, dev, NULL, "ipmi%d", if_num); 810 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
811 if (!entry) {
812 printk(KERN_ERR "ipmi_devintf: Unable to create the"
813 " ipmi class device link\n");
814 return;
815 }
816 entry->dev = dev;
817
818 mutex_lock(&reg_list_mutex);
819 class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
820 list_add(&entry->link, &reg_list);
821 mutex_unlock(&reg_list_mutex);
802} 822}
803 823
804static void ipmi_smi_gone(int if_num) 824static void ipmi_smi_gone(int if_num)
805{ 825{
806 class_device_destroy(ipmi_class, MKDEV(ipmi_major, if_num)); 826 dev_t dev = MKDEV(ipmi_major, if_num);
827 struct ipmi_reg_list *entry;
828
829 mutex_lock(&reg_list_mutex);
830 list_for_each_entry(entry, &reg_list, link) {
831 if (entry->dev == dev) {
832 list_del(&entry->link);
833 kfree(entry);
834 break;
835 }
836 }
837 class_device_destroy(ipmi_class, dev);
838 mutex_unlock(&reg_list_mutex);
807 devfs_remove("ipmidev/%d", if_num); 839 devfs_remove("ipmidev/%d", if_num);
808} 840}
809 841
@@ -856,6 +888,14 @@ module_init(init_ipmi_devintf);
856 888
857static __exit void cleanup_ipmi(void) 889static __exit void cleanup_ipmi(void)
858{ 890{
891 struct ipmi_reg_list *entry, *entry2;
892 mutex_lock(&reg_list_mutex);
893 list_for_each_entry_safe(entry, entry2, &reg_list, link) {
894 list_del(&entry->link);
895 class_device_destroy(ipmi_class, entry->dev);
896 kfree(entry);
897 }
898 mutex_unlock(&reg_list_mutex);
859 class_destroy(ipmi_class); 899 class_destroy(ipmi_class);
860 ipmi_smi_watcher_unregister(&smi_watcher); 900 ipmi_smi_watcher_unregister(&smi_watcher);
861 devfs_remove(DEVICE_NAME); 901 devfs_remove(DEVICE_NAME);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index abd4c5118a1b..40eb005b9d77 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
48 48
49#define PFX "IPMI message handler: " 49#define PFX "IPMI message handler: "
50 50
51#define IPMI_DRIVER_VERSION "38.0" 51#define IPMI_DRIVER_VERSION "39.0"
52 52
53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54static int ipmi_init_msghandler(void); 54static int ipmi_init_msghandler(void);
@@ -162,6 +162,28 @@ struct ipmi_proc_entry
162}; 162};
163#endif 163#endif
164 164
165struct bmc_device
166{
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
170 int guid_set;
171
172 struct kref refcount;
173
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
185};
186
165#define IPMI_IPMB_NUM_SEQ 64 187#define IPMI_IPMB_NUM_SEQ 64
166#define IPMI_MAX_CHANNELS 16 188#define IPMI_MAX_CHANNELS 16
167struct ipmi_smi 189struct ipmi_smi
@@ -178,9 +200,8 @@ struct ipmi_smi
178 /* Used for wake ups at startup. */ 200 /* Used for wake ups at startup. */
179 wait_queue_head_t waitq; 201 wait_queue_head_t waitq;
180 202
181 /* The IPMI version of the BMC on the other end. */ 203 struct bmc_device *bmc;
182 unsigned char version_major; 204 char *my_dev_name;
183 unsigned char version_minor;
184 205
185 /* This is the lower-layer's sender routine. */ 206 /* This is the lower-layer's sender routine. */
186 struct ipmi_smi_handlers *handlers; 207 struct ipmi_smi_handlers *handlers;
@@ -194,6 +215,9 @@ struct ipmi_smi
194 struct ipmi_proc_entry *proc_entries; 215 struct ipmi_proc_entry *proc_entries;
195#endif 216#endif
196 217
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
220
197 /* A table of sequence numbers for this interface. We use the 221 /* A table of sequence numbers for this interface. We use the
198 sequence numbers for IPMB messages that go out of the 222 sequence numbers for IPMB messages that go out of the
199 interface to match them up with their responses. A routine 223 interface to match them up with their responses. A routine
@@ -312,6 +336,7 @@ struct ipmi_smi
312 /* Events that were received with the proper format. */ 336 /* Events that were received with the proper format. */
313 unsigned int events; 337 unsigned int events;
314}; 338};
339#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
315 340
316/* Used to mark an interface entry that cannot be used but is not a 341/* Used to mark an interface entry that cannot be used but is not a
317 * free entry, either, primarily used at creation and deletion time so 342 * free entry, either, primarily used at creation and deletion time so
@@ -320,6 +345,15 @@ struct ipmi_smi
320#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ 345#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
321 || (i == IPMI_INVALID_INTERFACE_ENTRY)) 346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
322 347
348/**
349 * The driver model view of the IPMI messaging driver.
350 */
351static struct device_driver ipmidriver = {
352 .name = "ipmi",
353 .bus = &platform_bus_type
354};
355static DEFINE_MUTEX(ipmidriver_mutex);
356
323#define MAX_IPMI_INTERFACES 4 357#define MAX_IPMI_INTERFACES 4
324static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; 358static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
325 359
@@ -393,7 +427,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
393 if (IPMI_INVALID_INTERFACE(intf)) 427 if (IPMI_INVALID_INTERFACE(intf))
394 continue; 428 continue;
395 spin_unlock_irqrestore(&interfaces_lock, flags); 429 spin_unlock_irqrestore(&interfaces_lock, flags);
396 watcher->new_smi(i); 430 watcher->new_smi(i, intf->si_dev);
397 spin_lock_irqsave(&interfaces_lock, flags); 431 spin_lock_irqsave(&interfaces_lock, flags);
398 } 432 }
399 spin_unlock_irqrestore(&interfaces_lock, flags); 433 spin_unlock_irqrestore(&interfaces_lock, flags);
@@ -409,14 +443,14 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
409} 443}
410 444
411static void 445static void
412call_smi_watchers(int i) 446call_smi_watchers(int i, struct device *dev)
413{ 447{
414 struct ipmi_smi_watcher *w; 448 struct ipmi_smi_watcher *w;
415 449
416 down_read(&smi_watchers_sem); 450 down_read(&smi_watchers_sem);
417 list_for_each_entry(w, &smi_watchers, link) { 451 list_for_each_entry(w, &smi_watchers, link) {
418 if (try_module_get(w->owner)) { 452 if (try_module_get(w->owner)) {
419 w->new_smi(i); 453 w->new_smi(i, dev);
420 module_put(w->owner); 454 module_put(w->owner);
421 } 455 }
422 } 456 }
@@ -844,8 +878,8 @@ void ipmi_get_version(ipmi_user_t user,
844 unsigned char *major, 878 unsigned char *major,
845 unsigned char *minor) 879 unsigned char *minor)
846{ 880{
847 *major = user->intf->version_major; 881 *major = ipmi_version_major(&user->intf->bmc->id);
848 *minor = user->intf->version_minor; 882 *minor = ipmi_version_minor(&user->intf->bmc->id);
849} 883}
850 884
851int ipmi_set_my_address(ipmi_user_t user, 885int ipmi_set_my_address(ipmi_user_t user,
@@ -1553,7 +1587,8 @@ static int version_file_read_proc(char *page, char **start, off_t off,
1553 ipmi_smi_t intf = data; 1587 ipmi_smi_t intf = data;
1554 1588
1555 return sprintf(out, "%d.%d\n", 1589 return sprintf(out, "%d.%d\n",
1556 intf->version_major, intf->version_minor); 1590 ipmi_version_major(&intf->bmc->id),
1591 ipmi_version_minor(&intf->bmc->id));
1557} 1592}
1558 1593
1559static int stat_file_read_proc(char *page, char **start, off_t off, 1594static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -1712,6 +1747,470 @@ static void remove_proc_entries(ipmi_smi_t smi)
1712#endif /* CONFIG_PROC_FS */ 1747#endif /* CONFIG_PROC_FS */
1713} 1748}
1714 1749
1750static int __find_bmc_guid(struct device *dev, void *data)
1751{
1752 unsigned char *id = data;
1753 struct bmc_device *bmc = dev_get_drvdata(dev);
1754 return memcmp(bmc->guid, id, 16) == 0;
1755}
1756
1757static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1758 unsigned char *guid)
1759{
1760 struct device *dev;
1761
1762 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1763 if (dev)
1764 return dev_get_drvdata(dev);
1765 else
1766 return NULL;
1767}
1768
1769struct prod_dev_id {
1770 unsigned int product_id;
1771 unsigned char device_id;
1772};
1773
1774static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1775{
1776 struct prod_dev_id *id = data;
1777 struct bmc_device *bmc = dev_get_drvdata(dev);
1778
 1779 	/* Match on both product and device id. */
 1780 	return (bmc->id.product_id == id->product_id
 1781 	    && bmc->id.device_id == id->device_id);
1782}
1783
1784static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1785 struct device_driver *drv,
 1786 	unsigned int product_id, unsigned char device_id)
1787{
1788 struct prod_dev_id id = {
1789 .product_id = product_id,
1790 .device_id = device_id,
1791 };
1792 struct device *dev;
1793
1794 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1795 if (dev)
1796 return dev_get_drvdata(dev);
1797 else
1798 return NULL;
1799}
1800
1801static ssize_t device_id_show(struct device *dev,
1802 struct device_attribute *attr,
1803 char *buf)
1804{
1805 struct bmc_device *bmc = dev_get_drvdata(dev);
1806
1807 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1808}
1809
1810static ssize_t provides_dev_sdrs_show(struct device *dev,
1811 struct device_attribute *attr,
1812 char *buf)
1813{
1814 struct bmc_device *bmc = dev_get_drvdata(dev);
1815
1816 return snprintf(buf, 10, "%u\n",
 1817 	        (bmc->id.device_revision & 0x80) >> 7);
1818}
1819
1820static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1821 char *buf)
1822{
1823 struct bmc_device *bmc = dev_get_drvdata(dev);
1824
1825 return snprintf(buf, 20, "%u\n",
 1826 	        bmc->id.device_revision & 0x0F);
1827}
1828
1829static ssize_t firmware_rev_show(struct device *dev,
1830 struct device_attribute *attr,
1831 char *buf)
1832{
1833 struct bmc_device *bmc = dev_get_drvdata(dev);
1834
1835 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1836 bmc->id.firmware_revision_2);
1837}
1838
1839static ssize_t ipmi_version_show(struct device *dev,
1840 struct device_attribute *attr,
1841 char *buf)
1842{
1843 struct bmc_device *bmc = dev_get_drvdata(dev);
1844
1845 return snprintf(buf, 20, "%u.%u\n",
1846 ipmi_version_major(&bmc->id),
1847 ipmi_version_minor(&bmc->id));
1848}
1849
1850static ssize_t add_dev_support_show(struct device *dev,
1851 struct device_attribute *attr,
1852 char *buf)
1853{
1854 struct bmc_device *bmc = dev_get_drvdata(dev);
1855
1856 return snprintf(buf, 10, "0x%02x\n",
1857 bmc->id.additional_device_support);
1858}
1859
1860static ssize_t manufacturer_id_show(struct device *dev,
1861 struct device_attribute *attr,
1862 char *buf)
1863{
1864 struct bmc_device *bmc = dev_get_drvdata(dev);
1865
1866 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1867}
1868
1869static ssize_t product_id_show(struct device *dev,
1870 struct device_attribute *attr,
1871 char *buf)
1872{
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1874
1875 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1876}
1877
1878static ssize_t aux_firmware_rev_show(struct device *dev,
1879 struct device_attribute *attr,
1880 char *buf)
1881{
1882 struct bmc_device *bmc = dev_get_drvdata(dev);
1883
1884 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1885 bmc->id.aux_firmware_revision[3],
1886 bmc->id.aux_firmware_revision[2],
1887 bmc->id.aux_firmware_revision[1],
1888 bmc->id.aux_firmware_revision[0]);
1889}
1890
1891static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1892 char *buf)
1893{
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1895
1896 return snprintf(buf, 100, "%Lx%Lx\n",
1897 (long long) bmc->guid[0],
1898 (long long) bmc->guid[8]);
1899}
1900
1901static void
1902cleanup_bmc_device(struct kref *ref)
1903{
1904 struct bmc_device *bmc;
1905
1906 bmc = container_of(ref, struct bmc_device, refcount);
1907
1908 device_remove_file(&bmc->dev->dev,
1909 &bmc->device_id_attr);
1910 device_remove_file(&bmc->dev->dev,
1911 &bmc->provides_dev_sdrs_attr);
1912 device_remove_file(&bmc->dev->dev,
1913 &bmc->revision_attr);
1914 device_remove_file(&bmc->dev->dev,
1915 &bmc->firmware_rev_attr);
1916 device_remove_file(&bmc->dev->dev,
1917 &bmc->version_attr);
1918 device_remove_file(&bmc->dev->dev,
1919 &bmc->add_dev_support_attr);
1920 device_remove_file(&bmc->dev->dev,
1921 &bmc->manufacturer_id_attr);
1922 device_remove_file(&bmc->dev->dev,
1923 &bmc->product_id_attr);
1924 if (bmc->id.aux_firmware_revision_set)
1925 device_remove_file(&bmc->dev->dev,
1926 &bmc->aux_firmware_rev_attr);
1927 if (bmc->guid_set)
1928 device_remove_file(&bmc->dev->dev,
1929 &bmc->guid_attr);
1930 platform_device_unregister(bmc->dev);
1931 kfree(bmc);
1932}
1933
1934static void ipmi_bmc_unregister(ipmi_smi_t intf)
1935{
1936 struct bmc_device *bmc = intf->bmc;
1937
1938 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1939 if (intf->my_dev_name) {
1940 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1941 kfree(intf->my_dev_name);
1942 intf->my_dev_name = NULL;
1943 }
1944
1945 mutex_lock(&ipmidriver_mutex);
1946 kref_put(&bmc->refcount, cleanup_bmc_device);
1947 mutex_unlock(&ipmidriver_mutex);
1948}
1949
1950static int ipmi_bmc_register(ipmi_smi_t intf)
1951{
1952 int rv;
1953 struct bmc_device *bmc = intf->bmc;
1954 struct bmc_device *old_bmc;
1955 int size;
1956 char dummy[1];
1957
1958 mutex_lock(&ipmidriver_mutex);
1959
1960 /*
1961 * Try to find if there is an bmc_device struct
1962 * representing the interfaced BMC already
1963 */
1964 if (bmc->guid_set)
1965 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1966 else
1967 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
1968 bmc->id.product_id,
1969 bmc->id.device_id);
1970
1971 /*
1972 * If there is already an bmc_device, free the new one,
1973 * otherwise register the new BMC device
1974 */
1975 if (old_bmc) {
1976 kfree(bmc);
1977 intf->bmc = old_bmc;
1978 bmc = old_bmc;
1979
1980 kref_get(&bmc->refcount);
1981 mutex_unlock(&ipmidriver_mutex);
1982
1983 printk(KERN_INFO
1984 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
1985 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
1986 bmc->id.manufacturer_id,
1987 bmc->id.product_id,
1988 bmc->id.device_id);
1989 } else {
1990 bmc->dev = platform_device_alloc("ipmi_bmc",
1991 bmc->id.device_id);
 1992 		if (! bmc->dev) {
 1993 			printk(KERN_ERR "ipmi_msghandler:"
 1994 			       " Unable to allocate platform device\n");
 1995 			mutex_unlock(&ipmidriver_mutex);
 1996 			return -ENOMEM;
 1997 		}
1998 bmc->dev->dev.driver = &ipmidriver;
1999 dev_set_drvdata(&bmc->dev->dev, bmc);
2000 kref_init(&bmc->refcount);
2001
2002 rv = platform_device_register(bmc->dev);
2003 mutex_unlock(&ipmidriver_mutex);
2004 if (rv) {
2005 printk(KERN_ERR
2006 "ipmi_msghandler:"
2007 " Unable to register bmc device: %d\n",
2008 rv);
2009 /* Don't go to out_err, you can only do that if
2010 the device is registered already. */
2011 return rv;
2012 }
2013
2014 bmc->device_id_attr.attr.name = "device_id";
2015 bmc->device_id_attr.attr.owner = THIS_MODULE;
2016 bmc->device_id_attr.attr.mode = S_IRUGO;
2017 bmc->device_id_attr.show = device_id_show;
2018
2019 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2020 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2021 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2022 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2023
2024
2025 bmc->revision_attr.attr.name = "revision";
2026 bmc->revision_attr.attr.owner = THIS_MODULE;
2027 bmc->revision_attr.attr.mode = S_IRUGO;
2028 bmc->revision_attr.show = revision_show;
2029
2030 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2031 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2032 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2033 bmc->firmware_rev_attr.show = firmware_rev_show;
2034
2035 bmc->version_attr.attr.name = "ipmi_version";
2036 bmc->version_attr.attr.owner = THIS_MODULE;
2037 bmc->version_attr.attr.mode = S_IRUGO;
2038 bmc->version_attr.show = ipmi_version_show;
2039
2040 bmc->add_dev_support_attr.attr.name
2041 = "additional_device_support";
2042 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2043 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2044 bmc->add_dev_support_attr.show = add_dev_support_show;
2045
2046 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2047 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2048 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2049 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2050
2051 bmc->product_id_attr.attr.name = "product_id";
2052 bmc->product_id_attr.attr.owner = THIS_MODULE;
2053 bmc->product_id_attr.attr.mode = S_IRUGO;
2054 bmc->product_id_attr.show = product_id_show;
2055
2056 bmc->guid_attr.attr.name = "guid";
2057 bmc->guid_attr.attr.owner = THIS_MODULE;
2058 bmc->guid_attr.attr.mode = S_IRUGO;
2059 bmc->guid_attr.show = guid_show;
2060
2061 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2062 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2063 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2064 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2065
2066 device_create_file(&bmc->dev->dev,
2067 &bmc->device_id_attr);
2068 device_create_file(&bmc->dev->dev,
2069 &bmc->provides_dev_sdrs_attr);
2070 device_create_file(&bmc->dev->dev,
2071 &bmc->revision_attr);
2072 device_create_file(&bmc->dev->dev,
2073 &bmc->firmware_rev_attr);
2074 device_create_file(&bmc->dev->dev,
2075 &bmc->version_attr);
2076 device_create_file(&bmc->dev->dev,
2077 &bmc->add_dev_support_attr);
2078 device_create_file(&bmc->dev->dev,
2079 &bmc->manufacturer_id_attr);
2080 device_create_file(&bmc->dev->dev,
2081 &bmc->product_id_attr);
2082 if (bmc->id.aux_firmware_revision_set)
2083 device_create_file(&bmc->dev->dev,
2084 &bmc->aux_firmware_rev_attr);
2085 if (bmc->guid_set)
2086 device_create_file(&bmc->dev->dev,
2087 &bmc->guid_attr);
2088
2089 printk(KERN_INFO
2090 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2091 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2092 bmc->id.manufacturer_id,
2093 bmc->id.product_id,
2094 bmc->id.device_id);
2095 }
2096
2097 /*
2098 * create symlink from system interface device to bmc device
2099 * and back.
2100 */
2101 rv = sysfs_create_link(&intf->si_dev->kobj,
2102 &bmc->dev->dev.kobj, "bmc");
2103 if (rv) {
2104 printk(KERN_ERR
2105 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2106 rv);
2107 goto out_err;
2108 }
2109
2110 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2111 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2112 if (!intf->my_dev_name) {
2113 rv = -ENOMEM;
2114 printk(KERN_ERR
2115 "ipmi_msghandler: allocate link from BMC: %d\n",
2116 rv);
2117 goto out_err;
2118 }
2119 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2120
2121 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2122 intf->my_dev_name);
2123 if (rv) {
2124 kfree(intf->my_dev_name);
2125 intf->my_dev_name = NULL;
2126 printk(KERN_ERR
2127 "ipmi_msghandler:"
2128 " Unable to create symlink to bmc: %d\n",
2129 rv);
2130 goto out_err;
2131 }
2132
2133 return 0;
2134
2135out_err:
2136 ipmi_bmc_unregister(intf);
2137 return rv;
2138}
2139
2140static int
2141send_guid_cmd(ipmi_smi_t intf, int chan)
2142{
2143 struct kernel_ipmi_msg msg;
2144 struct ipmi_system_interface_addr si;
2145
2146 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2147 si.channel = IPMI_BMC_CHANNEL;
2148 si.lun = 0;
2149
2150 msg.netfn = IPMI_NETFN_APP_REQUEST;
2151 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2152 msg.data = NULL;
2153 msg.data_len = 0;
2154 return i_ipmi_request(NULL,
2155 intf,
2156 (struct ipmi_addr *) &si,
2157 0,
2158 &msg,
2159 intf,
2160 NULL,
2161 NULL,
2162 0,
2163 intf->channels[0].address,
2164 intf->channels[0].lun,
2165 -1, 0);
2166}
2167
2168static void
2169guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2170{
2171 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2172 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2173 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2174 /* Not for me */
2175 return;
2176
2177 if (msg->msg.data[0] != 0) {
2178 /* Error from getting the GUID, the BMC doesn't have one. */
2179 intf->bmc->guid_set = 0;
2180 goto out;
2181 }
2182
2183 if (msg->msg.data_len < 17) {
2184 intf->bmc->guid_set = 0;
2185 printk(KERN_WARNING PFX
2186 "guid_handler: The GUID response from the BMC was too"
2187 " short, it was %d but should have been 17. Assuming"
2188 " GUID is not available.\n",
2189 msg->msg.data_len);
2190 goto out;
2191 }
2192
2193 memcpy(intf->bmc->guid, msg->msg.data, 16);
2194 intf->bmc->guid_set = 1;
2195 out:
2196 wake_up(&intf->waitq);
2197}
2198
2199static void
2200get_guid(ipmi_smi_t intf)
2201{
2202 int rv;
2203
2204 intf->bmc->guid_set = 0x2;
2205 intf->null_user_handler = guid_handler;
2206 rv = send_guid_cmd(intf, 0);
2207 if (rv)
2208 /* Send failed, no GUID available. */
2209 intf->bmc->guid_set = 0;
2210 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2211 intf->null_user_handler = NULL;
2212}
2213
1715static int 2214static int
1716send_channel_info_cmd(ipmi_smi_t intf, int chan) 2215send_channel_info_cmd(ipmi_smi_t intf, int chan)
1717{ 2216{
@@ -1804,8 +2303,8 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1804 2303
1805int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 2304int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1806 void *send_info, 2305 void *send_info,
1807 unsigned char version_major, 2306 struct ipmi_device_id *device_id,
1808 unsigned char version_minor, 2307 struct device *si_dev,
1809 unsigned char slave_addr, 2308 unsigned char slave_addr,
1810 ipmi_smi_t *new_intf) 2309 ipmi_smi_t *new_intf)
1811{ 2310{
@@ -1813,7 +2312,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1813 int rv; 2312 int rv;
1814 ipmi_smi_t intf; 2313 ipmi_smi_t intf;
1815 unsigned long flags; 2314 unsigned long flags;
2315 int version_major;
2316 int version_minor;
1816 2317
2318 version_major = ipmi_version_major(device_id);
2319 version_minor = ipmi_version_minor(device_id);
1817 2320
1818 /* Make sure the driver is actually initialized, this handles 2321 /* Make sure the driver is actually initialized, this handles
1819 problems with initialization order. */ 2322 problems with initialization order. */
@@ -1831,10 +2334,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1831 if (!intf) 2334 if (!intf)
1832 return -ENOMEM; 2335 return -ENOMEM;
1833 memset(intf, 0, sizeof(*intf)); 2336 memset(intf, 0, sizeof(*intf));
2337 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2338 if (!intf->bmc) {
2339 kfree(intf);
2340 return -ENOMEM;
2341 }
1834 intf->intf_num = -1; 2342 intf->intf_num = -1;
1835 kref_init(&intf->refcount); 2343 kref_init(&intf->refcount);
1836 intf->version_major = version_major; 2344 intf->bmc->id = *device_id;
1837 intf->version_minor = version_minor; 2345 intf->si_dev = si_dev;
1838 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 2346 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1839 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; 2347 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
1840 intf->channels[j].lun = 2; 2348 intf->channels[j].lun = 2;
@@ -1884,6 +2392,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1884 caller before sending any messages with it. */ 2392 caller before sending any messages with it. */
1885 *new_intf = intf; 2393 *new_intf = intf;
1886 2394
2395 get_guid(intf);
2396
1887 if ((version_major > 1) 2397 if ((version_major > 1)
1888 || ((version_major == 1) && (version_minor >= 5))) 2398 || ((version_major == 1) && (version_minor >= 5)))
1889 { 2399 {
@@ -1898,6 +2408,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1898 /* Wait for the channel info to be read. */ 2408 /* Wait for the channel info to be read. */
1899 wait_event(intf->waitq, 2409 wait_event(intf->waitq,
1900 intf->curr_channel >= IPMI_MAX_CHANNELS); 2410 intf->curr_channel >= IPMI_MAX_CHANNELS);
2411 intf->null_user_handler = NULL;
1901 } else { 2412 } else {
1902 /* Assume a single IPMB channel at zero. */ 2413 /* Assume a single IPMB channel at zero. */
1903 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2414 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
@@ -1907,6 +2418,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1907 if (rv == 0) 2418 if (rv == 0)
1908 rv = add_proc_entries(intf, i); 2419 rv = add_proc_entries(intf, i);
1909 2420
2421 rv = ipmi_bmc_register(intf);
2422
1910 out: 2423 out:
1911 if (rv) { 2424 if (rv) {
1912 if (intf->proc_dir) 2425 if (intf->proc_dir)
@@ -1921,7 +2434,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1921 spin_lock_irqsave(&interfaces_lock, flags); 2434 spin_lock_irqsave(&interfaces_lock, flags);
1922 ipmi_interfaces[i] = intf; 2435 ipmi_interfaces[i] = intf;
1923 spin_unlock_irqrestore(&interfaces_lock, flags); 2436 spin_unlock_irqrestore(&interfaces_lock, flags);
1924 call_smi_watchers(i); 2437 call_smi_watchers(i, intf->si_dev);
1925 } 2438 }
1926 2439
1927 return rv; 2440 return rv;
@@ -1933,6 +2446,8 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
1933 struct ipmi_smi_watcher *w; 2446 struct ipmi_smi_watcher *w;
1934 unsigned long flags; 2447 unsigned long flags;
1935 2448
2449 ipmi_bmc_unregister(intf);
2450
1936 spin_lock_irqsave(&interfaces_lock, flags); 2451 spin_lock_irqsave(&interfaces_lock, flags);
1937 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2452 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1938 if (ipmi_interfaces[i] == intf) { 2453 if (ipmi_interfaces[i] == intf) {
@@ -3196,10 +3711,17 @@ static struct notifier_block panic_block = {
3196static int ipmi_init_msghandler(void) 3711static int ipmi_init_msghandler(void)
3197{ 3712{
3198 int i; 3713 int i;
3714 int rv;
3199 3715
3200 if (initialized) 3716 if (initialized)
3201 return 0; 3717 return 0;
3202 3718
3719 rv = driver_register(&ipmidriver);
3720 if (rv) {
3721 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3722 return rv;
3723 }
3724
3203 printk(KERN_INFO "ipmi message handler version " 3725 printk(KERN_INFO "ipmi message handler version "
3204 IPMI_DRIVER_VERSION "\n"); 3726 IPMI_DRIVER_VERSION "\n");
3205 3727
@@ -3222,7 +3744,7 @@ static int ipmi_init_msghandler(void)
3222 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES; 3744 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3223 add_timer(&ipmi_timer); 3745 add_timer(&ipmi_timer);
3224 3746
3225 notifier_chain_register(&panic_notifier_list, &panic_block); 3747 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
3226 3748
3227 initialized = 1; 3749 initialized = 1;
3228 3750
@@ -3242,7 +3764,7 @@ static __exit void cleanup_ipmi(void)
3242 if (!initialized) 3764 if (!initialized)
3243 return; 3765 return;
3244 3766
3245 notifier_chain_unregister(&panic_notifier_list, &panic_block); 3767 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3246 3768
3247 /* This can't be called if any interfaces exist, so no worry about 3769 /* This can't be called if any interfaces exist, so no worry about
3248 shutting down the interfaces. */ 3770 shutting down the interfaces. */
@@ -3256,6 +3778,8 @@ static __exit void cleanup_ipmi(void)
3256 remove_proc_entry(proc_ipmi_root->name, &proc_root); 3778 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3257#endif /* CONFIG_PROC_FS */ 3779#endif /* CONFIG_PROC_FS */
3258 3780
3781 driver_unregister(&ipmidriver);
3782
3259 initialized = 0; 3783 initialized = 0;
3260 3784
3261 /* Check for buffer leaks. */ 3785 /* Check for buffer leaks. */
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e8ed26b77d4c..786a2802ca34 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -464,7 +464,7 @@ static void ipmi_poweroff_function (void)
464 464
465/* Wait for an IPMI interface to be installed, the first one installed 465/* Wait for an IPMI interface to be installed, the first one installed
466 will be grabbed by this code and used to perform the powerdown. */ 466 will be grabbed by this code and used to perform the powerdown. */
467static void ipmi_po_new_smi(int if_num) 467static void ipmi_po_new_smi(int if_num, struct device *device)
468{ 468{
469 struct ipmi_system_interface_addr smi_addr; 469 struct ipmi_system_interface_addr smi_addr;
470 struct kernel_ipmi_msg send_msg; 470 struct kernel_ipmi_msg send_msg;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e59b638766ef..35fbd4d8ed4b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -52,6 +52,7 @@
52#include <linux/pci.h> 52#include <linux/pci.h>
53#include <linux/ioport.h> 53#include <linux/ioport.h>
54#include <linux/notifier.h> 54#include <linux/notifier.h>
55#include <linux/mutex.h>
55#include <linux/kthread.h> 56#include <linux/kthread.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#ifdef CONFIG_HIGH_RES_TIMERS 58#ifdef CONFIG_HIGH_RES_TIMERS
@@ -109,21 +110,15 @@ enum si_intf_state {
109enum si_type { 110enum si_type {
110 SI_KCS, SI_SMIC, SI_BT 111 SI_KCS, SI_SMIC, SI_BT
111}; 112};
113static char *si_to_str[] = { "KCS", "SMIC", "BT" };
112 114
113struct ipmi_device_id { 115#define DEVICE_NAME "ipmi_si"
114 unsigned char device_id; 116
115 unsigned char device_revision; 117static struct device_driver ipmi_driver =
116 unsigned char firmware_revision_1; 118{
117 unsigned char firmware_revision_2; 119 .name = DEVICE_NAME,
118 unsigned char ipmi_version; 120 .bus = &platform_bus_type
119 unsigned char additional_device_support; 121};
120 unsigned char manufacturer_id[3];
121 unsigned char product_id[2];
122 unsigned char aux_firmware_revision[4];
123} __attribute__((packed));
124
125#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
126#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
127 122
128struct smi_info 123struct smi_info
129{ 124{
@@ -147,6 +142,9 @@ struct smi_info
147 int (*irq_setup)(struct smi_info *info); 142 int (*irq_setup)(struct smi_info *info);
148 void (*irq_cleanup)(struct smi_info *info); 143 void (*irq_cleanup)(struct smi_info *info);
149 unsigned int io_size; 144 unsigned int io_size;
145 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146 void (*addr_source_cleanup)(struct smi_info *info);
147 void *addr_source_data;
150 148
151 /* Per-OEM handler, called from handle_flags(). 149 /* Per-OEM handler, called from handle_flags().
152 Returns 1 when handle_flags() needs to be re-run 150 Returns 1 when handle_flags() needs to be re-run
@@ -203,8 +201,17 @@ struct smi_info
203 interrupts. */ 201 interrupts. */
204 int interrupt_disabled; 202 int interrupt_disabled;
205 203
204 /* From the get device id response... */
206 struct ipmi_device_id device_id; 205 struct ipmi_device_id device_id;
207 206
207 /* Driver model stuff. */
208 struct device *dev;
209 struct platform_device *pdev;
210
211 /* True if we allocated the device, false if it came from
212 * someplace else (like PCI). */
213 int dev_registered;
214
208 /* Slave address, could be reported from DMI. */ 215 /* Slave address, could be reported from DMI. */
209 unsigned char slave_addr; 216 unsigned char slave_addr;
210 217
@@ -224,12 +231,16 @@ struct smi_info
224 unsigned long incoming_messages; 231 unsigned long incoming_messages;
225 232
226 struct task_struct *thread; 233 struct task_struct *thread;
234
235 struct list_head link;
227}; 236};
228 237
229static struct notifier_block *xaction_notifier_list; 238static int try_smi_init(struct smi_info *smi);
239
240static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
230static int register_xaction_notifier(struct notifier_block * nb) 241static int register_xaction_notifier(struct notifier_block * nb)
231{ 242{
232 return notifier_chain_register(&xaction_notifier_list, nb); 243 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
233} 244}
234 245
235static void si_restart_short_timer(struct smi_info *smi_info); 246static void si_restart_short_timer(struct smi_info *smi_info);
@@ -271,13 +282,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
271 spin_lock(&(smi_info->msg_lock)); 282 spin_lock(&(smi_info->msg_lock));
272 283
273 /* Pick the high priority queue first. */ 284 /* Pick the high priority queue first. */
274 if (! list_empty(&(smi_info->hp_xmit_msgs))) { 285 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
275 entry = smi_info->hp_xmit_msgs.next; 286 entry = smi_info->hp_xmit_msgs.next;
276 } else if (! list_empty(&(smi_info->xmit_msgs))) { 287 } else if (!list_empty(&(smi_info->xmit_msgs))) {
277 entry = smi_info->xmit_msgs.next; 288 entry = smi_info->xmit_msgs.next;
278 } 289 }
279 290
280 if (! entry) { 291 if (!entry) {
281 smi_info->curr_msg = NULL; 292 smi_info->curr_msg = NULL;
282 rv = SI_SM_IDLE; 293 rv = SI_SM_IDLE;
283 } else { 294 } else {
@@ -291,7 +302,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
291 do_gettimeofday(&t); 302 do_gettimeofday(&t);
292 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 303 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
293#endif 304#endif
294 err = notifier_call_chain(&xaction_notifier_list, 0, smi_info); 305 err = atomic_notifier_call_chain(&xaction_notifier_list,
306 0, smi_info);
295 if (err & NOTIFY_STOP_MASK) { 307 if (err & NOTIFY_STOP_MASK) {
296 rv = SI_SM_CALL_WITHOUT_DELAY; 308 rv = SI_SM_CALL_WITHOUT_DELAY;
297 goto out; 309 goto out;
@@ -344,7 +356,7 @@ static void start_clear_flags(struct smi_info *smi_info)
344 memory, we will re-enable the interrupt. */ 356 memory, we will re-enable the interrupt. */
345static inline void disable_si_irq(struct smi_info *smi_info) 357static inline void disable_si_irq(struct smi_info *smi_info)
346{ 358{
347 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { 359 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
348 disable_irq_nosync(smi_info->irq); 360 disable_irq_nosync(smi_info->irq);
349 smi_info->interrupt_disabled = 1; 361 smi_info->interrupt_disabled = 1;
350 } 362 }
@@ -375,7 +387,7 @@ static void handle_flags(struct smi_info *smi_info)
375 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 387 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
376 /* Messages available. */ 388 /* Messages available. */
377 smi_info->curr_msg = ipmi_alloc_smi_msg(); 389 smi_info->curr_msg = ipmi_alloc_smi_msg();
378 if (! smi_info->curr_msg) { 390 if (!smi_info->curr_msg) {
379 disable_si_irq(smi_info); 391 disable_si_irq(smi_info);
380 smi_info->si_state = SI_NORMAL; 392 smi_info->si_state = SI_NORMAL;
381 return; 393 return;
@@ -394,7 +406,7 @@ static void handle_flags(struct smi_info *smi_info)
394 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 406 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
395 /* Events available. */ 407 /* Events available. */
396 smi_info->curr_msg = ipmi_alloc_smi_msg(); 408 smi_info->curr_msg = ipmi_alloc_smi_msg();
397 if (! smi_info->curr_msg) { 409 if (!smi_info->curr_msg) {
398 disable_si_irq(smi_info); 410 disable_si_irq(smi_info);
399 smi_info->si_state = SI_NORMAL; 411 smi_info->si_state = SI_NORMAL;
400 return; 412 return;
@@ -430,7 +442,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
430#endif 442#endif
431 switch (smi_info->si_state) { 443 switch (smi_info->si_state) {
432 case SI_NORMAL: 444 case SI_NORMAL:
433 if (! smi_info->curr_msg) 445 if (!smi_info->curr_msg)
434 break; 446 break;
435 447
436 smi_info->curr_msg->rsp_size 448 smi_info->curr_msg->rsp_size
@@ -880,7 +892,7 @@ static void smi_timeout(unsigned long data)
880 892
881 smi_info->last_timeout_jiffies = jiffies_now; 893 smi_info->last_timeout_jiffies = jiffies_now;
882 894
883 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { 895 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
884 /* Running with interrupts, only do long timeouts. */ 896 /* Running with interrupts, only do long timeouts. */
885 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 897 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
886 spin_lock_irqsave(&smi_info->count_lock, flags); 898 spin_lock_irqsave(&smi_info->count_lock, flags);
@@ -974,15 +986,10 @@ static struct ipmi_smi_handlers handlers =
974 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ 986 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
975 987
976#define SI_MAX_PARMS 4 988#define SI_MAX_PARMS 4
977#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2) 989static LIST_HEAD(smi_infos);
978static struct smi_info *smi_infos[SI_MAX_DRIVERS] = 990static DECLARE_MUTEX(smi_infos_lock);
979{ NULL, NULL, NULL, NULL }; 991static int smi_num; /* Used to sequence the SMIs */
980 992
981#define DEVICE_NAME "ipmi_si"
982
983#define DEFAULT_KCS_IO_PORT 0xca2
984#define DEFAULT_SMIC_IO_PORT 0xca9
985#define DEFAULT_BT_IO_PORT 0xe4
986#define DEFAULT_REGSPACING 1 993#define DEFAULT_REGSPACING 1
987 994
988static int si_trydefaults = 1; 995static int si_trydefaults = 1;
@@ -1053,38 +1060,23 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1053 " by interface number."); 1060 " by interface number.");
1054 1061
1055 1062
1063#define IPMI_IO_ADDR_SPACE 0
1056#define IPMI_MEM_ADDR_SPACE 1 1064#define IPMI_MEM_ADDR_SPACE 1
1057#define IPMI_IO_ADDR_SPACE 2 1065static char *addr_space_to_str[] = { "I/O", "memory" };
1058 1066
1059#if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI) 1067static void std_irq_cleanup(struct smi_info *info)
1060static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
1061{ 1068{
1062 int i; 1069 if (info->si_type == SI_BT)
1063 1070 /* Disable the interrupt in the BT interface. */
1064 for (i = 0; i < SI_MAX_PARMS; ++i) { 1071 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1065 /* Don't check our address. */ 1072 free_irq(info->irq, info);
1066 if (i == intf)
1067 continue;
1068 if (si_type[i] != NULL) {
1069 if ((addr_space == IPMI_MEM_ADDR_SPACE &&
1070 base_addr == addrs[i]) ||
1071 (addr_space == IPMI_IO_ADDR_SPACE &&
1072 base_addr == ports[i]))
1073 return 0;
1074 }
1075 else
1076 break;
1077 }
1078
1079 return 1;
1080} 1073}
1081#endif
1082 1074
1083static int std_irq_setup(struct smi_info *info) 1075static int std_irq_setup(struct smi_info *info)
1084{ 1076{
1085 int rv; 1077 int rv;
1086 1078
1087 if (! info->irq) 1079 if (!info->irq)
1088 return 0; 1080 return 0;
1089 1081
1090 if (info->si_type == SI_BT) { 1082 if (info->si_type == SI_BT) {
@@ -1093,7 +1085,7 @@ static int std_irq_setup(struct smi_info *info)
1093 SA_INTERRUPT, 1085 SA_INTERRUPT,
1094 DEVICE_NAME, 1086 DEVICE_NAME,
1095 info); 1087 info);
1096 if (! rv) 1088 if (!rv)
1097 /* Enable the interrupt in the BT interface. */ 1089 /* Enable the interrupt in the BT interface. */
1098 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1090 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1099 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1091 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1110,88 +1102,77 @@ static int std_irq_setup(struct smi_info *info)
1110 DEVICE_NAME, info->irq); 1102 DEVICE_NAME, info->irq);
1111 info->irq = 0; 1103 info->irq = 0;
1112 } else { 1104 } else {
1105 info->irq_cleanup = std_irq_cleanup;
1113 printk(" Using irq %d\n", info->irq); 1106 printk(" Using irq %d\n", info->irq);
1114 } 1107 }
1115 1108
1116 return rv; 1109 return rv;
1117} 1110}
1118 1111
1119static void std_irq_cleanup(struct smi_info *info)
1120{
1121 if (! info->irq)
1122 return;
1123
1124 if (info->si_type == SI_BT)
1125 /* Disable the interrupt in the BT interface. */
1126 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1127 free_irq(info->irq, info);
1128}
1129
1130static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) 1112static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1131{ 1113{
1132 unsigned int *addr = io->info; 1114 unsigned int addr = io->addr_data;
1133 1115
1134 return inb((*addr)+(offset*io->regspacing)); 1116 return inb(addr + (offset * io->regspacing));
1135} 1117}
1136 1118
1137static void port_outb(struct si_sm_io *io, unsigned int offset, 1119static void port_outb(struct si_sm_io *io, unsigned int offset,
1138 unsigned char b) 1120 unsigned char b)
1139{ 1121{
1140 unsigned int *addr = io->info; 1122 unsigned int addr = io->addr_data;
1141 1123
1142 outb(b, (*addr)+(offset * io->regspacing)); 1124 outb(b, addr + (offset * io->regspacing));
1143} 1125}
1144 1126
1145static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) 1127static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1146{ 1128{
1147 unsigned int *addr = io->info; 1129 unsigned int addr = io->addr_data;
1148 1130
1149 return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; 1131 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1150} 1132}
1151 1133
1152static void port_outw(struct si_sm_io *io, unsigned int offset, 1134static void port_outw(struct si_sm_io *io, unsigned int offset,
1153 unsigned char b) 1135 unsigned char b)
1154{ 1136{
1155 unsigned int *addr = io->info; 1137 unsigned int addr = io->addr_data;
1156 1138
1157 outw(b << io->regshift, (*addr)+(offset * io->regspacing)); 1139 outw(b << io->regshift, addr + (offset * io->regspacing));
1158} 1140}
1159 1141
1160static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) 1142static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1161{ 1143{
1162 unsigned int *addr = io->info; 1144 unsigned int addr = io->addr_data;
1163 1145
1164 return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; 1146 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1165} 1147}
1166 1148
1167static void port_outl(struct si_sm_io *io, unsigned int offset, 1149static void port_outl(struct si_sm_io *io, unsigned int offset,
1168 unsigned char b) 1150 unsigned char b)
1169{ 1151{
1170 unsigned int *addr = io->info; 1152 unsigned int addr = io->addr_data;
1171 1153
1172 outl(b << io->regshift, (*addr)+(offset * io->regspacing)); 1154 outl(b << io->regshift, addr+(offset * io->regspacing));
1173} 1155}
1174 1156
1175static void port_cleanup(struct smi_info *info) 1157static void port_cleanup(struct smi_info *info)
1176{ 1158{
1177 unsigned int *addr = info->io.info; 1159 unsigned int addr = info->io.addr_data;
1178 int mapsize; 1160 int mapsize;
1179 1161
1180 if (addr && (*addr)) { 1162 if (addr) {
1181 mapsize = ((info->io_size * info->io.regspacing) 1163 mapsize = ((info->io_size * info->io.regspacing)
1182 - (info->io.regspacing - info->io.regsize)); 1164 - (info->io.regspacing - info->io.regsize));
1183 1165
1184 release_region (*addr, mapsize); 1166 release_region (addr, mapsize);
1185 } 1167 }
1186 kfree(info);
1187} 1168}
1188 1169
1189static int port_setup(struct smi_info *info) 1170static int port_setup(struct smi_info *info)
1190{ 1171{
1191 unsigned int *addr = info->io.info; 1172 unsigned int addr = info->io.addr_data;
1192 int mapsize; 1173 int mapsize;
1193 1174
1194 if (! addr || (! *addr)) 1175 if (!addr)
1195 return -ENODEV; 1176 return -ENODEV;
1196 1177
1197 info->io_cleanup = port_cleanup; 1178 info->io_cleanup = port_cleanup;
@@ -1225,51 +1206,11 @@ static int port_setup(struct smi_info *info)
1225 mapsize = ((info->io_size * info->io.regspacing) 1206 mapsize = ((info->io_size * info->io.regspacing)
1226 - (info->io.regspacing - info->io.regsize)); 1207 - (info->io.regspacing - info->io.regsize));
1227 1208
1228 if (request_region(*addr, mapsize, DEVICE_NAME) == NULL) 1209 if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
1229 return -EIO; 1210 return -EIO;
1230 return 0; 1211 return 0;
1231} 1212}
1232 1213
1233static int try_init_port(int intf_num, struct smi_info **new_info)
1234{
1235 struct smi_info *info;
1236
1237 if (! ports[intf_num])
1238 return -ENODEV;
1239
1240 if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1241 ports[intf_num]))
1242 return -ENODEV;
1243
1244 info = kmalloc(sizeof(*info), GFP_KERNEL);
1245 if (! info) {
1246 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1247 return -ENOMEM;
1248 }
1249 memset(info, 0, sizeof(*info));
1250
1251 info->io_setup = port_setup;
1252 info->io.info = &(ports[intf_num]);
1253 info->io.addr = NULL;
1254 info->io.regspacing = regspacings[intf_num];
1255 if (! info->io.regspacing)
1256 info->io.regspacing = DEFAULT_REGSPACING;
1257 info->io.regsize = regsizes[intf_num];
1258 if (! info->io.regsize)
1259 info->io.regsize = DEFAULT_REGSPACING;
1260 info->io.regshift = regshifts[intf_num];
1261 info->irq = 0;
1262 info->irq_setup = NULL;
1263 *new_info = info;
1264
1265 if (si_type[intf_num] == NULL)
1266 si_type[intf_num] = "kcs";
1267
1268 printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1269 si_type[intf_num], ports[intf_num]);
1270 return 0;
1271}
1272
1273static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) 1214static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1274{ 1215{
1275 return readb((io->addr)+(offset * io->regspacing)); 1216 return readb((io->addr)+(offset * io->regspacing));
@@ -1321,7 +1262,7 @@ static void mem_outq(struct si_sm_io *io, unsigned int offset,
1321 1262
1322static void mem_cleanup(struct smi_info *info) 1263static void mem_cleanup(struct smi_info *info)
1323{ 1264{
1324 unsigned long *addr = info->io.info; 1265 unsigned long addr = info->io.addr_data;
1325 int mapsize; 1266 int mapsize;
1326 1267
1327 if (info->io.addr) { 1268 if (info->io.addr) {
@@ -1330,17 +1271,16 @@ static void mem_cleanup(struct smi_info *info)
1330 mapsize = ((info->io_size * info->io.regspacing) 1271 mapsize = ((info->io_size * info->io.regspacing)
1331 - (info->io.regspacing - info->io.regsize)); 1272 - (info->io.regspacing - info->io.regsize));
1332 1273
1333 release_mem_region(*addr, mapsize); 1274 release_mem_region(addr, mapsize);
1334 } 1275 }
1335 kfree(info);
1336} 1276}
1337 1277
1338static int mem_setup(struct smi_info *info) 1278static int mem_setup(struct smi_info *info)
1339{ 1279{
1340 unsigned long *addr = info->io.info; 1280 unsigned long addr = info->io.addr_data;
1341 int mapsize; 1281 int mapsize;
1342 1282
1343 if (! addr || (! *addr)) 1283 if (!addr)
1344 return -ENODEV; 1284 return -ENODEV;
1345 1285
1346 info->io_cleanup = mem_cleanup; 1286 info->io_cleanup = mem_cleanup;
@@ -1380,57 +1320,83 @@ static int mem_setup(struct smi_info *info)
1380 mapsize = ((info->io_size * info->io.regspacing) 1320 mapsize = ((info->io_size * info->io.regspacing)
1381 - (info->io.regspacing - info->io.regsize)); 1321 - (info->io.regspacing - info->io.regsize));
1382 1322
1383 if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL) 1323 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1384 return -EIO; 1324 return -EIO;
1385 1325
1386 info->io.addr = ioremap(*addr, mapsize); 1326 info->io.addr = ioremap(addr, mapsize);
1387 if (info->io.addr == NULL) { 1327 if (info->io.addr == NULL) {
1388 release_mem_region(*addr, mapsize); 1328 release_mem_region(addr, mapsize);
1389 return -EIO; 1329 return -EIO;
1390 } 1330 }
1391 return 0; 1331 return 0;
1392} 1332}
1393 1333
1394static int try_init_mem(int intf_num, struct smi_info **new_info) 1334
1335static __devinit void hardcode_find_bmc(void)
1395{ 1336{
1337 int i;
1396 struct smi_info *info; 1338 struct smi_info *info;
1397 1339
1398 if (! addrs[intf_num]) 1340 for (i = 0; i < SI_MAX_PARMS; i++) {
1399 return -ENODEV; 1341 if (!ports[i] && !addrs[i])
1342 continue;
1400 1343
1401 if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, 1344 info = kzalloc(sizeof(*info), GFP_KERNEL);
1402 addrs[intf_num])) 1345 if (!info)
1403 return -ENODEV; 1346 return;
1404 1347
1405 info = kmalloc(sizeof(*info), GFP_KERNEL); 1348 info->addr_source = "hardcoded";
1406 if (! info) {
1407 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1408 return -ENOMEM;
1409 }
1410 memset(info, 0, sizeof(*info));
1411 1349
1412 info->io_setup = mem_setup; 1350 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1413 info->io.info = &addrs[intf_num]; 1351 info->si_type = SI_KCS;
1414 info->io.addr = NULL; 1352 } else if (strcmp(si_type[i], "smic") == 0) {
1415 info->io.regspacing = regspacings[intf_num]; 1353 info->si_type = SI_SMIC;
1416 if (! info->io.regspacing) 1354 } else if (strcmp(si_type[i], "bt") == 0) {
1417 info->io.regspacing = DEFAULT_REGSPACING; 1355 info->si_type = SI_BT;
1418 info->io.regsize = regsizes[intf_num]; 1356 } else {
1419 if (! info->io.regsize) 1357 printk(KERN_WARNING
1420 info->io.regsize = DEFAULT_REGSPACING; 1358 "ipmi_si: Interface type specified "
1421 info->io.regshift = regshifts[intf_num]; 1359 "for interface %d, was invalid: %s\n",
1422 info->irq = 0; 1360 i, si_type[i]);
1423 info->irq_setup = NULL; 1361 kfree(info);
1424 *new_info = info; 1362 continue;
1363 }
1425 1364
1426 if (si_type[intf_num] == NULL) 1365 if (ports[i]) {
1427 si_type[intf_num] = "kcs"; 1366 /* An I/O port */
1367 info->io_setup = port_setup;
1368 info->io.addr_data = ports[i];
1369 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1370 } else if (addrs[i]) {
1371 /* A memory port */
1372 info->io_setup = mem_setup;
1373 info->io.addr_data = addrs[i];
1374 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1375 } else {
1376 printk(KERN_WARNING
1377 "ipmi_si: Interface type specified "
1378 "for interface %d, "
1379 "but port and address were not set or "
1380 "set to zero.\n", i);
1381 kfree(info);
1382 continue;
1383 }
1428 1384
1429 printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n", 1385 info->io.addr = NULL;
1430 si_type[intf_num], addrs[intf_num]); 1386 info->io.regspacing = regspacings[i];
1431 return 0; 1387 if (!info->io.regspacing)
1432} 1388 info->io.regspacing = DEFAULT_REGSPACING;
1389 info->io.regsize = regsizes[i];
1390 if (!info->io.regsize)
1391 info->io.regsize = DEFAULT_REGSPACING;
1392 info->io.regshift = regshifts[i];
1393 info->irq = irqs[i];
1394 if (info->irq)
1395 info->irq_setup = std_irq_setup;
1433 1396
1397 try_smi_init(info);
1398 }
1399}
1434 1400
1435#ifdef CONFIG_ACPI 1401#ifdef CONFIG_ACPI
1436 1402
@@ -1470,11 +1436,19 @@ static u32 ipmi_acpi_gpe(void *context)
1470 return ACPI_INTERRUPT_HANDLED; 1436 return ACPI_INTERRUPT_HANDLED;
1471} 1437}
1472 1438
1439static void acpi_gpe_irq_cleanup(struct smi_info *info)
1440{
1441 if (!info->irq)
1442 return;
1443
1444 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1445}
1446
1473static int acpi_gpe_irq_setup(struct smi_info *info) 1447static int acpi_gpe_irq_setup(struct smi_info *info)
1474{ 1448{
1475 acpi_status status; 1449 acpi_status status;
1476 1450
1477 if (! info->irq) 1451 if (!info->irq)
1478 return 0; 1452 return 0;
1479 1453
1480 /* FIXME - is level triggered right? */ 1454 /* FIXME - is level triggered right? */
@@ -1491,19 +1465,12 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1491 info->irq = 0; 1465 info->irq = 0;
1492 return -EINVAL; 1466 return -EINVAL;
1493 } else { 1467 } else {
1468 info->irq_cleanup = acpi_gpe_irq_cleanup;
1494 printk(" Using ACPI GPE %d\n", info->irq); 1469 printk(" Using ACPI GPE %d\n", info->irq);
1495 return 0; 1470 return 0;
1496 } 1471 }
1497} 1472}
1498 1473
1499static void acpi_gpe_irq_cleanup(struct smi_info *info)
1500{
1501 if (! info->irq)
1502 return;
1503
1504 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1505}
1506
1507/* 1474/*
1508 * Defined at 1475 * Defined at
1509 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1476 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
@@ -1546,28 +1513,12 @@ struct SPMITable {
1546 s8 spmi_id[1]; /* A '\0' terminated array starts here. */ 1513 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1547}; 1514};
1548 1515
1549static int try_init_acpi(int intf_num, struct smi_info **new_info) 1516static __devinit int try_init_acpi(struct SPMITable *spmi)
1550{ 1517{
1551 struct smi_info *info; 1518 struct smi_info *info;
1552 acpi_status status;
1553 struct SPMITable *spmi;
1554 char *io_type; 1519 char *io_type;
1555 u8 addr_space; 1520 u8 addr_space;
1556 1521
1557 if (acpi_disabled)
1558 return -ENODEV;
1559
1560 if (acpi_failure)
1561 return -ENODEV;
1562
1563 status = acpi_get_firmware_table("SPMI", intf_num+1,
1564 ACPI_LOGICAL_ADDRESSING,
1565 (struct acpi_table_header **) &spmi);
1566 if (status != AE_OK) {
1567 acpi_failure = 1;
1568 return -ENODEV;
1569 }
1570
1571 if (spmi->IPMIlegacy != 1) { 1522 if (spmi->IPMIlegacy != 1) {
1572 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1523 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1573 return -ENODEV; 1524 return -ENODEV;
@@ -1577,47 +1528,42 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1577 addr_space = IPMI_MEM_ADDR_SPACE; 1528 addr_space = IPMI_MEM_ADDR_SPACE;
1578 else 1529 else
1579 addr_space = IPMI_IO_ADDR_SPACE; 1530 addr_space = IPMI_IO_ADDR_SPACE;
1580 if (! is_new_interface(-1, addr_space, spmi->addr.address)) 1531
1581 return -ENODEV; 1532 info = kzalloc(sizeof(*info), GFP_KERNEL);
1533 if (!info) {
1534 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1535 return -ENOMEM;
1536 }
1537
1538 info->addr_source = "ACPI";
1582 1539
1583 /* Figure out the interface type. */ 1540 /* Figure out the interface type. */
1584 switch (spmi->InterfaceType) 1541 switch (spmi->InterfaceType)
1585 { 1542 {
1586 case 1: /* KCS */ 1543 case 1: /* KCS */
1587 si_type[intf_num] = "kcs"; 1544 info->si_type = SI_KCS;
1588 break; 1545 break;
1589
1590 case 2: /* SMIC */ 1546 case 2: /* SMIC */
1591 si_type[intf_num] = "smic"; 1547 info->si_type = SI_SMIC;
1592 break; 1548 break;
1593
1594 case 3: /* BT */ 1549 case 3: /* BT */
1595 si_type[intf_num] = "bt"; 1550 info->si_type = SI_BT;
1596 break; 1551 break;
1597
1598 default: 1552 default:
1599 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 1553 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1600 spmi->InterfaceType); 1554 spmi->InterfaceType);
1555 kfree(info);
1601 return -EIO; 1556 return -EIO;
1602 } 1557 }
1603 1558
1604 info = kmalloc(sizeof(*info), GFP_KERNEL);
1605 if (! info) {
1606 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1607 return -ENOMEM;
1608 }
1609 memset(info, 0, sizeof(*info));
1610
1611 if (spmi->InterruptType & 1) { 1559 if (spmi->InterruptType & 1) {
1612 /* We've got a GPE interrupt. */ 1560 /* We've got a GPE interrupt. */
1613 info->irq = spmi->GPE; 1561 info->irq = spmi->GPE;
1614 info->irq_setup = acpi_gpe_irq_setup; 1562 info->irq_setup = acpi_gpe_irq_setup;
1615 info->irq_cleanup = acpi_gpe_irq_cleanup;
1616 } else if (spmi->InterruptType & 2) { 1563 } else if (spmi->InterruptType & 2) {
1617 /* We've got an APIC/SAPIC interrupt. */ 1564 /* We've got an APIC/SAPIC interrupt. */
1618 info->irq = spmi->GlobalSystemInterrupt; 1565 info->irq = spmi->GlobalSystemInterrupt;
1619 info->irq_setup = std_irq_setup; 1566 info->irq_setup = std_irq_setup;
1620 info->irq_cleanup = std_irq_cleanup;
1621 } else { 1567 } else {
1622 /* Use the default interrupt setting. */ 1568 /* Use the default interrupt setting. */
1623 info->irq = 0; 1569 info->irq = 0;
@@ -1626,43 +1572,60 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1626 1572
1627 if (spmi->addr.register_bit_width) { 1573 if (spmi->addr.register_bit_width) {
1628 /* A (hopefully) properly formed register bit width. */ 1574 /* A (hopefully) properly formed register bit width. */
1629 regspacings[intf_num] = spmi->addr.register_bit_width / 8;
1630 info->io.regspacing = spmi->addr.register_bit_width / 8; 1575 info->io.regspacing = spmi->addr.register_bit_width / 8;
1631 } else { 1576 } else {
1632 regspacings[intf_num] = DEFAULT_REGSPACING;
1633 info->io.regspacing = DEFAULT_REGSPACING; 1577 info->io.regspacing = DEFAULT_REGSPACING;
1634 } 1578 }
1635 regsizes[intf_num] = regspacings[intf_num]; 1579 info->io.regsize = info->io.regspacing;
1636 info->io.regsize = regsizes[intf_num]; 1580 info->io.regshift = spmi->addr.register_bit_offset;
1637 regshifts[intf_num] = spmi->addr.register_bit_offset;
1638 info->io.regshift = regshifts[intf_num];
1639 1581
1640 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1582 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1641 io_type = "memory"; 1583 io_type = "memory";
1642 info->io_setup = mem_setup; 1584 info->io_setup = mem_setup;
1643 addrs[intf_num] = spmi->addr.address; 1585 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1644 info->io.info = &(addrs[intf_num]);
1645 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1586 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1646 io_type = "I/O"; 1587 io_type = "I/O";
1647 info->io_setup = port_setup; 1588 info->io_setup = port_setup;
1648 ports[intf_num] = spmi->addr.address; 1589 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1649 info->io.info = &(ports[intf_num]);
1650 } else { 1590 } else {
1651 kfree(info); 1591 kfree(info);
1652 printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1592 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1653 return -EIO; 1593 return -EIO;
1654 } 1594 }
1595 info->io.addr_data = spmi->addr.address;
1655 1596
1656 *new_info = info; 1597 try_smi_init(info);
1657 1598
1658 printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1659 si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1660 return 0; 1599 return 0;
1661} 1600}
1601
1602static __devinit void acpi_find_bmc(void)
1603{
1604 acpi_status status;
1605 struct SPMITable *spmi;
1606 int i;
1607
1608 if (acpi_disabled)
1609 return;
1610
1611 if (acpi_failure)
1612 return;
1613
1614 for (i = 0; ; i++) {
1615 status = acpi_get_firmware_table("SPMI", i+1,
1616 ACPI_LOGICAL_ADDRESSING,
1617 (struct acpi_table_header **)
1618 &spmi);
1619 if (status != AE_OK)
1620 return;
1621
1622 try_init_acpi(spmi);
1623 }
1624}
1662#endif 1625#endif
1663 1626
1664#ifdef CONFIG_DMI 1627#ifdef CONFIG_DMI
1665typedef struct dmi_ipmi_data 1628struct dmi_ipmi_data
1666{ 1629{
1667 u8 type; 1630 u8 type;
1668 u8 addr_space; 1631 u8 addr_space;
@@ -1670,49 +1633,46 @@ typedef struct dmi_ipmi_data
1670 u8 irq; 1633 u8 irq;
1671 u8 offset; 1634 u8 offset;
1672 u8 slave_addr; 1635 u8 slave_addr;
1673} dmi_ipmi_data_t; 1636};
1674
1675static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
1676static int dmi_data_entries;
1677 1637
1678static int __init decode_dmi(struct dmi_header *dm, int intf_num) 1638static int __devinit decode_dmi(struct dmi_header *dm,
1639 struct dmi_ipmi_data *dmi)
1679{ 1640{
1680 u8 *data = (u8 *)dm; 1641 u8 *data = (u8 *)dm;
1681 unsigned long base_addr; 1642 unsigned long base_addr;
1682 u8 reg_spacing; 1643 u8 reg_spacing;
1683 u8 len = dm->length; 1644 u8 len = dm->length;
1684 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1685 1645
1686 ipmi_data->type = data[4]; 1646 dmi->type = data[4];
1687 1647
1688 memcpy(&base_addr, data+8, sizeof(unsigned long)); 1648 memcpy(&base_addr, data+8, sizeof(unsigned long));
1689 if (len >= 0x11) { 1649 if (len >= 0x11) {
1690 if (base_addr & 1) { 1650 if (base_addr & 1) {
1691 /* I/O */ 1651 /* I/O */
1692 base_addr &= 0xFFFE; 1652 base_addr &= 0xFFFE;
1693 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; 1653 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1694 } 1654 }
1695 else { 1655 else {
1696 /* Memory */ 1656 /* Memory */
1697 ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE; 1657 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1698 } 1658 }
1699 /* If bit 4 of byte 0x10 is set, then the lsb for the address 1659 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1700 is odd. */ 1660 is odd. */
1701 ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 1661 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1702 1662
1703 ipmi_data->irq = data[0x11]; 1663 dmi->irq = data[0x11];
1704 1664
1705 /* The top two bits of byte 0x10 hold the register spacing. */ 1665 /* The top two bits of byte 0x10 hold the register spacing. */
1706 reg_spacing = (data[0x10] & 0xC0) >> 6; 1666 reg_spacing = (data[0x10] & 0xC0) >> 6;
1707 switch(reg_spacing){ 1667 switch(reg_spacing){
1708 case 0x00: /* Byte boundaries */ 1668 case 0x00: /* Byte boundaries */
1709 ipmi_data->offset = 1; 1669 dmi->offset = 1;
1710 break; 1670 break;
1711 case 0x01: /* 32-bit boundaries */ 1671 case 0x01: /* 32-bit boundaries */
1712 ipmi_data->offset = 4; 1672 dmi->offset = 4;
1713 break; 1673 break;
1714 case 0x02: /* 16-byte boundaries */ 1674 case 0x02: /* 16-byte boundaries */
1715 ipmi_data->offset = 16; 1675 dmi->offset = 16;
1716 break; 1676 break;
1717 default: 1677 default:
1718 /* Some other interface, just ignore it. */ 1678 /* Some other interface, just ignore it. */
@@ -1726,217 +1686,227 @@ static int __init decode_dmi(struct dmi_header *dm, int intf_num)
1726 * wrong (and all that I have seen are I/O) so we just 1686 * wrong (and all that I have seen are I/O) so we just
1727 * ignore that bit and assume I/O. Systems that use 1687 * ignore that bit and assume I/O. Systems that use
1728 * memory should use the newer spec, anyway. */ 1688 * memory should use the newer spec, anyway. */
1729 ipmi_data->base_addr = base_addr & 0xfffe; 1689 dmi->base_addr = base_addr & 0xfffe;
1730 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; 1690 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1731 ipmi_data->offset = 1; 1691 dmi->offset = 1;
1732 }
1733
1734 ipmi_data->slave_addr = data[6];
1735
1736 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
1737 dmi_data_entries++;
1738 return 0;
1739 } 1692 }
1740 1693
1741 memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t)); 1694 dmi->slave_addr = data[6];
1742 1695
1743 return -1; 1696 return 0;
1744} 1697}
1745 1698
1746static void __init dmi_find_bmc(void) 1699static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1747{ 1700{
1748 struct dmi_device *dev = NULL; 1701 struct smi_info *info;
1749 int intf_num = 0;
1750
1751 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1752 if (intf_num >= SI_MAX_DRIVERS)
1753 break;
1754 1702
1755 decode_dmi((struct dmi_header *) dev->device_data, intf_num++); 1703 info = kzalloc(sizeof(*info), GFP_KERNEL);
1704 if (!info) {
1705 printk(KERN_ERR
1706 "ipmi_si: Could not allocate SI data\n");
1707 return;
1756 } 1708 }
1757}
1758
1759static int try_init_smbios(int intf_num, struct smi_info **new_info)
1760{
1761 struct smi_info *info;
1762 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1763 char *io_type;
1764 1709
1765 if (intf_num >= dmi_data_entries) 1710 info->addr_source = "SMBIOS";
1766 return -ENODEV;
1767 1711
1768 switch (ipmi_data->type) { 1712 switch (ipmi_data->type) {
1769 case 0x01: /* KCS */ 1713 case 0x01: /* KCS */
1770 si_type[intf_num] = "kcs"; 1714 info->si_type = SI_KCS;
1771 break; 1715 break;
1772 case 0x02: /* SMIC */ 1716 case 0x02: /* SMIC */
1773 si_type[intf_num] = "smic"; 1717 info->si_type = SI_SMIC;
1774 break; 1718 break;
1775 case 0x03: /* BT */ 1719 case 0x03: /* BT */
1776 si_type[intf_num] = "bt"; 1720 info->si_type = SI_BT;
1777 break; 1721 break;
1778 default: 1722 default:
1779 return -EIO; 1723 return;
1780 }
1781
1782 info = kmalloc(sizeof(*info), GFP_KERNEL);
1783 if (! info) {
1784 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1785 return -ENOMEM;
1786 } 1724 }
1787 memset(info, 0, sizeof(*info));
1788 1725
1789 if (ipmi_data->addr_space == 1) { 1726 switch (ipmi_data->addr_space) {
1790 io_type = "memory"; 1727 case IPMI_MEM_ADDR_SPACE:
1791 info->io_setup = mem_setup; 1728 info->io_setup = mem_setup;
1792 addrs[intf_num] = ipmi_data->base_addr; 1729 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1793 info->io.info = &(addrs[intf_num]); 1730 break;
1794 } else if (ipmi_data->addr_space == 2) { 1731
1795 io_type = "I/O"; 1732 case IPMI_IO_ADDR_SPACE:
1796 info->io_setup = port_setup; 1733 info->io_setup = port_setup;
1797 ports[intf_num] = ipmi_data->base_addr; 1734 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1798 info->io.info = &(ports[intf_num]); 1735 break;
1799 } else { 1736
1737 default:
1800 kfree(info); 1738 kfree(info);
1801 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n"); 1739 printk(KERN_WARNING
1802 return -EIO; 1740 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1741 ipmi_data->addr_space);
1742 return;
1803 } 1743 }
1744 info->io.addr_data = ipmi_data->base_addr;
1804 1745
1805 regspacings[intf_num] = ipmi_data->offset; 1746 info->io.regspacing = ipmi_data->offset;
1806 info->io.regspacing = regspacings[intf_num]; 1747 if (!info->io.regspacing)
1807 if (! info->io.regspacing)
1808 info->io.regspacing = DEFAULT_REGSPACING; 1748 info->io.regspacing = DEFAULT_REGSPACING;
1809 info->io.regsize = DEFAULT_REGSPACING; 1749 info->io.regsize = DEFAULT_REGSPACING;
1810 info->io.regshift = regshifts[intf_num]; 1750 info->io.regshift = 0;
1811 1751
1812 info->slave_addr = ipmi_data->slave_addr; 1752 info->slave_addr = ipmi_data->slave_addr;
1813 1753
1814 irqs[intf_num] = ipmi_data->irq; 1754 info->irq = ipmi_data->irq;
1755 if (info->irq)
1756 info->irq_setup = std_irq_setup;
1815 1757
1816 *new_info = info; 1758 try_smi_init(info);
1759}
1817 1760
1818 printk("ipmi_si: Found SMBIOS-specified state machine at %s" 1761static void __devinit dmi_find_bmc(void)
1819 " address 0x%lx, slave address 0x%x\n", 1762{
1820 io_type, (unsigned long)ipmi_data->base_addr, 1763 struct dmi_device *dev = NULL;
1821 ipmi_data->slave_addr); 1764 struct dmi_ipmi_data data;
1822 return 0; 1765 int rv;
1766
1767 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1768 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1769 if (!rv)
1770 try_init_dmi(&data);
1771 }
1823} 1772}
1824#endif /* CONFIG_DMI */ 1773#endif /* CONFIG_DMI */
1825 1774
1826#ifdef CONFIG_PCI 1775#ifdef CONFIG_PCI
1827 1776
1828#define PCI_ERMC_CLASSCODE 0x0C0700 1777#define PCI_ERMC_CLASSCODE 0x0C0700
1778#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
1779#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
1780#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
1781#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
1782#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
1783
1829#define PCI_HP_VENDOR_ID 0x103C 1784#define PCI_HP_VENDOR_ID 0x103C
1830#define PCI_MMC_DEVICE_ID 0x121A 1785#define PCI_MMC_DEVICE_ID 0x121A
1831#define PCI_MMC_ADDR_CW 0x10 1786#define PCI_MMC_ADDR_CW 0x10
1832 1787
1833/* Avoid more than one attempt to probe pci smic. */ 1788static void ipmi_pci_cleanup(struct smi_info *info)
1834static int pci_smic_checked = 0; 1789{
1790 struct pci_dev *pdev = info->addr_source_data;
1791
1792 pci_disable_device(pdev);
1793}
1835 1794
1836static int find_pci_smic(int intf_num, struct smi_info **new_info) 1795static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1796 const struct pci_device_id *ent)
1837{ 1797{
1838 struct smi_info *info; 1798 int rv;
1839 int error; 1799 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1840 struct pci_dev *pci_dev = NULL; 1800 struct smi_info *info;
1841 u16 base_addr; 1801 int first_reg_offset = 0;
1842 int fe_rmc = 0;
1843 1802
1844 if (pci_smic_checked) 1803 info = kzalloc(sizeof(*info), GFP_KERNEL);
1845 return -ENODEV; 1804 if (!info)
1805 return ENOMEM;
1846 1806
1847 pci_smic_checked = 1; 1807 info->addr_source = "PCI";
1848 1808
1849 pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL); 1809 switch (class_type) {
1850 if (! pci_dev) { 1810 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1851 pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL); 1811 info->si_type = SI_SMIC;
1852 if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)) 1812 break;
1853 fe_rmc = 1;
1854 else
1855 return -ENODEV;
1856 }
1857 1813
1858 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); 1814 case PCI_ERMC_CLASSCODE_TYPE_KCS:
1859 if (error) 1815 info->si_type = SI_KCS;
1860 { 1816 break;
1861 pci_dev_put(pci_dev); 1817
1862 printk(KERN_ERR 1818 case PCI_ERMC_CLASSCODE_TYPE_BT:
1863 "ipmi_si: pci_read_config_word() failed (%d).\n", 1819 info->si_type = SI_BT;
1864 error); 1820 break;
1865 return -ENODEV; 1821
1822 default:
1823 kfree(info);
1824 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1825 pci_name(pdev), class_type);
1826 return ENOMEM;
1866 } 1827 }
1867 1828
1868 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ 1829 rv = pci_enable_device(pdev);
1869 if (! (base_addr & 0x0001)) 1830 if (rv) {
1870 { 1831 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1871 pci_dev_put(pci_dev); 1832 pci_name(pdev));
1872 printk(KERN_ERR 1833 kfree(info);
1873 "ipmi_si: memory mapped I/O not supported for PCI" 1834 return rv;
1874 " smic.\n");
1875 return -ENODEV;
1876 } 1835 }
1877 1836
1878 base_addr &= 0xFFFE; 1837 info->addr_source_cleanup = ipmi_pci_cleanup;
1879 if (! fe_rmc) 1838 info->addr_source_data = pdev;
1880 /* Data register starts at base address + 1 in eRMC */
1881 ++base_addr;
1882 1839
1883 if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { 1840 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1884 pci_dev_put(pci_dev); 1841 first_reg_offset = 1;
1885 return -ENODEV;
1886 }
1887 1842
1888 info = kmalloc(sizeof(*info), GFP_KERNEL); 1843 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1889 if (! info) { 1844 info->io_setup = port_setup;
1890 pci_dev_put(pci_dev); 1845 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1891 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); 1846 } else {
1892 return -ENOMEM; 1847 info->io_setup = mem_setup;
1848 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1893 } 1849 }
1894 memset(info, 0, sizeof(*info)); 1850 info->io.addr_data = pci_resource_start(pdev, 0);
1895 1851
1896 info->io_setup = port_setup; 1852 info->io.regspacing = DEFAULT_REGSPACING;
1897 ports[intf_num] = base_addr;
1898 info->io.info = &(ports[intf_num]);
1899 info->io.regspacing = regspacings[intf_num];
1900 if (! info->io.regspacing)
1901 info->io.regspacing = DEFAULT_REGSPACING;
1902 info->io.regsize = DEFAULT_REGSPACING; 1853 info->io.regsize = DEFAULT_REGSPACING;
1903 info->io.regshift = regshifts[intf_num]; 1854 info->io.regshift = 0;
1904 1855
1905 *new_info = info; 1856 info->irq = pdev->irq;
1857 if (info->irq)
1858 info->irq_setup = std_irq_setup;
1906 1859
1907 irqs[intf_num] = pci_dev->irq; 1860 info->dev = &pdev->dev;
1908 si_type[intf_num] = "smic";
1909 1861
1910 printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n", 1862 return try_smi_init(info);
1911 (long unsigned int) base_addr); 1863}
1912 1864
1913 pci_dev_put(pci_dev); 1865static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1866{
1867}
1868
1869#ifdef CONFIG_PM
1870static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1871{
1914 return 0; 1872 return 0;
1915} 1873}
1916#endif /* CONFIG_PCI */
1917 1874
1918static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) 1875static int ipmi_pci_resume(struct pci_dev *pdev)
1919{ 1876{
1920#ifdef CONFIG_PCI 1877 return 0;
1921 if (find_pci_smic(intf_num, new_info) == 0) 1878}
1922 return 0;
1923#endif 1879#endif
1924 /* Include other methods here. */
1925 1880
1926 return -ENODEV; 1881static struct pci_device_id ipmi_pci_devices[] = {
1927} 1882 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1883 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
1884};
1885MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1886
1887static struct pci_driver ipmi_pci_driver = {
1888 .name = DEVICE_NAME,
1889 .id_table = ipmi_pci_devices,
1890 .probe = ipmi_pci_probe,
1891 .remove = __devexit_p(ipmi_pci_remove),
1892#ifdef CONFIG_PM
1893 .suspend = ipmi_pci_suspend,
1894 .resume = ipmi_pci_resume,
1895#endif
1896};
1897#endif /* CONFIG_PCI */
1928 1898
1929 1899
1930static int try_get_dev_id(struct smi_info *smi_info) 1900static int try_get_dev_id(struct smi_info *smi_info)
1931{ 1901{
1932 unsigned char msg[2]; 1902 unsigned char msg[2];
1933 unsigned char *resp; 1903 unsigned char *resp;
1934 unsigned long resp_len; 1904 unsigned long resp_len;
1935 enum si_sm_result smi_result; 1905 enum si_sm_result smi_result;
1936 int rv = 0; 1906 int rv = 0;
1937 1907
1938 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1908 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1939 if (! resp) 1909 if (!resp)
1940 return -ENOMEM; 1910 return -ENOMEM;
1941 1911
1942 /* Do a Get Device ID command, since it comes back with some 1912 /* Do a Get Device ID command, since it comes back with some
@@ -1972,7 +1942,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1972 /* Otherwise, we got some data. */ 1942 /* Otherwise, we got some data. */
1973 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1943 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1974 resp, IPMI_MAX_MSG_LENGTH); 1944 resp, IPMI_MAX_MSG_LENGTH);
1975 if (resp_len < 6) { 1945 if (resp_len < 14) {
1976 /* That's odd, it should be longer. */ 1946 /* That's odd, it should be longer. */
1977 rv = -EINVAL; 1947 rv = -EINVAL;
1978 goto out; 1948 goto out;
@@ -1985,8 +1955,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1985 } 1955 }
1986 1956
1987 /* Record info from the get device id, in case we need it. */ 1957 /* Record info from the get device id, in case we need it. */
1988 memcpy(&smi_info->device_id, &resp[3], 1958 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1989 min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
1990 1959
1991 out: 1960 out:
1992 kfree(resp); 1961 kfree(resp);
@@ -2018,7 +1987,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2018 struct smi_info *smi = data; 1987 struct smi_info *smi = data;
2019 1988
2020 out += sprintf(out, "interrupts_enabled: %d\n", 1989 out += sprintf(out, "interrupts_enabled: %d\n",
2021 smi->irq && ! smi->interrupt_disabled); 1990 smi->irq && !smi->interrupt_disabled);
2022 out += sprintf(out, "short_timeouts: %ld\n", 1991 out += sprintf(out, "short_timeouts: %ld\n",
2023 smi->short_timeouts); 1992 smi->short_timeouts);
2024 out += sprintf(out, "long_timeouts: %ld\n", 1993 out += sprintf(out, "long_timeouts: %ld\n",
@@ -2089,15 +2058,14 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2089#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 2058#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2090#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 2059#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2091#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 2060#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2092#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} 2061#define DELL_IANA_MFR_ID 0x0002a2
2093static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 2062static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2094{ 2063{
2095 struct ipmi_device_id *id = &smi_info->device_id; 2064 struct ipmi_device_id *id = &smi_info->device_id;
2096 const char mfr[3]=DELL_IANA_MFR_ID; 2065 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2097 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
2098 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 2066 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2099 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 2067 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2100 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2068 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2101 smi_info->oem_data_avail_handler = 2069 smi_info->oem_data_avail_handler =
2102 oem_data_avail_to_receive_msg_avail; 2070 oem_data_avail_to_receive_msg_avail;
2103 } 2071 }
@@ -2169,8 +2137,7 @@ static void
2169setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 2137setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2170{ 2138{
2171 struct ipmi_device_id *id = &smi_info->device_id; 2139 struct ipmi_device_id *id = &smi_info->device_id;
2172 const char mfr[3]=DELL_IANA_MFR_ID; 2140 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2173 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
2174 smi_info->si_type == SI_BT) 2141 smi_info->si_type == SI_BT)
2175 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 2142 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2176} 2143}
@@ -2200,62 +2167,110 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2200 del_timer_sync(&smi_info->si_timer); 2167 del_timer_sync(&smi_info->si_timer);
2201} 2168}
2202 2169
2203/* Returns 0 if initialized, or negative on an error. */ 2170static struct ipmi_default_vals
2204static int init_one_smi(int intf_num, struct smi_info **smi)
2205{ 2171{
2206 int rv; 2172 int type;
2207 struct smi_info *new_smi; 2173 int port;
2174} __devinit ipmi_defaults[] =
2175{
2176 { .type = SI_KCS, .port = 0xca2 },
2177 { .type = SI_SMIC, .port = 0xca9 },
2178 { .type = SI_BT, .port = 0xe4 },
2179 { .port = 0 }
2180};
2208 2181
2182static __devinit void default_find_bmc(void)
2183{
2184 struct smi_info *info;
2185 int i;
2209 2186
2210 rv = try_init_mem(intf_num, &new_smi); 2187 for (i = 0; ; i++) {
2211 if (rv) 2188 if (!ipmi_defaults[i].port)
2212 rv = try_init_port(intf_num, &new_smi); 2189 break;
2213#ifdef CONFIG_ACPI
2214 if (rv && si_trydefaults)
2215 rv = try_init_acpi(intf_num, &new_smi);
2216#endif
2217#ifdef CONFIG_DMI
2218 if (rv && si_trydefaults)
2219 rv = try_init_smbios(intf_num, &new_smi);
2220#endif
2221 if (rv && si_trydefaults)
2222 rv = try_init_plug_and_play(intf_num, &new_smi);
2223 2190
2224 if (rv) 2191 info = kzalloc(sizeof(*info), GFP_KERNEL);
2225 return rv; 2192 if (!info)
2193 return;
2226 2194
2227 /* So we know not to free it unless we have allocated one. */ 2195 info->addr_source = NULL;
2228 new_smi->intf = NULL;
2229 new_smi->si_sm = NULL;
2230 new_smi->handlers = NULL;
2231 2196
2232 if (! new_smi->irq_setup) { 2197 info->si_type = ipmi_defaults[i].type;
2233 new_smi->irq = irqs[intf_num]; 2198 info->io_setup = port_setup;
2234 new_smi->irq_setup = std_irq_setup; 2199 info->io.addr_data = ipmi_defaults[i].port;
2235 new_smi->irq_cleanup = std_irq_cleanup; 2200 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2236 }
2237 2201
2238 /* Default to KCS if no type is specified. */ 2202 info->io.addr = NULL;
2239 if (si_type[intf_num] == NULL) { 2203 info->io.regspacing = DEFAULT_REGSPACING;
2240 if (si_trydefaults) 2204 info->io.regsize = DEFAULT_REGSPACING;
2241 si_type[intf_num] = "kcs"; 2205 info->io.regshift = 0;
2242 else { 2206
2243 rv = -EINVAL; 2207 if (try_smi_init(info) == 0) {
2244 goto out_err; 2208 /* Found one... */
2209 printk(KERN_INFO "ipmi_si: Found default %s state"
2210 " machine at %s address 0x%lx\n",
2211 si_to_str[info->si_type],
2212 addr_space_to_str[info->io.addr_type],
2213 info->io.addr_data);
2214 return;
2245 } 2215 }
2246 } 2216 }
2217}
2218
2219static int is_new_interface(struct smi_info *info)
2220{
2221 struct smi_info *e;
2222
2223 list_for_each_entry(e, &smi_infos, link) {
2224 if (e->io.addr_type != info->io.addr_type)
2225 continue;
2226 if (e->io.addr_data == info->io.addr_data)
2227 return 0;
2228 }
2229
2230 return 1;
2231}
2232
2233static int try_smi_init(struct smi_info *new_smi)
2234{
2235 int rv;
2236
2237 if (new_smi->addr_source) {
2238 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2239 " machine at %s address 0x%lx, slave address 0x%x,"
2240 " irq %d\n",
2241 new_smi->addr_source,
2242 si_to_str[new_smi->si_type],
2243 addr_space_to_str[new_smi->io.addr_type],
2244 new_smi->io.addr_data,
2245 new_smi->slave_addr, new_smi->irq);
2246 }
2247
2248 down(&smi_infos_lock);
2249 if (!is_new_interface(new_smi)) {
2250 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2251 rv = -EBUSY;
2252 goto out_err;
2253 }
2247 2254
2248 /* Set up the state machine to use. */ 2255 /* So we know not to free it unless we have allocated one. */
2249 if (strcmp(si_type[intf_num], "kcs") == 0) { 2256 new_smi->intf = NULL;
2257 new_smi->si_sm = NULL;
2258 new_smi->handlers = NULL;
2259
2260 switch (new_smi->si_type) {
2261 case SI_KCS:
2250 new_smi->handlers = &kcs_smi_handlers; 2262 new_smi->handlers = &kcs_smi_handlers;
2251 new_smi->si_type = SI_KCS; 2263 break;
2252 } else if (strcmp(si_type[intf_num], "smic") == 0) { 2264
2265 case SI_SMIC:
2253 new_smi->handlers = &smic_smi_handlers; 2266 new_smi->handlers = &smic_smi_handlers;
2254 new_smi->si_type = SI_SMIC; 2267 break;
2255 } else if (strcmp(si_type[intf_num], "bt") == 0) { 2268
2269 case SI_BT:
2256 new_smi->handlers = &bt_smi_handlers; 2270 new_smi->handlers = &bt_smi_handlers;
2257 new_smi->si_type = SI_BT; 2271 break;
2258 } else { 2272
2273 default:
2259 /* No support for anything else yet. */ 2274 /* No support for anything else yet. */
2260 rv = -EIO; 2275 rv = -EIO;
2261 goto out_err; 2276 goto out_err;
@@ -2263,7 +2278,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2263 2278
2264 /* Allocate the state machine's data and initialize it. */ 2279 /* Allocate the state machine's data and initialize it. */
2265 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2280 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2266 if (! new_smi->si_sm) { 2281 if (!new_smi->si_sm) {
2267 printk(" Could not allocate state machine memory\n"); 2282 printk(" Could not allocate state machine memory\n");
2268 rv = -ENOMEM; 2283 rv = -ENOMEM;
2269 goto out_err; 2284 goto out_err;
@@ -2284,21 +2299,29 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2284 2299
2285 /* Do low-level detection first. */ 2300 /* Do low-level detection first. */
2286 if (new_smi->handlers->detect(new_smi->si_sm)) { 2301 if (new_smi->handlers->detect(new_smi->si_sm)) {
2302 if (new_smi->addr_source)
2303 printk(KERN_INFO "ipmi_si: Interface detection"
2304 " failed\n");
2287 rv = -ENODEV; 2305 rv = -ENODEV;
2288 goto out_err; 2306 goto out_err;
2289 } 2307 }
2290 2308
2291 /* Attempt a get device id command. If it fails, we probably 2309 /* Attempt a get device id command. If it fails, we probably
2292 don't have a SMI here. */ 2310 don't have a BMC here. */
2293 rv = try_get_dev_id(new_smi); 2311 rv = try_get_dev_id(new_smi);
2294 if (rv) 2312 if (rv) {
2313 if (new_smi->addr_source)
2314 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2315 " at this location\n");
2295 goto out_err; 2316 goto out_err;
2317 }
2296 2318
2297 setup_oem_data_handler(new_smi); 2319 setup_oem_data_handler(new_smi);
2298 setup_xaction_handlers(new_smi); 2320 setup_xaction_handlers(new_smi);
2299 2321
2300 /* Try to claim any interrupts. */ 2322 /* Try to claim any interrupts. */
2301 new_smi->irq_setup(new_smi); 2323 if (new_smi->irq_setup)
2324 new_smi->irq_setup(new_smi);
2302 2325
2303 INIT_LIST_HEAD(&(new_smi->xmit_msgs)); 2326 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2304 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); 2327 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
@@ -2308,7 +2331,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2308 2331
2309 new_smi->interrupt_disabled = 0; 2332 new_smi->interrupt_disabled = 0;
2310 atomic_set(&new_smi->stop_operation, 0); 2333 atomic_set(&new_smi->stop_operation, 0);
2311 new_smi->intf_num = intf_num; 2334 new_smi->intf_num = smi_num;
2335 smi_num++;
2312 2336
2313 /* Start clearing the flags before we enable interrupts or the 2337 /* Start clearing the flags before we enable interrupts or the
2314 timer to avoid racing with the timer. */ 2338 timer to avoid racing with the timer. */
@@ -2332,10 +2356,36 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2332 new_smi->thread = kthread_run(ipmi_thread, new_smi, 2356 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2333 "kipmi%d", new_smi->intf_num); 2357 "kipmi%d", new_smi->intf_num);
2334 2358
2359 if (!new_smi->dev) {
2360 /* If we don't already have a device from something
2361 * else (like PCI), then register a new one. */
2362 new_smi->pdev = platform_device_alloc("ipmi_si",
2363 new_smi->intf_num);
2364 if (rv) {
2365 printk(KERN_ERR
2366 "ipmi_si_intf:"
2367 " Unable to allocate platform device\n");
2368 goto out_err_stop_timer;
2369 }
2370 new_smi->dev = &new_smi->pdev->dev;
2371 new_smi->dev->driver = &ipmi_driver;
2372
2373 rv = platform_device_register(new_smi->pdev);
2374 if (rv) {
2375 printk(KERN_ERR
2376 "ipmi_si_intf:"
2377 " Unable to register system interface device:"
2378 " %d\n",
2379 rv);
2380 goto out_err_stop_timer;
2381 }
2382 new_smi->dev_registered = 1;
2383 }
2384
2335 rv = ipmi_register_smi(&handlers, 2385 rv = ipmi_register_smi(&handlers,
2336 new_smi, 2386 new_smi,
2337 ipmi_version_major(&new_smi->device_id), 2387 &new_smi->device_id,
2338 ipmi_version_minor(&new_smi->device_id), 2388 new_smi->dev,
2339 new_smi->slave_addr, 2389 new_smi->slave_addr,
2340 &(new_smi->intf)); 2390 &(new_smi->intf));
2341 if (rv) { 2391 if (rv) {
@@ -2365,9 +2415,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2365 goto out_err_stop_timer; 2415 goto out_err_stop_timer;
2366 } 2416 }
2367 2417
2368 *smi = new_smi; 2418 list_add_tail(&new_smi->link, &smi_infos);
2419
2420 up(&smi_infos_lock);
2369 2421
2370 printk(" IPMI %s interface initialized\n", si_type[intf_num]); 2422 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2371 2423
2372 return 0; 2424 return 0;
2373 2425
@@ -2379,7 +2431,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2379 if (new_smi->intf) 2431 if (new_smi->intf)
2380 ipmi_unregister_smi(new_smi->intf); 2432 ipmi_unregister_smi(new_smi->intf);
2381 2433
2382 new_smi->irq_cleanup(new_smi); 2434 if (new_smi->irq_cleanup)
2435 new_smi->irq_cleanup(new_smi);
2383 2436
2384 /* Wait until we know that we are out of any interrupt 2437 /* Wait until we know that we are out of any interrupt
2385 handlers might have been running before we freed the 2438 handlers might have been running before we freed the
@@ -2391,23 +2444,41 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2391 new_smi->handlers->cleanup(new_smi->si_sm); 2444 new_smi->handlers->cleanup(new_smi->si_sm);
2392 kfree(new_smi->si_sm); 2445 kfree(new_smi->si_sm);
2393 } 2446 }
2447 if (new_smi->addr_source_cleanup)
2448 new_smi->addr_source_cleanup(new_smi);
2394 if (new_smi->io_cleanup) 2449 if (new_smi->io_cleanup)
2395 new_smi->io_cleanup(new_smi); 2450 new_smi->io_cleanup(new_smi);
2396 2451
2452 if (new_smi->dev_registered)
2453 platform_device_unregister(new_smi->pdev);
2454
2455 kfree(new_smi);
2456
2457 up(&smi_infos_lock);
2458
2397 return rv; 2459 return rv;
2398} 2460}
2399 2461
2400static __init int init_ipmi_si(void) 2462static __devinit int init_ipmi_si(void)
2401{ 2463{
2402 int rv = 0;
2403 int pos = 0;
2404 int i; 2464 int i;
2405 char *str; 2465 char *str;
2466 int rv;
2406 2467
2407 if (initialized) 2468 if (initialized)
2408 return 0; 2469 return 0;
2409 initialized = 1; 2470 initialized = 1;
2410 2471
2472 /* Register the device drivers. */
2473 rv = driver_register(&ipmi_driver);
2474 if (rv) {
2475 printk(KERN_ERR
2476 "init_ipmi_si: Unable to register driver: %d\n",
2477 rv);
2478 return rv;
2479 }
2480
2481
2411 /* Parse out the si_type string into its components. */ 2482 /* Parse out the si_type string into its components. */
2412 str = si_type_str; 2483 str = si_type_str;
2413 if (*str != '\0') { 2484 if (*str != '\0') {
@@ -2425,63 +2496,66 @@ static __init int init_ipmi_si(void)
2425 2496
2426 printk(KERN_INFO "IPMI System Interface driver.\n"); 2497 printk(KERN_INFO "IPMI System Interface driver.\n");
2427 2498
2499 hardcode_find_bmc();
2500
2428#ifdef CONFIG_DMI 2501#ifdef CONFIG_DMI
2429 dmi_find_bmc(); 2502 dmi_find_bmc();
2430#endif 2503#endif
2431 2504
2432 rv = init_one_smi(0, &(smi_infos[pos])); 2505#ifdef CONFIG_ACPI
2433 if (rv && ! ports[0] && si_trydefaults) { 2506 if (si_trydefaults)
2434 /* If we are trying defaults and the initial port is 2507 acpi_find_bmc();
2435 not set, then set it. */ 2508#endif
2436 si_type[0] = "kcs";
2437 ports[0] = DEFAULT_KCS_IO_PORT;
2438 rv = init_one_smi(0, &(smi_infos[pos]));
2439 if (rv) {
2440 /* No KCS - try SMIC */
2441 si_type[0] = "smic";
2442 ports[0] = DEFAULT_SMIC_IO_PORT;
2443 rv = init_one_smi(0, &(smi_infos[pos]));
2444 }
2445 if (rv) {
2446 /* No SMIC - try BT */
2447 si_type[0] = "bt";
2448 ports[0] = DEFAULT_BT_IO_PORT;
2449 rv = init_one_smi(0, &(smi_infos[pos]));
2450 }
2451 }
2452 if (rv == 0)
2453 pos++;
2454 2509
2455 for (i = 1; i < SI_MAX_PARMS; i++) { 2510#ifdef CONFIG_PCI
2456 rv = init_one_smi(i, &(smi_infos[pos])); 2511 pci_module_init(&ipmi_pci_driver);
2457 if (rv == 0) 2512#endif
2458 pos++; 2513
2514 if (si_trydefaults) {
2515 down(&smi_infos_lock);
2516 if (list_empty(&smi_infos)) {
2517 /* No BMC was found, try defaults. */
2518 up(&smi_infos_lock);
2519 default_find_bmc();
2520 } else {
2521 up(&smi_infos_lock);
2522 }
2459 } 2523 }
2460 2524
2461 if (smi_infos[0] == NULL) { 2525 down(&smi_infos_lock);
2526 if (list_empty(&smi_infos)) {
2527 up(&smi_infos_lock);
2528#ifdef CONFIG_PCI
2529 pci_unregister_driver(&ipmi_pci_driver);
2530#endif
2462 printk("ipmi_si: Unable to find any System Interface(s)\n"); 2531 printk("ipmi_si: Unable to find any System Interface(s)\n");
2463 return -ENODEV; 2532 return -ENODEV;
2533 } else {
2534 up(&smi_infos_lock);
2535 return 0;
2464 } 2536 }
2465
2466 return 0;
2467} 2537}
2468module_init(init_ipmi_si); 2538module_init(init_ipmi_si);
2469 2539
2470static void __exit cleanup_one_si(struct smi_info *to_clean) 2540static void __devexit cleanup_one_si(struct smi_info *to_clean)
2471{ 2541{
2472 int rv; 2542 int rv;
2473 unsigned long flags; 2543 unsigned long flags;
2474 2544
2475 if (! to_clean) 2545 if (!to_clean)
2476 return; 2546 return;
2477 2547
2548 list_del(&to_clean->link);
2549
2478 /* Tell the timer and interrupt handlers that we are shutting 2550 /* Tell the timer and interrupt handlers that we are shutting
2479 down. */ 2551 down. */
2480 spin_lock_irqsave(&(to_clean->si_lock), flags); 2552 spin_lock_irqsave(&(to_clean->si_lock), flags);
2481 spin_lock(&(to_clean->msg_lock)); 2553 spin_lock(&(to_clean->msg_lock));
2482 2554
2483 atomic_inc(&to_clean->stop_operation); 2555 atomic_inc(&to_clean->stop_operation);
2484 to_clean->irq_cleanup(to_clean); 2556
2557 if (to_clean->irq_cleanup)
2558 to_clean->irq_cleanup(to_clean);
2485 2559
2486 spin_unlock(&(to_clean->msg_lock)); 2560 spin_unlock(&(to_clean->msg_lock));
2487 spin_unlock_irqrestore(&(to_clean->si_lock), flags); 2561 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
@@ -2511,20 +2585,34 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2511 2585
2512 kfree(to_clean->si_sm); 2586 kfree(to_clean->si_sm);
2513 2587
2588 if (to_clean->addr_source_cleanup)
2589 to_clean->addr_source_cleanup(to_clean);
2514 if (to_clean->io_cleanup) 2590 if (to_clean->io_cleanup)
2515 to_clean->io_cleanup(to_clean); 2591 to_clean->io_cleanup(to_clean);
2592
2593 if (to_clean->dev_registered)
2594 platform_device_unregister(to_clean->pdev);
2595
2596 kfree(to_clean);
2516} 2597}
2517 2598
2518static __exit void cleanup_ipmi_si(void) 2599static __exit void cleanup_ipmi_si(void)
2519{ 2600{
2520 int i; 2601 struct smi_info *e, *tmp_e;
2521 2602
2522 if (! initialized) 2603 if (!initialized)
2523 return; 2604 return;
2524 2605
2525 for (i = 0; i < SI_MAX_DRIVERS; i++) { 2606#ifdef CONFIG_PCI
2526 cleanup_one_si(smi_infos[i]); 2607 pci_unregister_driver(&ipmi_pci_driver);
2527 } 2608#endif
2609
2610 down(&smi_infos_lock);
2611 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2612 cleanup_one_si(e);
2613 up(&smi_infos_lock);
2614
2615 driver_unregister(&ipmi_driver);
2528} 2616}
2529module_exit(cleanup_ipmi_si); 2617module_exit(cleanup_ipmi_si);
2530 2618
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index bf3d4962d6a5..4b731b24dc16 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -50,11 +50,12 @@ struct si_sm_io
50 50
51 /* Generic info used by the actual handling routines, the 51 /* Generic info used by the actual handling routines, the
52 state machine shouldn't touch these. */ 52 state machine shouldn't touch these. */
53 void *info;
54 void __iomem *addr; 53 void __iomem *addr;
55 int regspacing; 54 int regspacing;
56 int regsize; 55 int regsize;
57 int regshift; 56 int regshift;
57 int addr_type;
58 long addr_data;
58}; 59};
59 60
60/* Results of SMI events. */ 61/* Results of SMI events. */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 1f3159eb1ede..7ece9f3c8f70 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -996,7 +996,7 @@ static struct notifier_block wdog_panic_notifier = {
996}; 996};
997 997
998 998
999static void ipmi_new_smi(int if_num) 999static void ipmi_new_smi(int if_num, struct device *device)
1000{ 1000{
1001 ipmi_register_watchdog(if_num); 1001 ipmi_register_watchdog(if_num);
1002} 1002}
@@ -1158,7 +1158,8 @@ static int __init ipmi_wdog_init(void)
1158 } 1158 }
1159 1159
1160 register_reboot_notifier(&wdog_reboot_notifier); 1160 register_reboot_notifier(&wdog_reboot_notifier);
1161 notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); 1161 atomic_notifier_chain_register(&panic_notifier_list,
1162 &wdog_panic_notifier);
1162 1163
1163 printk(KERN_INFO PFX "driver initialized\n"); 1164 printk(KERN_INFO PFX "driver initialized\n");
1164 1165
@@ -1176,7 +1177,8 @@ static __exit void ipmi_unregister_watchdog(void)
1176 release_nmi(&ipmi_nmi_handler); 1177 release_nmi(&ipmi_nmi_handler);
1177#endif 1178#endif
1178 1179
1179 notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); 1180 atomic_notifier_chain_unregister(&panic_notifier_list,
1181 &wdog_panic_notifier);
1180 unregister_reboot_notifier(&wdog_reboot_notifier); 1182 unregister_reboot_notifier(&wdog_reboot_notifier);
1181 1183
1182 if (! watchdog_user) 1184 if (! watchdog_user)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 26d0116b48d4..66719f9d294c 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -88,21 +88,15 @@ static inline int uncached_access(struct file *file, unsigned long addr)
88} 88}
89 89
90#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE 90#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
91static inline int valid_phys_addr_range(unsigned long addr, size_t *count) 91static inline int valid_phys_addr_range(unsigned long addr, size_t count)
92{ 92{
93 unsigned long end_mem; 93 if (addr + count > __pa(high_memory))
94
95 end_mem = __pa(high_memory);
96 if (addr >= end_mem)
97 return 0; 94 return 0;
98 95
99 if (*count > end_mem - addr)
100 *count = end_mem - addr;
101
102 return 1; 96 return 1;
103} 97}
104 98
105static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size) 99static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size)
106{ 100{
107 return 1; 101 return 1;
108} 102}
@@ -119,7 +113,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
119 ssize_t read, sz; 113 ssize_t read, sz;
120 char *ptr; 114 char *ptr;
121 115
122 if (!valid_phys_addr_range(p, &count)) 116 if (!valid_phys_addr_range(p, count))
123 return -EFAULT; 117 return -EFAULT;
124 read = 0; 118 read = 0;
125#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 119#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
@@ -177,7 +171,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
177 unsigned long copied; 171 unsigned long copied;
178 void *ptr; 172 void *ptr;
179 173
180 if (!valid_phys_addr_range(p, &count)) 174 if (!valid_phys_addr_range(p, count))
181 return -EFAULT; 175 return -EFAULT;
182 176
183 written = 0; 177 written = 0;
@@ -249,7 +243,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
249{ 243{
250 size_t size = vma->vm_end - vma->vm_start; 244 size_t size = vma->vm_end - vma->vm_start;
251 245
252 if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size)) 246 if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
253 return -EINVAL; 247 return -EINVAL;
254 248
255 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, 249 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
@@ -905,7 +899,7 @@ static const struct {
905 unsigned int minor; 899 unsigned int minor;
906 char *name; 900 char *name;
907 umode_t mode; 901 umode_t mode;
908 struct file_operations *fops; 902 const struct file_operations *fops;
909} devlist[] = { /* list of minor devices */ 903} devlist[] = { /* list of minor devices */
910 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, 904 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
911 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, 905 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 3e4c0414a01a..96eb2a709e21 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -129,7 +129,7 @@ static int misc_open(struct inode * inode, struct file * file)
129 int minor = iminor(inode); 129 int minor = iminor(inode);
130 struct miscdevice *c; 130 struct miscdevice *c;
131 int err = -ENODEV; 131 int err = -ENODEV;
132 struct file_operations *old_fops, *new_fops = NULL; 132 const struct file_operations *old_fops, *new_fops = NULL;
133 133
134 down(&misc_sem); 134 down(&misc_sem);
135 135
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index e7fd0b08e0b7..7e188a4d602a 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -118,7 +118,7 @@
118 118
119// enable CTS interrupt 119// enable CTS interrupt
120#define MOXA_MUST_IER_ECTSI 0x80 120#define MOXA_MUST_IER_ECTSI 0x80
121// eanble RTS interrupt 121// enable RTS interrupt
122#define MOXA_MUST_IER_ERTSI 0x40 122#define MOXA_MUST_IER_ERTSI 0x40
123// enable Xon/Xoff interrupt 123// enable Xon/Xoff interrupt
124#define MOXA_MUST_IER_XINT 0x20 124#define MOXA_MUST_IER_XINT 0x20
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index d68be61f0a49..fee2aca3f6a5 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -941,17 +941,6 @@ static void* mgsl_get_text_ptr(void)
941 return mgsl_get_text_ptr; 941 return mgsl_get_text_ptr;
942} 942}
943 943
944/*
945 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
946 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
947 * and some other program tries to do a serial write at the same time.
948 * Since the lock will only come under contention when the system is
949 * swapping and available memory is low, it makes sense to share one
950 * buffer across all the serial ioports, since it significantly saves
951 * memory if large numbers of serial ports are open.
952 */
953static unsigned char *tmp_buf;
954
955static inline int mgsl_paranoia_check(struct mgsl_struct *info, 944static inline int mgsl_paranoia_check(struct mgsl_struct *info,
956 char *name, const char *routine) 945 char *name, const char *routine)
957{ 946{
@@ -2150,7 +2139,7 @@ static int mgsl_write(struct tty_struct * tty,
2150 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2139 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2151 goto cleanup; 2140 goto cleanup;
2152 2141
2153 if (!tty || !info->xmit_buf || !tmp_buf) 2142 if (!tty || !info->xmit_buf)
2154 goto cleanup; 2143 goto cleanup;
2155 2144
2156 if ( info->params.mode == MGSL_MODE_HDLC || 2145 if ( info->params.mode == MGSL_MODE_HDLC ||
@@ -3438,7 +3427,6 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
3438{ 3427{
3439 struct mgsl_struct *info; 3428 struct mgsl_struct *info;
3440 int retval, line; 3429 int retval, line;
3441 unsigned long page;
3442 unsigned long flags; 3430 unsigned long flags;
3443 3431
3444 /* verify range of specified line number */ 3432 /* verify range of specified line number */
@@ -3472,18 +3460,6 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
3472 goto cleanup; 3460 goto cleanup;
3473 } 3461 }
3474 3462
3475 if (!tmp_buf) {
3476 page = get_zeroed_page(GFP_KERNEL);
3477 if (!page) {
3478 retval = -ENOMEM;
3479 goto cleanup;
3480 }
3481 if (tmp_buf)
3482 free_page(page);
3483 else
3484 tmp_buf = (unsigned char *) page;
3485 }
3486
3487 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 3463 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3488 3464
3489 spin_lock_irqsave(&info->netlock, flags); 3465 spin_lock_irqsave(&info->netlock, flags);
@@ -4502,11 +4478,6 @@ static void synclink_cleanup(void)
4502 kfree(tmp); 4478 kfree(tmp);
4503 } 4479 }
4504 4480
4505 if (tmp_buf) {
4506 free_page((unsigned long) tmp_buf);
4507 tmp_buf = NULL;
4508 }
4509
4510 if (pci_registered) 4481 if (pci_registered)
4511 pci_unregister_driver(&synclink_pci_driver); 4482 pci_unregister_driver(&synclink_pci_driver);
4512} 4483}
@@ -6025,7 +5996,7 @@ static void usc_set_async_mode( struct mgsl_struct *info )
6025 * <15..8> ? RxFIFO IRQ Request Level 5996 * <15..8> ? RxFIFO IRQ Request Level
6026 * 5997 *
6027 * Note: For async mode the receive FIFO level must be set 5998 * Note: For async mode the receive FIFO level must be set
6028 * to 0 to aviod the situation where the FIFO contains fewer bytes 5999 * to 0 to avoid the situation where the FIFO contains fewer bytes
6029 * than the trigger level and no more data is expected. 6000 * than the trigger level and no more data is expected.
6030 * 6001 *
6031 * <7> 0 Exited Hunt IA (Interrupt Arm) 6002 * <7> 0 Exited Hunt IA (Interrupt Arm)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 738ec2f4e563..b4d1f4eea435 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: synclink_gt.c,v 4.22 2006/01/09 20:16:06 paulkf Exp $ 2 * $Id: synclink_gt.c,v 4.25 2006/02/06 21:20:33 paulkf Exp $
3 * 3 *
4 * Device driver for Microgate SyncLink GT serial adapters. 4 * Device driver for Microgate SyncLink GT serial adapters.
5 * 5 *
@@ -92,7 +92,7 @@
92 * module identification 92 * module identification
93 */ 93 */
94static char *driver_name = "SyncLink GT"; 94static char *driver_name = "SyncLink GT";
95static char *driver_version = "$Revision: 4.22 $"; 95static char *driver_version = "$Revision: 4.25 $";
96static char *tty_driver_name = "synclink_gt"; 96static char *tty_driver_name = "synclink_gt";
97static char *tty_dev_prefix = "ttySLG"; 97static char *tty_dev_prefix = "ttySLG";
98MODULE_LICENSE("GPL"); 98MODULE_LICENSE("GPL");
@@ -188,6 +188,20 @@ static void hdlcdev_exit(struct slgt_info *info);
188#define SLGT_REG_SIZE 256 188#define SLGT_REG_SIZE 256
189 189
190/* 190/*
191 * conditional wait facility
192 */
193struct cond_wait {
194 struct cond_wait *next;
195 wait_queue_head_t q;
196 wait_queue_t wait;
197 unsigned int data;
198};
199static void init_cond_wait(struct cond_wait *w, unsigned int data);
200static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
201static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
202static void flush_cond_wait(struct cond_wait **head);
203
204/*
191 * DMA buffer descriptor and access macros 205 * DMA buffer descriptor and access macros
192 */ 206 */
193struct slgt_desc 207struct slgt_desc
@@ -269,6 +283,9 @@ struct slgt_info {
269 struct timer_list tx_timer; 283 struct timer_list tx_timer;
270 struct timer_list rx_timer; 284 struct timer_list rx_timer;
271 285
286 unsigned int gpio_present;
287 struct cond_wait *gpio_wait_q;
288
272 spinlock_t lock; /* spinlock for synchronizing with ISR */ 289 spinlock_t lock; /* spinlock for synchronizing with ISR */
273 290
274 struct work_struct task; 291 struct work_struct task;
@@ -379,6 +396,11 @@ static MGSL_PARAMS default_params = {
379#define MASK_OVERRUN BIT4 396#define MASK_OVERRUN BIT4
380 397
381#define GSR 0x00 /* global status */ 398#define GSR 0x00 /* global status */
399#define JCR 0x04 /* JTAG control */
400#define IODR 0x08 /* GPIO direction */
401#define IOER 0x0c /* GPIO interrupt enable */
402#define IOVR 0x10 /* GPIO value */
403#define IOSR 0x14 /* GPIO interrupt status */
382#define TDR 0x80 /* tx data */ 404#define TDR 0x80 /* tx data */
383#define RDR 0x80 /* rx data */ 405#define RDR 0x80 /* rx data */
384#define TCR 0x82 /* tx control */ 406#define TCR 0x82 /* tx control */
@@ -503,6 +525,9 @@ static int tiocmset(struct tty_struct *tty, struct file *file,
503static void set_break(struct tty_struct *tty, int break_state); 525static void set_break(struct tty_struct *tty, int break_state);
504static int get_interface(struct slgt_info *info, int __user *if_mode); 526static int get_interface(struct slgt_info *info, int __user *if_mode);
505static int set_interface(struct slgt_info *info, int if_mode); 527static int set_interface(struct slgt_info *info, int if_mode);
528static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
529static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
530static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
506 531
507/* 532/*
508 * driver functions 533 * driver functions
@@ -1112,6 +1137,12 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1112 return get_interface(info, argp); 1137 return get_interface(info, argp);
1113 case MGSL_IOCSIF: 1138 case MGSL_IOCSIF:
1114 return set_interface(info,(int)arg); 1139 return set_interface(info,(int)arg);
1140 case MGSL_IOCSGPIO:
1141 return set_gpio(info, argp);
1142 case MGSL_IOCGGPIO:
1143 return get_gpio(info, argp);
1144 case MGSL_IOCWAITGPIO:
1145 return wait_gpio(info, argp);
1115 case TIOCGICOUNT: 1146 case TIOCGICOUNT:
1116 spin_lock_irqsave(&info->lock,flags); 1147 spin_lock_irqsave(&info->lock,flags);
1117 cnow = info->icount; 1148 cnow = info->icount;
@@ -1762,10 +1793,6 @@ static void rx_async(struct slgt_info *info)
1762 DBGDATA(info, p, count, "rx"); 1793 DBGDATA(info, p, count, "rx");
1763 1794
1764 for(i=0 ; i < count; i+=2, p+=2) { 1795 for(i=0 ; i < count; i+=2, p+=2) {
1765 if (tty && chars) {
1766 tty_flip_buffer_push(tty);
1767 chars = 0;
1768 }
1769 ch = *p; 1796 ch = *p;
1770 icount->rx++; 1797 icount->rx++;
1771 1798
@@ -2158,6 +2185,24 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
2158 } 2185 }
2159} 2186}
2160 2187
2188static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
2189{
2190 struct cond_wait *w, *prev;
2191
2192 /* wake processes waiting for specific transitions */
2193 for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
2194 if (w->data & changed) {
2195 w->data = state;
2196 wake_up_interruptible(&w->q);
2197 if (prev != NULL)
2198 prev->next = w->next;
2199 else
2200 info->gpio_wait_q = w->next;
2201 } else
2202 prev = w;
2203 }
2204}
2205
2161/* interrupt service routine 2206/* interrupt service routine
2162 * 2207 *
2163 * irq interrupt number 2208 * irq interrupt number
@@ -2193,6 +2238,22 @@ static irqreturn_t slgt_interrupt(int irq, void *dev_id, struct pt_regs * regs)
2193 } 2238 }
2194 } 2239 }
2195 2240
2241 if (info->gpio_present) {
2242 unsigned int state;
2243 unsigned int changed;
2244 while ((changed = rd_reg32(info, IOSR)) != 0) {
2245 DBGISR(("%s iosr=%08x\n", info->device_name, changed));
2246 /* read latched state of GPIO signals */
2247 state = rd_reg32(info, IOVR);
2248 /* clear pending GPIO interrupt bits */
2249 wr_reg32(info, IOSR, changed);
2250 for (i=0 ; i < info->port_count ; i++) {
2251 if (info->port_array[i] != NULL)
2252 isr_gpio(info->port_array[i], changed, state);
2253 }
2254 }
2255 }
2256
2196 for(i=0; i < info->port_count ; i++) { 2257 for(i=0; i < info->port_count ; i++) {
2197 struct slgt_info *port = info->port_array[i]; 2258 struct slgt_info *port = info->port_array[i];
2198 2259
@@ -2276,6 +2337,8 @@ static void shutdown(struct slgt_info *info)
2276 set_signals(info); 2337 set_signals(info);
2277 } 2338 }
2278 2339
2340 flush_cond_wait(&info->gpio_wait_q);
2341
2279 spin_unlock_irqrestore(&info->lock,flags); 2342 spin_unlock_irqrestore(&info->lock,flags);
2280 2343
2281 if (info->tty) 2344 if (info->tty)
@@ -2650,6 +2713,175 @@ static int set_interface(struct slgt_info *info, int if_mode)
2650 return 0; 2713 return 0;
2651} 2714}
2652 2715
2716/*
2717 * set general purpose IO pin state and direction
2718 *
2719 * user_gpio fields:
2720 * state each bit indicates a pin state
2721 * smask set bit indicates pin state to set
2722 * dir each bit indicates a pin direction (0=input, 1=output)
2723 * dmask set bit indicates pin direction to set
2724 */
2725static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2726{
2727 unsigned long flags;
2728 struct gpio_desc gpio;
2729 __u32 data;
2730
2731 if (!info->gpio_present)
2732 return -EINVAL;
2733 if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2734 return -EFAULT;
2735 DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
2736 info->device_name, gpio.state, gpio.smask,
2737 gpio.dir, gpio.dmask));
2738
2739 spin_lock_irqsave(&info->lock,flags);
2740 if (gpio.dmask) {
2741 data = rd_reg32(info, IODR);
2742 data |= gpio.dmask & gpio.dir;
2743 data &= ~(gpio.dmask & ~gpio.dir);
2744 wr_reg32(info, IODR, data);
2745 }
2746 if (gpio.smask) {
2747 data = rd_reg32(info, IOVR);
2748 data |= gpio.smask & gpio.state;
2749 data &= ~(gpio.smask & ~gpio.state);
2750 wr_reg32(info, IOVR, data);
2751 }
2752 spin_unlock_irqrestore(&info->lock,flags);
2753
2754 return 0;
2755}
2756
2757/*
2758 * get general purpose IO pin state and direction
2759 */
2760static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2761{
2762 struct gpio_desc gpio;
2763 if (!info->gpio_present)
2764 return -EINVAL;
2765 gpio.state = rd_reg32(info, IOVR);
2766 gpio.smask = 0xffffffff;
2767 gpio.dir = rd_reg32(info, IODR);
2768 gpio.dmask = 0xffffffff;
2769 if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2770 return -EFAULT;
2771 DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
2772 info->device_name, gpio.state, gpio.dir));
2773 return 0;
2774}
2775
2776/*
2777 * conditional wait facility
2778 */
2779static void init_cond_wait(struct cond_wait *w, unsigned int data)
2780{
2781 init_waitqueue_head(&w->q);
2782 init_waitqueue_entry(&w->wait, current);
2783 w->data = data;
2784}
2785
2786static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
2787{
2788 set_current_state(TASK_INTERRUPTIBLE);
2789 add_wait_queue(&w->q, &w->wait);
2790 w->next = *head;
2791 *head = w;
2792}
2793
2794static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
2795{
2796 struct cond_wait *w, *prev;
2797 remove_wait_queue(&cw->q, &cw->wait);
2798 set_current_state(TASK_RUNNING);
2799 for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
2800 if (w == cw) {
2801 if (prev != NULL)
2802 prev->next = w->next;
2803 else
2804 *head = w->next;
2805 break;
2806 }
2807 }
2808}
2809
2810static void flush_cond_wait(struct cond_wait **head)
2811{
2812 while (*head != NULL) {
2813 wake_up_interruptible(&(*head)->q);
2814 *head = (*head)->next;
2815 }
2816}
2817
2818/*
2819 * wait for general purpose I/O pin(s) to enter specified state
2820 *
2821 * user_gpio fields:
2822 * state - bit indicates target pin state
2823 * smask - set bit indicates watched pin
2824 *
2825 * The wait ends when at least one watched pin enters the specified
2826 * state. When 0 (no error) is returned, user_gpio->state is set to the
2827 * state of all GPIO pins when the wait ends.
2828 *
2829 * Note: Each pin may be a dedicated input, dedicated output, or
2830 * configurable input/output. The number and configuration of pins
2831 * varies with the specific adapter model. Only input pins (dedicated
2832 * or configured) can be monitored with this function.
2833 */
2834static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2835{
2836 unsigned long flags;
2837 int rc = 0;
2838 struct gpio_desc gpio;
2839 struct cond_wait wait;
2840 u32 state;
2841
2842 if (!info->gpio_present)
2843 return -EINVAL;
2844 if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2845 return -EFAULT;
2846 DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
2847 info->device_name, gpio.state, gpio.smask));
2848 /* ignore output pins identified by set IODR bit */
2849 if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
2850 return -EINVAL;
2851 init_cond_wait(&wait, gpio.smask);
2852
2853 spin_lock_irqsave(&info->lock, flags);
2854 /* enable interrupts for watched pins */
2855 wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
2856 /* get current pin states */
2857 state = rd_reg32(info, IOVR);
2858
2859 if (gpio.smask & ~(state ^ gpio.state)) {
2860 /* already in target state */
2861 gpio.state = state;
2862 } else {
2863 /* wait for target state */
2864 add_cond_wait(&info->gpio_wait_q, &wait);
2865 spin_unlock_irqrestore(&info->lock, flags);
2866 schedule();
2867 if (signal_pending(current))
2868 rc = -ERESTARTSYS;
2869 else
2870 gpio.state = wait.data;
2871 spin_lock_irqsave(&info->lock, flags);
2872 remove_cond_wait(&info->gpio_wait_q, &wait);
2873 }
2874
2875 /* disable all GPIO interrupts if no waiting processes */
2876 if (info->gpio_wait_q == NULL)
2877 wr_reg32(info, IOER, 0);
2878 spin_unlock_irqrestore(&info->lock,flags);
2879
2880 if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2881 rc = -EFAULT;
2882 return rc;
2883}
2884
2653static int modem_input_wait(struct slgt_info *info,int arg) 2885static int modem_input_wait(struct slgt_info *info,int arg)
2654{ 2886{
2655 unsigned long flags; 2887 unsigned long flags;
@@ -3166,8 +3398,10 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
3166 } else { 3398 } else {
3167 port_array[0]->irq_requested = 1; 3399 port_array[0]->irq_requested = 1;
3168 adapter_test(port_array[0]); 3400 adapter_test(port_array[0]);
3169 for (i=1 ; i < port_count ; i++) 3401 for (i=1 ; i < port_count ; i++) {
3170 port_array[i]->init_error = port_array[0]->init_error; 3402 port_array[i]->init_error = port_array[0]->init_error;
3403 port_array[i]->gpio_present = port_array[0]->gpio_present;
3404 }
3171 } 3405 }
3172 } 3406 }
3173} 3407}
@@ -4301,7 +4535,7 @@ static int register_test(struct slgt_info *info)
4301 break; 4535 break;
4302 } 4536 }
4303 } 4537 }
4304 4538 info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
4305 info->init_error = rc ? 0 : DiagStatus_AddressFailure; 4539 info->init_error = rc ? 0 : DiagStatus_AddressFailure;
4306 return rc; 4540 return rc;
4307} 4541}
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 4c272189cd42..2546637a55c0 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -767,6 +767,7 @@ static int __init tlclk_init(void)
767 printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); 767 printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
768 return ret; 768 return ret;
769 } 769 }
770 tlclk_major = ret;
770 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); 771 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
771 if (!alarm_events) 772 if (!alarm_events)
772 goto out1; 773 goto out1;
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 48d795bb8c4b..811dadb9ce3e 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -543,14 +543,12 @@ void tty_ldisc_put(int disc)
543 struct tty_ldisc *ld; 543 struct tty_ldisc *ld;
544 unsigned long flags; 544 unsigned long flags;
545 545
546 if (disc < N_TTY || disc >= NR_LDISCS) 546 BUG_ON(disc < N_TTY || disc >= NR_LDISCS);
547 BUG();
548 547
549 spin_lock_irqsave(&tty_ldisc_lock, flags); 548 spin_lock_irqsave(&tty_ldisc_lock, flags);
550 ld = &tty_ldiscs[disc]; 549 ld = &tty_ldiscs[disc];
551 if(ld->refcount == 0) 550 BUG_ON(ld->refcount == 0);
552 BUG(); 551 ld->refcount--;
553 ld->refcount --;
554 module_put(ld->owner); 552 module_put(ld->owner);
555 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 553 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
556} 554}
@@ -645,8 +643,7 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
645{ 643{
646 unsigned long flags; 644 unsigned long flags;
647 645
648 if(ld == NULL) 646 BUG_ON(ld == NULL);
649 BUG();
650 647
651 spin_lock_irqsave(&tty_ldisc_lock, flags); 648 spin_lock_irqsave(&tty_ldisc_lock, flags);
652 if(ld->refcount == 0) 649 if(ld->refcount == 0)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index aed80e6aec6d..9b6ae7dc8b8a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -52,9 +52,8 @@ static void handle_update(void *data);
52 * changes to devices when the CPU clock speed changes. 52 * changes to devices when the CPU clock speed changes.
53 * The mutex locks both lists. 53 * The mutex locks both lists.
54 */ 54 */
55static struct notifier_block *cpufreq_policy_notifier_list; 55static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
56static struct notifier_block *cpufreq_transition_notifier_list; 56static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
57static DECLARE_RWSEM (cpufreq_notifier_rwsem);
58 57
59 58
60static LIST_HEAD(cpufreq_governor_list); 59static LIST_HEAD(cpufreq_governor_list);
@@ -247,8 +246,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
247 dprintk("notification %u of frequency transition to %u kHz\n", 246 dprintk("notification %u of frequency transition to %u kHz\n",
248 state, freqs->new); 247 state, freqs->new);
249 248
250 down_read(&cpufreq_notifier_rwsem);
251
252 policy = cpufreq_cpu_data[freqs->cpu]; 249 policy = cpufreq_cpu_data[freqs->cpu];
253 switch (state) { 250 switch (state) {
254 251
@@ -266,20 +263,19 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
266 freqs->old = policy->cur; 263 freqs->old = policy->cur;
267 } 264 }
268 } 265 }
269 notifier_call_chain(&cpufreq_transition_notifier_list, 266 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
270 CPUFREQ_PRECHANGE, freqs); 267 CPUFREQ_PRECHANGE, freqs);
271 adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 268 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
272 break; 269 break;
273 270
274 case CPUFREQ_POSTCHANGE: 271 case CPUFREQ_POSTCHANGE:
275 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 272 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
276 notifier_call_chain(&cpufreq_transition_notifier_list, 273 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
277 CPUFREQ_POSTCHANGE, freqs); 274 CPUFREQ_POSTCHANGE, freqs);
278 if (likely(policy) && likely(policy->cpu == freqs->cpu)) 275 if (likely(policy) && likely(policy->cpu == freqs->cpu))
279 policy->cur = freqs->new; 276 policy->cur = freqs->new;
280 break; 277 break;
281 } 278 }
282 up_read(&cpufreq_notifier_rwsem);
283} 279}
284EXPORT_SYMBOL_GPL(cpufreq_notify_transition); 280EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
285 281
@@ -1007,7 +1003,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
1007 freqs.old = cpu_policy->cur; 1003 freqs.old = cpu_policy->cur;
1008 freqs.new = cur_freq; 1004 freqs.new = cur_freq;
1009 1005
1010 notifier_call_chain(&cpufreq_transition_notifier_list, 1006 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
1011 CPUFREQ_SUSPENDCHANGE, &freqs); 1007 CPUFREQ_SUSPENDCHANGE, &freqs);
1012 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs); 1008 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1013 1009
@@ -1088,7 +1084,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
1088 freqs.old = cpu_policy->cur; 1084 freqs.old = cpu_policy->cur;
1089 freqs.new = cur_freq; 1085 freqs.new = cur_freq;
1090 1086
1091 notifier_call_chain(&cpufreq_transition_notifier_list, 1087 blocking_notifier_call_chain(
1088 &cpufreq_transition_notifier_list,
1092 CPUFREQ_RESUMECHANGE, &freqs); 1089 CPUFREQ_RESUMECHANGE, &freqs);
1093 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); 1090 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1094 1091
@@ -1125,24 +1122,24 @@ static struct sysdev_driver cpufreq_sysdev_driver = {
1125 * changes in cpufreq policy. 1122 * changes in cpufreq policy.
1126 * 1123 *
1127 * This function may sleep, and has the same return conditions as 1124 * This function may sleep, and has the same return conditions as
1128 * notifier_chain_register. 1125 * blocking_notifier_chain_register.
1129 */ 1126 */
1130int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 1127int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1131{ 1128{
1132 int ret; 1129 int ret;
1133 1130
1134 down_write(&cpufreq_notifier_rwsem);
1135 switch (list) { 1131 switch (list) {
1136 case CPUFREQ_TRANSITION_NOTIFIER: 1132 case CPUFREQ_TRANSITION_NOTIFIER:
1137 ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); 1133 ret = blocking_notifier_chain_register(
1134 &cpufreq_transition_notifier_list, nb);
1138 break; 1135 break;
1139 case CPUFREQ_POLICY_NOTIFIER: 1136 case CPUFREQ_POLICY_NOTIFIER:
1140 ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); 1137 ret = blocking_notifier_chain_register(
1138 &cpufreq_policy_notifier_list, nb);
1141 break; 1139 break;
1142 default: 1140 default:
1143 ret = -EINVAL; 1141 ret = -EINVAL;
1144 } 1142 }
1145 up_write(&cpufreq_notifier_rwsem);
1146 1143
1147 return ret; 1144 return ret;
1148} 1145}
@@ -1157,24 +1154,24 @@ EXPORT_SYMBOL(cpufreq_register_notifier);
1157 * Remove a driver from the CPU frequency notifier list. 1154 * Remove a driver from the CPU frequency notifier list.
1158 * 1155 *
1159 * This function may sleep, and has the same return conditions as 1156 * This function may sleep, and has the same return conditions as
1160 * notifier_chain_unregister. 1157 * blocking_notifier_chain_unregister.
1161 */ 1158 */
1162int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 1159int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1163{ 1160{
1164 int ret; 1161 int ret;
1165 1162
1166 down_write(&cpufreq_notifier_rwsem);
1167 switch (list) { 1163 switch (list) {
1168 case CPUFREQ_TRANSITION_NOTIFIER: 1164 case CPUFREQ_TRANSITION_NOTIFIER:
1169 ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); 1165 ret = blocking_notifier_chain_unregister(
1166 &cpufreq_transition_notifier_list, nb);
1170 break; 1167 break;
1171 case CPUFREQ_POLICY_NOTIFIER: 1168 case CPUFREQ_POLICY_NOTIFIER:
1172 ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); 1169 ret = blocking_notifier_chain_unregister(
1170 &cpufreq_policy_notifier_list, nb);
1173 break; 1171 break;
1174 default: 1172 default:
1175 ret = -EINVAL; 1173 ret = -EINVAL;
1176 } 1174 }
1177 up_write(&cpufreq_notifier_rwsem);
1178 1175
1179 return ret; 1176 return ret;
1180} 1177}
@@ -1346,29 +1343,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1346 if (ret) 1343 if (ret)
1347 goto error_out; 1344 goto error_out;
1348 1345
1349 down_read(&cpufreq_notifier_rwsem);
1350
1351 /* adjust if necessary - all reasons */ 1346 /* adjust if necessary - all reasons */
1352 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, 1347 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1353 policy); 1348 CPUFREQ_ADJUST, policy);
1354 1349
1355 /* adjust if necessary - hardware incompatibility*/ 1350 /* adjust if necessary - hardware incompatibility*/
1356 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, 1351 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1357 policy); 1352 CPUFREQ_INCOMPATIBLE, policy);
1358 1353
1359 /* verify the cpu speed can be set within this limit, 1354 /* verify the cpu speed can be set within this limit,
1360 which might be different to the first one */ 1355 which might be different to the first one */
1361 ret = cpufreq_driver->verify(policy); 1356 ret = cpufreq_driver->verify(policy);
1362 if (ret) { 1357 if (ret)
1363 up_read(&cpufreq_notifier_rwsem);
1364 goto error_out; 1358 goto error_out;
1365 }
1366 1359
1367 /* notification of the new policy */ 1360 /* notification of the new policy */
1368 notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, 1361 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1369 policy); 1362 CPUFREQ_NOTIFY, policy);
1370
1371 up_read(&cpufreq_notifier_rwsem);
1372 1363
1373 data->min = policy->min; 1364 data->min = policy->min;
1374 data->max = policy->max; 1365 data->max = policy->max;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index ac38766b2583..037f6bf4543c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -35,12 +35,7 @@
35 */ 35 */
36 36
37#define DEF_FREQUENCY_UP_THRESHOLD (80) 37#define DEF_FREQUENCY_UP_THRESHOLD (80)
38#define MIN_FREQUENCY_UP_THRESHOLD (0)
39#define MAX_FREQUENCY_UP_THRESHOLD (100)
40
41#define DEF_FREQUENCY_DOWN_THRESHOLD (20) 38#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
42#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
43#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
44 39
45/* 40/*
46 * The polling frequency of this governor depends on the capability of 41 * The polling frequency of this governor depends on the capability of
@@ -53,10 +48,14 @@
53 * All times here are in uS. 48 * All times here are in uS.
54 */ 49 */
55static unsigned int def_sampling_rate; 50static unsigned int def_sampling_rate;
56#define MIN_SAMPLING_RATE (def_sampling_rate / 2) 51#define MIN_SAMPLING_RATE_RATIO (2)
52/* for correct statistics, we need at least 10 ticks between each measure */
53#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
54#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
57#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 55#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
58#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000) 56#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
59#define DEF_SAMPLING_DOWN_FACTOR (5) 57#define DEF_SAMPLING_DOWN_FACTOR (1)
58#define MAX_SAMPLING_DOWN_FACTOR (10)
60#define TRANSITION_LATENCY_LIMIT (10 * 1000) 59#define TRANSITION_LATENCY_LIMIT (10 * 1000)
61 60
62static void do_dbs_timer(void *data); 61static void do_dbs_timer(void *data);
@@ -66,6 +65,8 @@ struct cpu_dbs_info_s {
66 unsigned int prev_cpu_idle_up; 65 unsigned int prev_cpu_idle_up;
67 unsigned int prev_cpu_idle_down; 66 unsigned int prev_cpu_idle_down;
68 unsigned int enable; 67 unsigned int enable;
68 unsigned int down_skip;
69 unsigned int requested_freq;
69}; 70};
70static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 71static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
71 72
@@ -87,6 +88,8 @@ static struct dbs_tuners dbs_tuners_ins = {
87 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 88 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
88 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, 89 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
89 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 90 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
91 .ignore_nice = 0,
92 .freq_step = 5,
90}; 93};
91 94
92static inline unsigned int get_cpu_idle_time(unsigned int cpu) 95static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -136,7 +139,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
136 unsigned int input; 139 unsigned int input;
137 int ret; 140 int ret;
138 ret = sscanf (buf, "%u", &input); 141 ret = sscanf (buf, "%u", &input);
139 if (ret != 1 ) 142 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
140 return -EINVAL; 143 return -EINVAL;
141 144
142 mutex_lock(&dbs_mutex); 145 mutex_lock(&dbs_mutex);
@@ -173,8 +176,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
173 ret = sscanf (buf, "%u", &input); 176 ret = sscanf (buf, "%u", &input);
174 177
175 mutex_lock(&dbs_mutex); 178 mutex_lock(&dbs_mutex);
176 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 179 if (ret != 1 || input > 100 || input < 0 ||
177 input < MIN_FREQUENCY_UP_THRESHOLD ||
178 input <= dbs_tuners_ins.down_threshold) { 180 input <= dbs_tuners_ins.down_threshold) {
179 mutex_unlock(&dbs_mutex); 181 mutex_unlock(&dbs_mutex);
180 return -EINVAL; 182 return -EINVAL;
@@ -194,8 +196,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
194 ret = sscanf (buf, "%u", &input); 196 ret = sscanf (buf, "%u", &input);
195 197
196 mutex_lock(&dbs_mutex); 198 mutex_lock(&dbs_mutex);
197 if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || 199 if (ret != 1 || input > 100 || input < 0 ||
198 input < MIN_FREQUENCY_DOWN_THRESHOLD ||
199 input >= dbs_tuners_ins.up_threshold) { 200 input >= dbs_tuners_ins.up_threshold) {
200 mutex_unlock(&dbs_mutex); 201 mutex_unlock(&dbs_mutex);
201 return -EINVAL; 202 return -EINVAL;
@@ -297,31 +298,17 @@ static struct attribute_group dbs_attr_group = {
297static void dbs_check_cpu(int cpu) 298static void dbs_check_cpu(int cpu)
298{ 299{
299 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 300 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
301 unsigned int tmp_idle_ticks, total_idle_ticks;
300 unsigned int freq_step; 302 unsigned int freq_step;
301 unsigned int freq_down_sampling_rate; 303 unsigned int freq_down_sampling_rate;
302 static int down_skip[NR_CPUS]; 304 struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
303 static int requested_freq[NR_CPUS];
304 static unsigned short init_flag = 0;
305 struct cpu_dbs_info_s *this_dbs_info;
306 struct cpu_dbs_info_s *dbs_info;
307
308 struct cpufreq_policy *policy; 305 struct cpufreq_policy *policy;
309 unsigned int j;
310 306
311 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
312 if (!this_dbs_info->enable) 307 if (!this_dbs_info->enable)
313 return; 308 return;
314 309
315 policy = this_dbs_info->cur_policy; 310 policy = this_dbs_info->cur_policy;
316 311
317 if ( init_flag == 0 ) {
318 for_each_online_cpu(j) {
319 dbs_info = &per_cpu(cpu_dbs_info, j);
320 requested_freq[j] = dbs_info->cur_policy->cur;
321 }
322 init_flag = 1;
323 }
324
325 /* 312 /*
326 * The default safe range is 20% to 80% 313 * The default safe range is 20% to 80%
327 * Every sampling_rate, we check 314 * Every sampling_rate, we check
@@ -337,39 +324,29 @@ static void dbs_check_cpu(int cpu)
337 */ 324 */
338 325
339 /* Check for frequency increase */ 326 /* Check for frequency increase */
340
341 idle_ticks = UINT_MAX; 327 idle_ticks = UINT_MAX;
342 for_each_cpu_mask(j, policy->cpus) {
343 unsigned int tmp_idle_ticks, total_idle_ticks;
344 struct cpu_dbs_info_s *j_dbs_info;
345 328
346 j_dbs_info = &per_cpu(cpu_dbs_info, j); 329 /* Check for frequency increase */
347 /* Check for frequency increase */ 330 total_idle_ticks = get_cpu_idle_time(cpu);
348 total_idle_ticks = get_cpu_idle_time(j); 331 tmp_idle_ticks = total_idle_ticks -
349 tmp_idle_ticks = total_idle_ticks - 332 this_dbs_info->prev_cpu_idle_up;
350 j_dbs_info->prev_cpu_idle_up; 333 this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
351 j_dbs_info->prev_cpu_idle_up = total_idle_ticks; 334
352 335 if (tmp_idle_ticks < idle_ticks)
353 if (tmp_idle_ticks < idle_ticks) 336 idle_ticks = tmp_idle_ticks;
354 idle_ticks = tmp_idle_ticks;
355 }
356 337
357 /* Scale idle ticks by 100 and compare with up and down ticks */ 338 /* Scale idle ticks by 100 and compare with up and down ticks */
358 idle_ticks *= 100; 339 idle_ticks *= 100;
359 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * 340 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
360 usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 341 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
361 342
362 if (idle_ticks < up_idle_ticks) { 343 if (idle_ticks < up_idle_ticks) {
363 down_skip[cpu] = 0; 344 this_dbs_info->down_skip = 0;
364 for_each_cpu_mask(j, policy->cpus) { 345 this_dbs_info->prev_cpu_idle_down =
365 struct cpu_dbs_info_s *j_dbs_info; 346 this_dbs_info->prev_cpu_idle_up;
366 347
367 j_dbs_info = &per_cpu(cpu_dbs_info, j);
368 j_dbs_info->prev_cpu_idle_down =
369 j_dbs_info->prev_cpu_idle_up;
370 }
371 /* if we are already at full speed then break out early */ 348 /* if we are already at full speed then break out early */
372 if (requested_freq[cpu] == policy->max) 349 if (this_dbs_info->requested_freq == policy->max)
373 return; 350 return;
374 351
375 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; 352 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
@@ -378,49 +355,45 @@ static void dbs_check_cpu(int cpu)
378 if (unlikely(freq_step == 0)) 355 if (unlikely(freq_step == 0))
379 freq_step = 5; 356 freq_step = 5;
380 357
381 requested_freq[cpu] += freq_step; 358 this_dbs_info->requested_freq += freq_step;
382 if (requested_freq[cpu] > policy->max) 359 if (this_dbs_info->requested_freq > policy->max)
383 requested_freq[cpu] = policy->max; 360 this_dbs_info->requested_freq = policy->max;
384 361
385 __cpufreq_driver_target(policy, requested_freq[cpu], 362 __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
386 CPUFREQ_RELATION_H); 363 CPUFREQ_RELATION_H);
387 return; 364 return;
388 } 365 }
389 366
390 /* Check for frequency decrease */ 367 /* Check for frequency decrease */
391 down_skip[cpu]++; 368 this_dbs_info->down_skip++;
392 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 369 if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
393 return; 370 return;
394 371
395 idle_ticks = UINT_MAX; 372 /* Check for frequency decrease */
396 for_each_cpu_mask(j, policy->cpus) { 373 total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
397 unsigned int tmp_idle_ticks, total_idle_ticks; 374 tmp_idle_ticks = total_idle_ticks -
398 struct cpu_dbs_info_s *j_dbs_info; 375 this_dbs_info->prev_cpu_idle_down;
376 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
399 377
400 j_dbs_info = &per_cpu(cpu_dbs_info, j); 378 if (tmp_idle_ticks < idle_ticks)
401 total_idle_ticks = j_dbs_info->prev_cpu_idle_up; 379 idle_ticks = tmp_idle_ticks;
402 tmp_idle_ticks = total_idle_ticks -
403 j_dbs_info->prev_cpu_idle_down;
404 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
405
406 if (tmp_idle_ticks < idle_ticks)
407 idle_ticks = tmp_idle_ticks;
408 }
409 380
410 /* Scale idle ticks by 100 and compare with up and down ticks */ 381 /* Scale idle ticks by 100 and compare with up and down ticks */
411 idle_ticks *= 100; 382 idle_ticks *= 100;
412 down_skip[cpu] = 0; 383 this_dbs_info->down_skip = 0;
413 384
414 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * 385 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
415 dbs_tuners_ins.sampling_down_factor; 386 dbs_tuners_ins.sampling_down_factor;
416 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 387 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
417 usecs_to_jiffies(freq_down_sampling_rate); 388 usecs_to_jiffies(freq_down_sampling_rate);
418 389
419 if (idle_ticks > down_idle_ticks) { 390 if (idle_ticks > down_idle_ticks) {
420 /* if we are already at the lowest speed then break out early 391 /*
392 * if we are already at the lowest speed then break out early
421 * or if we 'cannot' reduce the speed as the user might want 393 * or if we 'cannot' reduce the speed as the user might want
422 * freq_step to be zero */ 394 * freq_step to be zero
423 if (requested_freq[cpu] == policy->min 395 */
396 if (this_dbs_info->requested_freq == policy->min
424 || dbs_tuners_ins.freq_step == 0) 397 || dbs_tuners_ins.freq_step == 0)
425 return; 398 return;
426 399
@@ -430,13 +403,12 @@ static void dbs_check_cpu(int cpu)
430 if (unlikely(freq_step == 0)) 403 if (unlikely(freq_step == 0))
431 freq_step = 5; 404 freq_step = 5;
432 405
433 requested_freq[cpu] -= freq_step; 406 this_dbs_info->requested_freq -= freq_step;
434 if (requested_freq[cpu] < policy->min) 407 if (this_dbs_info->requested_freq < policy->min)
435 requested_freq[cpu] = policy->min; 408 this_dbs_info->requested_freq = policy->min;
436 409
437 __cpufreq_driver_target(policy, 410 __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
438 requested_freq[cpu], 411 CPUFREQ_RELATION_H);
439 CPUFREQ_RELATION_H);
440 return; 412 return;
441 } 413 }
442} 414}
@@ -493,11 +465,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
493 j_dbs_info = &per_cpu(cpu_dbs_info, j); 465 j_dbs_info = &per_cpu(cpu_dbs_info, j);
494 j_dbs_info->cur_policy = policy; 466 j_dbs_info->cur_policy = policy;
495 467
496 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); 468 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
497 j_dbs_info->prev_cpu_idle_down 469 j_dbs_info->prev_cpu_idle_down
498 = j_dbs_info->prev_cpu_idle_up; 470 = j_dbs_info->prev_cpu_idle_up;
499 } 471 }
500 this_dbs_info->enable = 1; 472 this_dbs_info->enable = 1;
473 this_dbs_info->down_skip = 0;
474 this_dbs_info->requested_freq = policy->cur;
501 sysfs_create_group(&policy->kobj, &dbs_attr_group); 475 sysfs_create_group(&policy->kobj, &dbs_attr_group);
502 dbs_enable++; 476 dbs_enable++;
503 /* 477 /*
@@ -507,16 +481,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
507 if (dbs_enable == 1) { 481 if (dbs_enable == 1) {
508 unsigned int latency; 482 unsigned int latency;
509 /* policy latency is in nS. Convert it to uS first */ 483 /* policy latency is in nS. Convert it to uS first */
484 latency = policy->cpuinfo.transition_latency / 1000;
485 if (latency == 0)
486 latency = 1;
510 487
511 latency = policy->cpuinfo.transition_latency; 488 def_sampling_rate = 10 * latency *
512 if (latency < 1000)
513 latency = 1000;
514
515 def_sampling_rate = (latency / 1000) *
516 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; 489 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
490
491 if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
492 def_sampling_rate = MIN_STAT_SAMPLING_RATE;
493
517 dbs_tuners_ins.sampling_rate = def_sampling_rate; 494 dbs_tuners_ins.sampling_rate = def_sampling_rate;
518 dbs_tuners_ins.ignore_nice = 0;
519 dbs_tuners_ins.freq_step = 5;
520 495
521 dbs_timer_init(); 496 dbs_timer_init();
522 } 497 }
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 69aa1db8336c..956d121cb161 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -84,6 +84,7 @@ struct dbs_tuners {
84static struct dbs_tuners dbs_tuners_ins = { 84static struct dbs_tuners dbs_tuners_ins = {
85 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 85 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
86 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 86 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
87 .ignore_nice = 0,
87}; 88};
88 89
89static inline unsigned int get_cpu_idle_time(unsigned int cpu) 90static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -350,6 +351,9 @@ static void dbs_check_cpu(int cpu)
350 freq_next = (freq_next * policy->cur) / 351 freq_next = (freq_next * policy->cur) /
351 (dbs_tuners_ins.up_threshold - 10); 352 (dbs_tuners_ins.up_threshold - 10);
352 353
354 if (freq_next < policy->min)
355 freq_next = policy->min;
356
353 if (freq_next <= ((policy->cur * 95) / 100)) 357 if (freq_next <= ((policy->cur * 95) / 100))
354 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); 358 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
355} 359}
@@ -395,8 +399,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
395 return -EINVAL; 399 return -EINVAL;
396 400
397 if (policy->cpuinfo.transition_latency > 401 if (policy->cpuinfo.transition_latency >
398 (TRANSITION_LATENCY_LIMIT * 1000)) 402 (TRANSITION_LATENCY_LIMIT * 1000)) {
403 printk(KERN_WARNING "ondemand governor failed to load "
404 "due to too long transition latency\n");
399 return -EINVAL; 405 return -EINVAL;
406 }
400 if (this_dbs_info->enable) /* Already enabled */ 407 if (this_dbs_info->enable) /* Already enabled */
401 break; 408 break;
402 409
@@ -431,8 +438,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
431 def_sampling_rate = MIN_STAT_SAMPLING_RATE; 438 def_sampling_rate = MIN_STAT_SAMPLING_RATE;
432 439
433 dbs_tuners_ins.sampling_rate = def_sampling_rate; 440 dbs_tuners_ins.sampling_rate = def_sampling_rate;
434 dbs_tuners_ins.ignore_nice = 0;
435
436 dbs_timer_init(); 441 dbs_timer_init();
437 } 442 }
438 443
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 52f3eb45d2b9..b582d0cdc24f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -64,35 +64,35 @@ config EDAC_AMD76X
64 64
65config EDAC_E7XXX 65config EDAC_E7XXX
66 tristate "Intel e7xxx (e7205, e7500, e7501, e7505)" 66 tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
67 depends on EDAC_MM_EDAC && PCI 67 depends on EDAC_MM_EDAC && PCI && X86_32
68 help 68 help
69 Support for error detection and correction on the Intel 69 Support for error detection and correction on the Intel
70 E7205, E7500, E7501 and E7505 server chipsets. 70 E7205, E7500, E7501 and E7505 server chipsets.
71 71
72config EDAC_E752X 72config EDAC_E752X
73 tristate "Intel e752x (e7520, e7525, e7320)" 73 tristate "Intel e752x (e7520, e7525, e7320)"
74 depends on EDAC_MM_EDAC && PCI 74 depends on EDAC_MM_EDAC && PCI && X86
75 help 75 help
76 Support for error detection and correction on the Intel 76 Support for error detection and correction on the Intel
77 E7520, E7525, E7320 server chipsets. 77 E7520, E7525, E7320 server chipsets.
78 78
79config EDAC_I82875P 79config EDAC_I82875P
80 tristate "Intel 82875p (D82875P, E7210)" 80 tristate "Intel 82875p (D82875P, E7210)"
81 depends on EDAC_MM_EDAC && PCI 81 depends on EDAC_MM_EDAC && PCI && X86_32
82 help 82 help
83 Support for error detection and correction on the Intel 83 Support for error detection and correction on the Intel
84 DP82785P and E7210 server chipsets. 84 DP82785P and E7210 server chipsets.
85 85
86config EDAC_I82860 86config EDAC_I82860
87 tristate "Intel 82860" 87 tristate "Intel 82860"
88 depends on EDAC_MM_EDAC && PCI 88 depends on EDAC_MM_EDAC && PCI && X86_32
89 help 89 help
90 Support for error detection and correction on the Intel 90 Support for error detection and correction on the Intel
91 82860 chipset. 91 82860 chipset.
92 92
93config EDAC_R82600 93config EDAC_R82600
94 tristate "Radisys 82600 embedded chipset" 94 tristate "Radisys 82600 embedded chipset"
95 depends on EDAC_MM_EDAC 95 depends on EDAC_MM_EDAC && PCI && X86_32
96 help 96 help
97 Support for error detection and correction on the Radisys 97 Support for error detection and correction on the Radisys
98 82600 embedded chipset. 98 82600 embedded chipset.
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 2fcc8120b53c..53423ad6d4a3 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -12,25 +12,26 @@
12 * 12 *
13 */ 13 */
14 14
15
16#include <linux/config.h> 15#include <linux/config.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/init.h> 17#include <linux/init.h>
19
20#include <linux/pci.h> 18#include <linux/pci.h>
21#include <linux/pci_ids.h> 19#include <linux/pci_ids.h>
22
23#include <linux/slab.h> 20#include <linux/slab.h>
24
25#include "edac_mc.h" 21#include "edac_mc.h"
26 22
23#define amd76x_printk(level, fmt, arg...) \
24 edac_printk(level, "amd76x", fmt, ##arg)
25
26#define amd76x_mc_printk(mci, level, fmt, arg...) \
27 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
27 28
28#define AMD76X_NR_CSROWS 8 29#define AMD76X_NR_CSROWS 8
29#define AMD76X_NR_CHANS 1 30#define AMD76X_NR_CHANS 1
30#define AMD76X_NR_DIMMS 4 31#define AMD76X_NR_DIMMS 4
31 32
32
33/* AMD 76x register addresses - device 0 function 0 - PCI bridge */ 33/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
34
34#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) 35#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
35 * 36 *
36 * 31:16 reserved 37 * 31:16 reserved
@@ -42,6 +43,7 @@
42 * 7:4 UE cs row 43 * 7:4 UE cs row
43 * 3:0 CE cs row 44 * 3:0 CE cs row
44 */ 45 */
46
45#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) 47#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
46 * 48 *
47 * 31:26 clock disable 5 - 0 49 * 31:26 clock disable 5 - 0
@@ -56,6 +58,7 @@
56 * 15:8 reserved 58 * 15:8 reserved
57 * 7:0 x4 mode enable 7 - 0 59 * 7:0 x4 mode enable 7 - 0
58 */ 60 */
61
59#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) 62#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
60 * 63 *
61 * 31:23 chip-select base 64 * 31:23 chip-select base
@@ -66,29 +69,28 @@
66 * 0 chip-select enable 69 * 0 chip-select enable
67 */ 70 */
68 71
69
70struct amd76x_error_info { 72struct amd76x_error_info {
71 u32 ecc_mode_status; 73 u32 ecc_mode_status;
72}; 74};
73 75
74
75enum amd76x_chips { 76enum amd76x_chips {
76 AMD761 = 0, 77 AMD761 = 0,
77 AMD762 78 AMD762
78}; 79};
79 80
80
81struct amd76x_dev_info { 81struct amd76x_dev_info {
82 const char *ctl_name; 82 const char *ctl_name;
83}; 83};
84 84
85
86static const struct amd76x_dev_info amd76x_devs[] = { 85static const struct amd76x_dev_info amd76x_devs[] = {
87 [AMD761] = {.ctl_name = "AMD761"}, 86 [AMD761] = {
88 [AMD762] = {.ctl_name = "AMD762"}, 87 .ctl_name = "AMD761"
88 },
89 [AMD762] = {
90 .ctl_name = "AMD762"
91 },
89}; 92};
90 93
91
92/** 94/**
93 * amd76x_get_error_info - fetch error information 95 * amd76x_get_error_info - fetch error information
94 * @mci: Memory controller 96 * @mci: Memory controller
@@ -97,23 +99,21 @@ static const struct amd76x_dev_info amd76x_devs[] = {
97 * Fetch and store the AMD76x ECC status. Clear pending status 99 * Fetch and store the AMD76x ECC status. Clear pending status
98 * on the chip so that further errors will be reported 100 * on the chip so that further errors will be reported
99 */ 101 */
100 102static void amd76x_get_error_info(struct mem_ctl_info *mci,
101static void amd76x_get_error_info (struct mem_ctl_info *mci, 103 struct amd76x_error_info *info)
102 struct amd76x_error_info *info)
103{ 104{
104 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, 105 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
105 &info->ecc_mode_status); 106 &info->ecc_mode_status);
106 107
107 if (info->ecc_mode_status & BIT(8)) 108 if (info->ecc_mode_status & BIT(8))
108 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 109 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
109 (u32) BIT(8), (u32) BIT(8)); 110 (u32) BIT(8), (u32) BIT(8));
110 111
111 if (info->ecc_mode_status & BIT(9)) 112 if (info->ecc_mode_status & BIT(9))
112 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 113 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
113 (u32) BIT(9), (u32) BIT(9)); 114 (u32) BIT(9), (u32) BIT(9));
114} 115}
115 116
116
117/** 117/**
118 * amd76x_process_error_info - Error check 118 * amd76x_process_error_info - Error check
119 * @mci: Memory controller 119 * @mci: Memory controller
@@ -124,8 +124,7 @@ static void amd76x_get_error_info (struct mem_ctl_info *mci,
124 * A return of 1 indicates an error. Also if handle_errors is true 124 * A return of 1 indicates an error. Also if handle_errors is true
125 * then attempt to handle and clean up after the error 125 * then attempt to handle and clean up after the error
126 */ 126 */
127 127static int amd76x_process_error_info(struct mem_ctl_info *mci,
128static int amd76x_process_error_info (struct mem_ctl_info *mci,
129 struct amd76x_error_info *info, int handle_errors) 128 struct amd76x_error_info *info, int handle_errors)
130{ 129{
131 int error_found; 130 int error_found;
@@ -141,9 +140,8 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
141 140
142 if (handle_errors) { 141 if (handle_errors) {
143 row = (info->ecc_mode_status >> 4) & 0xf; 142 row = (info->ecc_mode_status >> 4) & 0xf;
144 edac_mc_handle_ue(mci, 143 edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
145 mci->csrows[row].first_page, 0, row, 144 row, mci->ctl_name);
146 mci->ctl_name);
147 } 145 }
148 } 146 }
149 147
@@ -155,11 +153,11 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
155 153
156 if (handle_errors) { 154 if (handle_errors) {
157 row = info->ecc_mode_status & 0xf; 155 row = info->ecc_mode_status & 0xf;
158 edac_mc_handle_ce(mci, 156 edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
159 mci->csrows[row].first_page, 0, 0, row, 0, 157 0, row, 0, mci->ctl_name);
160 mci->ctl_name);
161 } 158 }
162 } 159 }
160
163 return error_found; 161 return error_found;
164} 162}
165 163
@@ -170,16 +168,14 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
170 * Called by the poll handlers this function reads the status 168 * Called by the poll handlers this function reads the status
171 * from the controller and checks for errors. 169 * from the controller and checks for errors.
172 */ 170 */
173
174static void amd76x_check(struct mem_ctl_info *mci) 171static void amd76x_check(struct mem_ctl_info *mci)
175{ 172{
176 struct amd76x_error_info info; 173 struct amd76x_error_info info;
177 debugf3("MC: " __FILE__ ": %s()\n", __func__); 174 debugf3("%s()\n", __func__);
178 amd76x_get_error_info(mci, &info); 175 amd76x_get_error_info(mci, &info);
179 amd76x_process_error_info(mci, &info, 1); 176 amd76x_process_error_info(mci, &info, 1);
180} 177}
181 178
182
183/** 179/**
184 * amd76x_probe1 - Perform set up for detected device 180 * amd76x_probe1 - Perform set up for detected device
185 * @pdev; PCI device detected 181 * @pdev; PCI device detected
@@ -189,7 +185,6 @@ static void amd76x_check(struct mem_ctl_info *mci)
189 * controller status reporting. We configure and set up the 185 * controller status reporting. We configure and set up the
190 * memory controller reporting and claim the device. 186 * memory controller reporting and claim the device.
191 */ 187 */
192
193static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) 188static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
194{ 189{
195 int rc = -ENODEV; 190 int rc = -ENODEV;
@@ -203,12 +198,11 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
203 }; 198 };
204 u32 ems; 199 u32 ems;
205 u32 ems_mode; 200 u32 ems_mode;
201 struct amd76x_error_info discard;
206 202
207 debugf0("MC: " __FILE__ ": %s()\n", __func__); 203 debugf0("%s()\n", __func__);
208
209 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 204 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
210 ems_mode = (ems >> 10) & 0x3; 205 ems_mode = (ems >> 10) & 0x3;
211
212 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); 206 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
213 207
214 if (mci == NULL) { 208 if (mci == NULL) {
@@ -216,16 +210,13 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
216 goto fail; 210 goto fail;
217 } 211 }
218 212
219 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 213 debugf0("%s(): mci = %p\n", __func__, mci);
220 214 mci->pdev = pdev;
221 mci->pdev = pci_dev_get(pdev);
222 mci->mtype_cap = MEM_FLAG_RDDR; 215 mci->mtype_cap = MEM_FLAG_RDDR;
223
224 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
225 mci->edac_cap = ems_mode ? 217 mci->edac_cap = ems_mode ?
226 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; 218 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
227 219 mci->mod_name = EDAC_MOD_STR;
228 mci->mod_name = BS_MOD_STR;
229 mci->mod_ver = "$Revision: 1.4.2.5 $"; 220 mci->mod_ver = "$Revision: 1.4.2.5 $";
230 mci->ctl_name = amd76x_devs[dev_idx].ctl_name; 221 mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
231 mci->edac_check = amd76x_check; 222 mci->edac_check = amd76x_check;
@@ -240,18 +231,15 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
240 231
241 /* find the DRAM Chip Select Base address and mask */ 232 /* find the DRAM Chip Select Base address and mask */
242 pci_read_config_dword(mci->pdev, 233 pci_read_config_dword(mci->pdev,
243 AMD76X_MEM_BASE_ADDR + (index * 4), 234 AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
244 &mba);
245 235
246 if (!(mba & BIT(0))) 236 if (!(mba & BIT(0)))
247 continue; 237 continue;
248 238
249 mba_base = mba & 0xff800000UL; 239 mba_base = mba & 0xff800000UL;
250 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; 240 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
251
252 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS, 241 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
253 &dms); 242 &dms);
254
255 csrow->first_page = mba_base >> PAGE_SHIFT; 243 csrow->first_page = mba_base >> PAGE_SHIFT;
256 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; 244 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
257 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 245 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
@@ -262,40 +250,33 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
262 csrow->edac_mode = ems_modes[ems_mode]; 250 csrow->edac_mode = ems_modes[ems_mode];
263 } 251 }
264 252
265 /* clear counters */ 253 amd76x_get_error_info(mci, &discard); /* clear counters */
266 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
267 (u32) (0x3 << 8));
268 254
269 if (edac_mc_add_mc(mci)) { 255 if (edac_mc_add_mc(mci)) {
270 debugf3("MC: " __FILE__ 256 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
271 ": %s(): failed edac_mc_add_mc()\n", __func__);
272 goto fail; 257 goto fail;
273 } 258 }
274 259
275 /* get this far and it's successful */ 260 /* get this far and it's successful */
276 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 261 debugf3("%s(): success\n", __func__);
277 return 0; 262 return 0;
278 263
279fail: 264fail:
280 if (mci) { 265 if (mci != NULL)
281 if(mci->pdev)
282 pci_dev_put(mci->pdev);
283 edac_mc_free(mci); 266 edac_mc_free(mci);
284 }
285 return rc; 267 return rc;
286} 268}
287 269
288/* returns count (>= 0), or negative on error */ 270/* returns count (>= 0), or negative on error */
289static int __devinit amd76x_init_one(struct pci_dev *pdev, 271static int __devinit amd76x_init_one(struct pci_dev *pdev,
290 const struct pci_device_id *ent) 272 const struct pci_device_id *ent)
291{ 273{
292 debugf0("MC: " __FILE__ ": %s()\n", __func__); 274 debugf0("%s()\n", __func__);
293 275
294 /* don't need to call pci_device_enable() */ 276 /* don't need to call pci_device_enable() */
295 return amd76x_probe1(pdev, ent->driver_data); 277 return amd76x_probe1(pdev, ent->driver_data);
296} 278}
297 279
298
299/** 280/**
300 * amd76x_remove_one - driver shutdown 281 * amd76x_remove_one - driver shutdown
301 * @pdev: PCI device being handed back 282 * @pdev: PCI device being handed back
@@ -304,35 +285,36 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
304 * structure for the device then delete the mci and free the 285 * structure for the device then delete the mci and free the
305 * resources. 286 * resources.
306 */ 287 */
307
308static void __devexit amd76x_remove_one(struct pci_dev *pdev) 288static void __devexit amd76x_remove_one(struct pci_dev *pdev)
309{ 289{
310 struct mem_ctl_info *mci; 290 struct mem_ctl_info *mci;
311 291
312 debugf0(__FILE__ ": %s()\n", __func__); 292 debugf0("%s()\n", __func__);
313 293
314 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) 294 if ((mci = edac_mc_del_mc(pdev)) == NULL)
315 return; 295 return;
316 if (edac_mc_del_mc(mci)) 296
317 return;
318 pci_dev_put(mci->pdev);
319 edac_mc_free(mci); 297 edac_mc_free(mci);
320} 298}
321 299
322
323static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { 300static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
324 {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 301 {
325 AMD762}, 302 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
326 {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 303 AMD762
327 AMD761}, 304 },
328 {0,} /* 0 terminated list. */ 305 {
306 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 AMD761
308 },
309 {
310 0,
311 } /* 0 terminated list. */
329}; 312};
330 313
331MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); 314MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
332 315
333
334static struct pci_driver amd76x_driver = { 316static struct pci_driver amd76x_driver = {
335 .name = BS_MOD_STR, 317 .name = EDAC_MOD_STR,
336 .probe = amd76x_init_one, 318 .probe = amd76x_init_one,
337 .remove = __devexit_p(amd76x_remove_one), 319 .remove = __devexit_p(amd76x_remove_one),
338 .id_table = amd76x_pci_tbl, 320 .id_table = amd76x_pci_tbl,
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index c454ded2b060..66572c5323ad 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -17,18 +17,19 @@
17 * 17 *
18 */ 18 */
19 19
20
21#include <linux/config.h> 20#include <linux/config.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/init.h> 22#include <linux/init.h>
24
25#include <linux/pci.h> 23#include <linux/pci.h>
26#include <linux/pci_ids.h> 24#include <linux/pci_ids.h>
27
28#include <linux/slab.h> 25#include <linux/slab.h>
29
30#include "edac_mc.h" 26#include "edac_mc.h"
31 27
28#define e752x_printk(level, fmt, arg...) \
29 edac_printk(level, "e752x", fmt, ##arg)
30
31#define e752x_mc_printk(mci, level, fmt, arg...) \
32 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
32 33
33#ifndef PCI_DEVICE_ID_INTEL_7520_0 34#ifndef PCI_DEVICE_ID_INTEL_7520_0
34#define PCI_DEVICE_ID_INTEL_7520_0 0x3590 35#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
@@ -56,7 +57,6 @@
56 57
57#define E752X_NR_CSROWS 8 /* number of csrows */ 58#define E752X_NR_CSROWS 8 /* number of csrows */
58 59
59
60/* E752X register addresses - device 0 function 0 */ 60/* E752X register addresses - device 0 function 0 */
61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ 61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ 62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -156,7 +156,6 @@ enum e752x_chips {
156 E7320 = 2 156 E7320 = 2
157}; 157};
158 158
159
160struct e752x_pvt { 159struct e752x_pvt {
161 struct pci_dev *bridge_ck; 160 struct pci_dev *bridge_ck;
162 struct pci_dev *dev_d0f0; 161 struct pci_dev *dev_d0f0;
@@ -170,9 +169,9 @@ struct e752x_pvt {
170 const struct e752x_dev_info *dev_info; 169 const struct e752x_dev_info *dev_info;
171}; 170};
172 171
173
174struct e752x_dev_info { 172struct e752x_dev_info {
175 u16 err_dev; 173 u16 err_dev;
174 u16 ctl_dev;
176 const char *ctl_name; 175 const char *ctl_name;
177}; 176};
178 177
@@ -198,38 +197,47 @@ struct e752x_error_info {
198 197
199static const struct e752x_dev_info e752x_devs[] = { 198static const struct e752x_dev_info e752x_devs[] = {
200 [E7520] = { 199 [E7520] = {
201 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, 200 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
202 .ctl_name = "E7520"}, 201 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
202 .ctl_name = "E7520"
203 },
203 [E7525] = { 204 [E7525] = {
204 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, 205 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
205 .ctl_name = "E7525"}, 206 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
207 .ctl_name = "E7525"
208 },
206 [E7320] = { 209 [E7320] = {
207 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, 210 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
208 .ctl_name = "E7320"}, 211 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
212 .ctl_name = "E7320"
213 },
209}; 214};
210 215
211
212static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 216static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
213 unsigned long page) 217 unsigned long page)
214{ 218{
215 u32 remap; 219 u32 remap;
216 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 220 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
217 221
218 debugf3("MC: " __FILE__ ": %s()\n", __func__); 222 debugf3("%s()\n", __func__);
219 223
220 if (page < pvt->tolm) 224 if (page < pvt->tolm)
221 return page; 225 return page;
226
222 if ((page >= 0x100000) && (page < pvt->remapbase)) 227 if ((page >= 0x100000) && (page < pvt->remapbase))
223 return page; 228 return page;
229
224 remap = (page - pvt->tolm) + pvt->remapbase; 230 remap = (page - pvt->tolm) + pvt->remapbase;
231
225 if (remap < pvt->remaplimit) 232 if (remap < pvt->remaplimit)
226 return remap; 233 return remap;
227 printk(KERN_ERR "Invalid page %lx - out of range\n", page); 234
235 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
228 return pvt->tolm - 1; 236 return pvt->tolm - 1;
229} 237}
230 238
231static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, 239static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
232 u32 sec1_add, u16 sec1_syndrome) 240 u32 sec1_add, u16 sec1_syndrome)
233{ 241{
234 u32 page; 242 u32 page;
235 int row; 243 int row;
@@ -237,7 +245,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
237 int i; 245 int i;
238 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 246 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
239 247
240 debugf3("MC: " __FILE__ ": %s()\n", __func__); 248 debugf3("%s()\n", __func__);
241 249
242 /* convert the addr to 4k page */ 250 /* convert the addr to 4k page */
243 page = sec1_add >> (PAGE_SHIFT - 4); 251 page = sec1_add >> (PAGE_SHIFT - 4);
@@ -246,36 +254,37 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
246 if (pvt->mc_symmetric) { 254 if (pvt->mc_symmetric) {
247 /* chip select are bits 14 & 13 */ 255 /* chip select are bits 14 & 13 */
248 row = ((page >> 1) & 3); 256 row = ((page >> 1) & 3);
249 printk(KERN_WARNING 257 e752x_printk(KERN_WARNING,
250 "Test row %d Table %d %d %d %d %d %d %d %d\n", 258 "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
251 row, pvt->map[0], pvt->map[1], pvt->map[2], 259 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
252 pvt->map[3], pvt->map[4], pvt->map[5], 260 pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]);
253 pvt->map[6], pvt->map[7]);
254 261
255 /* test for channel remapping */ 262 /* test for channel remapping */
256 for (i = 0; i < 8; i++) { 263 for (i = 0; i < 8; i++) {
257 if (pvt->map[i] == row) 264 if (pvt->map[i] == row)
258 break; 265 break;
259 } 266 }
260 printk(KERN_WARNING "Test computed row %d\n", i); 267
268 e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
269
261 if (i < 8) 270 if (i < 8)
262 row = i; 271 row = i;
263 else 272 else
264 printk(KERN_WARNING 273 e752x_mc_printk(mci, KERN_WARNING,
265 "MC%d: row %d not found in remap table\n", 274 "row %d not found in remap table\n", row);
266 mci->mc_idx, row);
267 } else 275 } else
268 row = edac_mc_find_csrow_by_page(mci, page); 276 row = edac_mc_find_csrow_by_page(mci, page);
277
269 /* 0 = channel A, 1 = channel B */ 278 /* 0 = channel A, 1 = channel B */
270 channel = !(error_one & 1); 279 channel = !(error_one & 1);
271 280
272 if (!pvt->map_type) 281 if (!pvt->map_type)
273 row = 7 - row; 282 row = 7 - row;
283
274 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, 284 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
275 "e752x CE"); 285 "e752x CE");
276} 286}
277 287
278
279static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 288static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
280 u32 sec1_add, u16 sec1_syndrome, int *error_found, 289 u32 sec1_add, u16 sec1_syndrome, int *error_found,
281 int handle_error) 290 int handle_error)
@@ -286,36 +295,42 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
286 do_process_ce(mci, error_one, sec1_add, sec1_syndrome); 295 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
287} 296}
288 297
289static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add, 298static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
290 u32 scrb_add) 299 u32 ded_add, u32 scrb_add)
291{ 300{
292 u32 error_2b, block_page; 301 u32 error_2b, block_page;
293 int row; 302 int row;
294 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 303 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
295 304
296 debugf3("MC: " __FILE__ ": %s()\n", __func__); 305 debugf3("%s()\n", __func__);
297 306
298 if (error_one & 0x0202) { 307 if (error_one & 0x0202) {
299 error_2b = ded_add; 308 error_2b = ded_add;
309
300 /* convert to 4k address */ 310 /* convert to 4k address */
301 block_page = error_2b >> (PAGE_SHIFT - 4); 311 block_page = error_2b >> (PAGE_SHIFT - 4);
312
302 row = pvt->mc_symmetric ? 313 row = pvt->mc_symmetric ?
303 /* chip select are bits 14 & 13 */ 314 /* chip select are bits 14 & 13 */
304 ((block_page >> 1) & 3) : 315 ((block_page >> 1) & 3) :
305 edac_mc_find_csrow_by_page(mci, block_page); 316 edac_mc_find_csrow_by_page(mci, block_page);
317
306 edac_mc_handle_ue(mci, block_page, 0, row, 318 edac_mc_handle_ue(mci, block_page, 0, row,
307 "e752x UE from Read"); 319 "e752x UE from Read");
308 } 320 }
309 if (error_one & 0x0404) { 321 if (error_one & 0x0404) {
310 error_2b = scrb_add; 322 error_2b = scrb_add;
323
311 /* convert to 4k address */ 324 /* convert to 4k address */
312 block_page = error_2b >> (PAGE_SHIFT - 4); 325 block_page = error_2b >> (PAGE_SHIFT - 4);
326
313 row = pvt->mc_symmetric ? 327 row = pvt->mc_symmetric ?
314 /* chip select are bits 14 & 13 */ 328 /* chip select are bits 14 & 13 */
315 ((block_page >> 1) & 3) : 329 ((block_page >> 1) & 3) :
316 edac_mc_find_csrow_by_page(mci, block_page); 330 edac_mc_find_csrow_by_page(mci, block_page);
331
317 edac_mc_handle_ue(mci, block_page, 0, row, 332 edac_mc_handle_ue(mci, block_page, 0, row,
318 "e752x UE from Scruber"); 333 "e752x UE from Scruber");
319 } 334 }
320} 335}
321 336
@@ -336,7 +351,7 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
336 if (!handle_error) 351 if (!handle_error)
337 return; 352 return;
338 353
339 debugf3("MC: " __FILE__ ": %s()\n", __func__); 354 debugf3("%s()\n", __func__);
340 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); 355 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
341} 356}
342 357
@@ -348,13 +363,13 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
348 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 363 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
349 364
350 error_1b = retry_add; 365 error_1b = retry_add;
351 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ 366 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
352 row = pvt->mc_symmetric ? 367 row = pvt->mc_symmetric ?
353 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ 368 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
354 edac_mc_find_csrow_by_page(mci, page); 369 edac_mc_find_csrow_by_page(mci, page);
355 printk(KERN_WARNING 370 e752x_mc_printk(mci, KERN_WARNING,
356 "MC%d: CE page 0x%lx, row %d : Memory read retry\n", 371 "CE page 0x%lx, row %d : Memory read retry\n",
357 mci->mc_idx, (long unsigned int) page, row); 372 (long unsigned int) page, row);
358} 373}
359 374
360static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, 375static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -372,8 +387,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
372 *error_found = 1; 387 *error_found = 1;
373 388
374 if (handle_error) 389 if (handle_error)
375 printk(KERN_WARNING "MC%d: Memory threshold CE\n", 390 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
376 mci->mc_idx);
377} 391}
378 392
379static char *global_message[11] = { 393static char *global_message[11] = {
@@ -391,8 +405,8 @@ static void do_global_error(int fatal, u32 errors)
391 405
392 for (i = 0; i < 11; i++) { 406 for (i = 0; i < 11; i++) {
393 if (errors & (1 << i)) 407 if (errors & (1 << i))
394 printk(KERN_WARNING "%sError %s\n", 408 e752x_printk(KERN_WARNING, "%sError %s\n",
395 fatal_message[fatal], global_message[i]); 409 fatal_message[fatal], global_message[i]);
396 } 410 }
397} 411}
398 412
@@ -418,8 +432,8 @@ static void do_hub_error(int fatal, u8 errors)
418 432
419 for (i = 0; i < 7; i++) { 433 for (i = 0; i < 7; i++) {
420 if (errors & (1 << i)) 434 if (errors & (1 << i))
421 printk(KERN_WARNING "%sError %s\n", 435 e752x_printk(KERN_WARNING, "%sError %s\n",
422 fatal_message[fatal], hub_message[i]); 436 fatal_message[fatal], hub_message[i]);
423 } 437 }
424} 438}
425 439
@@ -445,8 +459,8 @@ static void do_membuf_error(u8 errors)
445 459
446 for (i = 0; i < 4; i++) { 460 for (i = 0; i < 4; i++) {
447 if (errors & (1 << i)) 461 if (errors & (1 << i))
448 printk(KERN_WARNING "Non-Fatal Error %s\n", 462 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
449 membuf_message[i]); 463 membuf_message[i]);
450 } 464 }
451} 465}
452 466
@@ -458,8 +472,7 @@ static inline void membuf_error(u8 errors, int *error_found, int handle_error)
458 do_membuf_error(errors); 472 do_membuf_error(errors);
459} 473}
460 474
461#if 0 475static char *sysbus_message[10] = {
462char *sysbus_message[10] = {
463 "Addr or Request Parity", 476 "Addr or Request Parity",
464 "Data Strobe Glitch", 477 "Data Strobe Glitch",
465 "Addr Strobe Glitch", 478 "Addr Strobe Glitch",
@@ -470,7 +483,6 @@ char *sysbus_message[10] = {
470 "Memory Parity", 483 "Memory Parity",
471 "IO Subsystem Parity" 484 "IO Subsystem Parity"
472}; 485};
473#endif /* 0 */
474 486
475static void do_sysbus_error(int fatal, u32 errors) 487static void do_sysbus_error(int fatal, u32 errors)
476{ 488{
@@ -478,8 +490,8 @@ static void do_sysbus_error(int fatal, u32 errors)
478 490
479 for (i = 0; i < 10; i++) { 491 for (i = 0; i < 10; i++) {
480 if (errors & (1 << i)) 492 if (errors & (1 << i))
481 printk(KERN_WARNING "%sError System Bus %s\n", 493 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
482 fatal_message[fatal], global_message[i]); 494 fatal_message[fatal], sysbus_message[i]);
483 } 495 }
484} 496}
485 497
@@ -492,33 +504,42 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found,
492 do_sysbus_error(fatal, errors); 504 do_sysbus_error(fatal, errors);
493} 505}
494 506
495static void e752x_check_hub_interface (struct e752x_error_info *info, 507static void e752x_check_hub_interface(struct e752x_error_info *info,
496 int *error_found, int handle_error) 508 int *error_found, int handle_error)
497{ 509{
498 u8 stat8; 510 u8 stat8;
499 511
500 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); 512 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
513
501 stat8 = info->hi_ferr; 514 stat8 = info->hi_ferr;
515
502 if(stat8 & 0x7f) { /* Error, so process */ 516 if(stat8 & 0x7f) { /* Error, so process */
503 stat8 &= 0x7f; 517 stat8 &= 0x7f;
518
504 if(stat8 & 0x2b) 519 if(stat8 & 0x2b)
505 hub_error(1, stat8 & 0x2b, error_found, handle_error); 520 hub_error(1, stat8 & 0x2b, error_found, handle_error);
521
506 if(stat8 & 0x54) 522 if(stat8 & 0x54)
507 hub_error(0, stat8 & 0x54, error_found, handle_error); 523 hub_error(0, stat8 & 0x54, error_found, handle_error);
508 } 524 }
525
509 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); 526 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
527
510 stat8 = info->hi_nerr; 528 stat8 = info->hi_nerr;
529
511 if(stat8 & 0x7f) { /* Error, so process */ 530 if(stat8 & 0x7f) { /* Error, so process */
512 stat8 &= 0x7f; 531 stat8 &= 0x7f;
532
513 if (stat8 & 0x2b) 533 if (stat8 & 0x2b)
514 hub_error(1, stat8 & 0x2b, error_found, handle_error); 534 hub_error(1, stat8 & 0x2b, error_found, handle_error);
535
515 if(stat8 & 0x54) 536 if(stat8 & 0x54)
516 hub_error(0, stat8 & 0x54, error_found, handle_error); 537 hub_error(0, stat8 & 0x54, error_found, handle_error);
517 } 538 }
518} 539}
519 540
520static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, 541static void e752x_check_sysbus(struct e752x_error_info *info,
521 int handle_error) 542 int *error_found, int handle_error)
522{ 543{
523 u32 stat32, error32; 544 u32 stat32, error32;
524 545
@@ -530,27 +551,34 @@ static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
530 551
531 error32 = (stat32 >> 16) & 0x3ff; 552 error32 = (stat32 >> 16) & 0x3ff;
532 stat32 = stat32 & 0x3ff; 553 stat32 = stat32 & 0x3ff;
554
533 if(stat32 & 0x083) 555 if(stat32 & 0x083)
534 sysbus_error(1, stat32 & 0x083, error_found, handle_error); 556 sysbus_error(1, stat32 & 0x083, error_found, handle_error);
557
535 if(stat32 & 0x37c) 558 if(stat32 & 0x37c)
536 sysbus_error(0, stat32 & 0x37c, error_found, handle_error); 559 sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
560
537 if(error32 & 0x083) 561 if(error32 & 0x083)
538 sysbus_error(1, error32 & 0x083, error_found, handle_error); 562 sysbus_error(1, error32 & 0x083, error_found, handle_error);
563
539 if(error32 & 0x37c) 564 if(error32 & 0x37c)
540 sysbus_error(0, error32 & 0x37c, error_found, handle_error); 565 sysbus_error(0, error32 & 0x37c, error_found, handle_error);
541} 566}
542 567
543static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, 568static void e752x_check_membuf (struct e752x_error_info *info,
544 int handle_error) 569 int *error_found, int handle_error)
545{ 570{
546 u8 stat8; 571 u8 stat8;
547 572
548 stat8 = info->buf_ferr; 573 stat8 = info->buf_ferr;
574
549 if (stat8 & 0x0f) { /* Error, so process */ 575 if (stat8 & 0x0f) { /* Error, so process */
550 stat8 &= 0x0f; 576 stat8 &= 0x0f;
551 membuf_error(stat8, error_found, handle_error); 577 membuf_error(stat8, error_found, handle_error);
552 } 578 }
579
553 stat8 = info->buf_nerr; 580 stat8 = info->buf_nerr;
581
554 if (stat8 & 0x0f) { /* Error, so process */ 582 if (stat8 & 0x0f) { /* Error, so process */
555 stat8 &= 0x0f; 583 stat8 &= 0x0f;
556 membuf_error(stat8, error_found, handle_error); 584 membuf_error(stat8, error_found, handle_error);
@@ -558,7 +586,8 @@ static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
558} 586}
559 587
560static void e752x_check_dram (struct mem_ctl_info *mci, 588static void e752x_check_dram (struct mem_ctl_info *mci,
561 struct e752x_error_info *info, int *error_found, int handle_error) 589 struct e752x_error_info *info, int *error_found,
590 int handle_error)
562{ 591{
563 u16 error_one, error_next; 592 u16 error_one, error_next;
564 593
@@ -608,7 +637,7 @@ static void e752x_check_dram (struct mem_ctl_info *mci,
608} 637}
609 638
610static void e752x_get_error_info (struct mem_ctl_info *mci, 639static void e752x_get_error_info (struct mem_ctl_info *mci,
611 struct e752x_error_info *info) 640 struct e752x_error_info *info)
612{ 641{
613 struct pci_dev *dev; 642 struct pci_dev *dev;
614 struct e752x_pvt *pvt; 643 struct e752x_pvt *pvt;
@@ -616,7 +645,6 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
616 memset(info, 0, sizeof(*info)); 645 memset(info, 0, sizeof(*info));
617 pvt = (struct e752x_pvt *) mci->pvt_info; 646 pvt = (struct e752x_pvt *) mci->pvt_info;
618 dev = pvt->dev_d0f1; 647 dev = pvt->dev_d0f1;
619
620 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); 648 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
621 649
622 if (info->ferr_global) { 650 if (info->ferr_global) {
@@ -727,7 +755,8 @@ static int e752x_process_error_info (struct mem_ctl_info *mci,
727static void e752x_check(struct mem_ctl_info *mci) 755static void e752x_check(struct mem_ctl_info *mci)
728{ 756{
729 struct e752x_error_info info; 757 struct e752x_error_info info;
730 debugf3("MC: " __FILE__ ": %s()\n", __func__); 758
759 debugf3("%s()\n", __func__);
731 e752x_get_error_info(mci, &info); 760 e752x_get_error_info(mci, &info);
732 e752x_process_error_info(mci, &info, 1); 761 e752x_process_error_info(mci, &info, 1);
733} 762}
@@ -736,23 +765,21 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
736{ 765{
737 int rc = -ENODEV; 766 int rc = -ENODEV;
738 int index; 767 int index;
739 u16 pci_data, stat; 768 u16 pci_data;
740 u32 stat32;
741 u16 stat16;
742 u8 stat8; 769 u8 stat8;
743 struct mem_ctl_info *mci = NULL; 770 struct mem_ctl_info *mci = NULL;
744 struct e752x_pvt *pvt = NULL; 771 struct e752x_pvt *pvt = NULL;
745 u16 ddrcsr; 772 u16 ddrcsr;
746 u32 drc; 773 u32 drc;
747 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 774 int drc_chan; /* Number of channels 0=1chan,1=2chan */
748 int drc_drbg; /* DRB granularity 0=64mb,1=128mb */ 775 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
749 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 776 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
750 u32 dra; 777 u32 dra;
751 unsigned long last_cumul_size; 778 unsigned long last_cumul_size;
752 struct pci_dev *pres_dev;
753 struct pci_dev *dev = NULL; 779 struct pci_dev *dev = NULL;
780 struct e752x_error_info discard;
754 781
755 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); 782 debugf0("%s(): mci\n", __func__);
756 debugf0("Starting Probe1\n"); 783 debugf0("Starting Probe1\n");
757 784
758 /* enable device 0 function 1 */ 785 /* enable device 0 function 1 */
@@ -776,34 +803,35 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
776 goto fail; 803 goto fail;
777 } 804 }
778 805
779 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 806 debugf3("%s(): init mci\n", __func__);
780
781 mci->mtype_cap = MEM_FLAG_RDDR; 807 mci->mtype_cap = MEM_FLAG_RDDR;
782 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 808 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
783 EDAC_FLAG_S4ECD4ED; 809 EDAC_FLAG_S4ECD4ED;
784 /* FIXME - what if different memory types are in different csrows? */ 810 /* FIXME - what if different memory types are in different csrows? */
785 mci->mod_name = BS_MOD_STR; 811 mci->mod_name = EDAC_MOD_STR;
786 mci->mod_ver = "$Revision: 1.5.2.11 $"; 812 mci->mod_ver = "$Revision: 1.5.2.11 $";
787 mci->pdev = pdev; 813 mci->pdev = pdev;
788 814
789 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); 815 debugf3("%s(): init pvt\n", __func__);
790 pvt = (struct e752x_pvt *) mci->pvt_info; 816 pvt = (struct e752x_pvt *) mci->pvt_info;
791 pvt->dev_info = &e752x_devs[dev_idx]; 817 pvt->dev_info = &e752x_devs[dev_idx];
792 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 818 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
793 pvt->dev_info->err_dev, 819 pvt->dev_info->err_dev,
794 pvt->bridge_ck); 820 pvt->bridge_ck);
821
795 if (pvt->bridge_ck == NULL) 822 if (pvt->bridge_ck == NULL)
796 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 823 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
797 PCI_DEVFN(0, 1)); 824 PCI_DEVFN(0, 1));
825
798 if (pvt->bridge_ck == NULL) { 826 if (pvt->bridge_ck == NULL) {
799 printk(KERN_ERR "MC: error reporting device not found:" 827 e752x_printk(KERN_ERR, "error reporting device not found:"
800 "vendor %x device 0x%x (broken BIOS?)\n", 828 "vendor %x device 0x%x (broken BIOS?)\n",
801 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); 829 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
802 goto fail; 830 goto fail;
803 } 831 }
804 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
805 832
806 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); 833 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
834 debugf3("%s(): more mci init\n", __func__);
807 mci->ctl_name = pvt->dev_info->ctl_name; 835 mci->ctl_name = pvt->dev_info->ctl_name;
808 mci->edac_check = e752x_check; 836 mci->edac_check = e752x_check;
809 mci->ctl_page_to_phys = ctl_page_to_phys; 837 mci->ctl_page_to_phys = ctl_page_to_phys;
@@ -820,6 +848,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
820 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 848 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
821 u8 value; 849 u8 value;
822 u32 cumul_size; 850 u32 cumul_size;
851
823 /* mem_dev 0=x8, 1=x4 */ 852 /* mem_dev 0=x8, 1=x4 */
824 int mem_dev = (dra >> (index * 4 + 2)) & 0x3; 853 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
825 struct csrow_info *csrow = &mci->csrows[index]; 854 struct csrow_info *csrow = &mci->csrows[index];
@@ -828,17 +857,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
828 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value); 857 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
829 /* convert a 128 or 64 MiB DRB to a page size. */ 858 /* convert a 128 or 64 MiB DRB to a page size. */
830 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 859 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
831 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 860 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
832 __func__, index, cumul_size); 861 cumul_size);
862
833 if (cumul_size == last_cumul_size) 863 if (cumul_size == last_cumul_size)
834 continue; /* not populated */ 864 continue; /* not populated */
835 865
836 csrow->first_page = last_cumul_size; 866 csrow->first_page = last_cumul_size;
837 csrow->last_page = cumul_size - 1; 867 csrow->last_page = cumul_size - 1;
838 csrow->nr_pages = cumul_size - last_cumul_size; 868 csrow->nr_pages = cumul_size - last_cumul_size;
839 last_cumul_size = cumul_size; 869 last_cumul_size = cumul_size;
840 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 870 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
841 csrow->mtype = MEM_RDDR; /* only one type supported */ 871 csrow->mtype = MEM_RDDR; /* only one type supported */
842 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 872 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
843 873
844 /* 874 /*
@@ -862,29 +892,32 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
862 u8 value; 892 u8 value;
863 u8 last = 0; 893 u8 last = 0;
864 u8 row = 0; 894 u8 row = 0;
865 for (index = 0; index < 8; index += 2) {
866 895
896 for (index = 0; index < 8; index += 2) {
867 pci_read_config_byte(mci->pdev, E752X_DRB + index, 897 pci_read_config_byte(mci->pdev, E752X_DRB + index,
868 &value); 898 &value);
899
869 /* test if there is a dimm in this slot */ 900 /* test if there is a dimm in this slot */
870 if (value == last) { 901 if (value == last) {
871 /* no dimm in the slot, so flag it as empty */ 902 /* no dimm in the slot, so flag it as empty */
872 pvt->map[index] = 0xff; 903 pvt->map[index] = 0xff;
873 pvt->map[index + 1] = 0xff; 904 pvt->map[index + 1] = 0xff;
874 } else { /* there is a dimm in the slot */ 905 } else { /* there is a dimm in the slot */
875 pvt->map[index] = row; 906 pvt->map[index] = row;
876 row++; 907 row++;
877 last = value; 908 last = value;
878 /* test the next value to see if the dimm is 909 /* test the next value to see if the dimm is
879 double sided */ 910 double sided */
880 pci_read_config_byte(mci->pdev, 911 pci_read_config_byte(mci->pdev,
881 E752X_DRB + index + 1, 912 E752X_DRB + index + 1,
882 &value); 913 &value);
883 pvt->map[index + 1] = (value == last) ? 914 pvt->map[index + 1] = (value == last) ?
884 0xff : /* the dimm is single sided, 915 0xff : /* the dimm is single sided,
885 so flag as empty */ 916 * so flag as empty
886 row; /* this is a double sided dimm 917 */
887 to save the next row # */ 918 row; /* this is a double sided dimm
919 * to save the next row #
920 */
888 row++; 921 row++;
889 last = value; 922 last = value;
890 } 923 }
@@ -896,9 +929,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
896 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); 929 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
897 930
898 mci->edac_cap |= EDAC_FLAG_NONE; 931 mci->edac_cap |= EDAC_FLAG_NONE;
932 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
899 933
900 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
901 __func__);
902 /* load the top of low memory, remap base, and remap limit vars */ 934 /* load the top of low memory, remap base, and remap limit vars */
903 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); 935 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
904 pvt->tolm = ((u32) pci_data) << 4; 936 pvt->tolm = ((u32) pci_data) << 4;
@@ -906,43 +938,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
906 pvt->remapbase = ((u32) pci_data) << 14; 938 pvt->remapbase = ((u32) pci_data) << 14;
907 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); 939 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
908 pvt->remaplimit = ((u32) pci_data) << 14; 940 pvt->remaplimit = ((u32) pci_data) << 14;
909 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 941 e752x_printk(KERN_INFO,
910 pvt->remapbase, pvt->remaplimit); 942 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
943 pvt->remapbase, pvt->remaplimit);
911 944
912 if (edac_mc_add_mc(mci)) { 945 if (edac_mc_add_mc(mci)) {
913 debugf3("MC: " __FILE__ 946 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
914 ": %s(): failed edac_mc_add_mc()\n",
915 __func__);
916 goto fail; 947 goto fail;
917 } 948 }
918 949
919 /* Walk through the PCI table and clear errors */ 950 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
920 switch (dev_idx) { 951 NULL);
921 case E7520:
922 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
923 PCI_DEVICE_ID_INTEL_7520_0, NULL);
924 break;
925 case E7525:
926 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
927 PCI_DEVICE_ID_INTEL_7525_0, NULL);
928 break;
929 case E7320:
930 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
931 PCI_DEVICE_ID_INTEL_7320_0, NULL);
932 break;
933 }
934
935
936 pvt->dev_d0f0 = dev; 952 pvt->dev_d0f0 = dev;
937 for (pres_dev = dev;
938 ((struct pci_dev *) pres_dev->global_list.next != dev);
939 pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
940 pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
941 stat = (u16) (stat32 >> 16);
942 /* clear any error bits */
943 if (stat32 & ((1 << 6) + (1 << 8)))
944 pci_write_config_word(pres_dev, PCI_STATUS, stat);
945 }
946 /* find the error reporting device and clear errors */ 953 /* find the error reporting device and clear errors */
947 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); 954 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
948 /* Turn off error disable & SMI in case the BIOS turned it on */ 955 /* Turn off error disable & SMI in case the BIOS turned it on */
@@ -954,67 +961,51 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
954 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); 961 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
955 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); 962 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
956 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); 963 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
957 /* clear other MCH errors */ 964
958 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32); 965 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
959 pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
960 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
961 pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
962 pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
963 pci_write_config_byte(dev, E752X_HI_FERR, stat8);
964 pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
965 pci_write_config_byte(dev, E752X_HI_NERR, stat8);
966 pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
967 pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
968 pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
969 pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
970 pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
971 pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
972 pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
973 pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
974 pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
975 pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
976 966
977 /* get this far and it's successful */ 967 /* get this far and it's successful */
978 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 968 debugf3("%s(): success\n", __func__);
979 return 0; 969 return 0;
980 970
981fail: 971fail:
982 if (mci) { 972 if (mci) {
983 if (pvt->dev_d0f0) 973 if (pvt->dev_d0f0)
984 pci_dev_put(pvt->dev_d0f0); 974 pci_dev_put(pvt->dev_d0f0);
975
985 if (pvt->dev_d0f1) 976 if (pvt->dev_d0f1)
986 pci_dev_put(pvt->dev_d0f1); 977 pci_dev_put(pvt->dev_d0f1);
978
987 if (pvt->bridge_ck) 979 if (pvt->bridge_ck)
988 pci_dev_put(pvt->bridge_ck); 980 pci_dev_put(pvt->bridge_ck);
981
989 edac_mc_free(mci); 982 edac_mc_free(mci);
990 } 983 }
984
991 return rc; 985 return rc;
992} 986}
993 987
994/* returns count (>= 0), or negative on error */ 988/* returns count (>= 0), or negative on error */
995static int __devinit e752x_init_one(struct pci_dev *pdev, 989static int __devinit e752x_init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent) 990 const struct pci_device_id *ent)
997{ 991{
998 debugf0("MC: " __FILE__ ": %s()\n", __func__); 992 debugf0("%s()\n", __func__);
999 993
1000 /* wake up and enable device */ 994 /* wake up and enable device */
1001 if(pci_enable_device(pdev) < 0) 995 if(pci_enable_device(pdev) < 0)
1002 return -EIO; 996 return -EIO;
997
1003 return e752x_probe1(pdev, ent->driver_data); 998 return e752x_probe1(pdev, ent->driver_data);
1004} 999}
1005 1000
1006
1007static void __devexit e752x_remove_one(struct pci_dev *pdev) 1001static void __devexit e752x_remove_one(struct pci_dev *pdev)
1008{ 1002{
1009 struct mem_ctl_info *mci; 1003 struct mem_ctl_info *mci;
1010 struct e752x_pvt *pvt; 1004 struct e752x_pvt *pvt;
1011 1005
1012 debugf0(__FILE__ ": %s()\n", __func__); 1006 debugf0("%s()\n", __func__);
1013
1014 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
1015 return;
1016 1007
1017 if (edac_mc_del_mc(mci)) 1008 if ((mci = edac_mc_del_mc(pdev)) == NULL)
1018 return; 1009 return;
1019 1010
1020 pvt = (struct e752x_pvt *) mci->pvt_info; 1011 pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1024,45 +1015,48 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
1024 edac_mc_free(mci); 1015 edac_mc_free(mci);
1025} 1016}
1026 1017
1027
1028static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { 1018static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1029 {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1019 {
1030 E7520}, 1020 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1031 {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1021 E7520
1032 E7525}, 1022 },
1033 {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1023 {
1034 E7320}, 1024 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1035 {0,} /* 0 terminated list. */ 1025 E7525
1026 },
1027 {
1028 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1029 E7320
1030 },
1031 {
1032 0,
1033 } /* 0 terminated list. */
1036}; 1034};
1037 1035
1038MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); 1036MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039 1037
1040
1041static struct pci_driver e752x_driver = { 1038static struct pci_driver e752x_driver = {
1042 .name = BS_MOD_STR, 1039 .name = EDAC_MOD_STR,
1043 .probe = e752x_init_one, 1040 .probe = e752x_init_one,
1044 .remove = __devexit_p(e752x_remove_one), 1041 .remove = __devexit_p(e752x_remove_one),
1045 .id_table = e752x_pci_tbl, 1042 .id_table = e752x_pci_tbl,
1046}; 1043};
1047 1044
1048
1049static int __init e752x_init(void) 1045static int __init e752x_init(void)
1050{ 1046{
1051 int pci_rc; 1047 int pci_rc;
1052 1048
1053 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1049 debugf3("%s()\n", __func__);
1054 pci_rc = pci_register_driver(&e752x_driver); 1050 pci_rc = pci_register_driver(&e752x_driver);
1055 return (pci_rc < 0) ? pci_rc : 0; 1051 return (pci_rc < 0) ? pci_rc : 0;
1056} 1052}
1057 1053
1058
1059static void __exit e752x_exit(void) 1054static void __exit e752x_exit(void)
1060{ 1055{
1061 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1056 debugf3("%s()\n", __func__);
1062 pci_unregister_driver(&e752x_driver); 1057 pci_unregister_driver(&e752x_driver);
1063} 1058}
1064 1059
1065
1066module_init(e752x_init); 1060module_init(e752x_init);
1067module_exit(e752x_exit); 1061module_exit(e752x_exit);
1068 1062
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index d5e320dfc66f..a9518d3e4be4 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -11,9 +11,9 @@
11 * http://www.anime.net/~goemon/linux-ecc/ 11 * http://www.anime.net/~goemon/linux-ecc/
12 * 12 *
13 * Contributors: 13 * Contributors:
14 * Eric Biederman (Linux Networx) 14 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx) 15 * Tom Zimmerman (Linux Networx)
16 * Jim Garlick (Lawrence Livermore National Labs) 16 * Jim Garlick (Lawrence Livermore National Labs)
17 * Dave Peterson (Lawrence Livermore National Labs) 17 * Dave Peterson (Lawrence Livermore National Labs)
18 * That One Guy (Some other place) 18 * That One Guy (Some other place)
19 * Wang Zhenyu (intel.com) 19 * Wang Zhenyu (intel.com)
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25
26#include <linux/config.h> 25#include <linux/config.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <linux/init.h> 27#include <linux/init.h>
@@ -31,6 +30,11 @@
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include "edac_mc.h" 31#include "edac_mc.h"
33 32
33#define e7xxx_printk(level, fmt, arg...) \
34 edac_printk(level, "e7xxx", fmt, ##arg)
35
36#define e7xxx_mc_printk(mci, level, fmt, arg...) \
37 edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
34 38
35#ifndef PCI_DEVICE_ID_INTEL_7205_0 39#ifndef PCI_DEVICE_ID_INTEL_7205_0
36#define PCI_DEVICE_ID_INTEL_7205_0 0x255d 40#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
@@ -64,11 +68,9 @@
64#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 68#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
65#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ 69#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
66 70
67
68#define E7XXX_NR_CSROWS 8 /* number of csrows */ 71#define E7XXX_NR_CSROWS 8 /* number of csrows */
69#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ 72#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
70 73
71
72/* E7XXX register addresses - device 0 function 0 */ 74/* E7XXX register addresses - device 0 function 0 */
73#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ 75#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
74#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ 76#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -118,7 +120,6 @@ enum e7xxx_chips {
118 E7205, 120 E7205,
119}; 121};
120 122
121
122struct e7xxx_pvt { 123struct e7xxx_pvt {
123 struct pci_dev *bridge_ck; 124 struct pci_dev *bridge_ck;
124 u32 tolm; 125 u32 tolm;
@@ -127,13 +128,11 @@ struct e7xxx_pvt {
127 const struct e7xxx_dev_info *dev_info; 128 const struct e7xxx_dev_info *dev_info;
128}; 129};
129 130
130
131struct e7xxx_dev_info { 131struct e7xxx_dev_info {
132 u16 err_dev; 132 u16 err_dev;
133 const char *ctl_name; 133 const char *ctl_name;
134}; 134};
135 135
136
137struct e7xxx_error_info { 136struct e7xxx_error_info {
138 u8 dram_ferr; 137 u8 dram_ferr;
139 u8 dram_nerr; 138 u8 dram_nerr;
@@ -144,108 +143,110 @@ struct e7xxx_error_info {
144 143
145static const struct e7xxx_dev_info e7xxx_devs[] = { 144static const struct e7xxx_dev_info e7xxx_devs[] = {
146 [E7500] = { 145 [E7500] = {
147 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, 146 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
148 .ctl_name = "E7500"}, 147 .ctl_name = "E7500"
148 },
149 [E7501] = { 149 [E7501] = {
150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, 150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
151 .ctl_name = "E7501"}, 151 .ctl_name = "E7501"
152 },
152 [E7505] = { 153 [E7505] = {
153 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, 154 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
154 .ctl_name = "E7505"}, 155 .ctl_name = "E7505"
156 },
155 [E7205] = { 157 [E7205] = {
156 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, 158 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
157 .ctl_name = "E7205"}, 159 .ctl_name = "E7205"
160 },
158}; 161};
159 162
160
161/* FIXME - is this valid for both SECDED and S4ECD4ED? */ 163/* FIXME - is this valid for both SECDED and S4ECD4ED? */
162static inline int e7xxx_find_channel(u16 syndrome) 164static inline int e7xxx_find_channel(u16 syndrome)
163{ 165{
164 debugf3("MC: " __FILE__ ": %s()\n", __func__); 166 debugf3("%s()\n", __func__);
165 167
166 if ((syndrome & 0xff00) == 0) 168 if ((syndrome & 0xff00) == 0)
167 return 0; 169 return 0;
170
168 if ((syndrome & 0x00ff) == 0) 171 if ((syndrome & 0x00ff) == 0)
169 return 1; 172 return 1;
173
170 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) 174 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
171 return 0; 175 return 0;
176
172 return 1; 177 return 1;
173} 178}
174 179
175 180static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
176static unsigned long 181 unsigned long page)
177ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
178{ 182{
179 u32 remap; 183 u32 remap;
180 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; 184 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
181 185
182 debugf3("MC: " __FILE__ ": %s()\n", __func__); 186 debugf3("%s()\n", __func__);
183 187
184 if ((page < pvt->tolm) || 188 if ((page < pvt->tolm) ||
185 ((page >= 0x100000) && (page < pvt->remapbase))) 189 ((page >= 0x100000) && (page < pvt->remapbase)))
186 return page; 190 return page;
191
187 remap = (page - pvt->tolm) + pvt->remapbase; 192 remap = (page - pvt->tolm) + pvt->remapbase;
193
188 if (remap < pvt->remaplimit) 194 if (remap < pvt->remaplimit)
189 return remap; 195 return remap;
190 printk(KERN_ERR "Invalid page %lx - out of range\n", page); 196
197 e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
191 return pvt->tolm - 1; 198 return pvt->tolm - 1;
192} 199}
193 200
194 201static void process_ce(struct mem_ctl_info *mci,
195static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 202 struct e7xxx_error_info *info)
196{ 203{
197 u32 error_1b, page; 204 u32 error_1b, page;
198 u16 syndrome; 205 u16 syndrome;
199 int row; 206 int row;
200 int channel; 207 int channel;
201 208
202 debugf3("MC: " __FILE__ ": %s()\n", __func__); 209 debugf3("%s()\n", __func__);
203
204 /* read the error address */ 210 /* read the error address */
205 error_1b = info->dram_celog_add; 211 error_1b = info->dram_celog_add;
206 /* FIXME - should use PAGE_SHIFT */ 212 /* FIXME - should use PAGE_SHIFT */
207 page = error_1b >> 6; /* convert the address to 4k page */ 213 page = error_1b >> 6; /* convert the address to 4k page */
208 /* read the syndrome */ 214 /* read the syndrome */
209 syndrome = info->dram_celog_syndrome; 215 syndrome = info->dram_celog_syndrome;
210 /* FIXME - check for -1 */ 216 /* FIXME - check for -1 */
211 row = edac_mc_find_csrow_by_page(mci, page); 217 row = edac_mc_find_csrow_by_page(mci, page);
212 /* convert syndrome to channel */ 218 /* convert syndrome to channel */
213 channel = e7xxx_find_channel(syndrome); 219 channel = e7xxx_find_channel(syndrome);
214 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, 220 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
215 "e7xxx CE");
216} 221}
217 222
218
219static void process_ce_no_info(struct mem_ctl_info *mci) 223static void process_ce_no_info(struct mem_ctl_info *mci)
220{ 224{
221 debugf3("MC: " __FILE__ ": %s()\n", __func__); 225 debugf3("%s()\n", __func__);
222 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); 226 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
223} 227}
224 228
225 229static void process_ue(struct mem_ctl_info *mci,
226static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 230 struct e7xxx_error_info *info)
227{ 231{
228 u32 error_2b, block_page; 232 u32 error_2b, block_page;
229 int row; 233 int row;
230 234
231 debugf3("MC: " __FILE__ ": %s()\n", __func__); 235 debugf3("%s()\n", __func__);
232
233 /* read the error address */ 236 /* read the error address */
234 error_2b = info->dram_uelog_add; 237 error_2b = info->dram_uelog_add;
235 /* FIXME - should use PAGE_SHIFT */ 238 /* FIXME - should use PAGE_SHIFT */
236 block_page = error_2b >> 6; /* convert to 4k address */ 239 block_page = error_2b >> 6; /* convert to 4k address */
237 row = edac_mc_find_csrow_by_page(mci, block_page); 240 row = edac_mc_find_csrow_by_page(mci, block_page);
238 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); 241 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
239} 242}
240 243
241
242static void process_ue_no_info(struct mem_ctl_info *mci) 244static void process_ue_no_info(struct mem_ctl_info *mci)
243{ 245{
244 debugf3("MC: " __FILE__ ": %s()\n", __func__); 246 debugf3("%s()\n", __func__);
245 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); 247 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
246} 248}
247 249
248
249static void e7xxx_get_error_info (struct mem_ctl_info *mci, 250static void e7xxx_get_error_info (struct mem_ctl_info *mci,
250 struct e7xxx_error_info *info) 251 struct e7xxx_error_info *info)
251{ 252{
@@ -253,31 +254,29 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci,
253 254
254 pvt = (struct e7xxx_pvt *) mci->pvt_info; 255 pvt = (struct e7xxx_pvt *) mci->pvt_info;
255 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, 256 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
256 &info->dram_ferr); 257 &info->dram_ferr);
257 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, 258 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
258 &info->dram_nerr); 259 &info->dram_nerr);
259 260
260 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { 261 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
261 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, 262 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
262 &info->dram_celog_add); 263 &info->dram_celog_add);
263 pci_read_config_word(pvt->bridge_ck, 264 pci_read_config_word(pvt->bridge_ck,
264 E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); 265 E7XXX_DRAM_CELOG_SYNDROME,
266 &info->dram_celog_syndrome);
265 } 267 }
266 268
267 if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) 269 if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
268 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, 270 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
269 &info->dram_uelog_add); 271 &info->dram_uelog_add);
270 272
271 if (info->dram_ferr & 3) 273 if (info->dram_ferr & 3)
272 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 274 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
273 0x03);
274 275
275 if (info->dram_nerr & 3) 276 if (info->dram_nerr & 3)
276 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 277 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
277 0x03);
278} 278}
279 279
280
281static int e7xxx_process_error_info (struct mem_ctl_info *mci, 280static int e7xxx_process_error_info (struct mem_ctl_info *mci,
282 struct e7xxx_error_info *info, int handle_errors) 281 struct e7xxx_error_info *info, int handle_errors)
283{ 282{
@@ -325,17 +324,15 @@ static int e7xxx_process_error_info (struct mem_ctl_info *mci,
325 return error_found; 324 return error_found;
326} 325}
327 326
328
329static void e7xxx_check(struct mem_ctl_info *mci) 327static void e7xxx_check(struct mem_ctl_info *mci)
330{ 328{
331 struct e7xxx_error_info info; 329 struct e7xxx_error_info info;
332 330
333 debugf3("MC: " __FILE__ ": %s()\n", __func__); 331 debugf3("%s()\n", __func__);
334 e7xxx_get_error_info(mci, &info); 332 e7xxx_get_error_info(mci, &info);
335 e7xxx_process_error_info(mci, &info, 1); 333 e7xxx_process_error_info(mci, &info, 1);
336} 334}
337 335
338
339static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) 336static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
340{ 337{
341 int rc = -ENODEV; 338 int rc = -ENODEV;
@@ -349,19 +346,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
349 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 346 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
350 u32 dra; 347 u32 dra;
351 unsigned long last_cumul_size; 348 unsigned long last_cumul_size;
349 struct e7xxx_error_info discard;
352 350
353 351 debugf0("%s(): mci\n", __func__);
354 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
355 352
356 /* need to find out the number of channels */ 353 /* need to find out the number of channels */
357 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 354 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
355
358 /* only e7501 can be single channel */ 356 /* only e7501 can be single channel */
359 if (dev_idx == E7501) { 357 if (dev_idx == E7501) {
360 drc_chan = ((drc >> 22) & 0x1); 358 drc_chan = ((drc >> 22) & 0x1);
361 drc_drbg = (drc >> 18) & 0x3; 359 drc_drbg = (drc >> 18) & 0x3;
362 } 360 }
363 drc_ddim = (drc >> 20) & 0x3;
364 361
362 drc_ddim = (drc >> 20) & 0x3;
365 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); 363 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
366 364
367 if (mci == NULL) { 365 if (mci == NULL) {
@@ -369,33 +367,31 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
369 goto fail; 367 goto fail;
370 } 368 }
371 369
372 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 370 debugf3("%s(): init mci\n", __func__);
373
374 mci->mtype_cap = MEM_FLAG_RDDR; 371 mci->mtype_cap = MEM_FLAG_RDDR;
375 mci->edac_ctl_cap = 372 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
376 EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; 373 EDAC_FLAG_S4ECD4ED;
377 /* FIXME - what if different memory types are in different csrows? */ 374 /* FIXME - what if different memory types are in different csrows? */
378 mci->mod_name = BS_MOD_STR; 375 mci->mod_name = EDAC_MOD_STR;
379 mci->mod_ver = "$Revision: 1.5.2.9 $"; 376 mci->mod_ver = "$Revision: 1.5.2.9 $";
380 mci->pdev = pdev; 377 mci->pdev = pdev;
381 378
382 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); 379 debugf3("%s(): init pvt\n", __func__);
383 pvt = (struct e7xxx_pvt *) mci->pvt_info; 380 pvt = (struct e7xxx_pvt *) mci->pvt_info;
384 pvt->dev_info = &e7xxx_devs[dev_idx]; 381 pvt->dev_info = &e7xxx_devs[dev_idx];
385 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 382 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
386 pvt->dev_info->err_dev, 383 pvt->dev_info->err_dev,
387 pvt->bridge_ck); 384 pvt->bridge_ck);
385
388 if (!pvt->bridge_ck) { 386 if (!pvt->bridge_ck) {
389 printk(KERN_ERR 387 e7xxx_printk(KERN_ERR, "error reporting device not found:"
390 "MC: error reporting device not found:" 388 "vendor %x device 0x%x (broken BIOS?)\n",
391 "vendor %x device 0x%x (broken BIOS?)\n", 389 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
392 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
393 goto fail; 390 goto fail;
394 } 391 }
395 392
396 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); 393 debugf3("%s(): more mci init\n", __func__);
397 mci->ctl_name = pvt->dev_info->ctl_name; 394 mci->ctl_name = pvt->dev_info->ctl_name;
398
399 mci->edac_check = e7xxx_check; 395 mci->edac_check = e7xxx_check;
400 mci->ctl_page_to_phys = ctl_page_to_phys; 396 mci->ctl_page_to_phys = ctl_page_to_phys;
401 397
@@ -418,17 +414,18 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
418 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); 414 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
419 /* convert a 64 or 32 MiB DRB to a page size. */ 415 /* convert a 64 or 32 MiB DRB to a page size. */
420 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 416 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
421 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 417 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
422 __func__, index, cumul_size); 418 cumul_size);
419
423 if (cumul_size == last_cumul_size) 420 if (cumul_size == last_cumul_size)
424 continue; /* not populated */ 421 continue; /* not populated */
425 422
426 csrow->first_page = last_cumul_size; 423 csrow->first_page = last_cumul_size;
427 csrow->last_page = cumul_size - 1; 424 csrow->last_page = cumul_size - 1;
428 csrow->nr_pages = cumul_size - last_cumul_size; 425 csrow->nr_pages = cumul_size - last_cumul_size;
429 last_cumul_size = cumul_size; 426 last_cumul_size = cumul_size;
430 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 427 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
431 csrow->mtype = MEM_RDDR; /* only one type supported */ 428 csrow->mtype = MEM_RDDR; /* only one type supported */
432 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 429 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
433 430
434 /* 431 /*
@@ -449,8 +446,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
449 446
450 mci->edac_cap |= EDAC_FLAG_NONE; 447 mci->edac_cap |= EDAC_FLAG_NONE;
451 448
452 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", 449 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
453 __func__);
454 /* load the top of low memory, remap base, and remap limit vars */ 450 /* load the top of low memory, remap base, and remap limit vars */
455 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); 451 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
456 pvt->tolm = ((u32) pci_data) << 4; 452 pvt->tolm = ((u32) pci_data) << 4;
@@ -458,22 +454,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
458 pvt->remapbase = ((u32) pci_data) << 14; 454 pvt->remapbase = ((u32) pci_data) << 14;
459 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); 455 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
460 pvt->remaplimit = ((u32) pci_data) << 14; 456 pvt->remaplimit = ((u32) pci_data) << 14;
461 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 457 e7xxx_printk(KERN_INFO,
462 pvt->remapbase, pvt->remaplimit); 458 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
459 pvt->remapbase, pvt->remaplimit);
463 460
464 /* clear any pending errors, or initial state bits */ 461 /* clear any pending errors, or initial state bits */
465 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); 462 e7xxx_get_error_info(mci, &discard);
466 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
467 463
468 if (edac_mc_add_mc(mci) != 0) { 464 if (edac_mc_add_mc(mci) != 0) {
469 debugf3("MC: " __FILE__ 465 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
470 ": %s(): failed edac_mc_add_mc()\n",
471 __func__);
472 goto fail; 466 goto fail;
473 } 467 }
474 468
475 /* get this far and it's successful */ 469 /* get this far and it's successful */
476 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 470 debugf3("%s(): success\n", __func__);
477 return 0; 471 return 0;
478 472
479fail: 473fail:
@@ -487,62 +481,67 @@ fail:
487} 481}
488 482
489/* returns count (>= 0), or negative on error */ 483/* returns count (>= 0), or negative on error */
490static int __devinit 484static int __devinit e7xxx_init_one(struct pci_dev *pdev,
491e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 485 const struct pci_device_id *ent)
492{ 486{
493 debugf0("MC: " __FILE__ ": %s()\n", __func__); 487 debugf0("%s()\n", __func__);
494 488
495 /* wake up and enable device */ 489 /* wake up and enable device */
496 return pci_enable_device(pdev) ? 490 return pci_enable_device(pdev) ?
497 -EIO : e7xxx_probe1(pdev, ent->driver_data); 491 -EIO : e7xxx_probe1(pdev, ent->driver_data);
498} 492}
499 493
500
501static void __devexit e7xxx_remove_one(struct pci_dev *pdev) 494static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
502{ 495{
503 struct mem_ctl_info *mci; 496 struct mem_ctl_info *mci;
504 struct e7xxx_pvt *pvt; 497 struct e7xxx_pvt *pvt;
505 498
506 debugf0(__FILE__ ": %s()\n", __func__); 499 debugf0("%s()\n", __func__);
507 500
508 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) && 501 if ((mci = edac_mc_del_mc(pdev)) == NULL)
509 edac_mc_del_mc(mci)) { 502 return;
510 pvt = (struct e7xxx_pvt *) mci->pvt_info;
511 pci_dev_put(pvt->bridge_ck);
512 edac_mc_free(mci);
513 }
514}
515 503
504 pvt = (struct e7xxx_pvt *) mci->pvt_info;
505 pci_dev_put(pvt->bridge_ck);
506 edac_mc_free(mci);
507}
516 508
517static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { 509static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
518 {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 510 {
519 E7205}, 511 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
520 {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 512 E7205
521 E7500}, 513 },
522 {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 514 {
523 E7501}, 515 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
524 {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 516 E7500
525 E7505}, 517 },
526 {0,} /* 0 terminated list. */ 518 {
519 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
520 E7501
521 },
522 {
523 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
524 E7505
525 },
526 {
527 0,
528 } /* 0 terminated list. */
527}; 529};
528 530
529MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); 531MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
530 532
531
532static struct pci_driver e7xxx_driver = { 533static struct pci_driver e7xxx_driver = {
533 .name = BS_MOD_STR, 534 .name = EDAC_MOD_STR,
534 .probe = e7xxx_init_one, 535 .probe = e7xxx_init_one,
535 .remove = __devexit_p(e7xxx_remove_one), 536 .remove = __devexit_p(e7xxx_remove_one),
536 .id_table = e7xxx_pci_tbl, 537 .id_table = e7xxx_pci_tbl,
537}; 538};
538 539
539
540static int __init e7xxx_init(void) 540static int __init e7xxx_init(void)
541{ 541{
542 return pci_register_driver(&e7xxx_driver); 542 return pci_register_driver(&e7xxx_driver);
543} 543}
544 544
545
546static void __exit e7xxx_exit(void) 545static void __exit e7xxx_exit(void)
547{ 546{
548 pci_unregister_driver(&e7xxx_driver); 547 pci_unregister_driver(&e7xxx_driver);
@@ -551,8 +550,7 @@ static void __exit e7xxx_exit(void)
551module_init(e7xxx_init); 550module_init(e7xxx_init);
552module_exit(e7xxx_exit); 551module_exit(e7xxx_exit);
553 552
554
555MODULE_LICENSE("GPL"); 553MODULE_LICENSE("GPL");
556MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 554MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
557 "Based on.work by Dan Hollis et al"); 555 "Based on.work by Dan Hollis et al");
558MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); 556MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9c205274c1cb..ea06e3a4dc35 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14 14
15
16#include <linux/config.h> 15#include <linux/config.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
@@ -29,14 +28,13 @@
29#include <linux/list.h> 28#include <linux/list.h>
30#include <linux/sysdev.h> 29#include <linux/sysdev.h>
31#include <linux/ctype.h> 30#include <linux/ctype.h>
32 31#include <linux/kthread.h>
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
34#include <asm/page.h> 33#include <asm/page.h>
35#include <asm/edac.h> 34#include <asm/edac.h>
36
37#include "edac_mc.h" 35#include "edac_mc.h"
38 36
39#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ 37#define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__
40 38
41/* For now, disable the EDAC sysfs code. The sysfs interface that EDAC 39/* For now, disable the EDAC sysfs code. The sysfs interface that EDAC
42 * presents to user space needs more thought, and is likely to change 40 * presents to user space needs more thought, and is likely to change
@@ -47,7 +45,7 @@
47#ifdef CONFIG_EDAC_DEBUG 45#ifdef CONFIG_EDAC_DEBUG
48/* Values of 0 to 4 will generate output */ 46/* Values of 0 to 4 will generate output */
49int edac_debug_level = 1; 47int edac_debug_level = 1;
50EXPORT_SYMBOL(edac_debug_level); 48EXPORT_SYMBOL_GPL(edac_debug_level);
51#endif 49#endif
52 50
53/* EDAC Controls, setable by module parameter, and sysfs */ 51/* EDAC Controls, setable by module parameter, and sysfs */
@@ -64,13 +62,14 @@ static atomic_t pci_parity_count = ATOMIC_INIT(0);
64static DECLARE_MUTEX(mem_ctls_mutex); 62static DECLARE_MUTEX(mem_ctls_mutex);
65static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); 63static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
66 64
65static struct task_struct *edac_thread;
66
67/* Structure of the whitelist and blacklist arrays */ 67/* Structure of the whitelist and blacklist arrays */
68struct edac_pci_device_list { 68struct edac_pci_device_list {
69 unsigned int vendor; /* Vendor ID */ 69 unsigned int vendor; /* Vendor ID */
70 unsigned int device; /* Deviice ID */ 70 unsigned int device; /* Deviice ID */
71}; 71};
72 72
73
74#define MAX_LISTED_PCI_DEVICES 32 73#define MAX_LISTED_PCI_DEVICES 32
75 74
76/* List of PCI devices (vendor-id:device-id) that should be skipped */ 75/* List of PCI devices (vendor-id:device-id) that should be skipped */
@@ -123,7 +122,6 @@ static const char *edac_caps[] = {
123 [EDAC_S16ECD16ED] = "S16ECD16ED" 122 [EDAC_S16ECD16ED] = "S16ECD16ED"
124}; 123};
125 124
126
127/* sysfs object: /sys/devices/system/edac */ 125/* sysfs object: /sys/devices/system/edac */
128static struct sysdev_class edac_class = { 126static struct sysdev_class edac_class = {
129 set_kset_name("edac"), 127 set_kset_name("edac"),
@@ -136,9 +134,15 @@ static struct sysdev_class edac_class = {
136static struct kobject edac_memctrl_kobj; 134static struct kobject edac_memctrl_kobj;
137static struct kobject edac_pci_kobj; 135static struct kobject edac_pci_kobj;
138 136
137/* We use these to wait for the reference counts on edac_memctrl_kobj and
138 * edac_pci_kobj to reach 0.
139 */
140static struct completion edac_memctrl_kobj_complete;
141static struct completion edac_pci_kobj_complete;
142
139/* 143/*
140 * /sys/devices/system/edac/mc; 144 * /sys/devices/system/edac/mc;
141 * data structures and methods 145 * data structures and methods
142 */ 146 */
143#if 0 147#if 0
144static ssize_t memctrl_string_show(void *ptr, char *buffer) 148static ssize_t memctrl_string_show(void *ptr, char *buffer)
@@ -165,33 +169,34 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
165} 169}
166 170
167struct memctrl_dev_attribute { 171struct memctrl_dev_attribute {
168 struct attribute attr; 172 struct attribute attr;
169 void *value; 173 void *value;
170 ssize_t (*show)(void *,char *); 174 ssize_t (*show)(void *,char *);
171 ssize_t (*store)(void *, const char *, size_t); 175 ssize_t (*store)(void *, const char *, size_t);
172}; 176};
173 177
174/* Set of show/store abstract level functions for memory control object */ 178/* Set of show/store abstract level functions for memory control object */
175static ssize_t 179static ssize_t memctrl_dev_show(struct kobject *kobj,
176memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer) 180 struct attribute *attr, char *buffer)
177{ 181{
178 struct memctrl_dev_attribute *memctrl_dev; 182 struct memctrl_dev_attribute *memctrl_dev;
179 memctrl_dev = (struct memctrl_dev_attribute*)attr; 183 memctrl_dev = (struct memctrl_dev_attribute*)attr;
180 184
181 if (memctrl_dev->show) 185 if (memctrl_dev->show)
182 return memctrl_dev->show(memctrl_dev->value, buffer); 186 return memctrl_dev->show(memctrl_dev->value, buffer);
187
183 return -EIO; 188 return -EIO;
184} 189}
185 190
186static ssize_t 191static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
187memctrl_dev_store(struct kobject *kobj, struct attribute *attr, 192 const char *buffer, size_t count)
188 const char *buffer, size_t count)
189{ 193{
190 struct memctrl_dev_attribute *memctrl_dev; 194 struct memctrl_dev_attribute *memctrl_dev;
191 memctrl_dev = (struct memctrl_dev_attribute*)attr; 195 memctrl_dev = (struct memctrl_dev_attribute*)attr;
192 196
193 if (memctrl_dev->store) 197 if (memctrl_dev->store)
194 return memctrl_dev->store(memctrl_dev->value, buffer, count); 198 return memctrl_dev->store(memctrl_dev->value, buffer, count);
199
195 return -EIO; 200 return -EIO;
196} 201}
197 202
@@ -227,7 +232,6 @@ MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
227MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 232MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
228MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 233MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
229 234
230
231/* Base Attributes of the memory ECC object */ 235/* Base Attributes of the memory ECC object */
232static struct memctrl_dev_attribute *memctrl_attr[] = { 236static struct memctrl_dev_attribute *memctrl_attr[] = {
233 &attr_panic_on_ue, 237 &attr_panic_on_ue,
@@ -240,13 +244,14 @@ static struct memctrl_dev_attribute *memctrl_attr[] = {
240/* Main MC kobject release() function */ 244/* Main MC kobject release() function */
241static void edac_memctrl_master_release(struct kobject *kobj) 245static void edac_memctrl_master_release(struct kobject *kobj)
242{ 246{
243 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); 247 debugf1("%s()\n", __func__);
248 complete(&edac_memctrl_kobj_complete);
244} 249}
245 250
246static struct kobj_type ktype_memctrl = { 251static struct kobj_type ktype_memctrl = {
247 .release = edac_memctrl_master_release, 252 .release = edac_memctrl_master_release,
248 .sysfs_ops = &memctrlfs_ops, 253 .sysfs_ops = &memctrlfs_ops,
249 .default_attrs = (struct attribute **) memctrl_attr, 254 .default_attrs = (struct attribute **) memctrl_attr,
250}; 255};
251 256
252#endif /* DISABLE_EDAC_SYSFS */ 257#endif /* DISABLE_EDAC_SYSFS */
@@ -268,32 +273,31 @@ static int edac_sysfs_memctrl_setup(void)
268{ 273{
269 int err=0; 274 int err=0;
270 275
271 debugf1("MC: " __FILE__ ": %s()\n", __func__); 276 debugf1("%s()\n", __func__);
272 277
273 /* create the /sys/devices/system/edac directory */ 278 /* create the /sys/devices/system/edac directory */
274 err = sysdev_class_register(&edac_class); 279 err = sysdev_class_register(&edac_class);
280
275 if (!err) { 281 if (!err) {
276 /* Init the MC's kobject */ 282 /* Init the MC's kobject */
277 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); 283 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
278 kobject_init(&edac_memctrl_kobj);
279
280 edac_memctrl_kobj.parent = &edac_class.kset.kobj; 284 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
281 edac_memctrl_kobj.ktype = &ktype_memctrl; 285 edac_memctrl_kobj.ktype = &ktype_memctrl;
282 286
283 /* generate sysfs "..../edac/mc" */ 287 /* generate sysfs "..../edac/mc" */
284 err = kobject_set_name(&edac_memctrl_kobj,"mc"); 288 err = kobject_set_name(&edac_memctrl_kobj,"mc");
289
285 if (!err) { 290 if (!err) {
286 /* FIXME: maybe new sysdev_create_subdir() */ 291 /* FIXME: maybe new sysdev_create_subdir() */
287 err = kobject_register(&edac_memctrl_kobj); 292 err = kobject_register(&edac_memctrl_kobj);
288 if (err) { 293
294 if (err)
289 debugf1("Failed to register '.../edac/mc'\n"); 295 debugf1("Failed to register '.../edac/mc'\n");
290 } else { 296 else
291 debugf1("Registered '.../edac/mc' kobject\n"); 297 debugf1("Registered '.../edac/mc' kobject\n");
292 }
293 } 298 }
294 } else { 299 } else
295 debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err); 300 debugf1("%s() error=%d\n", __func__, err);
296 }
297 301
298 return err; 302 return err;
299} 303}
@@ -308,11 +312,12 @@ static void edac_sysfs_memctrl_teardown(void)
308#ifndef DISABLE_EDAC_SYSFS 312#ifndef DISABLE_EDAC_SYSFS
309 debugf0("MC: " __FILE__ ": %s()\n", __func__); 313 debugf0("MC: " __FILE__ ": %s()\n", __func__);
310 314
311 /* Unregister the MC's kobject */ 315 /* Unregister the MC's kobject and wait for reference count to reach
316 * 0.
317 */
318 init_completion(&edac_memctrl_kobj_complete);
312 kobject_unregister(&edac_memctrl_kobj); 319 kobject_unregister(&edac_memctrl_kobj);
313 320 wait_for_completion(&edac_memctrl_kobj_complete);
314 /* release the master edac mc kobject */
315 kobject_put(&edac_memctrl_kobj);
316 321
317 /* Unregister the 'edac' object */ 322 /* Unregister the 'edac' object */
318 sysdev_class_unregister(&edac_class); 323 sysdev_class_unregister(&edac_class);
@@ -331,7 +336,6 @@ struct list_control {
331 int *count; 336 int *count;
332}; 337};
333 338
334
335#if 0 339#if 0
336/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */ 340/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */
337static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) 341static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
@@ -356,7 +360,6 @@ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
356 } 360 }
357 361
358 len += snprintf(p + len,(PAGE_SIZE-len), "\n"); 362 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
359
360 return (ssize_t) len; 363 return (ssize_t) len;
361} 364}
362 365
@@ -378,7 +381,7 @@ static int parse_one_device(const char **s,const char **e,
378 381
379 /* if null byte, we are done */ 382 /* if null byte, we are done */
380 if (!**s) { 383 if (!**s) {
381 (*s)++; /* keep *s moving */ 384 (*s)++; /* keep *s moving */
382 return 0; 385 return 0;
383 } 386 }
384 387
@@ -395,6 +398,7 @@ static int parse_one_device(const char **s,const char **e,
395 398
396 /* parse vendor_id */ 399 /* parse vendor_id */
397 runner = *s; 400 runner = *s;
401
398 while (runner < *e) { 402 while (runner < *e) {
399 /* scan for vendor:device delimiter */ 403 /* scan for vendor:device delimiter */
400 if (*runner == ':') { 404 if (*runner == ':') {
@@ -402,6 +406,7 @@ static int parse_one_device(const char **s,const char **e,
402 runner = p + 1; 406 runner = p + 1;
403 break; 407 break;
404 } 408 }
409
405 runner++; 410 runner++;
406 } 411 }
407 412
@@ -417,12 +422,11 @@ static int parse_one_device(const char **s,const char **e,
417 } 422 }
418 423
419 *s = runner; 424 *s = runner;
420
421 return 1; 425 return 1;
422} 426}
423 427
424static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, 428static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
425 size_t count) 429 size_t count)
426{ 430{
427 struct list_control *listctl; 431 struct list_control *listctl;
428 struct edac_pci_device_list *list; 432 struct edac_pci_device_list *list;
@@ -432,14 +436,12 @@ static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
432 436
433 s = (char*)buffer; 437 s = (char*)buffer;
434 e = s + count; 438 e = s + count;
435
436 listctl = ptr; 439 listctl = ptr;
437 list = listctl->list; 440 list = listctl->list;
438 index = listctl->count; 441 index = listctl->count;
439
440 *index = 0; 442 *index = 0;
441 while (*index < MAX_LISTED_PCI_DEVICES) {
442 443
444 while (*index < MAX_LISTED_PCI_DEVICES) {
443 if (parse_one_device(&s,&e,&vendor_id,&device_id)) { 445 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
444 list[ *index ].vendor = vendor_id; 446 list[ *index ].vendor = vendor_id;
445 list[ *index ].device = device_id; 447 list[ *index ].device = device_id;
@@ -472,15 +474,15 @@ static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
472} 474}
473 475
474struct edac_pci_dev_attribute { 476struct edac_pci_dev_attribute {
475 struct attribute attr; 477 struct attribute attr;
476 void *value; 478 void *value;
477 ssize_t (*show)(void *,char *); 479 ssize_t (*show)(void *,char *);
478 ssize_t (*store)(void *, const char *,size_t); 480 ssize_t (*store)(void *, const char *,size_t);
479}; 481};
480 482
481/* Set of show/store abstract level functions for PCI Parity object */ 483/* Set of show/store abstract level functions for PCI Parity object */
482static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, 484static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
483 char *buffer) 485 char *buffer)
484{ 486{
485 struct edac_pci_dev_attribute *edac_pci_dev; 487 struct edac_pci_dev_attribute *edac_pci_dev;
486 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 488 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -490,8 +492,8 @@ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
490 return -EIO; 492 return -EIO;
491} 493}
492 494
493static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr, 495static ssize_t edac_pci_dev_store(struct kobject *kobj,
494 const char *buffer, size_t count) 496 struct attribute *attr, const char *buffer, size_t count)
495{ 497{
496 struct edac_pci_dev_attribute *edac_pci_dev; 498 struct edac_pci_dev_attribute *edac_pci_dev;
497 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 499 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -506,7 +508,6 @@ static struct sysfs_ops edac_pci_sysfs_ops = {
506 .store = edac_pci_dev_store 508 .store = edac_pci_dev_store
507}; 509};
508 510
509
510#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ 511#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
511struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ 512struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
512 .attr = {.name = __stringify(_name), .mode = _mode }, \ 513 .attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -549,9 +550,11 @@ EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
549#endif 550#endif
550 551
551/* PCI Parity control files */ 552/* PCI Parity control files */
552EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); 553EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
553EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); 554 edac_pci_int_store);
554EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL); 555EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
556 edac_pci_int_store);
557EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
555 558
556/* Base Attributes of the memory ECC object */ 559/* Base Attributes of the memory ECC object */
557static struct edac_pci_dev_attribute *edac_pci_attr[] = { 560static struct edac_pci_dev_attribute *edac_pci_attr[] = {
@@ -564,13 +567,14 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
564/* No memory to release */ 567/* No memory to release */
565static void edac_pci_release(struct kobject *kobj) 568static void edac_pci_release(struct kobject *kobj)
566{ 569{
567 debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__); 570 debugf1("%s()\n", __func__);
571 complete(&edac_pci_kobj_complete);
568} 572}
569 573
570static struct kobj_type ktype_edac_pci = { 574static struct kobj_type ktype_edac_pci = {
571 .release = edac_pci_release, 575 .release = edac_pci_release,
572 .sysfs_ops = &edac_pci_sysfs_ops, 576 .sysfs_ops = &edac_pci_sysfs_ops,
573 .default_attrs = (struct attribute **) edac_pci_attr, 577 .default_attrs = (struct attribute **) edac_pci_attr,
574}; 578};
575 579
576#endif /* DISABLE_EDAC_SYSFS */ 580#endif /* DISABLE_EDAC_SYSFS */
@@ -588,24 +592,24 @@ static int edac_sysfs_pci_setup(void)
588{ 592{
589 int err; 593 int err;
590 594
591 debugf1("MC: " __FILE__ ": %s()\n", __func__); 595 debugf1("%s()\n", __func__);
592 596
593 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); 597 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
594
595 kobject_init(&edac_pci_kobj);
596 edac_pci_kobj.parent = &edac_class.kset.kobj; 598 edac_pci_kobj.parent = &edac_class.kset.kobj;
597 edac_pci_kobj.ktype = &ktype_edac_pci; 599 edac_pci_kobj.ktype = &ktype_edac_pci;
598
599 err = kobject_set_name(&edac_pci_kobj, "pci"); 600 err = kobject_set_name(&edac_pci_kobj, "pci");
601
600 if (!err) { 602 if (!err) {
601 /* Instanstiate the csrow object */ 603 /* Instanstiate the csrow object */
602 /* FIXME: maybe new sysdev_create_subdir() */ 604 /* FIXME: maybe new sysdev_create_subdir() */
603 err = kobject_register(&edac_pci_kobj); 605 err = kobject_register(&edac_pci_kobj);
606
604 if (err) 607 if (err)
605 debugf1("Failed to register '.../edac/pci'\n"); 608 debugf1("Failed to register '.../edac/pci'\n");
606 else 609 else
607 debugf1("Registered '.../edac/pci' kobject\n"); 610 debugf1("Registered '.../edac/pci' kobject\n");
608 } 611 }
612
609 return err; 613 return err;
610} 614}
611#endif /* DISABLE_EDAC_SYSFS */ 615#endif /* DISABLE_EDAC_SYSFS */
@@ -613,10 +617,10 @@ static int edac_sysfs_pci_setup(void)
613static void edac_sysfs_pci_teardown(void) 617static void edac_sysfs_pci_teardown(void)
614{ 618{
615#ifndef DISABLE_EDAC_SYSFS 619#ifndef DISABLE_EDAC_SYSFS
616 debugf0("MC: " __FILE__ ": %s()\n", __func__); 620 debugf0("%s()\n", __func__);
617 621 init_completion(&edac_pci_kobj_complete);
618 kobject_unregister(&edac_pci_kobj); 622 kobject_unregister(&edac_pci_kobj);
619 kobject_put(&edac_pci_kobj); 623 wait_for_completion(&edac_pci_kobj_complete);
620#endif 624#endif
621} 625}
622 626
@@ -633,6 +637,7 @@ static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
633 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", 637 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
634 csrow->channels[0].label); 638 csrow->channels[0].label);
635 } 639 }
640
636 return size; 641 return size;
637} 642}
638 643
@@ -644,11 +649,12 @@ static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
644 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 649 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
645 csrow->channels[1].label); 650 csrow->channels[1].label);
646 } 651 }
652
647 return size; 653 return size;
648} 654}
649 655
650static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, 656static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
651 const char *data, size_t size) 657 const char *data, size_t size)
652{ 658{
653 ssize_t max_size = 0; 659 ssize_t max_size = 0;
654 660
@@ -657,11 +663,12 @@ static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
657 strncpy(csrow->channels[0].label, data, max_size); 663 strncpy(csrow->channels[0].label, data, max_size);
658 csrow->channels[0].label[max_size] = '\0'; 664 csrow->channels[0].label[max_size] = '\0';
659 } 665 }
666
660 return size; 667 return size;
661} 668}
662 669
663static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, 670static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
664 const char *data, size_t size) 671 const char *data, size_t size)
665{ 672{
666 ssize_t max_size = 0; 673 ssize_t max_size = 0;
667 674
@@ -670,6 +677,7 @@ static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
670 strncpy(csrow->channels[1].label, data, max_size); 677 strncpy(csrow->channels[1].label, data, max_size);
671 csrow->channels[1].label[max_size] = '\0'; 678 csrow->channels[1].label[max_size] = '\0';
672 } 679 }
680
673 return max_size; 681 return max_size;
674} 682}
675 683
@@ -690,6 +698,7 @@ static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
690 if (csrow->nr_channels > 0) { 698 if (csrow->nr_channels > 0) {
691 size = sprintf(data,"%u\n", csrow->channels[0].ce_count); 699 size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
692 } 700 }
701
693 return size; 702 return size;
694} 703}
695 704
@@ -700,6 +709,7 @@ static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
700 if (csrow->nr_channels > 1) { 709 if (csrow->nr_channels > 1) {
701 size = sprintf(data,"%u\n", csrow->channels[1].ce_count); 710 size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
702 } 711 }
712
703 return size; 713 return size;
704} 714}
705 715
@@ -724,7 +734,7 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
724} 734}
725 735
726struct csrowdev_attribute { 736struct csrowdev_attribute {
727 struct attribute attr; 737 struct attribute attr;
728 ssize_t (*show)(struct csrow_info *,char *); 738 ssize_t (*show)(struct csrow_info *,char *);
729 ssize_t (*store)(struct csrow_info *, const char *,size_t); 739 ssize_t (*store)(struct csrow_info *, const char *,size_t);
730}; 740};
@@ -734,24 +744,26 @@ struct csrowdev_attribute {
734 744
735/* Set of show/store higher level functions for csrow objects */ 745/* Set of show/store higher level functions for csrow objects */
736static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, 746static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
737 char *buffer) 747 char *buffer)
738{ 748{
739 struct csrow_info *csrow = to_csrow(kobj); 749 struct csrow_info *csrow = to_csrow(kobj);
740 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); 750 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
741 751
742 if (csrowdev_attr->show) 752 if (csrowdev_attr->show)
743 return csrowdev_attr->show(csrow, buffer); 753 return csrowdev_attr->show(csrow, buffer);
754
744 return -EIO; 755 return -EIO;
745} 756}
746 757
747static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 758static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
748 const char *buffer, size_t count) 759 const char *buffer, size_t count)
749{ 760{
750 struct csrow_info *csrow = to_csrow(kobj); 761 struct csrow_info *csrow = to_csrow(kobj);
751 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); 762 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
752 763
753 if (csrowdev_attr->store) 764 if (csrowdev_attr->store)
754 return csrowdev_attr->store(csrow, buffer, count); 765 return csrowdev_attr->store(csrow, buffer, count);
766
755 return -EIO; 767 return -EIO;
756} 768}
757 769
@@ -785,7 +797,6 @@ CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
785 csrow_ch1_dimm_label_show, 797 csrow_ch1_dimm_label_show,
786 csrow_ch1_dimm_label_store); 798 csrow_ch1_dimm_label_store);
787 799
788
789/* Attributes of the CSROW<id> object */ 800/* Attributes of the CSROW<id> object */
790static struct csrowdev_attribute *csrow_attr[] = { 801static struct csrowdev_attribute *csrow_attr[] = {
791 &attr_dev_type, 802 &attr_dev_type,
@@ -801,40 +812,43 @@ static struct csrowdev_attribute *csrow_attr[] = {
801 NULL, 812 NULL,
802}; 813};
803 814
804
805/* No memory to release */ 815/* No memory to release */
806static void edac_csrow_instance_release(struct kobject *kobj) 816static void edac_csrow_instance_release(struct kobject *kobj)
807{ 817{
808 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); 818 struct csrow_info *cs;
819
820 debugf1("%s()\n", __func__);
821 cs = container_of(kobj, struct csrow_info, kobj);
822 complete(&cs->kobj_complete);
809} 823}
810 824
811static struct kobj_type ktype_csrow = { 825static struct kobj_type ktype_csrow = {
812 .release = edac_csrow_instance_release, 826 .release = edac_csrow_instance_release,
813 .sysfs_ops = &csrowfs_ops, 827 .sysfs_ops = &csrowfs_ops,
814 .default_attrs = (struct attribute **) csrow_attr, 828 .default_attrs = (struct attribute **) csrow_attr,
815}; 829};
816 830
817/* Create a CSROW object under specifed edac_mc_device */ 831/* Create a CSROW object under specifed edac_mc_device */
818static int edac_create_csrow_object(struct kobject *edac_mci_kobj, 832static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
819 struct csrow_info *csrow, int index ) 833 struct csrow_info *csrow, int index)
820{ 834{
821 int err = 0; 835 int err = 0;
822 836
823 debugf0("MC: " __FILE__ ": %s()\n", __func__); 837 debugf0("%s()\n", __func__);
824
825 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 838 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
826 839
827 /* generate ..../edac/mc/mc<id>/csrow<index> */ 840 /* generate ..../edac/mc/mc<id>/csrow<index> */
828 841
829 kobject_init(&csrow->kobj);
830 csrow->kobj.parent = edac_mci_kobj; 842 csrow->kobj.parent = edac_mci_kobj;
831 csrow->kobj.ktype = &ktype_csrow; 843 csrow->kobj.ktype = &ktype_csrow;
832 844
833 /* name this instance of csrow<id> */ 845 /* name this instance of csrow<id> */
834 err = kobject_set_name(&csrow->kobj,"csrow%d",index); 846 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
847
835 if (!err) { 848 if (!err) {
836 /* Instanstiate the csrow object */ 849 /* Instanstiate the csrow object */
837 err = kobject_register(&csrow->kobj); 850 err = kobject_register(&csrow->kobj);
851
838 if (err) 852 if (err)
839 debugf0("Failed to register CSROW%d\n",index); 853 debugf0("Failed to register CSROW%d\n",index);
840 else 854 else
@@ -846,8 +860,8 @@ static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
846 860
847/* sysfs data structures and methods for the MCI kobjects */ 861/* sysfs data structures and methods for the MCI kobjects */
848 862
849static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 863static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
850 const char *data, size_t count ) 864 const char *data, size_t count)
851{ 865{
852 int row, chan; 866 int row, chan;
853 867
@@ -855,16 +869,18 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
855 mci->ce_noinfo_count = 0; 869 mci->ce_noinfo_count = 0;
856 mci->ue_count = 0; 870 mci->ue_count = 0;
857 mci->ce_count = 0; 871 mci->ce_count = 0;
872
858 for (row = 0; row < mci->nr_csrows; row++) { 873 for (row = 0; row < mci->nr_csrows; row++) {
859 struct csrow_info *ri = &mci->csrows[row]; 874 struct csrow_info *ri = &mci->csrows[row];
860 875
861 ri->ue_count = 0; 876 ri->ue_count = 0;
862 ri->ce_count = 0; 877 ri->ce_count = 0;
878
863 for (chan = 0; chan < ri->nr_channels; chan++) 879 for (chan = 0; chan < ri->nr_channels; chan++)
864 ri->channels[chan].ce_count = 0; 880 ri->channels[chan].ce_count = 0;
865 } 881 }
866 mci->start_time = jiffies;
867 882
883 mci->start_time = jiffies;
868 return count; 884 return count;
869} 885}
870 886
@@ -922,18 +938,16 @@ static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
922 938
923 p += mci_output_edac_cap(p,mci->edac_ctl_cap); 939 p += mci_output_edac_cap(p,mci->edac_ctl_cap);
924 p += sprintf(p, "\n"); 940 p += sprintf(p, "\n");
925
926 return p - data; 941 return p - data;
927} 942}
928 943
929static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, 944static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
930 char *data) 945 char *data)
931{ 946{
932 char *p = data; 947 char *p = data;
933 948
934 p += mci_output_edac_cap(p,mci->edac_cap); 949 p += mci_output_edac_cap(p,mci->edac_cap);
935 p += sprintf(p, "\n"); 950 p += sprintf(p, "\n");
936
937 return p - data; 951 return p - data;
938} 952}
939 953
@@ -950,13 +964,13 @@ static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
950 return p - buf; 964 return p - buf;
951} 965}
952 966
953static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data) 967static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci,
968 char *data)
954{ 969{
955 char *p = data; 970 char *p = data;
956 971
957 p += mci_output_mtype_cap(p,mci->mtype_cap); 972 p += mci_output_mtype_cap(p,mci->mtype_cap);
958 p += sprintf(p, "\n"); 973 p += sprintf(p, "\n");
959
960 return p - data; 974 return p - data;
961} 975}
962 976
@@ -970,6 +984,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
970 984
971 if (!csrow->nr_pages) 985 if (!csrow->nr_pages)
972 continue; 986 continue;
987
973 total_pages += csrow->nr_pages; 988 total_pages += csrow->nr_pages;
974 } 989 }
975 990
@@ -977,7 +992,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
977} 992}
978 993
979struct mcidev_attribute { 994struct mcidev_attribute {
980 struct attribute attr; 995 struct attribute attr;
981 ssize_t (*show)(struct mem_ctl_info *,char *); 996 ssize_t (*show)(struct mem_ctl_info *,char *);
982 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); 997 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
983}; 998};
@@ -986,30 +1001,32 @@ struct mcidev_attribute {
986#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) 1001#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
987 1002
988static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, 1003static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
989 char *buffer) 1004 char *buffer)
990{ 1005{
991 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1006 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
992 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1007 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
993 1008
994 if (mcidev_attr->show) 1009 if (mcidev_attr->show)
995 return mcidev_attr->show(mem_ctl_info, buffer); 1010 return mcidev_attr->show(mem_ctl_info, buffer);
1011
996 return -EIO; 1012 return -EIO;
997} 1013}
998 1014
999static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 1015static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
1000 const char *buffer, size_t count) 1016 const char *buffer, size_t count)
1001{ 1017{
1002 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1018 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1003 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1019 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1004 1020
1005 if (mcidev_attr->store) 1021 if (mcidev_attr->store)
1006 return mcidev_attr->store(mem_ctl_info, buffer, count); 1022 return mcidev_attr->store(mem_ctl_info, buffer, count);
1023
1007 return -EIO; 1024 return -EIO;
1008} 1025}
1009 1026
1010static struct sysfs_ops mci_ops = { 1027static struct sysfs_ops mci_ops = {
1011 .show = mcidev_show, 1028 .show = mcidev_show,
1012 .store = mcidev_store 1029 .store = mcidev_store
1013}; 1030};
1014 1031
1015#define MCIDEV_ATTR(_name,_mode,_show,_store) \ 1032#define MCIDEV_ATTR(_name,_mode,_show,_store) \
@@ -1037,7 +1054,6 @@ MCIDEV_ATTR(edac_current_capability,S_IRUGO,
1037MCIDEV_ATTR(supported_mem_type,S_IRUGO, 1054MCIDEV_ATTR(supported_mem_type,S_IRUGO,
1038 mci_supported_mem_type_show,NULL); 1055 mci_supported_mem_type_show,NULL);
1039 1056
1040
1041static struct mcidev_attribute *mci_attr[] = { 1057static struct mcidev_attribute *mci_attr[] = {
1042 &mci_attr_reset_counters, 1058 &mci_attr_reset_counters,
1043 &mci_attr_module_name, 1059 &mci_attr_module_name,
@@ -1054,25 +1070,22 @@ static struct mcidev_attribute *mci_attr[] = {
1054 NULL 1070 NULL
1055}; 1071};
1056 1072
1057
1058/* 1073/*
1059 * Release of a MC controlling instance 1074 * Release of a MC controlling instance
1060 */ 1075 */
1061static void edac_mci_instance_release(struct kobject *kobj) 1076static void edac_mci_instance_release(struct kobject *kobj)
1062{ 1077{
1063 struct mem_ctl_info *mci; 1078 struct mem_ctl_info *mci;
1064 mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
1065 1079
1066 debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n", 1080 mci = to_mci(kobj);
1067 __func__, mci->mc_idx); 1081 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1068 1082 complete(&mci->kobj_complete);
1069 kfree(mci);
1070} 1083}
1071 1084
1072static struct kobj_type ktype_mci = { 1085static struct kobj_type ktype_mci = {
1073 .release = edac_mci_instance_release, 1086 .release = edac_mci_instance_release,
1074 .sysfs_ops = &mci_ops, 1087 .sysfs_ops = &mci_ops,
1075 .default_attrs = (struct attribute **) mci_attr, 1088 .default_attrs = (struct attribute **) mci_attr,
1076}; 1089};
1077 1090
1078#endif /* DISABLE_EDAC_SYSFS */ 1091#endif /* DISABLE_EDAC_SYSFS */
@@ -1099,13 +1112,12 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1099 struct csrow_info *csrow; 1112 struct csrow_info *csrow;
1100 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; 1113 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1101 1114
1102 debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx); 1115 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1103
1104 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); 1116 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1105 kobject_init(edac_mci_kobj);
1106 1117
1107 /* set the name of the mc<id> object */ 1118 /* set the name of the mc<id> object */
1108 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); 1119 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1120
1109 if (err) 1121 if (err)
1110 return err; 1122 return err;
1111 1123
@@ -1115,50 +1127,48 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1115 1127
1116 /* register the mc<id> kobject */ 1128 /* register the mc<id> kobject */
1117 err = kobject_register(edac_mci_kobj); 1129 err = kobject_register(edac_mci_kobj);
1130
1118 if (err) 1131 if (err)
1119 return err; 1132 return err;
1120 1133
1121 /* create a symlink for the device */ 1134 /* create a symlink for the device */
1122 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, 1135 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1123 EDAC_DEVICE_SYMLINK); 1136 EDAC_DEVICE_SYMLINK);
1124 if (err) { 1137
1125 kobject_unregister(edac_mci_kobj); 1138 if (err)
1126 return err; 1139 goto fail0;
1127 }
1128 1140
1129 /* Make directories for each CSROW object 1141 /* Make directories for each CSROW object
1130 * under the mc<id> kobject 1142 * under the mc<id> kobject
1131 */ 1143 */
1132 for (i = 0; i < mci->nr_csrows; i++) { 1144 for (i = 0; i < mci->nr_csrows; i++) {
1133
1134 csrow = &mci->csrows[i]; 1145 csrow = &mci->csrows[i];
1135 1146
1136 /* Only expose populated CSROWs */ 1147 /* Only expose populated CSROWs */
1137 if (csrow->nr_pages > 0) { 1148 if (csrow->nr_pages > 0) {
1138 err = edac_create_csrow_object(edac_mci_kobj,csrow,i); 1149 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1150
1139 if (err) 1151 if (err)
1140 goto fail; 1152 goto fail1;
1141 } 1153 }
1142 } 1154 }
1143 1155
1144 /* Mark this MCI instance as having sysfs entries */
1145 mci->sysfs_active = MCI_SYSFS_ACTIVE;
1146
1147 return 0; 1156 return 0;
1148 1157
1149
1150 /* CSROW error: backout what has already been registered, */ 1158 /* CSROW error: backout what has already been registered, */
1151fail: 1159fail1:
1152 for ( i--; i >= 0; i--) { 1160 for ( i--; i >= 0; i--) {
1153 if (csrow->nr_pages > 0) { 1161 if (csrow->nr_pages > 0) {
1162 init_completion(&csrow->kobj_complete);
1154 kobject_unregister(&mci->csrows[i].kobj); 1163 kobject_unregister(&mci->csrows[i].kobj);
1155 kobject_put(&mci->csrows[i].kobj); 1164 wait_for_completion(&csrow->kobj_complete);
1156 } 1165 }
1157 } 1166 }
1158 1167
1168fail0:
1169 init_completion(&mci->kobj_complete);
1159 kobject_unregister(edac_mci_kobj); 1170 kobject_unregister(edac_mci_kobj);
1160 kobject_put(edac_mci_kobj); 1171 wait_for_completion(&mci->kobj_complete);
1161
1162 return err; 1172 return err;
1163} 1173}
1164#endif /* DISABLE_EDAC_SYSFS */ 1174#endif /* DISABLE_EDAC_SYSFS */
@@ -1171,20 +1181,21 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1171#ifndef DISABLE_EDAC_SYSFS 1181#ifndef DISABLE_EDAC_SYSFS
1172 int i; 1182 int i;
1173 1183
1174 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1184 debugf0("%s()\n", __func__);
1175 1185
1176 /* remove all csrow kobjects */ 1186 /* remove all csrow kobjects */
1177 for (i = 0; i < mci->nr_csrows; i++) { 1187 for (i = 0; i < mci->nr_csrows; i++) {
1178 if (mci->csrows[i].nr_pages > 0) { 1188 if (mci->csrows[i].nr_pages > 0) {
1189 init_completion(&mci->csrows[i].kobj_complete);
1179 kobject_unregister(&mci->csrows[i].kobj); 1190 kobject_unregister(&mci->csrows[i].kobj);
1180 kobject_put(&mci->csrows[i].kobj); 1191 wait_for_completion(&mci->csrows[i].kobj_complete);
1181 } 1192 }
1182 } 1193 }
1183 1194
1184 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); 1195 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1185 1196 init_completion(&mci->kobj_complete);
1186 kobject_unregister(&mci->edac_mci_kobj); 1197 kobject_unregister(&mci->edac_mci_kobj);
1187 kobject_put(&mci->edac_mci_kobj); 1198 wait_for_completion(&mci->kobj_complete);
1188#endif /* DISABLE_EDAC_SYSFS */ 1199#endif /* DISABLE_EDAC_SYSFS */
1189} 1200}
1190 1201
@@ -1192,8 +1203,6 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1192 1203
1193#ifdef CONFIG_EDAC_DEBUG 1204#ifdef CONFIG_EDAC_DEBUG
1194 1205
1195EXPORT_SYMBOL(edac_mc_dump_channel);
1196
1197void edac_mc_dump_channel(struct channel_info *chan) 1206void edac_mc_dump_channel(struct channel_info *chan)
1198{ 1207{
1199 debugf4("\tchannel = %p\n", chan); 1208 debugf4("\tchannel = %p\n", chan);
@@ -1202,9 +1211,7 @@ void edac_mc_dump_channel(struct channel_info *chan)
1202 debugf4("\tchannel->label = '%s'\n", chan->label); 1211 debugf4("\tchannel->label = '%s'\n", chan->label);
1203 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 1212 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1204} 1213}
1205 1214EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
1206
1207EXPORT_SYMBOL(edac_mc_dump_csrow);
1208 1215
1209void edac_mc_dump_csrow(struct csrow_info *csrow) 1216void edac_mc_dump_csrow(struct csrow_info *csrow)
1210{ 1217{
@@ -1220,9 +1227,7 @@ void edac_mc_dump_csrow(struct csrow_info *csrow)
1220 debugf4("\tcsrow->channels = %p\n", csrow->channels); 1227 debugf4("\tcsrow->channels = %p\n", csrow->channels);
1221 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 1228 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1222} 1229}
1223 1230EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
1224
1225EXPORT_SYMBOL(edac_mc_dump_mci);
1226 1231
1227void edac_mc_dump_mci(struct mem_ctl_info *mci) 1232void edac_mc_dump_mci(struct mem_ctl_info *mci)
1228{ 1233{
@@ -1238,9 +1243,9 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
1238 mci->mod_name, mci->ctl_name); 1243 mci->mod_name, mci->ctl_name);
1239 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 1244 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1240} 1245}
1246EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
1241 1247
1242 1248#endif /* CONFIG_EDAC_DEBUG */
1243#endif /* CONFIG_EDAC_DEBUG */
1244 1249
1245/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 1250/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1246 * Adjust 'ptr' so that its alignment is at least as stringent as what the 1251 * Adjust 'ptr' so that its alignment is at least as stringent as what the
@@ -1249,7 +1254,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
1249 * If 'size' is a constant, the compiler will optimize this whole function 1254 * If 'size' is a constant, the compiler will optimize this whole function
1250 * down to either a no-op or the addition of a constant to the value of 'ptr'. 1255 * down to either a no-op or the addition of a constant to the value of 'ptr'.
1251 */ 1256 */
1252static inline char * align_ptr (void *ptr, unsigned size) 1257static inline char * align_ptr(void *ptr, unsigned size)
1253{ 1258{
1254 unsigned align, r; 1259 unsigned align, r;
1255 1260
@@ -1276,9 +1281,6 @@ static inline char * align_ptr (void *ptr, unsigned size)
1276 return (char *) (((unsigned long) ptr) + align - r); 1281 return (char *) (((unsigned long) ptr) + align - r);
1277} 1282}
1278 1283
1279
1280EXPORT_SYMBOL(edac_mc_alloc);
1281
1282/** 1284/**
1283 * edac_mc_alloc: Allocate a struct mem_ctl_info structure 1285 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1284 * @size_pvt: size of private storage needed 1286 * @size_pvt: size of private storage needed
@@ -1296,7 +1298,7 @@ EXPORT_SYMBOL(edac_mc_alloc);
1296 * struct mem_ctl_info pointer 1298 * struct mem_ctl_info pointer
1297 */ 1299 */
1298struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 1300struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1299 unsigned nr_chans) 1301 unsigned nr_chans)
1300{ 1302{
1301 struct mem_ctl_info *mci; 1303 struct mem_ctl_info *mci;
1302 struct csrow_info *csi, *csrow; 1304 struct csrow_info *csi, *csrow;
@@ -1327,8 +1329,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1327 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); 1329 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1328 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; 1330 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1329 1331
1330 memset(mci, 0, size); /* clear all fields */ 1332 memset(mci, 0, size); /* clear all fields */
1331
1332 mci->csrows = csi; 1333 mci->csrows = csi;
1333 mci->pvt_info = pvt; 1334 mci->pvt_info = pvt;
1334 mci->nr_csrows = nr_csrows; 1335 mci->nr_csrows = nr_csrows;
@@ -1350,50 +1351,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1350 1351
1351 return mci; 1352 return mci;
1352} 1353}
1353 1354EXPORT_SYMBOL_GPL(edac_mc_alloc);
1354
1355EXPORT_SYMBOL(edac_mc_free);
1356 1355
1357/** 1356/**
1358 * edac_mc_free: Free a previously allocated 'mci' structure 1357 * edac_mc_free: Free a previously allocated 'mci' structure
1359 * @mci: pointer to a struct mem_ctl_info structure 1358 * @mci: pointer to a struct mem_ctl_info structure
1360 *
1361 * Free up a previously allocated mci structure
1362 * A MCI structure can be in 2 states after being allocated
1363 * by edac_mc_alloc().
1364 * 1) Allocated in a MC driver's probe, but not yet committed
1365 * 2) Allocated and committed, by a call to edac_mc_add_mc()
1366 * edac_mc_add_mc() is the function that adds the sysfs entries
1367 * thus, this free function must determine which state the 'mci'
1368 * structure is in, then either free it directly or
1369 * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
1370 *
1371 * VOID Return
1372 */ 1359 */
1373void edac_mc_free(struct mem_ctl_info *mci) 1360void edac_mc_free(struct mem_ctl_info *mci)
1374{ 1361{
1375 /* only if sysfs entries for this mci instance exist 1362 kfree(mci);
1376 * do we remove them and defer the actual kfree via
1377 * the kobject 'release()' callback.
1378 *
1379 * Otherwise, do a straight kfree now.
1380 */
1381 if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
1382 edac_remove_sysfs_mci_device(mci);
1383 else
1384 kfree(mci);
1385} 1363}
1364EXPORT_SYMBOL_GPL(edac_mc_free);
1386 1365
1387 1366static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev)
1388
1389EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
1390
1391struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1392{ 1367{
1393 struct mem_ctl_info *mci; 1368 struct mem_ctl_info *mci;
1394 struct list_head *item; 1369 struct list_head *item;
1395 1370
1396 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1371 debugf3("%s()\n", __func__);
1397 1372
1398 list_for_each(item, &mc_devices) { 1373 list_for_each(item, &mc_devices) {
1399 mci = list_entry(item, struct mem_ctl_info, link); 1374 mci = list_entry(item, struct mem_ctl_info, link);
@@ -1405,7 +1380,7 @@ struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1405 return NULL; 1380 return NULL;
1406} 1381}
1407 1382
1408static int add_mc_to_global_list (struct mem_ctl_info *mci) 1383static int add_mc_to_global_list(struct mem_ctl_info *mci)
1409{ 1384{
1410 struct list_head *item, *insert_before; 1385 struct list_head *item, *insert_before;
1411 struct mem_ctl_info *p; 1386 struct mem_ctl_info *p;
@@ -1415,11 +1390,12 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
1415 mci->mc_idx = 0; 1390 mci->mc_idx = 0;
1416 insert_before = &mc_devices; 1391 insert_before = &mc_devices;
1417 } else { 1392 } else {
1418 if (edac_mc_find_mci_by_pdev(mci->pdev)) { 1393 if (find_mci_by_pdev(mci->pdev)) {
1419 printk(KERN_WARNING 1394 edac_printk(KERN_WARNING, EDAC_MC,
1420 "EDAC MC: %s (%s) %s %s already assigned %d\n", 1395 "%s (%s) %s %s already assigned %d\n",
1421 mci->pdev->dev.bus_id, pci_name(mci->pdev), 1396 mci->pdev->dev.bus_id,
1422 mci->mod_name, mci->ctl_name, mci->mc_idx); 1397 pci_name(mci->pdev), mci->mod_name,
1398 mci->ctl_name, mci->mc_idx);
1423 return 1; 1399 return 1;
1424 } 1400 }
1425 1401
@@ -1447,12 +1423,26 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
1447 return 0; 1423 return 0;
1448} 1424}
1449 1425
1426static void complete_mc_list_del(struct rcu_head *head)
1427{
1428 struct mem_ctl_info *mci;
1450 1429
1430 mci = container_of(head, struct mem_ctl_info, rcu);
1431 INIT_LIST_HEAD(&mci->link);
1432 complete(&mci->complete);
1433}
1451 1434
1452EXPORT_SYMBOL(edac_mc_add_mc); 1435static void del_mc_from_global_list(struct mem_ctl_info *mci)
1436{
1437 list_del_rcu(&mci->link);
1438 init_completion(&mci->complete);
1439 call_rcu(&mci->rcu, complete_mc_list_del);
1440 wait_for_completion(&mci->complete);
1441}
1453 1442
1454/** 1443/**
1455 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list 1444 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
1445 * create sysfs entries associated with mci structure
1456 * @mci: pointer to the mci structure to be added to the list 1446 * @mci: pointer to the mci structure to be added to the list
1457 * 1447 *
1458 * Return: 1448 * Return:
@@ -1463,111 +1453,90 @@ EXPORT_SYMBOL(edac_mc_add_mc);
1463/* FIXME - should a warning be printed if no error detection? correction? */ 1453/* FIXME - should a warning be printed if no error detection? correction? */
1464int edac_mc_add_mc(struct mem_ctl_info *mci) 1454int edac_mc_add_mc(struct mem_ctl_info *mci)
1465{ 1455{
1466 int rc = 1; 1456 debugf0("%s()\n", __func__);
1467
1468 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1469#ifdef CONFIG_EDAC_DEBUG 1457#ifdef CONFIG_EDAC_DEBUG
1470 if (edac_debug_level >= 3) 1458 if (edac_debug_level >= 3)
1471 edac_mc_dump_mci(mci); 1459 edac_mc_dump_mci(mci);
1460
1472 if (edac_debug_level >= 4) { 1461 if (edac_debug_level >= 4) {
1473 int i; 1462 int i;
1474 1463
1475 for (i = 0; i < mci->nr_csrows; i++) { 1464 for (i = 0; i < mci->nr_csrows; i++) {
1476 int j; 1465 int j;
1466
1477 edac_mc_dump_csrow(&mci->csrows[i]); 1467 edac_mc_dump_csrow(&mci->csrows[i]);
1478 for (j = 0; j < mci->csrows[i].nr_channels; j++) 1468 for (j = 0; j < mci->csrows[i].nr_channels; j++)
1479 edac_mc_dump_channel(&mci->csrows[i]. 1469 edac_mc_dump_channel(
1480 channels[j]); 1470 &mci->csrows[i].channels[j]);
1481 } 1471 }
1482 } 1472 }
1483#endif 1473#endif
1484 down(&mem_ctls_mutex); 1474 down(&mem_ctls_mutex);
1485 1475
1486 if (add_mc_to_global_list(mci)) 1476 if (add_mc_to_global_list(mci))
1487 goto finish; 1477 goto fail0;
1488 1478
1489 /* set load time so that error rate can be tracked */ 1479 /* set load time so that error rate can be tracked */
1490 mci->start_time = jiffies; 1480 mci->start_time = jiffies;
1491 1481
1492 if (edac_create_sysfs_mci_device(mci)) { 1482 if (edac_create_sysfs_mci_device(mci)) {
1493 printk(KERN_WARNING 1483 edac_mc_printk(mci, KERN_WARNING,
1494 "EDAC MC%d: failed to create sysfs device\n", 1484 "failed to create sysfs device\n");
1495 mci->mc_idx); 1485 goto fail1;
1496 /* FIXME - should there be an error code and unwind? */
1497 goto finish;
1498 } 1486 }
1499 1487
1500 /* Report action taken */ 1488 /* Report action taken */
1501 printk(KERN_INFO 1489 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n",
1502 "EDAC MC%d: Giving out device to %s %s: PCI %s\n", 1490 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1503 mci->mc_idx, mci->mod_name, mci->ctl_name,
1504 pci_name(mci->pdev));
1505 1491
1506
1507 rc = 0;
1508
1509finish:
1510 up(&mem_ctls_mutex); 1492 up(&mem_ctls_mutex);
1511 return rc; 1493 return 0;
1512}
1513
1514
1515
1516static void complete_mc_list_del (struct rcu_head *head)
1517{
1518 struct mem_ctl_info *mci;
1519 1494
1520 mci = container_of(head, struct mem_ctl_info, rcu); 1495fail1:
1521 INIT_LIST_HEAD(&mci->link); 1496 del_mc_from_global_list(mci);
1522 complete(&mci->complete);
1523}
1524 1497
1525static void del_mc_from_global_list (struct mem_ctl_info *mci) 1498fail0:
1526{ 1499 up(&mem_ctls_mutex);
1527 list_del_rcu(&mci->link); 1500 return 1;
1528 init_completion(&mci->complete);
1529 call_rcu(&mci->rcu, complete_mc_list_del);
1530 wait_for_completion(&mci->complete);
1531} 1501}
1532 1502EXPORT_SYMBOL_GPL(edac_mc_add_mc);
1533EXPORT_SYMBOL(edac_mc_del_mc);
1534 1503
1535/** 1504/**
1536 * edac_mc_del_mc: Remove the specified mci structure from global list 1505 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
1537 * @mci: Pointer to struct mem_ctl_info structure 1506 * remove mci structure from global list
1507 * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove.
1538 * 1508 *
1539 * Returns: 1509 * Return pointer to removed mci structure, or NULL if device not found.
1540 * 0 Success
1541 * 1 Failure
1542 */ 1510 */
1543int edac_mc_del_mc(struct mem_ctl_info *mci) 1511struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev)
1544{ 1512{
1545 int rc = 1; 1513 struct mem_ctl_info *mci;
1546 1514
1547 debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1515 debugf0("MC: %s()\n", __func__);
1548 down(&mem_ctls_mutex); 1516 down(&mem_ctls_mutex);
1517
1518 if ((mci = find_mci_by_pdev(pdev)) == NULL) {
1519 up(&mem_ctls_mutex);
1520 return NULL;
1521 }
1522
1523 edac_remove_sysfs_mci_device(mci);
1549 del_mc_from_global_list(mci); 1524 del_mc_from_global_list(mci);
1550 printk(KERN_INFO
1551 "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
1552 mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
1553 pci_name(mci->pdev));
1554 rc = 0;
1555 up(&mem_ctls_mutex); 1525 up(&mem_ctls_mutex);
1556 1526 edac_printk(KERN_INFO, EDAC_MC,
1557 return rc; 1527 "Removed device %d for %s %s: PCI %s\n", mci->mc_idx,
1528 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1529 return mci;
1558} 1530}
1531EXPORT_SYMBOL_GPL(edac_mc_del_mc);
1559 1532
1560 1533void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
1561EXPORT_SYMBOL(edac_mc_scrub_block);
1562
1563void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1564 u32 size)
1565{ 1534{
1566 struct page *pg; 1535 struct page *pg;
1567 void *virt_addr; 1536 void *virt_addr;
1568 unsigned long flags = 0; 1537 unsigned long flags = 0;
1569 1538
1570 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1539 debugf3("%s()\n", __func__);
1571 1540
1572 /* ECC error page was not in our memory. Ignore it. */ 1541 /* ECC error page was not in our memory. Ignore it. */
1573 if(!pfn_valid(page)) 1542 if(!pfn_valid(page))
@@ -1590,19 +1559,15 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1590 if (PageHighMem(pg)) 1559 if (PageHighMem(pg))
1591 local_irq_restore(flags); 1560 local_irq_restore(flags);
1592} 1561}
1593 1562EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
1594 1563
1595/* FIXME - should return -1 */ 1564/* FIXME - should return -1 */
1596EXPORT_SYMBOL(edac_mc_find_csrow_by_page); 1565int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
1597
1598int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1599 unsigned long page)
1600{ 1566{
1601 struct csrow_info *csrows = mci->csrows; 1567 struct csrow_info *csrows = mci->csrows;
1602 int row, i; 1568 int row, i;
1603 1569
1604 debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__, 1570 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
1605 page);
1606 row = -1; 1571 row = -1;
1607 1572
1608 for (i = 0; i < mci->nr_csrows; i++) { 1573 for (i = 0; i < mci->nr_csrows; i++) {
@@ -1611,11 +1576,10 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1611 if (csrow->nr_pages == 0) 1576 if (csrow->nr_pages == 0)
1612 continue; 1577 continue;
1613 1578
1614 debugf3("MC%d: " __FILE__ 1579 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
1615 ": %s(): first(0x%lx) page(0x%lx)" 1580 "mask(0x%lx)\n", mci->mc_idx, __func__,
1616 " last(0x%lx) mask(0x%lx)\n", mci->mc_idx, 1581 csrow->first_page, page, csrow->last_page,
1617 __func__, csrow->first_page, page, 1582 csrow->page_mask);
1618 csrow->last_page, csrow->page_mask);
1619 1583
1620 if ((page >= csrow->first_page) && 1584 if ((page >= csrow->first_page) &&
1621 (page <= csrow->last_page) && 1585 (page <= csrow->last_page) &&
@@ -1627,56 +1591,52 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1627 } 1591 }
1628 1592
1629 if (row == -1) 1593 if (row == -1)
1630 printk(KERN_ERR 1594 edac_mc_printk(mci, KERN_ERR,
1631 "EDAC MC%d: could not look up page error address %lx\n", 1595 "could not look up page error address %lx\n",
1632 mci->mc_idx, (unsigned long) page); 1596 (unsigned long) page);
1633 1597
1634 return row; 1598 return row;
1635} 1599}
1636 1600EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
1637
1638EXPORT_SYMBOL(edac_mc_handle_ce);
1639 1601
1640/* FIXME - setable log (warning/emerg) levels */ 1602/* FIXME - setable log (warning/emerg) levels */
1641/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ 1603/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1642void edac_mc_handle_ce(struct mem_ctl_info *mci, 1604void edac_mc_handle_ce(struct mem_ctl_info *mci,
1643 unsigned long page_frame_number, 1605 unsigned long page_frame_number, unsigned long offset_in_page,
1644 unsigned long offset_in_page, 1606 unsigned long syndrome, int row, int channel, const char *msg)
1645 unsigned long syndrome, int row, int channel,
1646 const char *msg)
1647{ 1607{
1648 unsigned long remapped_page; 1608 unsigned long remapped_page;
1649 1609
1650 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1610 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1651 1611
1652 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1612 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1653 if (row >= mci->nr_csrows || row < 0) { 1613 if (row >= mci->nr_csrows || row < 0) {
1654 /* something is wrong */ 1614 /* something is wrong */
1655 printk(KERN_ERR 1615 edac_mc_printk(mci, KERN_ERR,
1656 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", 1616 "INTERNAL ERROR: row out of range "
1657 mci->mc_idx, row, mci->nr_csrows); 1617 "(%d >= %d)\n", row, mci->nr_csrows);
1658 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1618 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1659 return; 1619 return;
1660 } 1620 }
1621
1661 if (channel >= mci->csrows[row].nr_channels || channel < 0) { 1622 if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1662 /* something is wrong */ 1623 /* something is wrong */
1663 printk(KERN_ERR 1624 edac_mc_printk(mci, KERN_ERR,
1664 "EDAC MC%d: INTERNAL ERROR: channel out of range " 1625 "INTERNAL ERROR: channel out of range "
1665 "(%d >= %d)\n", 1626 "(%d >= %d)\n", channel,
1666 mci->mc_idx, channel, mci->csrows[row].nr_channels); 1627 mci->csrows[row].nr_channels);
1667 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1628 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1668 return; 1629 return;
1669 } 1630 }
1670 1631
1671 if (log_ce) 1632 if (log_ce)
1672 /* FIXME - put in DIMM location */ 1633 /* FIXME - put in DIMM location */
1673 printk(KERN_WARNING 1634 edac_mc_printk(mci, KERN_WARNING,
1674 "EDAC MC%d: CE page 0x%lx, offset 0x%lx," 1635 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
1675 " grain %d, syndrome 0x%lx, row %d, channel %d," 1636 "0x%lx, row %d, channel %d, label \"%s\": %s\n",
1676 " label \"%s\": %s\n", mci->mc_idx, 1637 page_frame_number, offset_in_page,
1677 page_frame_number, offset_in_page, 1638 mci->csrows[row].grain, syndrome, row, channel,
1678 mci->csrows[row].grain, syndrome, row, channel, 1639 mci->csrows[row].channels[channel].label, msg);
1679 mci->csrows[row].channels[channel].label, msg);
1680 1640
1681 mci->ce_count++; 1641 mci->ce_count++;
1682 mci->csrows[row].ce_count++; 1642 mci->csrows[row].ce_count++;
@@ -1697,31 +1657,25 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
1697 page_frame_number; 1657 page_frame_number;
1698 1658
1699 edac_mc_scrub_block(remapped_page, offset_in_page, 1659 edac_mc_scrub_block(remapped_page, offset_in_page,
1700 mci->csrows[row].grain); 1660 mci->csrows[row].grain);
1701 } 1661 }
1702} 1662}
1663EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
1703 1664
1704 1665void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
1705EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
1706
1707void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
1708 const char *msg)
1709{ 1666{
1710 if (log_ce) 1667 if (log_ce)
1711 printk(KERN_WARNING 1668 edac_mc_printk(mci, KERN_WARNING,
1712 "EDAC MC%d: CE - no information available: %s\n", 1669 "CE - no information available: %s\n", msg);
1713 mci->mc_idx, msg); 1670
1714 mci->ce_noinfo_count++; 1671 mci->ce_noinfo_count++;
1715 mci->ce_count++; 1672 mci->ce_count++;
1716} 1673}
1717 1674EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
1718
1719EXPORT_SYMBOL(edac_mc_handle_ue);
1720 1675
1721void edac_mc_handle_ue(struct mem_ctl_info *mci, 1676void edac_mc_handle_ue(struct mem_ctl_info *mci,
1722 unsigned long page_frame_number, 1677 unsigned long page_frame_number, unsigned long offset_in_page,
1723 unsigned long offset_in_page, int row, 1678 int row, const char *msg)
1724 const char *msg)
1725{ 1679{
1726 int len = EDAC_MC_LABEL_LEN * 4; 1680 int len = EDAC_MC_LABEL_LEN * 4;
1727 char labels[len + 1]; 1681 char labels[len + 1];
@@ -1729,65 +1683,61 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci,
1729 int chan; 1683 int chan;
1730 int chars; 1684 int chars;
1731 1685
1732 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1686 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1733 1687
1734 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1688 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1735 if (row >= mci->nr_csrows || row < 0) { 1689 if (row >= mci->nr_csrows || row < 0) {
1736 /* something is wrong */ 1690 /* something is wrong */
1737 printk(KERN_ERR 1691 edac_mc_printk(mci, KERN_ERR,
1738 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", 1692 "INTERNAL ERROR: row out of range "
1739 mci->mc_idx, row, mci->nr_csrows); 1693 "(%d >= %d)\n", row, mci->nr_csrows);
1740 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1694 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1741 return; 1695 return;
1742 } 1696 }
1743 1697
1744 chars = snprintf(pos, len + 1, "%s", 1698 chars = snprintf(pos, len + 1, "%s",
1745 mci->csrows[row].channels[0].label); 1699 mci->csrows[row].channels[0].label);
1746 len -= chars; 1700 len -= chars;
1747 pos += chars; 1701 pos += chars;
1702
1748 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); 1703 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1749 chan++) { 1704 chan++) {
1750 chars = snprintf(pos, len + 1, ":%s", 1705 chars = snprintf(pos, len + 1, ":%s",
1751 mci->csrows[row].channels[chan].label); 1706 mci->csrows[row].channels[chan].label);
1752 len -= chars; 1707 len -= chars;
1753 pos += chars; 1708 pos += chars;
1754 } 1709 }
1755 1710
1756 if (log_ue) 1711 if (log_ue)
1757 printk(KERN_EMERG 1712 edac_mc_printk(mci, KERN_EMERG,
1758 "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," 1713 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
1759 " labels \"%s\": %s\n", mci->mc_idx, 1714 "labels \"%s\": %s\n", page_frame_number,
1760 page_frame_number, offset_in_page, 1715 offset_in_page, mci->csrows[row].grain, row, labels,
1761 mci->csrows[row].grain, row, labels, msg); 1716 msg);
1762 1717
1763 if (panic_on_ue) 1718 if (panic_on_ue)
1764 panic 1719 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
1765 ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," 1720 "row %d, labels \"%s\": %s\n", mci->mc_idx,
1766 " labels \"%s\": %s\n", mci->mc_idx, 1721 page_frame_number, offset_in_page,
1767 page_frame_number, offset_in_page, 1722 mci->csrows[row].grain, row, labels, msg);
1768 mci->csrows[row].grain, row, labels, msg);
1769 1723
1770 mci->ue_count++; 1724 mci->ue_count++;
1771 mci->csrows[row].ue_count++; 1725 mci->csrows[row].ue_count++;
1772} 1726}
1727EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
1773 1728
1774 1729void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
1775EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
1776
1777void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
1778 const char *msg)
1779{ 1730{
1780 if (panic_on_ue) 1731 if (panic_on_ue)
1781 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); 1732 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1782 1733
1783 if (log_ue) 1734 if (log_ue)
1784 printk(KERN_WARNING 1735 edac_mc_printk(mci, KERN_WARNING,
1785 "EDAC MC%d: UE - no information available: %s\n", 1736 "UE - no information available: %s\n", msg);
1786 mci->mc_idx, msg);
1787 mci->ue_noinfo_count++; 1737 mci->ue_noinfo_count++;
1788 mci->ue_count++; 1738 mci->ue_count++;
1789} 1739}
1790 1740EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
1791 1741
1792#ifdef CONFIG_PCI 1742#ifdef CONFIG_PCI
1793 1743
@@ -1799,18 +1749,22 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1799 where = secondary ? PCI_SEC_STATUS : PCI_STATUS; 1749 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1800 pci_read_config_word(dev, where, &status); 1750 pci_read_config_word(dev, where, &status);
1801 1751
1802 /* If we get back 0xFFFF then we must suspect that the card has been pulled but 1752 /* If we get back 0xFFFF then we must suspect that the card has been
1803 the Linux PCI layer has not yet finished cleaning up. We don't want to report 1753 * pulled but the Linux PCI layer has not yet finished cleaning up.
1804 on such devices */ 1754 * We don't want to report on such devices
1755 */
1805 1756
1806 if (status == 0xFFFF) { 1757 if (status == 0xFFFF) {
1807 u32 sanity; 1758 u32 sanity;
1759
1808 pci_read_config_dword(dev, 0, &sanity); 1760 pci_read_config_dword(dev, 0, &sanity);
1761
1809 if (sanity == 0xFFFFFFFF) 1762 if (sanity == 0xFFFFFFFF)
1810 return 0; 1763 return 0;
1811 } 1764 }
1765
1812 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | 1766 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1813 PCI_STATUS_PARITY; 1767 PCI_STATUS_PARITY;
1814 1768
1815 if (status) 1769 if (status)
1816 /* reset only the bits we are interested in */ 1770 /* reset only the bits we are interested in */
@@ -1822,7 +1776,7 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1822typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); 1776typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1823 1777
1824/* Clear any PCI parity errors logged by this device. */ 1778/* Clear any PCI parity errors logged by this device. */
1825static void edac_pci_dev_parity_clear( struct pci_dev *dev ) 1779static void edac_pci_dev_parity_clear(struct pci_dev *dev)
1826{ 1780{
1827 u8 header_type; 1781 u8 header_type;
1828 1782
@@ -1853,25 +1807,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1853 /* check the status reg for errors */ 1807 /* check the status reg for errors */
1854 if (status) { 1808 if (status) {
1855 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 1809 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1856 printk(KERN_CRIT 1810 edac_printk(KERN_CRIT, EDAC_PCI,
1857 "EDAC PCI- "
1858 "Signaled System Error on %s\n", 1811 "Signaled System Error on %s\n",
1859 pci_name (dev)); 1812 pci_name(dev));
1860 1813
1861 if (status & (PCI_STATUS_PARITY)) { 1814 if (status & (PCI_STATUS_PARITY)) {
1862 printk(KERN_CRIT 1815 edac_printk(KERN_CRIT, EDAC_PCI,
1863 "EDAC PCI- "
1864 "Master Data Parity Error on %s\n", 1816 "Master Data Parity Error on %s\n",
1865 pci_name (dev)); 1817 pci_name(dev));
1866 1818
1867 atomic_inc(&pci_parity_count); 1819 atomic_inc(&pci_parity_count);
1868 } 1820 }
1869 1821
1870 if (status & (PCI_STATUS_DETECTED_PARITY)) { 1822 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1871 printk(KERN_CRIT 1823 edac_printk(KERN_CRIT, EDAC_PCI,
1872 "EDAC PCI- "
1873 "Detected Parity Error on %s\n", 1824 "Detected Parity Error on %s\n",
1874 pci_name (dev)); 1825 pci_name(dev));
1875 1826
1876 atomic_inc(&pci_parity_count); 1827 atomic_inc(&pci_parity_count);
1877 } 1828 }
@@ -1892,25 +1843,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1892 /* check the secondary status reg for errors */ 1843 /* check the secondary status reg for errors */
1893 if (status) { 1844 if (status) {
1894 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 1845 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1895 printk(KERN_CRIT 1846 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1896 "EDAC PCI-Bridge- "
1897 "Signaled System Error on %s\n", 1847 "Signaled System Error on %s\n",
1898 pci_name (dev)); 1848 pci_name(dev));
1899 1849
1900 if (status & (PCI_STATUS_PARITY)) { 1850 if (status & (PCI_STATUS_PARITY)) {
1901 printk(KERN_CRIT 1851 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1902 "EDAC PCI-Bridge- " 1852 "Master Data Parity Error on "
1903 "Master Data Parity Error on %s\n", 1853 "%s\n", pci_name(dev));
1904 pci_name (dev));
1905 1854
1906 atomic_inc(&pci_parity_count); 1855 atomic_inc(&pci_parity_count);
1907 } 1856 }
1908 1857
1909 if (status & (PCI_STATUS_DETECTED_PARITY)) { 1858 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1910 printk(KERN_CRIT 1859 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1911 "EDAC PCI-Bridge- "
1912 "Detected Parity Error on %s\n", 1860 "Detected Parity Error on %s\n",
1913 pci_name (dev)); 1861 pci_name(dev));
1914 1862
1915 atomic_inc(&pci_parity_count); 1863 atomic_inc(&pci_parity_count);
1916 } 1864 }
@@ -1929,58 +1877,55 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1929 * Returns: 0 not found 1877 * Returns: 0 not found
1930 * 1 found on list 1878 * 1 found on list
1931 */ 1879 */
1932static int check_dev_on_list(struct edac_pci_device_list *list, int free_index, 1880static int check_dev_on_list(struct edac_pci_device_list *list,
1933 struct pci_dev *dev) 1881 int free_index, struct pci_dev *dev)
1934{ 1882{
1935 int i; 1883 int i;
1936 int rc = 0; /* Assume not found */ 1884 int rc = 0; /* Assume not found */
1937 unsigned short vendor=dev->vendor; 1885 unsigned short vendor=dev->vendor;
1938 unsigned short device=dev->device; 1886 unsigned short device=dev->device;
1939 1887
1940 /* Scan the list, looking for a vendor/device match 1888 /* Scan the list, looking for a vendor/device match */
1941 */ 1889 for (i = 0; i < free_index; i++, list++ ) {
1942 for (i = 0; i < free_index; i++, list++ ) { 1890 if ((list->vendor == vendor ) && (list->device == device )) {
1943 if ( (list->vendor == vendor ) && 1891 rc = 1;
1944 (list->device == device )) { 1892 break;
1945 rc = 1; 1893 }
1946 break; 1894 }
1947 }
1948 }
1949 1895
1950 return rc; 1896 return rc;
1951} 1897}
1952 1898
1953/* 1899/*
1954 * pci_dev parity list iterator 1900 * pci_dev parity list iterator
1955 * Scan the PCI device list for one iteration, looking for SERRORs 1901 * Scan the PCI device list for one iteration, looking for SERRORs
1956 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices 1902 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1957 */ 1903 */
1958static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) 1904static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1959{ 1905{
1960 struct pci_dev *dev=NULL; 1906 struct pci_dev *dev = NULL;
1961 1907
1962 /* request for kernel access to the next PCI device, if any, 1908 /* request for kernel access to the next PCI device, if any,
1963 * and while we are looking at it have its reference count 1909 * and while we are looking at it have its reference count
1964 * bumped until we are done with it 1910 * bumped until we are done with it
1965 */ 1911 */
1966 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1912 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1967 1913 /* if whitelist exists then it has priority, so only scan
1968 /* if whitelist exists then it has priority, so only scan those 1914 * those devices on the whitelist
1969 * devices on the whitelist 1915 */
1970 */ 1916 if (pci_whitelist_count > 0 ) {
1971 if (pci_whitelist_count > 0 ) { 1917 if (check_dev_on_list(pci_whitelist,
1972 if (check_dev_on_list(pci_whitelist,
1973 pci_whitelist_count, dev)) 1918 pci_whitelist_count, dev))
1974 fn(dev); 1919 fn(dev);
1975 } else { 1920 } else {
1976 /* 1921 /*
1977 * if no whitelist, then check if this devices is 1922 * if no whitelist, then check if this devices is
1978 * blacklisted 1923 * blacklisted
1979 */ 1924 */
1980 if (!check_dev_on_list(pci_blacklist, 1925 if (!check_dev_on_list(pci_blacklist,
1981 pci_blacklist_count, dev)) 1926 pci_blacklist_count, dev))
1982 fn(dev); 1927 fn(dev);
1983 } 1928 }
1984 } 1929 }
1985} 1930}
1986 1931
@@ -1989,7 +1934,7 @@ static void do_pci_parity_check(void)
1989 unsigned long flags; 1934 unsigned long flags;
1990 int before_count; 1935 int before_count;
1991 1936
1992 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1937 debugf3("%s()\n", __func__);
1993 1938
1994 if (!check_pci_parity) 1939 if (!check_pci_parity)
1995 return; 1940 return;
@@ -2011,7 +1956,6 @@ static void do_pci_parity_check(void)
2011 } 1956 }
2012} 1957}
2013 1958
2014
2015static inline void clear_pci_parity_errors(void) 1959static inline void clear_pci_parity_errors(void)
2016{ 1960{
2017 /* Clear any PCI bus parity errors that devices initially have logged 1961 /* Clear any PCI bus parity errors that devices initially have logged
@@ -2020,37 +1964,30 @@ static inline void clear_pci_parity_errors(void)
2020 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); 1964 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
2021} 1965}
2022 1966
2023
2024#else /* CONFIG_PCI */ 1967#else /* CONFIG_PCI */
2025 1968
2026
2027static inline void do_pci_parity_check(void) 1969static inline void do_pci_parity_check(void)
2028{ 1970{
2029 /* no-op */ 1971 /* no-op */
2030} 1972}
2031 1973
2032
2033static inline void clear_pci_parity_errors(void) 1974static inline void clear_pci_parity_errors(void)
2034{ 1975{
2035 /* no-op */ 1976 /* no-op */
2036} 1977}
2037 1978
2038
2039#endif /* CONFIG_PCI */ 1979#endif /* CONFIG_PCI */
2040 1980
2041/* 1981/*
2042 * Iterate over all MC instances and check for ECC, et al, errors 1982 * Iterate over all MC instances and check for ECC, et al, errors
2043 */ 1983 */
2044static inline void check_mc_devices (void) 1984static inline void check_mc_devices(void)
2045{ 1985{
2046 unsigned long flags;
2047 struct list_head *item; 1986 struct list_head *item;
2048 struct mem_ctl_info *mci; 1987 struct mem_ctl_info *mci;
2049 1988
2050 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1989 debugf3("%s()\n", __func__);
2051 1990 down(&mem_ctls_mutex);
2052 /* during poll, have interrupts off */
2053 local_irq_save(flags);
2054 1991
2055 list_for_each(item, &mc_devices) { 1992 list_for_each(item, &mc_devices) {
2056 mci = list_entry(item, struct mem_ctl_info, link); 1993 mci = list_entry(item, struct mem_ctl_info, link);
@@ -2059,10 +1996,9 @@ static inline void check_mc_devices (void)
2059 mci->edac_check(mci); 1996 mci->edac_check(mci);
2060 } 1997 }
2061 1998
2062 local_irq_restore(flags); 1999 up(&mem_ctls_mutex);
2063} 2000}
2064 2001
2065
2066/* 2002/*
2067 * Check MC status every poll_msec. 2003 * Check MC status every poll_msec.
2068 * Check PCI status every poll_msec as well. 2004 * Check PCI status every poll_msec as well.
@@ -2073,70 +2009,21 @@ static inline void check_mc_devices (void)
2073 */ 2009 */
2074static void do_edac_check(void) 2010static void do_edac_check(void)
2075{ 2011{
2076 2012 debugf3("%s()\n", __func__);
2077 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2078
2079 check_mc_devices(); 2013 check_mc_devices();
2080
2081 do_pci_parity_check(); 2014 do_pci_parity_check();
2082} 2015}
2083 2016
2084
2085/*
2086 * EDAC thread state information
2087 */
2088struct bs_thread_info
2089{
2090 struct task_struct *task;
2091 struct completion *event;
2092 char *name;
2093 void (*run)(void);
2094};
2095
2096static struct bs_thread_info bs_thread;
2097
2098/*
2099 * edac_kernel_thread
2100 * This the kernel thread that processes edac operations
2101 * in a normal thread environment
2102 */
2103static int edac_kernel_thread(void *arg) 2017static int edac_kernel_thread(void *arg)
2104{ 2018{
2105 struct bs_thread_info *thread = (struct bs_thread_info *) arg; 2019 while (!kthread_should_stop()) {
2106 2020 do_edac_check();
2107 /* detach thread */
2108 daemonize(thread->name);
2109
2110 current->exit_signal = SIGCHLD;
2111 allow_signal(SIGKILL);
2112 thread->task = current;
2113
2114 /* indicate to starting task we have started */
2115 complete(thread->event);
2116
2117 /* loop forever, until we are told to stop */
2118 while(thread->run != NULL) {
2119 void (*run)(void);
2120
2121 /* call the function to check the memory controllers */
2122 run = thread->run;
2123 if (run)
2124 run();
2125
2126 if (signal_pending(current))
2127 flush_signals(current);
2128
2129 /* ensure we are interruptable */
2130 set_current_state(TASK_INTERRUPTIBLE);
2131 2021
2132 /* goto sleep for the interval */ 2022 /* goto sleep for the interval */
2133 schedule_timeout((HZ * poll_msec) / 1000); 2023 schedule_timeout_interruptible((HZ * poll_msec) / 1000);
2134 try_to_freeze(); 2024 try_to_freeze();
2135 } 2025 }
2136 2026
2137 /* notify waiter that we are exiting */
2138 complete(thread->event);
2139
2140 return 0; 2027 return 0;
2141} 2028}
2142 2029
@@ -2146,10 +2033,7 @@ static int edac_kernel_thread(void *arg)
2146 */ 2033 */
2147static int __init edac_mc_init(void) 2034static int __init edac_mc_init(void)
2148{ 2035{
2149 int ret; 2036 edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
2150 struct completion event;
2151
2152 printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
2153 2037
2154 /* 2038 /*
2155 * Harvest and clear any boot/initialization PCI parity errors 2039 * Harvest and clear any boot/initialization PCI parity errors
@@ -2160,80 +2044,54 @@ static int __init edac_mc_init(void)
2160 */ 2044 */
2161 clear_pci_parity_errors(); 2045 clear_pci_parity_errors();
2162 2046
2163 /* perform check for first time to harvest boot leftovers */ 2047 /* Create the MC sysfs entries */
2164 do_edac_check();
2165
2166 /* Create the MC sysfs entires */
2167 if (edac_sysfs_memctrl_setup()) { 2048 if (edac_sysfs_memctrl_setup()) {
2168 printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n"); 2049 edac_printk(KERN_ERR, EDAC_MC,
2050 "Error initializing sysfs code\n");
2169 return -ENODEV; 2051 return -ENODEV;
2170 } 2052 }
2171 2053
2172 /* Create the PCI parity sysfs entries */ 2054 /* Create the PCI parity sysfs entries */
2173 if (edac_sysfs_pci_setup()) { 2055 if (edac_sysfs_pci_setup()) {
2174 edac_sysfs_memctrl_teardown(); 2056 edac_sysfs_memctrl_teardown();
2175 printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n"); 2057 edac_printk(KERN_ERR, EDAC_MC,
2058 "EDAC PCI: Error initializing sysfs code\n");
2176 return -ENODEV; 2059 return -ENODEV;
2177 } 2060 }
2178 2061
2179 /* Create our kernel thread */
2180 init_completion(&event);
2181 bs_thread.event = &event;
2182 bs_thread.name = "kedac";
2183 bs_thread.run = do_edac_check;
2184
2185 /* create our kernel thread */ 2062 /* create our kernel thread */
2186 ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL); 2063 edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
2187 if (ret < 0) { 2064
2065 if (IS_ERR(edac_thread)) {
2188 /* remove the sysfs entries */ 2066 /* remove the sysfs entries */
2189 edac_sysfs_memctrl_teardown(); 2067 edac_sysfs_memctrl_teardown();
2190 edac_sysfs_pci_teardown(); 2068 edac_sysfs_pci_teardown();
2191 return -ENOMEM; 2069 return PTR_ERR(edac_thread);
2192 } 2070 }
2193 2071
2194 /* wait for our kernel theard ack that it is up and running */
2195 wait_for_completion(&event);
2196
2197 return 0; 2072 return 0;
2198} 2073}
2199 2074
2200
2201/* 2075/*
2202 * edac_mc_exit() 2076 * edac_mc_exit()
2203 * module exit/termination functioni 2077 * module exit/termination functioni
2204 */ 2078 */
2205static void __exit edac_mc_exit(void) 2079static void __exit edac_mc_exit(void)
2206{ 2080{
2207 struct completion event; 2081 debugf0("%s()\n", __func__);
2208 2082 kthread_stop(edac_thread);
2209 debugf0("MC: " __FILE__ ": %s()\n", __func__);
2210
2211 init_completion(&event);
2212 bs_thread.event = &event;
2213
2214 /* As soon as ->run is set to NULL, the task could disappear,
2215 * so we need to hold tasklist_lock until we have sent the signal
2216 */
2217 read_lock(&tasklist_lock);
2218 bs_thread.run = NULL;
2219 send_sig(SIGKILL, bs_thread.task, 1);
2220 read_unlock(&tasklist_lock);
2221 wait_for_completion(&event);
2222 2083
2223 /* tear down the sysfs device */ 2084 /* tear down the sysfs device */
2224 edac_sysfs_memctrl_teardown(); 2085 edac_sysfs_memctrl_teardown();
2225 edac_sysfs_pci_teardown(); 2086 edac_sysfs_pci_teardown();
2226} 2087}
2227 2088
2228
2229
2230
2231module_init(edac_mc_init); 2089module_init(edac_mc_init);
2232module_exit(edac_mc_exit); 2090module_exit(edac_mc_exit);
2233 2091
2234MODULE_LICENSE("GPL"); 2092MODULE_LICENSE("GPL");
2235MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 2093MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2236 "Based on.work by Dan Hollis et al"); 2094 "Based on work by Dan Hollis et al");
2237MODULE_DESCRIPTION("Core library routines for MC reporting"); 2095MODULE_DESCRIPTION("Core library routines for MC reporting");
2238 2096
2239module_param(panic_on_ue, int, 0644); 2097module_param(panic_on_ue, int, 0644);
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 75ecf484a43a..8d9e83909b9c 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -15,11 +15,9 @@
15 * 15 *
16 */ 16 */
17 17
18
19#ifndef _EDAC_MC_H_ 18#ifndef _EDAC_MC_H_
20#define _EDAC_MC_H_ 19#define _EDAC_MC_H_
21 20
22
23#include <linux/config.h> 21#include <linux/config.h>
24#include <linux/kernel.h> 22#include <linux/kernel.h>
25#include <linux/types.h> 23#include <linux/types.h>
@@ -33,7 +31,6 @@
33#include <linux/completion.h> 31#include <linux/completion.h>
34#include <linux/kobject.h> 32#include <linux/kobject.h>
35 33
36
37#define EDAC_MC_LABEL_LEN 31 34#define EDAC_MC_LABEL_LEN 31
38#define MC_PROC_NAME_MAX_LEN 7 35#define MC_PROC_NAME_MAX_LEN 7
39 36
@@ -43,31 +40,53 @@
43#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) 40#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
44#endif 41#endif
45 42
43#define edac_printk(level, prefix, fmt, arg...) \
44 printk(level "EDAC " prefix ": " fmt, ##arg)
45
46#define edac_mc_printk(mci, level, fmt, arg...) \
47 printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
48
49#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
50 printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
51
52/* prefixes for edac_printk() and edac_mc_printk() */
53#define EDAC_MC "MC"
54#define EDAC_PCI "PCI"
55#define EDAC_DEBUG "DEBUG"
56
46#ifdef CONFIG_EDAC_DEBUG 57#ifdef CONFIG_EDAC_DEBUG
47extern int edac_debug_level; 58extern int edac_debug_level;
48#define edac_debug_printk(level, fmt, args...) \ 59
49do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0) 60#define edac_debug_printk(level, fmt, arg...) \
61 do { \
62 if (level <= edac_debug_level) \
63 edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
64 } while(0)
65
50#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) 66#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
51#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) 67#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
52#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) 68#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
53#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) 69#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
54#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) 70#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
55#else /* !CONFIG_EDAC_DEBUG */ 71
72#else /* !CONFIG_EDAC_DEBUG */
73
56#define debugf0( ... ) 74#define debugf0( ... )
57#define debugf1( ... ) 75#define debugf1( ... )
58#define debugf2( ... ) 76#define debugf2( ... )
59#define debugf3( ... ) 77#define debugf3( ... )
60#define debugf4( ... ) 78#define debugf4( ... )
61#endif /* !CONFIG_EDAC_DEBUG */
62 79
80#endif /* !CONFIG_EDAC_DEBUG */
63 81
64#define bs_xstr(s) bs_str(s) 82#define edac_xstr(s) edac_str(s)
65#define bs_str(s) #s 83#define edac_str(s) #s
66#define BS_MOD_STR bs_xstr(KBUILD_BASENAME) 84#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
67 85
68#define BIT(x) (1 << (x)) 86#define BIT(x) (1 << (x))
69 87
70#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev 88#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
89 PCI_DEVICE_ID_ ## vend ## _ ## dev
71 90
72/* memory devices */ 91/* memory devices */
73enum dev_type { 92enum dev_type {
@@ -117,7 +136,6 @@ enum mem_type {
117#define MEM_FLAG_RDDR BIT(MEM_RDDR) 136#define MEM_FLAG_RDDR BIT(MEM_RDDR)
118#define MEM_FLAG_RMBS BIT(MEM_RMBS) 137#define MEM_FLAG_RMBS BIT(MEM_RMBS)
119 138
120
121/* chipset Error Detection and Correction capabilities and mode */ 139/* chipset Error Detection and Correction capabilities and mode */
122enum edac_type { 140enum edac_type {
123 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ 141 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
@@ -142,7 +160,6 @@ enum edac_type {
142#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) 160#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
143#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) 161#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
144 162
145
146/* scrubbing capabilities */ 163/* scrubbing capabilities */
147enum scrub_type { 164enum scrub_type {
148 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ 165 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
@@ -166,11 +183,6 @@ enum scrub_type {
166#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) 183#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR)
167#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) 184#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
168 185
169enum mci_sysfs_status {
170 MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
171 MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
172};
173
174/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ 186/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
175 187
176/* 188/*
@@ -255,20 +267,19 @@ enum mci_sysfs_status {
255 * PS - I enjoyed writing all that about as much as you enjoyed reading it. 267 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
256 */ 268 */
257 269
258
259struct channel_info { 270struct channel_info {
260 int chan_idx; /* channel index */ 271 int chan_idx; /* channel index */
261 u32 ce_count; /* Correctable Errors for this CHANNEL */ 272 u32 ce_count; /* Correctable Errors for this CHANNEL */
262 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ 273 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
263 struct csrow_info *csrow; /* the parent */ 274 struct csrow_info *csrow; /* the parent */
264}; 275};
265 276
266
267struct csrow_info { 277struct csrow_info {
268 unsigned long first_page; /* first page number in dimm */ 278 unsigned long first_page; /* first page number in dimm */
269 unsigned long last_page; /* last page number in dimm */ 279 unsigned long last_page; /* last page number in dimm */
270 unsigned long page_mask; /* used for interleaving - 280 unsigned long page_mask; /* used for interleaving -
271 0UL for non intlv */ 281 * 0UL for non intlv
282 */
272 u32 nr_pages; /* number of pages in csrow */ 283 u32 nr_pages; /* number of pages in csrow */
273 u32 grain; /* granularity of reported error in bytes */ 284 u32 grain; /* granularity of reported error in bytes */
274 int csrow_idx; /* the chip-select row */ 285 int csrow_idx; /* the chip-select row */
@@ -280,29 +291,28 @@ struct csrow_info {
280 struct mem_ctl_info *mci; /* the parent */ 291 struct mem_ctl_info *mci; /* the parent */
281 292
282 struct kobject kobj; /* sysfs kobject for this csrow */ 293 struct kobject kobj; /* sysfs kobject for this csrow */
294 struct completion kobj_complete;
283 295
284 /* FIXME the number of CHANNELs might need to become dynamic */ 296 /* FIXME the number of CHANNELs might need to become dynamic */
285 u32 nr_channels; 297 u32 nr_channels;
286 struct channel_info *channels; 298 struct channel_info *channels;
287}; 299};
288 300
289
290struct mem_ctl_info { 301struct mem_ctl_info {
291 struct list_head link; /* for global list of mem_ctl_info structs */ 302 struct list_head link; /* for global list of mem_ctl_info structs */
292 unsigned long mtype_cap; /* memory types supported by mc */ 303 unsigned long mtype_cap; /* memory types supported by mc */
293 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ 304 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
294 unsigned long edac_cap; /* configuration capabilities - this is 305 unsigned long edac_cap; /* configuration capabilities - this is
295 closely related to edac_ctl_cap. The 306 * closely related to edac_ctl_cap. The
296 difference is that the controller 307 * difference is that the controller may be
297 may be capable of s4ecd4ed which would 308 * capable of s4ecd4ed which would be listed
298 be listed in edac_ctl_cap, but if 309 * in edac_ctl_cap, but if channels aren't
299 channels aren't capable of s4ecd4ed then the 310 * capable of s4ecd4ed then the edac_cap would
300 edac_cap would not have that capability. */ 311 * not have that capability.
312 */
301 unsigned long scrub_cap; /* chipset scrub capabilities */ 313 unsigned long scrub_cap; /* chipset scrub capabilities */
302 enum scrub_type scrub_mode; /* current scrub mode */ 314 enum scrub_type scrub_mode; /* current scrub mode */
303 315
304 enum mci_sysfs_status sysfs_active; /* status of sysfs */
305
306 /* pointer to edac checking routine */ 316 /* pointer to edac checking routine */
307 void (*edac_check) (struct mem_ctl_info * mci); 317 void (*edac_check) (struct mem_ctl_info * mci);
308 /* 318 /*
@@ -311,7 +321,7 @@ struct mem_ctl_info {
311 */ 321 */
312 /* FIXME - why not send the phys page to begin with? */ 322 /* FIXME - why not send the phys page to begin with? */
313 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, 323 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
314 unsigned long page); 324 unsigned long page);
315 int mc_idx; 325 int mc_idx;
316 int nr_csrows; 326 int nr_csrows;
317 struct csrow_info *csrows; 327 struct csrow_info *csrows;
@@ -340,72 +350,69 @@ struct mem_ctl_info {
340 350
341 /* edac sysfs device control */ 351 /* edac sysfs device control */
342 struct kobject edac_mci_kobj; 352 struct kobject edac_mci_kobj;
353 struct completion kobj_complete;
343}; 354};
344 355
345
346
347/* write all or some bits in a byte-register*/ 356/* write all or some bits in a byte-register*/
348static inline void pci_write_bits8(struct pci_dev *pdev, int offset, 357static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
349 u8 value, u8 mask) 358 u8 mask)
350{ 359{
351 if (mask != 0xff) { 360 if (mask != 0xff) {
352 u8 buf; 361 u8 buf;
362
353 pci_read_config_byte(pdev, offset, &buf); 363 pci_read_config_byte(pdev, offset, &buf);
354 value &= mask; 364 value &= mask;
355 buf &= ~mask; 365 buf &= ~mask;
356 value |= buf; 366 value |= buf;
357 } 367 }
368
358 pci_write_config_byte(pdev, offset, value); 369 pci_write_config_byte(pdev, offset, value);
359} 370}
360 371
361
362/* write all or some bits in a word-register*/ 372/* write all or some bits in a word-register*/
363static inline void pci_write_bits16(struct pci_dev *pdev, int offset, 373static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
364 u16 value, u16 mask) 374 u16 value, u16 mask)
365{ 375{
366 if (mask != 0xffff) { 376 if (mask != 0xffff) {
367 u16 buf; 377 u16 buf;
378
368 pci_read_config_word(pdev, offset, &buf); 379 pci_read_config_word(pdev, offset, &buf);
369 value &= mask; 380 value &= mask;
370 buf &= ~mask; 381 buf &= ~mask;
371 value |= buf; 382 value |= buf;
372 } 383 }
384
373 pci_write_config_word(pdev, offset, value); 385 pci_write_config_word(pdev, offset, value);
374} 386}
375 387
376
377/* write all or some bits in a dword-register*/ 388/* write all or some bits in a dword-register*/
378static inline void pci_write_bits32(struct pci_dev *pdev, int offset, 389static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
379 u32 value, u32 mask) 390 u32 value, u32 mask)
380{ 391{
381 if (mask != 0xffff) { 392 if (mask != 0xffff) {
382 u32 buf; 393 u32 buf;
394
383 pci_read_config_dword(pdev, offset, &buf); 395 pci_read_config_dword(pdev, offset, &buf);
384 value &= mask; 396 value &= mask;
385 buf &= ~mask; 397 buf &= ~mask;
386 value |= buf; 398 value |= buf;
387 } 399 }
400
388 pci_write_config_dword(pdev, offset, value); 401 pci_write_config_dword(pdev, offset, value);
389} 402}
390 403
391
392#ifdef CONFIG_EDAC_DEBUG 404#ifdef CONFIG_EDAC_DEBUG
393void edac_mc_dump_channel(struct channel_info *chan); 405void edac_mc_dump_channel(struct channel_info *chan);
394void edac_mc_dump_mci(struct mem_ctl_info *mci); 406void edac_mc_dump_mci(struct mem_ctl_info *mci);
395void edac_mc_dump_csrow(struct csrow_info *csrow); 407void edac_mc_dump_csrow(struct csrow_info *csrow);
396#endif /* CONFIG_EDAC_DEBUG */ 408#endif /* CONFIG_EDAC_DEBUG */
397 409
398extern int edac_mc_add_mc(struct mem_ctl_info *mci); 410extern int edac_mc_add_mc(struct mem_ctl_info *mci);
399extern int edac_mc_del_mc(struct mem_ctl_info *mci); 411extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev);
400
401extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, 412extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
402 unsigned long page); 413 unsigned long page);
403 414extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
404extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev 415 u32 size);
405 *pdev);
406
407extern void edac_mc_scrub_block(unsigned long page,
408 unsigned long offset, u32 size);
409 416
410/* 417/*
411 * The no info errors are used when error overflows are reported. 418 * The no info errors are used when error overflows are reported.
@@ -418,31 +425,25 @@ extern void edac_mc_scrub_block(unsigned long page,
418 * statement clutter and extra function arguments. 425 * statement clutter and extra function arguments.
419 */ 426 */
420extern void edac_mc_handle_ce(struct mem_ctl_info *mci, 427extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
421 unsigned long page_frame_number, 428 unsigned long page_frame_number, unsigned long offset_in_page,
422 unsigned long offset_in_page, 429 unsigned long syndrome, int row, int channel,
423 unsigned long syndrome, 430 const char *msg);
424 int row, int channel, const char *msg);
425
426extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, 431extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
427 const char *msg); 432 const char *msg);
428
429extern void edac_mc_handle_ue(struct mem_ctl_info *mci, 433extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
430 unsigned long page_frame_number, 434 unsigned long page_frame_number, unsigned long offset_in_page,
431 unsigned long offset_in_page, 435 int row, const char *msg);
432 int row, const char *msg);
433
434extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, 436extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
435 const char *msg); 437 const char *msg);
436 438
437/* 439/*
438 * This kmalloc's and initializes all the structures. 440 * This kmalloc's and initializes all the structures.
439 * Can't be used if all structures don't have the same lifetime. 441 * Can't be used if all structures don't have the same lifetime.
440 */ 442 */
441extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, 443extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
442 unsigned nr_csrows, unsigned nr_chans); 444 unsigned nr_chans);
443 445
444/* Free an mc previously allocated by edac_mc_alloc() */ 446/* Free an mc previously allocated by edac_mc_alloc() */
445extern void edac_mc_free(struct mem_ctl_info *mci); 447extern void edac_mc_free(struct mem_ctl_info *mci);
446 448
447
448#endif /* _EDAC_MC_H_ */ 449#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 52596e75f9c2..fd342163cf97 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -9,7 +9,6 @@
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) 9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */ 10 */
11 11
12
13#include <linux/config.h> 12#include <linux/config.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/init.h> 14#include <linux/init.h>
@@ -18,6 +17,11 @@
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include "edac_mc.h" 18#include "edac_mc.h"
20 19
20#define i82860_printk(level, fmt, arg...) \
21 edac_printk(level, "i82860", fmt, ##arg)
22
23#define i82860_mc_printk(mci, level, fmt, arg...) \
24 edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
21 25
22#ifndef PCI_DEVICE_ID_INTEL_82860_0 26#ifndef PCI_DEVICE_ID_INTEL_82860_0
23#define PCI_DEVICE_ID_INTEL_82860_0 0x2531 27#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
@@ -48,15 +52,15 @@ struct i82860_error_info {
48 52
49static const struct i82860_dev_info i82860_devs[] = { 53static const struct i82860_dev_info i82860_devs[] = {
50 [I82860] = { 54 [I82860] = {
51 .ctl_name = "i82860"}, 55 .ctl_name = "i82860"
56 },
52}; 57};
53 58
54static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code 59static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
55 has already registered driver */ 60 * has already registered driver
61 */
56 62
57static int i82860_registered = 1; 63static void i82860_get_error_info(struct mem_ctl_info *mci,
58
59static void i82860_get_error_info (struct mem_ctl_info *mci,
60 struct i82860_error_info *info) 64 struct i82860_error_info *info)
61{ 65{
62 /* 66 /*
@@ -78,14 +82,15 @@ static void i82860_get_error_info (struct mem_ctl_info *mci,
78 */ 82 */
79 if (!(info->errsts2 & 0x0003)) 83 if (!(info->errsts2 & 0x0003))
80 return; 84 return;
85
81 if ((info->errsts ^ info->errsts2) & 0x0003) { 86 if ((info->errsts ^ info->errsts2) & 0x0003) {
82 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); 87 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
83 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, 88 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
84 &info->derrsyn); 89 &info->derrsyn);
85 } 90 }
86} 91}
87 92
88static int i82860_process_error_info (struct mem_ctl_info *mci, 93static int i82860_process_error_info(struct mem_ctl_info *mci,
89 struct i82860_error_info *info, int handle_errors) 94 struct i82860_error_info *info, int handle_errors)
90{ 95{
91 int row; 96 int row;
@@ -107,8 +112,8 @@ static int i82860_process_error_info (struct mem_ctl_info *mci,
107 if (info->errsts & 0x0002) 112 if (info->errsts & 0x0002)
108 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); 113 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
109 else 114 else
110 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 115 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
111 0, "i82860 UE"); 116 "i82860 UE");
112 117
113 return 1; 118 return 1;
114} 119}
@@ -117,7 +122,7 @@ static void i82860_check(struct mem_ctl_info *mci)
117{ 122{
118 struct i82860_error_info info; 123 struct i82860_error_info info;
119 124
120 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 125 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
121 i82860_get_error_info(mci, &info); 126 i82860_get_error_info(mci, &info);
122 i82860_process_error_info(mci, &info, 1); 127 i82860_process_error_info(mci, &info, 1);
123} 128}
@@ -128,6 +133,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
128 int index; 133 int index;
129 struct mem_ctl_info *mci = NULL; 134 struct mem_ctl_info *mci = NULL;
130 unsigned long last_cumul_size; 135 unsigned long last_cumul_size;
136 struct i82860_error_info discard;
131 137
132 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 138 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
133 139
@@ -140,21 +146,20 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
140 going to make 1 channel for group. 146 going to make 1 channel for group.
141 */ 147 */
142 mci = edac_mc_alloc(0, 16, 1); 148 mci = edac_mc_alloc(0, 16, 1);
149
143 if (!mci) 150 if (!mci)
144 return -ENOMEM; 151 return -ENOMEM;
145 152
146 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 153 debugf3("%s(): init mci\n", __func__);
147
148 mci->pdev = pdev; 154 mci->pdev = pdev;
149 mci->mtype_cap = MEM_FLAG_DDR; 155 mci->mtype_cap = MEM_FLAG_DDR;
150 156
151
152 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 157 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
153 /* I"m not sure about this but I think that all RDRAM is SECDED */ 158 /* I"m not sure about this but I think that all RDRAM is SECDED */
154 mci->edac_cap = EDAC_FLAG_SECDED; 159 mci->edac_cap = EDAC_FLAG_SECDED;
155 /* adjust FLAGS */ 160 /* adjust FLAGS */
156 161
157 mci->mod_name = BS_MOD_STR; 162 mci->mod_name = EDAC_MOD_STR;
158 mci->mod_ver = "$Revision: 1.1.2.6 $"; 163 mci->mod_ver = "$Revision: 1.1.2.6 $";
159 mci->ctl_name = i82860_devs[dev_idx].ctl_name; 164 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
160 mci->edac_check = i82860_check; 165 mci->edac_check = i82860_check;
@@ -175,12 +180,13 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
175 struct csrow_info *csrow = &mci->csrows[index]; 180 struct csrow_info *csrow = &mci->csrows[index];
176 181
177 pci_read_config_word(mci->pdev, I82860_GBA + index * 2, 182 pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
178 &value); 183 &value);
179 184
180 cumul_size = (value & I82860_GBA_MASK) << 185 cumul_size = (value & I82860_GBA_MASK) <<
181 (I82860_GBA_SHIFT - PAGE_SHIFT); 186 (I82860_GBA_SHIFT - PAGE_SHIFT);
182 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 187 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
183 __func__, index, cumul_size); 188 cumul_size);
189
184 if (cumul_size == last_cumul_size) 190 if (cumul_size == last_cumul_size)
185 continue; /* not populated */ 191 continue; /* not populated */
186 192
@@ -188,42 +194,43 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
188 csrow->last_page = cumul_size - 1; 194 csrow->last_page = cumul_size - 1;
189 csrow->nr_pages = cumul_size - last_cumul_size; 195 csrow->nr_pages = cumul_size - last_cumul_size;
190 last_cumul_size = cumul_size; 196 last_cumul_size = cumul_size;
191 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ 197 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
192 csrow->mtype = MEM_RMBS; 198 csrow->mtype = MEM_RMBS;
193 csrow->dtype = DEV_UNKNOWN; 199 csrow->dtype = DEV_UNKNOWN;
194 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; 200 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
195 } 201 }
196 202
197 /* clear counters */ 203 i82860_get_error_info(mci, &discard); /* clear counters */
198 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
199 204
200 if (edac_mc_add_mc(mci)) { 205 if (edac_mc_add_mc(mci)) {
201 debugf3("MC: " __FILE__ 206 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
202 ": %s(): failed edac_mc_add_mc()\n",
203 __func__);
204 edac_mc_free(mci); 207 edac_mc_free(mci);
205 } else { 208 } else {
206 /* get this far and it's successful */ 209 /* get this far and it's successful */
207 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 210 debugf3("%s(): success\n", __func__);
208 rc = 0; 211 rc = 0;
209 } 212 }
213
210 return rc; 214 return rc;
211} 215}
212 216
213/* returns count (>= 0), or negative on error */ 217/* returns count (>= 0), or negative on error */
214static int __devinit i82860_init_one(struct pci_dev *pdev, 218static int __devinit i82860_init_one(struct pci_dev *pdev,
215 const struct pci_device_id *ent) 219 const struct pci_device_id *ent)
216{ 220{
217 int rc; 221 int rc;
218 222
219 debugf0("MC: " __FILE__ ": %s()\n", __func__); 223 debugf0("%s()\n", __func__);
224 i82860_printk(KERN_INFO, "i82860 init one\n");
220 225
221 printk(KERN_INFO "i82860 init one\n"); 226 if (pci_enable_device(pdev) < 0)
222 if(pci_enable_device(pdev) < 0)
223 return -EIO; 227 return -EIO;
228
224 rc = i82860_probe1(pdev, ent->driver_data); 229 rc = i82860_probe1(pdev, ent->driver_data);
225 if(rc == 0) 230
231 if (rc == 0)
226 mci_pdev = pci_dev_get(pdev); 232 mci_pdev = pci_dev_get(pdev);
233
227 return rc; 234 return rc;
228} 235}
229 236
@@ -231,23 +238,28 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
231{ 238{
232 struct mem_ctl_info *mci; 239 struct mem_ctl_info *mci;
233 240
234 debugf0(__FILE__ ": %s()\n", __func__); 241 debugf0("%s()\n", __func__);
235 242
236 mci = edac_mc_find_mci_by_pdev(pdev); 243 if ((mci = edac_mc_del_mc(pdev)) == NULL)
237 if ((mci != NULL) && (edac_mc_del_mc(mci) == 0)) 244 return;
238 edac_mc_free(mci); 245
246 edac_mc_free(mci);
239} 247}
240 248
241static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { 249static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
242 {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 250 {
243 I82860}, 251 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 {0,} /* 0 terminated list. */ 252 I82860
253 },
254 {
255 0,
256 } /* 0 terminated list. */
245}; 257};
246 258
247MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); 259MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
248 260
249static struct pci_driver i82860_driver = { 261static struct pci_driver i82860_driver = {
250 .name = BS_MOD_STR, 262 .name = EDAC_MOD_STR,
251 .probe = i82860_init_one, 263 .probe = i82860_init_one,
252 .remove = __devexit_p(i82860_remove_one), 264 .remove = __devexit_p(i82860_remove_one),
253 .id_table = i82860_pci_tbl, 265 .id_table = i82860_pci_tbl,
@@ -257,43 +269,56 @@ static int __init i82860_init(void)
257{ 269{
258 int pci_rc; 270 int pci_rc;
259 271
260 debugf3("MC: " __FILE__ ": %s()\n", __func__); 272 debugf3("%s()\n", __func__);
273
261 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) 274 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
262 return pci_rc; 275 goto fail0;
263 276
264 if (!mci_pdev) { 277 if (!mci_pdev) {
265 i82860_registered = 0;
266 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 278 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
267 PCI_DEVICE_ID_INTEL_82860_0, NULL); 279 PCI_DEVICE_ID_INTEL_82860_0, NULL);
280
268 if (mci_pdev == NULL) { 281 if (mci_pdev == NULL) {
269 debugf0("860 pci_get_device fail\n"); 282 debugf0("860 pci_get_device fail\n");
270 return -ENODEV; 283 pci_rc = -ENODEV;
284 goto fail1;
271 } 285 }
286
272 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); 287 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
288
273 if (pci_rc < 0) { 289 if (pci_rc < 0) {
274 debugf0("860 init fail\n"); 290 debugf0("860 init fail\n");
275 pci_dev_put(mci_pdev); 291 pci_rc = -ENODEV;
276 return -ENODEV; 292 goto fail1;
277 } 293 }
278 } 294 }
295
279 return 0; 296 return 0;
297
298fail1:
299 pci_unregister_driver(&i82860_driver);
300
301fail0:
302 if (mci_pdev != NULL)
303 pci_dev_put(mci_pdev);
304
305 return pci_rc;
280} 306}
281 307
282static void __exit i82860_exit(void) 308static void __exit i82860_exit(void)
283{ 309{
284 debugf3("MC: " __FILE__ ": %s()\n", __func__); 310 debugf3("%s()\n", __func__);
285 311
286 pci_unregister_driver(&i82860_driver); 312 pci_unregister_driver(&i82860_driver);
287 if (!i82860_registered) { 313
288 i82860_remove_one(mci_pdev); 314 if (mci_pdev != NULL)
289 pci_dev_put(mci_pdev); 315 pci_dev_put(mci_pdev);
290 }
291} 316}
292 317
293module_init(i82860_init); 318module_init(i82860_init);
294module_exit(i82860_exit); 319module_exit(i82860_exit);
295 320
296MODULE_LICENSE("GPL"); 321MODULE_LICENSE("GPL");
297MODULE_AUTHOR 322MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
298 ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>"); 323 "Ben Woodard <woodard@redhat.com>");
299MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); 324MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 1991f94af753..0aec92698f17 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -13,18 +13,19 @@
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com 13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */ 14 */
15 15
16
17#include <linux/config.h> 16#include <linux/config.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/init.h> 18#include <linux/init.h>
20
21#include <linux/pci.h> 19#include <linux/pci.h>
22#include <linux/pci_ids.h> 20#include <linux/pci_ids.h>
23
24#include <linux/slab.h> 21#include <linux/slab.h>
25
26#include "edac_mc.h" 22#include "edac_mc.h"
27 23
24#define i82875p_printk(level, fmt, arg...) \
25 edac_printk(level, "i82875p", fmt, ##arg)
26
27#define i82875p_mc_printk(mci, level, fmt, arg...) \
28 edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
28 29
29#ifndef PCI_DEVICE_ID_INTEL_82875_0 30#ifndef PCI_DEVICE_ID_INTEL_82875_0
30#define PCI_DEVICE_ID_INTEL_82875_0 0x2578 31#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
@@ -34,11 +35,9 @@
34#define PCI_DEVICE_ID_INTEL_82875_6 0x257e 35#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
35#endif /* PCI_DEVICE_ID_INTEL_82875_6 */ 36#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
36 37
37
38/* four csrows in dual channel, eight in single channel */ 38/* four csrows in dual channel, eight in single channel */
39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) 39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
40 40
41
42/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ 41/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
43#define I82875P_EAP 0x58 /* Error Address Pointer (32b) 42#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
44 * 43 *
@@ -87,7 +86,6 @@
87 * 0 reserved 86 * 0 reserved
88 */ 87 */
89 88
90
91/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ 89/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
92#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) 90#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
93 * 91 *
@@ -151,23 +149,19 @@
151 * 1:0 DRAM type 01=DDR 149 * 1:0 DRAM type 01=DDR
152 */ 150 */
153 151
154
155enum i82875p_chips { 152enum i82875p_chips {
156 I82875P = 0, 153 I82875P = 0,
157}; 154};
158 155
159
160struct i82875p_pvt { 156struct i82875p_pvt {
161 struct pci_dev *ovrfl_pdev; 157 struct pci_dev *ovrfl_pdev;
162 void __iomem *ovrfl_window; 158 void __iomem *ovrfl_window;
163}; 159};
164 160
165
166struct i82875p_dev_info { 161struct i82875p_dev_info {
167 const char *ctl_name; 162 const char *ctl_name;
168}; 163};
169 164
170
171struct i82875p_error_info { 165struct i82875p_error_info {
172 u16 errsts; 166 u16 errsts;
173 u32 eap; 167 u32 eap;
@@ -176,17 +170,19 @@ struct i82875p_error_info {
176 u16 errsts2; 170 u16 errsts2;
177}; 171};
178 172
179
180static const struct i82875p_dev_info i82875p_devs[] = { 173static const struct i82875p_dev_info i82875p_devs[] = {
181 [I82875P] = { 174 [I82875P] = {
182 .ctl_name = "i82875p"}, 175 .ctl_name = "i82875p"
176 },
183}; 177};
184 178
185static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code 179static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has
186 has already registered driver */ 180 * already registered driver
181 */
182
187static int i82875p_registered = 1; 183static int i82875p_registered = 1;
188 184
189static void i82875p_get_error_info (struct mem_ctl_info *mci, 185static void i82875p_get_error_info(struct mem_ctl_info *mci,
190 struct i82875p_error_info *info) 186 struct i82875p_error_info *info)
191{ 187{
192 /* 188 /*
@@ -210,15 +206,16 @@ static void i82875p_get_error_info (struct mem_ctl_info *mci,
210 */ 206 */
211 if (!(info->errsts2 & 0x0081)) 207 if (!(info->errsts2 & 0x0081))
212 return; 208 return;
209
213 if ((info->errsts ^ info->errsts2) & 0x0081) { 210 if ((info->errsts ^ info->errsts2) & 0x0081) {
214 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); 211 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
215 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); 212 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
216 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, 213 pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
217 &info->derrsyn); 214 &info->derrsyn);
218 } 215 }
219} 216}
220 217
221static int i82875p_process_error_info (struct mem_ctl_info *mci, 218static int i82875p_process_error_info(struct mem_ctl_info *mci,
222 struct i82875p_error_info *info, int handle_errors) 219 struct i82875p_error_info *info, int handle_errors)
223{ 220{
224 int row, multi_chan; 221 int row, multi_chan;
@@ -243,23 +240,21 @@ static int i82875p_process_error_info (struct mem_ctl_info *mci,
243 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); 240 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
244 else 241 else
245 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 242 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
246 multi_chan ? (info->des & 0x1) : 0, 243 multi_chan ? (info->des & 0x1) : 0,
247 "i82875p CE"); 244 "i82875p CE");
248 245
249 return 1; 246 return 1;
250} 247}
251 248
252
253static void i82875p_check(struct mem_ctl_info *mci) 249static void i82875p_check(struct mem_ctl_info *mci)
254{ 250{
255 struct i82875p_error_info info; 251 struct i82875p_error_info info;
256 252
257 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 253 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
258 i82875p_get_error_info(mci, &info); 254 i82875p_get_error_info(mci, &info);
259 i82875p_process_error_info(mci, &info, 1); 255 i82875p_process_error_info(mci, &info, 1);
260} 256}
261 257
262
263#ifdef CONFIG_PROC_FS 258#ifdef CONFIG_PROC_FS
264extern int pci_proc_attach_device(struct pci_dev *); 259extern int pci_proc_attach_device(struct pci_dev *);
265#endif 260#endif
@@ -273,15 +268,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
273 unsigned long last_cumul_size; 268 unsigned long last_cumul_size;
274 struct pci_dev *ovrfl_pdev; 269 struct pci_dev *ovrfl_pdev;
275 void __iomem *ovrfl_window = NULL; 270 void __iomem *ovrfl_window = NULL;
276
277 u32 drc; 271 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ 272 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans; 273 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 274 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
275 struct i82875p_error_info discard;
281 276
282 debugf0("MC: " __FILE__ ": %s()\n", __func__); 277 debugf0("%s()\n", __func__);
283 278 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
284 ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285 279
286 if (!ovrfl_pdev) { 280 if (!ovrfl_pdev) {
287 /* 281 /*
@@ -292,71 +286,69 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
292 */ 286 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2); 287 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev = 288 ovrfl_pdev =
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); 289 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
290
296 if (!ovrfl_pdev) 291 if (!ovrfl_pdev)
297 goto fail; 292 return -ENODEV;
298 } 293 }
294
299#ifdef CONFIG_PROC_FS 295#ifdef CONFIG_PROC_FS
300 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { 296 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
301 printk(KERN_ERR "MC: " __FILE__ 297 i82875p_printk(KERN_ERR,
302 ": %s(): Failed to attach overflow device\n", 298 "%s(): Failed to attach overflow device\n", __func__);
303 __func__); 299 return -ENODEV;
304 goto fail;
305 } 300 }
306#endif /* CONFIG_PROC_FS */ 301#endif
302 /* CONFIG_PROC_FS */
307 if (pci_enable_device(ovrfl_pdev)) { 303 if (pci_enable_device(ovrfl_pdev)) {
308 printk(KERN_ERR "MC: " __FILE__ 304 i82875p_printk(KERN_ERR,
309 ": %s(): Failed to enable overflow device\n", 305 "%s(): Failed to enable overflow device\n", __func__);
310 __func__); 306 return -ENODEV;
311 goto fail;
312 } 307 }
313 308
314 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { 309 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
315#ifdef CORRECT_BIOS 310#ifdef CORRECT_BIOS
316 goto fail; 311 goto fail0;
317#endif 312#endif
318 } 313 }
314
319 /* cache is irrelevant for PCI bus reads/writes */ 315 /* cache is irrelevant for PCI bus reads/writes */
320 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), 316 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
321 pci_resource_len(ovrfl_pdev, 0)); 317 pci_resource_len(ovrfl_pdev, 0));
322 318
323 if (!ovrfl_window) { 319 if (!ovrfl_window) {
324 printk(KERN_ERR "MC: " __FILE__ 320 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
325 ": %s(): Failed to ioremap bar6\n", __func__); 321 __func__);
326 goto fail; 322 goto fail1;
327 } 323 }
328 324
329 /* need to find out the number of channels */ 325 /* need to find out the number of channels */
330 drc = readl(ovrfl_window + I82875P_DRC); 326 drc = readl(ovrfl_window + I82875P_DRC);
331 drc_chan = ((drc >> 21) & 0x1); 327 drc_chan = ((drc >> 21) & 0x1);
332 nr_chans = drc_chan + 1; 328 nr_chans = drc_chan + 1;
333 drc_ddim = (drc >> 18) & 0x1;
334 329
330 drc_ddim = (drc >> 18) & 0x1;
335 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), 331 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
336 nr_chans); 332 nr_chans);
337 333
338 if (!mci) { 334 if (!mci) {
339 rc = -ENOMEM; 335 rc = -ENOMEM;
340 goto fail; 336 goto fail2;
341 } 337 }
342 338
343 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 339 debugf3("%s(): init mci\n", __func__);
344
345 mci->pdev = pdev; 340 mci->pdev = pdev;
346 mci->mtype_cap = MEM_FLAG_DDR; 341 mci->mtype_cap = MEM_FLAG_DDR;
347
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 342 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN; 343 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */ 344 /* adjust FLAGS */
351 345
352 mci->mod_name = BS_MOD_STR; 346 mci->mod_name = EDAC_MOD_STR;
353 mci->mod_ver = "$Revision: 1.5.2.11 $"; 347 mci->mod_ver = "$Revision: 1.5.2.11 $";
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name; 348 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
355 mci->edac_check = i82875p_check; 349 mci->edac_check = i82875p_check;
356 mci->ctl_page_to_phys = NULL; 350 mci->ctl_page_to_phys = NULL;
357 351 debugf3("%s(): init pvt\n", __func__);
358 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
359
360 pvt = (struct i82875p_pvt *) mci->pvt_info; 352 pvt = (struct i82875p_pvt *) mci->pvt_info;
361 pvt->ovrfl_pdev = ovrfl_pdev; 353 pvt->ovrfl_pdev = ovrfl_pdev;
362 pvt->ovrfl_window = ovrfl_window; 354 pvt->ovrfl_window = ovrfl_window;
@@ -374,8 +366,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
374 366
375 value = readb(ovrfl_window + I82875P_DRB + index); 367 value = readb(ovrfl_window + I82875P_DRB + index);
376 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 368 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
377 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 369 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
378 __func__, index, cumul_size); 370 cumul_size);
371
379 if (cumul_size == last_cumul_size) 372 if (cumul_size == last_cumul_size)
380 continue; /* not populated */ 373 continue; /* not populated */
381 374
@@ -383,71 +376,72 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
383 csrow->last_page = cumul_size - 1; 376 csrow->last_page = cumul_size - 1;
384 csrow->nr_pages = cumul_size - last_cumul_size; 377 csrow->nr_pages = cumul_size - last_cumul_size;
385 last_cumul_size = cumul_size; 378 last_cumul_size = cumul_size;
386 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ 379 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
387 csrow->mtype = MEM_DDR; 380 csrow->mtype = MEM_DDR;
388 csrow->dtype = DEV_UNKNOWN; 381 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 382 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
390 } 383 }
391 384
392 /* clear counters */ 385 i82875p_get_error_info(mci, &discard); /* clear counters */
393 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
394 386
395 if (edac_mc_add_mc(mci)) { 387 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: " __FILE__ 388 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
397 ": %s(): failed edac_mc_add_mc()\n", __func__); 389 goto fail3;
398 goto fail;
399 } 390 }
400 391
401 /* get this far and it's successful */ 392 /* get this far and it's successful */
402 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 393 debugf3("%s(): success\n", __func__);
403 return 0; 394 return 0;
404 395
405 fail: 396fail3:
406 if (mci) 397 edac_mc_free(mci);
407 edac_mc_free(mci);
408 398
409 if (ovrfl_window) 399fail2:
410 iounmap(ovrfl_window); 400 iounmap(ovrfl_window);
411 401
412 if (ovrfl_pdev) { 402fail1:
413 pci_release_regions(ovrfl_pdev); 403 pci_release_regions(ovrfl_pdev);
414 pci_disable_device(ovrfl_pdev);
415 }
416 404
405#ifdef CORRECT_BIOS
406fail0:
407#endif
408 pci_disable_device(ovrfl_pdev);
417 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 409 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
418 return rc; 410 return rc;
419} 411}
420 412
421
422/* returns count (>= 0), or negative on error */ 413/* returns count (>= 0), or negative on error */
423static int __devinit i82875p_init_one(struct pci_dev *pdev, 414static int __devinit i82875p_init_one(struct pci_dev *pdev,
424 const struct pci_device_id *ent) 415 const struct pci_device_id *ent)
425{ 416{
426 int rc; 417 int rc;
427 418
428 debugf0("MC: " __FILE__ ": %s()\n", __func__); 419 debugf0("%s()\n", __func__);
420 i82875p_printk(KERN_INFO, "i82875p init one\n");
429 421
430 printk(KERN_INFO "i82875p init one\n"); 422 if (pci_enable_device(pdev) < 0)
431 if(pci_enable_device(pdev) < 0)
432 return -EIO; 423 return -EIO;
424
433 rc = i82875p_probe1(pdev, ent->driver_data); 425 rc = i82875p_probe1(pdev, ent->driver_data);
426
434 if (mci_pdev == NULL) 427 if (mci_pdev == NULL)
435 mci_pdev = pci_dev_get(pdev); 428 mci_pdev = pci_dev_get(pdev);
429
436 return rc; 430 return rc;
437} 431}
438 432
439
440static void __devexit i82875p_remove_one(struct pci_dev *pdev) 433static void __devexit i82875p_remove_one(struct pci_dev *pdev)
441{ 434{
442 struct mem_ctl_info *mci; 435 struct mem_ctl_info *mci;
443 struct i82875p_pvt *pvt = NULL; 436 struct i82875p_pvt *pvt = NULL;
444 437
445 debugf0(__FILE__ ": %s()\n", __func__); 438 debugf0("%s()\n", __func__);
446 439
447 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) 440 if ((mci = edac_mc_del_mc(pdev)) == NULL)
448 return; 441 return;
449 442
450 pvt = (struct i82875p_pvt *) mci->pvt_info; 443 pvt = (struct i82875p_pvt *) mci->pvt_info;
444
451 if (pvt->ovrfl_window) 445 if (pvt->ovrfl_window)
452 iounmap(pvt->ovrfl_window); 446 iounmap(pvt->ovrfl_window);
453 447
@@ -459,74 +453,84 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
459 pci_dev_put(pvt->ovrfl_pdev); 453 pci_dev_put(pvt->ovrfl_pdev);
460 } 454 }
461 455
462 if (edac_mc_del_mc(mci))
463 return;
464
465 edac_mc_free(mci); 456 edac_mc_free(mci);
466} 457}
467 458
468
469static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { 459static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
470 {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 460 {
471 I82875P}, 461 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
472 {0,} /* 0 terminated list. */ 462 I82875P
463 },
464 {
465 0,
466 } /* 0 terminated list. */
473}; 467};
474 468
475MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); 469MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
476 470
477
478static struct pci_driver i82875p_driver = { 471static struct pci_driver i82875p_driver = {
479 .name = BS_MOD_STR, 472 .name = EDAC_MOD_STR,
480 .probe = i82875p_init_one, 473 .probe = i82875p_init_one,
481 .remove = __devexit_p(i82875p_remove_one), 474 .remove = __devexit_p(i82875p_remove_one),
482 .id_table = i82875p_pci_tbl, 475 .id_table = i82875p_pci_tbl,
483}; 476};
484 477
485
486static int __init i82875p_init(void) 478static int __init i82875p_init(void)
487{ 479{
488 int pci_rc; 480 int pci_rc;
489 481
490 debugf3("MC: " __FILE__ ": %s()\n", __func__); 482 debugf3("%s()\n", __func__);
491 pci_rc = pci_register_driver(&i82875p_driver); 483 pci_rc = pci_register_driver(&i82875p_driver);
484
492 if (pci_rc < 0) 485 if (pci_rc < 0)
493 return pci_rc; 486 goto fail0;
487
494 if (mci_pdev == NULL) { 488 if (mci_pdev == NULL) {
495 i82875p_registered = 0; 489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
496 mci_pdev = 490 PCI_DEVICE_ID_INTEL_82875_0, NULL);
497 pci_get_device(PCI_VENDOR_ID_INTEL, 491
498 PCI_DEVICE_ID_INTEL_82875_0, NULL);
499 if (!mci_pdev) { 492 if (!mci_pdev) {
500 debugf0("875p pci_get_device fail\n"); 493 debugf0("875p pci_get_device fail\n");
501 return -ENODEV; 494 pci_rc = -ENODEV;
495 goto fail1;
502 } 496 }
497
503 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); 498 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
499
504 if (pci_rc < 0) { 500 if (pci_rc < 0) {
505 debugf0("875p init fail\n"); 501 debugf0("875p init fail\n");
506 pci_dev_put(mci_pdev); 502 pci_rc = -ENODEV;
507 return -ENODEV; 503 goto fail1;
508 } 504 }
509 } 505 }
506
510 return 0; 507 return 0;
511}
512 508
509fail1:
510 pci_unregister_driver(&i82875p_driver);
511
512fail0:
513 if (mci_pdev != NULL)
514 pci_dev_put(mci_pdev);
515
516 return pci_rc;
517}
513 518
514static void __exit i82875p_exit(void) 519static void __exit i82875p_exit(void)
515{ 520{
516 debugf3("MC: " __FILE__ ": %s()\n", __func__); 521 debugf3("%s()\n", __func__);
517 522
518 pci_unregister_driver(&i82875p_driver); 523 pci_unregister_driver(&i82875p_driver);
524
519 if (!i82875p_registered) { 525 if (!i82875p_registered) {
520 i82875p_remove_one(mci_pdev); 526 i82875p_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev); 527 pci_dev_put(mci_pdev);
522 } 528 }
523} 529}
524 530
525
526module_init(i82875p_init); 531module_init(i82875p_init);
527module_exit(i82875p_exit); 532module_exit(i82875p_exit);
528 533
529
530MODULE_LICENSE("GPL"); 534MODULE_LICENSE("GPL");
531MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 535MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
532MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); 536MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e90892831b90..2c29fafe67c7 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -18,14 +18,17 @@
18#include <linux/config.h> 18#include <linux/config.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/init.h> 20#include <linux/init.h>
21
22#include <linux/pci.h> 21#include <linux/pci.h>
23#include <linux/pci_ids.h> 22#include <linux/pci_ids.h>
24
25#include <linux/slab.h> 23#include <linux/slab.h>
26
27#include "edac_mc.h" 24#include "edac_mc.h"
28 25
26#define r82600_printk(level, fmt, arg...) \
27 edac_printk(level, "r82600", fmt, ##arg)
28
29#define r82600_mc_printk(mci, level, fmt, arg...) \
30 edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
31
29/* Radisys say "The 82600 integrates a main memory SDRAM controller that 32/* Radisys say "The 82600 integrates a main memory SDRAM controller that
30 * supports up to four banks of memory. The four banks can support a mix of 33 * supports up to four banks of memory. The four banks can support a mix of
31 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, 34 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
@@ -126,10 +129,8 @@ struct r82600_error_info {
126 u32 eapr; 129 u32 eapr;
127}; 130};
128 131
129
130static unsigned int disable_hardware_scrub = 0; 132static unsigned int disable_hardware_scrub = 0;
131 133
132
133static void r82600_get_error_info (struct mem_ctl_info *mci, 134static void r82600_get_error_info (struct mem_ctl_info *mci,
134 struct r82600_error_info *info) 135 struct r82600_error_info *info)
135{ 136{
@@ -138,17 +139,16 @@ static void r82600_get_error_info (struct mem_ctl_info *mci,
138 if (info->eapr & BIT(0)) 139 if (info->eapr & BIT(0))
139 /* Clear error to allow next error to be reported [p.62] */ 140 /* Clear error to allow next error to be reported [p.62] */
140 pci_write_bits32(mci->pdev, R82600_EAP, 141 pci_write_bits32(mci->pdev, R82600_EAP,
141 ((u32) BIT(0) & (u32) BIT(1)), 142 ((u32) BIT(0) & (u32) BIT(1)),
142 ((u32) BIT(0) & (u32) BIT(1))); 143 ((u32) BIT(0) & (u32) BIT(1)));
143 144
144 if (info->eapr & BIT(1)) 145 if (info->eapr & BIT(1))
145 /* Clear error to allow next error to be reported [p.62] */ 146 /* Clear error to allow next error to be reported [p.62] */
146 pci_write_bits32(mci->pdev, R82600_EAP, 147 pci_write_bits32(mci->pdev, R82600_EAP,
147 ((u32) BIT(0) & (u32) BIT(1)), 148 ((u32) BIT(0) & (u32) BIT(1)),
148 ((u32) BIT(0) & (u32) BIT(1))); 149 ((u32) BIT(0) & (u32) BIT(1)));
149} 150}
150 151
151
152static int r82600_process_error_info (struct mem_ctl_info *mci, 152static int r82600_process_error_info (struct mem_ctl_info *mci,
153 struct r82600_error_info *info, int handle_errors) 153 struct r82600_error_info *info, int handle_errors)
154{ 154{
@@ -167,26 +167,25 @@ static int r82600_process_error_info (struct mem_ctl_info *mci,
167 * granularity (upper 19 bits only) */ 167 * granularity (upper 19 bits only) */
168 page = eapaddr >> PAGE_SHIFT; 168 page = eapaddr >> PAGE_SHIFT;
169 169
170 if (info->eapr & BIT(0)) { /* CE? */ 170 if (info->eapr & BIT(0)) { /* CE? */
171 error_found = 1; 171 error_found = 1;
172 172
173 if (handle_errors) 173 if (handle_errors)
174 edac_mc_handle_ce( 174 edac_mc_handle_ce(mci, page, 0, /* not avail */
175 mci, page, 0, /* not avail */ 175 syndrome,
176 syndrome, 176 edac_mc_find_csrow_by_page(mci, page),
177 edac_mc_find_csrow_by_page(mci, page), 177 0, /* channel */
178 0, /* channel */ 178 mci->ctl_name);
179 mci->ctl_name);
180 } 179 }
181 180
182 if (info->eapr & BIT(1)) { /* UE? */ 181 if (info->eapr & BIT(1)) { /* UE? */
183 error_found = 1; 182 error_found = 1;
184 183
185 if (handle_errors) 184 if (handle_errors)
186 /* 82600 doesn't give enough info */ 185 /* 82600 doesn't give enough info */
187 edac_mc_handle_ue(mci, page, 0, 186 edac_mc_handle_ue(mci, page, 0,
188 edac_mc_find_csrow_by_page(mci, page), 187 edac_mc_find_csrow_by_page(mci, page),
189 mci->ctl_name); 188 mci->ctl_name);
190 } 189 }
191 190
192 return error_found; 191 return error_found;
@@ -196,7 +195,7 @@ static void r82600_check(struct mem_ctl_info *mci)
196{ 195{
197 struct r82600_error_info info; 196 struct r82600_error_info info;
198 197
199 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 198 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
200 r82600_get_error_info(mci, &info); 199 r82600_get_error_info(mci, &info);
201 r82600_process_error_info(mci, &info, 1); 200 r82600_process_error_info(mci, &info, 1);
202} 201}
@@ -213,25 +212,18 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
213 u32 scrub_disabled; 212 u32 scrub_disabled;
214 u32 sdram_refresh_rate; 213 u32 sdram_refresh_rate;
215 u32 row_high_limit_last = 0; 214 u32 row_high_limit_last = 0;
216 u32 eap_init_bits; 215 struct r82600_error_info discard;
217
218 debugf0("MC: " __FILE__ ": %s()\n", __func__);
219
220 216
217 debugf0("%s()\n", __func__);
221 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 218 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
222 pci_read_config_dword(pdev, R82600_EAP, &eapr); 219 pci_read_config_dword(pdev, R82600_EAP, &eapr);
223
224 ecc_on = dramcr & BIT(5); 220 ecc_on = dramcr & BIT(5);
225 reg_sdram = dramcr & BIT(4); 221 reg_sdram = dramcr & BIT(4);
226 scrub_disabled = eapr & BIT(31); 222 scrub_disabled = eapr & BIT(31);
227 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 223 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
228 224 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
229 debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n", 225 sdram_refresh_rate);
230 __func__, sdram_refresh_rate); 226 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
231
232 debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
233 dramcr);
234
235 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); 227 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
236 228
237 if (mci == NULL) { 229 if (mci == NULL) {
@@ -239,29 +231,28 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
239 goto fail; 231 goto fail;
240 } 232 }
241 233
242 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 234 debugf0("%s(): mci = %p\n", __func__, mci);
243
244 mci->pdev = pdev; 235 mci->pdev = pdev;
245 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 236 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
246
247 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 237 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
248 /* FIXME try to work out if the chip leads have been * 238 /* FIXME try to work out if the chip leads have been used for COM2
249 * used for COM2 instead on this board? [MA6?] MAYBE: */ 239 * instead on this board? [MA6?] MAYBE:
240 */
250 241
251 /* On the R82600, the pins for memory bits 72:65 - i.e. the * 242 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
252 * EC bits are shared with the pins for COM2 (!), so if COM2 * 243 * EC bits are shared with the pins for COM2 (!), so if COM2 *
253 * is enabled, we assume COM2 is wired up, and thus no EDAC * 244 * is enabled, we assume COM2 is wired up, and thus no EDAC *
254 * is possible. */ 245 * is possible. */
255 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 246 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
247
256 if (ecc_on) { 248 if (ecc_on) {
257 if (scrub_disabled) 249 if (scrub_disabled)
258 debugf3("MC: " __FILE__ ": %s(): mci = %p - " 250 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
259 "Scrubbing disabled! EAP: %#0x\n", __func__, 251 "%#0x\n", __func__, mci, eapr);
260 mci, eapr);
261 } else 252 } else
262 mci->edac_cap = EDAC_FLAG_NONE; 253 mci->edac_cap = EDAC_FLAG_NONE;
263 254
264 mci->mod_name = BS_MOD_STR; 255 mci->mod_name = EDAC_MOD_STR;
265 mci->mod_ver = "$Revision: 1.1.2.6 $"; 256 mci->mod_ver = "$Revision: 1.1.2.6 $";
266 mci->ctl_name = "R82600"; 257 mci->ctl_name = "R82600";
267 mci->edac_check = r82600_check; 258 mci->edac_check = r82600_check;
@@ -276,23 +267,21 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
276 /* find the DRAM Chip Select Base address and mask */ 267 /* find the DRAM Chip Select Base address and mask */
277 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar); 268 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
278 269
279 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n", 270 debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
280 mci->mc_idx, __func__, index, drbar); 271 __func__, index, drbar);
281 272
282 row_high_limit = ((u32) drbar << 24); 273 row_high_limit = ((u32) drbar << 24);
283/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ 274/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
284 275
285 debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 276 debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = "
286 "Boundry Address=%#0x, Last = %#0x \n", 277 "%#0x \n", mci->mc_idx, __func__, index,
287 mci->mc_idx, __func__, index, row_high_limit, 278 row_high_limit, row_high_limit_last);
288 row_high_limit_last);
289 279
290 /* Empty row [p.57] */ 280 /* Empty row [p.57] */
291 if (row_high_limit == row_high_limit_last) 281 if (row_high_limit == row_high_limit_last)
292 continue; 282 continue;
293 283
294 row_base = row_high_limit_last; 284 row_base = row_high_limit_last;
295
296 csrow->first_page = row_base >> PAGE_SHIFT; 285 csrow->first_page = row_base >> PAGE_SHIFT;
297 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 286 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
298 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 287 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
@@ -308,31 +297,22 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
308 row_high_limit_last = row_high_limit; 297 row_high_limit_last = row_high_limit;
309 } 298 }
310 299
311 /* clear counters */ 300 r82600_get_error_info(mci, &discard); /* clear counters */
312 /* FIXME should we? */
313 301
314 if (edac_mc_add_mc(mci)) { 302 if (edac_mc_add_mc(mci)) {
315 debugf3("MC: " __FILE__ 303 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
316 ": %s(): failed edac_mc_add_mc()\n", __func__);
317 goto fail; 304 goto fail;
318 } 305 }
319 306
320 /* get this far and it's successful */ 307 /* get this far and it's successful */
321 308
322 /* Clear error flags to allow next error to be reported [p.62] */
323 /* Test systems seem to always have the UE flag raised on boot */
324
325 eap_init_bits = BIT(0) & BIT(1);
326 if (disable_hardware_scrub) { 309 if (disable_hardware_scrub) {
327 eap_init_bits |= BIT(31); 310 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
328 debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub " 311 __func__);
329 "(scrub on error)\n", __func__); 312 pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31));
330 } 313 }
331 314
332 pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits, 315 debugf3("%s(): success\n", __func__);
333 eap_init_bits);
334
335 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
336 return 0; 316 return 0;
337 317
338fail: 318fail:
@@ -344,62 +324,60 @@ fail:
344 324
345/* returns count (>= 0), or negative on error */ 325/* returns count (>= 0), or negative on error */
346static int __devinit r82600_init_one(struct pci_dev *pdev, 326static int __devinit r82600_init_one(struct pci_dev *pdev,
347 const struct pci_device_id *ent) 327 const struct pci_device_id *ent)
348{ 328{
349 debugf0("MC: " __FILE__ ": %s()\n", __func__); 329 debugf0("%s()\n", __func__);
350 330
351 /* don't need to call pci_device_enable() */ 331 /* don't need to call pci_device_enable() */
352 return r82600_probe1(pdev, ent->driver_data); 332 return r82600_probe1(pdev, ent->driver_data);
353} 333}
354 334
355
356static void __devexit r82600_remove_one(struct pci_dev *pdev) 335static void __devexit r82600_remove_one(struct pci_dev *pdev)
357{ 336{
358 struct mem_ctl_info *mci; 337 struct mem_ctl_info *mci;
359 338
360 debugf0(__FILE__ ": %s()\n", __func__); 339 debugf0("%s()\n", __func__);
361 340
362 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) && 341 if ((mci = edac_mc_del_mc(pdev)) == NULL)
363 !edac_mc_del_mc(mci)) 342 return;
364 edac_mc_free(mci);
365}
366 343
344 edac_mc_free(mci);
345}
367 346
368static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { 347static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
369 {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)}, 348 {
370 {0,} /* 0 terminated list. */ 349 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
350 },
351 {
352 0,
353 } /* 0 terminated list. */
371}; 354};
372 355
373MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); 356MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
374 357
375
376static struct pci_driver r82600_driver = { 358static struct pci_driver r82600_driver = {
377 .name = BS_MOD_STR, 359 .name = EDAC_MOD_STR,
378 .probe = r82600_init_one, 360 .probe = r82600_init_one,
379 .remove = __devexit_p(r82600_remove_one), 361 .remove = __devexit_p(r82600_remove_one),
380 .id_table = r82600_pci_tbl, 362 .id_table = r82600_pci_tbl,
381}; 363};
382 364
383
384static int __init r82600_init(void) 365static int __init r82600_init(void)
385{ 366{
386 return pci_register_driver(&r82600_driver); 367 return pci_register_driver(&r82600_driver);
387} 368}
388 369
389
390static void __exit r82600_exit(void) 370static void __exit r82600_exit(void)
391{ 371{
392 pci_unregister_driver(&r82600_driver); 372 pci_unregister_driver(&r82600_driver);
393} 373}
394 374
395
396module_init(r82600_init); 375module_init(r82600_init);
397module_exit(r82600_exit); 376module_exit(r82600_exit);
398 377
399
400MODULE_LICENSE("GPL"); 378MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " 379MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
402 "on behalf of EADS Astrium"); 380 "on behalf of EADS Astrium");
403MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); 381MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
404 382
405module_param(disable_hardware_scrub, bool, 0644); 383module_param(disable_hardware_scrub, bool, 0644);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index d6543fc4a923..339f405ff708 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -484,26 +484,15 @@ static void dcdbas_host_control(void)
484static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code, 484static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
485 void *unused) 485 void *unused)
486{ 486{
487 static unsigned int notify_cnt = 0;
488
489 switch (code) { 487 switch (code) {
490 case SYS_DOWN: 488 case SYS_DOWN:
491 case SYS_HALT: 489 case SYS_HALT:
492 case SYS_POWER_OFF: 490 case SYS_POWER_OFF:
493 if (host_control_on_shutdown) { 491 if (host_control_on_shutdown) {
494 /* firmware is going to perform host control action */ 492 /* firmware is going to perform host control action */
495 if (++notify_cnt == 2) { 493 printk(KERN_WARNING "Please wait for shutdown "
496 printk(KERN_WARNING 494 "action to complete...\n");
497 "Please wait for shutdown " 495 dcdbas_host_control();
498 "action to complete...\n");
499 dcdbas_host_control();
500 }
501 /*
502 * register again and initiate the host control
503 * action on the second notification to allow
504 * everyone that registered to be notified
505 */
506 register_reboot_notifier(nb);
507 } 496 }
508 break; 497 break;
509 } 498 }
@@ -514,7 +503,7 @@ static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
514static struct notifier_block dcdbas_reboot_nb = { 503static struct notifier_block dcdbas_reboot_nb = {
515 .notifier_call = dcdbas_reboot_notify, 504 .notifier_call = dcdbas_reboot_notify,
516 .next = NULL, 505 .next = NULL,
517 .priority = 0 506 .priority = INT_MIN
518}; 507};
519 508
520static DCDBAS_BIN_ATTR_RW(smi_data); 509static DCDBAS_BIN_ATTR_RW(smi_data);
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 343379f23a53..9b7e4d52ffd4 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -568,20 +568,20 @@ systab_read(struct subsystem *entry, char *buf)
568 if (!entry || !buf) 568 if (!entry || !buf)
569 return -EINVAL; 569 return -EINVAL;
570 570
571 if (efi.mps) 571 if (efi.mps != EFI_INVALID_TABLE_ADDR)
572 str += sprintf(str, "MPS=0x%lx\n", __pa(efi.mps)); 572 str += sprintf(str, "MPS=0x%lx\n", efi.mps);
573 if (efi.acpi20) 573 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
574 str += sprintf(str, "ACPI20=0x%lx\n", __pa(efi.acpi20)); 574 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
575 if (efi.acpi) 575 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
576 str += sprintf(str, "ACPI=0x%lx\n", __pa(efi.acpi)); 576 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
577 if (efi.smbios) 577 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
578 str += sprintf(str, "SMBIOS=0x%lx\n", __pa(efi.smbios)); 578 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
579 if (efi.hcdp) 579 if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
580 str += sprintf(str, "HCDP=0x%lx\n", __pa(efi.hcdp)); 580 str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
581 if (efi.boot_info) 581 if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
582 str += sprintf(str, "BOOTINFO=0x%lx\n", __pa(efi.boot_info)); 582 str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
583 if (efi.uga) 583 if (efi.uga != EFI_INVALID_TABLE_ADDR)
584 str += sprintf(str, "UGA=0x%lx\n", __pa(efi.uga)); 584 str += sprintf(str, "UGA=0x%lx\n", efi.uga);
585 585
586 return str - buf; 586 return str - buf;
587} 587}
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index ae1fb45dbb40..c37baf9448bc 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -89,19 +89,20 @@ efi_setup_pcdp_console(char *cmdline)
89 struct pcdp_uart *uart; 89 struct pcdp_uart *uart;
90 struct pcdp_device *dev, *end; 90 struct pcdp_device *dev, *end;
91 int i, serial = 0; 91 int i, serial = 0;
92 int rc = -ENODEV;
92 93
93 pcdp = efi.hcdp; 94 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
94 if (!pcdp)
95 return -ENODEV; 95 return -ENODEV;
96 96
97 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp)); 97 pcdp = ioremap(efi.hcdp, 4096);
98 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
98 99
99 if (strstr(cmdline, "console=hcdp")) { 100 if (strstr(cmdline, "console=hcdp")) {
100 if (pcdp->rev < 3) 101 if (pcdp->rev < 3)
101 serial = 1; 102 serial = 1;
102 } else if (strstr(cmdline, "console=")) { 103 } else if (strstr(cmdline, "console=")) {
103 printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); 104 printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
104 return -ENODEV; 105 goto out;
105 } 106 }
106 107
107 if (pcdp->rev < 3 && efi_uart_console_only()) 108 if (pcdp->rev < 3 && efi_uart_console_only())
@@ -110,7 +111,8 @@ efi_setup_pcdp_console(char *cmdline)
110 for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { 111 for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
111 if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { 112 if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
112 if (uart->type == PCDP_CONSOLE_UART) { 113 if (uart->type == PCDP_CONSOLE_UART) {
113 return setup_serial_console(uart); 114 rc = setup_serial_console(uart);
115 goto out;
114 } 116 }
115 } 117 }
116 } 118 }
@@ -121,10 +123,13 @@ efi_setup_pcdp_console(char *cmdline)
121 dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { 123 dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
122 if (dev->flags & PCDP_PRIMARY_CONSOLE) { 124 if (dev->flags & PCDP_PRIMARY_CONSOLE) {
123 if (dev->type == PCDP_CONSOLE_VGA) { 125 if (dev->type == PCDP_CONSOLE_VGA) {
124 return setup_vga_console(dev); 126 rc = setup_vga_console(dev);
127 goto out;
125 } 128 }
126 } 129 }
127 } 130 }
128 131
129 return -ENODEV; 132out:
133 iounmap(pcdp);
134 return rc;
130} 135}
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index f9fae28f5612..7aa5c38f0855 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -65,15 +65,6 @@ config SENSORS_PCF8591
65 This driver can also be built as a module. If so, the module 65 This driver can also be built as a module. If so, the module
66 will be called pcf8591. 66 will be called pcf8591.
67 67
68config SENSORS_RTC8564
69 tristate "Epson 8564 RTC chip"
70 depends on I2C && EXPERIMENTAL
71 help
72 If you say yes here you get support for the Epson 8564 RTC chip.
73
74 This driver can also be built as a module. If so, the module
75 will be called i2c-rtc8564.
76
77config ISP1301_OMAP 68config ISP1301_OMAP
78 tristate "Philips ISP1301 with OMAP OTG" 69 tristate "Philips ISP1301 with OMAP OTG"
79 depends on I2C && ARCH_OMAP_OTG 70 depends on I2C && ARCH_OMAP_OTG
@@ -126,13 +117,4 @@ config SENSORS_MAX6875
126 This driver can also be built as a module. If so, the module 117 This driver can also be built as a module. If so, the module
127 will be called max6875. 118 will be called max6875.
128 119
129config RTC_X1205_I2C
130 tristate "Xicor X1205 RTC chip"
131 depends on I2C && EXPERIMENTAL
132 help
133 If you say yes here you get support for the Xicor X1205 RTC chip.
134
135 This driver can also be built as a module. If so, the module
136 will be called x1205.
137
138endmenu 120endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 46178b57b1f1..779868ef2e26 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -10,10 +10,8 @@ obj-$(CONFIG_SENSORS_M41T00) += m41t00.o
10obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o 10obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
11obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o 11obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
12obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o 12obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
13obj-$(CONFIG_SENSORS_RTC8564) += rtc8564.o
14obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 13obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
15obj-$(CONFIG_TPS65010) += tps65010.o 14obj-$(CONFIG_TPS65010) += tps65010.o
16obj-$(CONFIG_RTC_X1205_I2C) += x1205.o
17 15
18ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) 16ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
19EXTRA_CFLAGS += -DDEBUG 17EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c
deleted file mode 100644
index 0d8699b3f488..000000000000
--- a/drivers/i2c/chips/rtc8564.c
+++ /dev/null
@@ -1,385 +0,0 @@
1/*
2 * linux/drivers/i2c/chips/rtc8564.c
3 *
4 * Copyright (C) 2002-2004 Stefan Eletzhofer
5 *
6 * based on linux/drivers/acron/char/pcf8583.c
7 * Copyright (C) 2000 Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Driver for system3's EPSON RTC 8564 chip
14 */
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/bcd.h>
18#include <linux/i2c.h>
19#include <linux/slab.h>
20#include <linux/string.h>
21#include <linux/rtc.h> /* get the user-level API */
22#include <linux/init.h>
23
24#include "rtc8564.h"
25
26#ifdef DEBUG
27# define _DBG(x, fmt, args...) do{ if (debug>=x) printk(KERN_DEBUG"%s: " fmt "\n", __FUNCTION__, ##args); } while(0);
28#else
29# define _DBG(x, fmt, args...) do { } while(0);
30#endif
31
32#define _DBGRTCTM(x, rtctm) if (debug>=x) printk("%s: secs=%d, mins=%d, hours=%d, mday=%d, " \
33 "mon=%d, year=%d, wday=%d VL=%d\n", __FUNCTION__, \
34 (rtctm).secs, (rtctm).mins, (rtctm).hours, (rtctm).mday, \
35 (rtctm).mon, (rtctm).year, (rtctm).wday, (rtctm).vl);
36
37struct rtc8564_data {
38 struct i2c_client client;
39 u16 ctrl;
40};
41
42static inline u8 _rtc8564_ctrl1(struct i2c_client *client)
43{
44 struct rtc8564_data *data = i2c_get_clientdata(client);
45 return data->ctrl & 0xff;
46}
47static inline u8 _rtc8564_ctrl2(struct i2c_client *client)
48{
49 struct rtc8564_data *data = i2c_get_clientdata(client);
50 return (data->ctrl & 0xff00) >> 8;
51}
52
53#define CTRL1(c) _rtc8564_ctrl1(c)
54#define CTRL2(c) _rtc8564_ctrl2(c)
55
56static int debug;
57module_param(debug, int, S_IRUGO | S_IWUSR);
58
59static struct i2c_driver rtc8564_driver;
60
61static unsigned short ignore[] = { I2C_CLIENT_END };
62static unsigned short normal_addr[] = { 0x51, I2C_CLIENT_END };
63
64static struct i2c_client_address_data addr_data = {
65 .normal_i2c = normal_addr,
66 .probe = ignore,
67 .ignore = ignore,
68};
69
70static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem);
71static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem);
72
73static int rtc8564_read(struct i2c_client *client, unsigned char adr,
74 unsigned char *buf, unsigned char len)
75{
76 int ret = -EIO;
77 unsigned char addr[1] = { adr };
78 struct i2c_msg msgs[2] = {
79 {client->addr, 0, 1, addr},
80 {client->addr, I2C_M_RD, len, buf}
81 };
82
83 _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, buf, len);
84
85 if (!buf) {
86 ret = -EINVAL;
87 goto done;
88 }
89
90 ret = i2c_transfer(client->adapter, msgs, 2);
91 if (ret == 2) {
92 ret = 0;
93 }
94
95done:
96 return ret;
97}
98
99static int rtc8564_write(struct i2c_client *client, unsigned char adr,
100 unsigned char *data, unsigned char len)
101{
102 int ret = 0;
103 unsigned char _data[16];
104 struct i2c_msg wr;
105 int i;
106
107 if (!data || len > 15) {
108 ret = -EINVAL;
109 goto done;
110 }
111
112 _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, data, len);
113
114 _data[0] = adr;
115 for (i = 0; i < len; i++) {
116 _data[i + 1] = data[i];
117 _DBG(5, "data[%d] = 0x%02x (%d)", i, data[i], data[i]);
118 }
119
120 wr.addr = client->addr;
121 wr.flags = 0;
122 wr.len = len + 1;
123 wr.buf = _data;
124
125 ret = i2c_transfer(client->adapter, &wr, 1);
126 if (ret == 1) {
127 ret = 0;
128 }
129
130done:
131 return ret;
132}
133
134static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind)
135{
136 int ret;
137 struct i2c_client *new_client;
138 struct rtc8564_data *d;
139 unsigned char data[10];
140 unsigned char ad[1] = { 0 };
141 struct i2c_msg ctrl_wr[1] = {
142 {addr, 0, 2, data}
143 };
144 struct i2c_msg ctrl_rd[2] = {
145 {addr, 0, 1, ad},
146 {addr, I2C_M_RD, 2, data}
147 };
148
149 d = kzalloc(sizeof(struct rtc8564_data), GFP_KERNEL);
150 if (!d) {
151 ret = -ENOMEM;
152 goto done;
153 }
154 new_client = &d->client;
155
156 strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE);
157 i2c_set_clientdata(new_client, d);
158 new_client->addr = addr;
159 new_client->adapter = adap;
160 new_client->driver = &rtc8564_driver;
161
162 _DBG(1, "client=%p", new_client);
163
164 /* init ctrl1 reg */
165 data[0] = 0;
166 data[1] = 0;
167 ret = i2c_transfer(new_client->adapter, ctrl_wr, 1);
168 if (ret != 1) {
169 printk(KERN_INFO "rtc8564: cant init ctrl1\n");
170 ret = -ENODEV;
171 goto done;
172 }
173
174 /* read back ctrl1 and ctrl2 */
175 ret = i2c_transfer(new_client->adapter, ctrl_rd, 2);
176 if (ret != 2) {
177 printk(KERN_INFO "rtc8564: cant read ctrl\n");
178 ret = -ENODEV;
179 goto done;
180 }
181
182 d->ctrl = data[0] | (data[1] << 8);
183
184 _DBG(1, "RTC8564_REG_CTRL1=%02x, RTC8564_REG_CTRL2=%02x",
185 data[0], data[1]);
186
187 ret = i2c_attach_client(new_client);
188done:
189 if (ret) {
190 kfree(d);
191 }
192 return ret;
193}
194
195static int rtc8564_probe(struct i2c_adapter *adap)
196{
197 return i2c_probe(adap, &addr_data, rtc8564_attach);
198}
199
200static int rtc8564_detach(struct i2c_client *client)
201{
202 i2c_detach_client(client);
203 kfree(i2c_get_clientdata(client));
204 return 0;
205}
206
207static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
208{
209 int ret = -EIO;
210 unsigned char buf[15];
211
212 _DBG(1, "client=%p, dt=%p", client, dt);
213
214 if (!dt)
215 return -EINVAL;
216
217 memset(buf, 0, sizeof(buf));
218
219 ret = rtc8564_read(client, 0, buf, 15);
220 if (ret)
221 return ret;
222
223 /* century stored in minute alarm reg */
224 dt->year = BCD2BIN(buf[RTC8564_REG_YEAR]);
225 dt->year += 100 * BCD2BIN(buf[RTC8564_REG_AL_MIN] & 0x3f);
226 dt->mday = BCD2BIN(buf[RTC8564_REG_DAY] & 0x3f);
227 dt->wday = BCD2BIN(buf[RTC8564_REG_WDAY] & 7);
228 dt->mon = BCD2BIN(buf[RTC8564_REG_MON_CENT] & 0x1f);
229
230 dt->secs = BCD2BIN(buf[RTC8564_REG_SEC] & 0x7f);
231 dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80;
232 dt->mins = BCD2BIN(buf[RTC8564_REG_MIN] & 0x7f);
233 dt->hours = BCD2BIN(buf[RTC8564_REG_HR] & 0x3f);
234
235 _DBGRTCTM(2, *dt);
236
237 return 0;
238}
239
240static int
241rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
242{
243 int ret, len = 5;
244 unsigned char buf[15];
245
246 _DBG(1, "client=%p, dt=%p", client, dt);
247
248 if (!dt)
249 return -EINVAL;
250
251 _DBGRTCTM(2, *dt);
252
253 buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP;
254 buf[RTC8564_REG_CTRL2] = CTRL2(client);
255 buf[RTC8564_REG_SEC] = BIN2BCD(dt->secs);
256 buf[RTC8564_REG_MIN] = BIN2BCD(dt->mins);
257 buf[RTC8564_REG_HR] = BIN2BCD(dt->hours);
258
259 if (datetoo) {
260 len += 5;
261 buf[RTC8564_REG_DAY] = BIN2BCD(dt->mday);
262 buf[RTC8564_REG_WDAY] = BIN2BCD(dt->wday);
263 buf[RTC8564_REG_MON_CENT] = BIN2BCD(dt->mon) & 0x1f;
264 /* century stored in minute alarm reg */
265 buf[RTC8564_REG_YEAR] = BIN2BCD(dt->year % 100);
266 buf[RTC8564_REG_AL_MIN] = BIN2BCD(dt->year / 100);
267 }
268
269 ret = rtc8564_write(client, 0, buf, len);
270 if (ret) {
271 _DBG(1, "error writing data! %d", ret);
272 }
273
274 buf[RTC8564_REG_CTRL1] = CTRL1(client);
275 ret = rtc8564_write(client, 0, buf, 1);
276 if (ret) {
277 _DBG(1, "error writing data! %d", ret);
278 }
279
280 return ret;
281}
282
283static int rtc8564_get_ctrl(struct i2c_client *client, unsigned int *ctrl)
284{
285 struct rtc8564_data *data = i2c_get_clientdata(client);
286
287 if (!ctrl)
288 return -1;
289
290 *ctrl = data->ctrl;
291 return 0;
292}
293
294static int rtc8564_set_ctrl(struct i2c_client *client, unsigned int *ctrl)
295{
296 struct rtc8564_data *data = i2c_get_clientdata(client);
297 unsigned char buf[2];
298
299 if (!ctrl)
300 return -1;
301
302 buf[0] = *ctrl & 0xff;
303 buf[1] = (*ctrl & 0xff00) >> 8;
304 data->ctrl = *ctrl;
305
306 return rtc8564_write(client, 0, buf, 2);
307}
308
309static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem)
310{
311
312 if (!mem)
313 return -EINVAL;
314
315 return rtc8564_read(client, mem->loc, mem->data, mem->nr);
316}
317
318static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem)
319{
320
321 if (!mem)
322 return -EINVAL;
323
324 return rtc8564_write(client, mem->loc, mem->data, mem->nr);
325}
326
327static int
328rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg)
329{
330
331 _DBG(1, "cmd=%d", cmd);
332
333 switch (cmd) {
334 case RTC_GETDATETIME:
335 return rtc8564_get_datetime(client, arg);
336
337 case RTC_SETTIME:
338 return rtc8564_set_datetime(client, arg, 0);
339
340 case RTC_SETDATETIME:
341 return rtc8564_set_datetime(client, arg, 1);
342
343 case RTC_GETCTRL:
344 return rtc8564_get_ctrl(client, arg);
345
346 case RTC_SETCTRL:
347 return rtc8564_set_ctrl(client, arg);
348
349 case MEM_READ:
350 return rtc8564_read_mem(client, arg);
351
352 case MEM_WRITE:
353 return rtc8564_write_mem(client, arg);
354
355 default:
356 return -EINVAL;
357 }
358}
359
360static struct i2c_driver rtc8564_driver = {
361 .driver = {
362 .name = "RTC8564",
363 },
364 .id = I2C_DRIVERID_RTC8564,
365 .attach_adapter = rtc8564_probe,
366 .detach_client = rtc8564_detach,
367 .command = rtc8564_command
368};
369
370static __init int rtc8564_init(void)
371{
372 return i2c_add_driver(&rtc8564_driver);
373}
374
375static __exit void rtc8564_exit(void)
376{
377 i2c_del_driver(&rtc8564_driver);
378}
379
380MODULE_AUTHOR("Stefan Eletzhofer <Stefan.Eletzhofer@eletztrick.de>");
381MODULE_DESCRIPTION("EPSON RTC8564 Driver");
382MODULE_LICENSE("GPL");
383
384module_init(rtc8564_init);
385module_exit(rtc8564_exit);
diff --git a/drivers/i2c/chips/rtc8564.h b/drivers/i2c/chips/rtc8564.h
deleted file mode 100644
index e5342d10b8fa..000000000000
--- a/drivers/i2c/chips/rtc8564.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * linux/drivers/i2c/chips/rtc8564.h
3 *
4 * Copyright (C) 2002-2004 Stefan Eletzhofer
5 *
6 * based on linux/drivers/acron/char/pcf8583.h
7 * Copyright (C) 2000 Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13struct rtc_tm {
14 unsigned char secs;
15 unsigned char mins;
16 unsigned char hours;
17 unsigned char mday;
18 unsigned char mon;
19 unsigned short year; /* xxxx 4 digits :) */
20 unsigned char wday;
21 unsigned char vl;
22};
23
24struct mem {
25 unsigned int loc;
26 unsigned int nr;
27 unsigned char *data;
28};
29
30#define RTC_GETDATETIME 0
31#define RTC_SETTIME 1
32#define RTC_SETDATETIME 2
33#define RTC_GETCTRL 3
34#define RTC_SETCTRL 4
35#define MEM_READ 5
36#define MEM_WRITE 6
37
38#define RTC8564_REG_CTRL1 0x0 /* T 0 S 0 | T 0 0 0 */
39#define RTC8564_REG_CTRL2 0x1 /* 0 0 0 TI/TP | AF TF AIE TIE */
40#define RTC8564_REG_SEC 0x2 /* VL 4 2 1 | 8 4 2 1 */
41#define RTC8564_REG_MIN 0x3 /* x 4 2 1 | 8 4 2 1 */
42#define RTC8564_REG_HR 0x4 /* x x 2 1 | 8 4 2 1 */
43#define RTC8564_REG_DAY 0x5 /* x x 2 1 | 8 4 2 1 */
44#define RTC8564_REG_WDAY 0x6 /* x x x x | x 4 2 1 */
45#define RTC8564_REG_MON_CENT 0x7 /* C x x 1 | 8 4 2 1 */
46#define RTC8564_REG_YEAR 0x8 /* 8 4 2 1 | 8 4 2 1 */
47#define RTC8564_REG_AL_MIN 0x9 /* AE 4 2 1 | 8 4 2 1 */
48#define RTC8564_REG_AL_HR 0xa /* AE 4 2 1 | 8 4 2 1 */
49#define RTC8564_REG_AL_DAY 0xb /* AE x 2 1 | 8 4 2 1 */
50#define RTC8564_REG_AL_WDAY 0xc /* AE x x x | x 4 2 1 */
51#define RTC8564_REG_CLKOUT 0xd /* FE x x x | x x FD1 FD0 */
52#define RTC8564_REG_TCTL 0xe /* TE x x x | x x FD1 FD0 */
53#define RTC8564_REG_TIMER 0xf /* 8 bit binary */
54
55/* Control reg */
56#define RTC8564_CTRL1_TEST1 (1<<3)
57#define RTC8564_CTRL1_STOP (1<<5)
58#define RTC8564_CTRL1_TEST2 (1<<7)
59
60#define RTC8564_CTRL2_TIE (1<<0)
61#define RTC8564_CTRL2_AIE (1<<1)
62#define RTC8564_CTRL2_TF (1<<2)
63#define RTC8564_CTRL2_AF (1<<3)
64#define RTC8564_CTRL2_TI_TP (1<<4)
65
66/* CLKOUT frequencies */
67#define RTC8564_FD_32768HZ (0x0)
68#define RTC8564_FD_1024HZ (0x1)
69#define RTC8564_FD_32 (0x2)
70#define RTC8564_FD_1HZ (0x3)
71
72/* Timer CTRL */
73#define RTC8564_TD_4096HZ (0x0)
74#define RTC8564_TD_64HZ (0x1)
75#define RTC8564_TD_1HZ (0x2)
76#define RTC8564_TD_1_60HZ (0x3)
77
78#define I2C_DRIVERID_RTC8564 0xf000
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index c7671e188017..b4a41d6d0714 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -2143,6 +2143,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
2143 req.cmd[0] = GPCMD_READ_CDVD_CAPACITY; 2143 req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
2144 req.data = (char *)&capbuf; 2144 req.data = (char *)&capbuf;
2145 req.data_len = sizeof(capbuf); 2145 req.data_len = sizeof(capbuf);
2146 req.flags |= REQ_QUIET;
2146 2147
2147 stat = cdrom_queue_packet_command(drive, &req); 2148 stat = cdrom_queue_packet_command(drive, &req);
2148 if (stat == 0) { 2149 if (stat == 0) {
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index ebc59064b475..f04791a58df0 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -433,6 +433,7 @@
433#include <linux/timer.h> 433#include <linux/timer.h>
434#include <linux/mm.h> 434#include <linux/mm.h>
435#include <linux/interrupt.h> 435#include <linux/interrupt.h>
436#include <linux/jiffies.h>
436#include <linux/major.h> 437#include <linux/major.h>
437#include <linux/devfs_fs_kernel.h> 438#include <linux/devfs_fs_kernel.h>
438#include <linux/errno.h> 439#include <linux/errno.h>
@@ -2336,7 +2337,7 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
2336 } 2337 }
2337 if (time_after(jiffies, tape->insert_time)) 2338 if (time_after(jiffies, tape->insert_time))
2338 tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time); 2339 tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
2339 if (jiffies - tape->avg_time >= HZ) { 2340 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
2340 tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024; 2341 tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024;
2341 tape->avg_size = 0; 2342 tape->avg_size = 0;
2342 tape->avg_time = jiffies; 2343 tape->avg_time = jiffies;
@@ -2497,7 +2498,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
2497 } else { 2498 } else {
2498 return ide_do_reset(drive); 2499 return ide_do_reset(drive);
2499 } 2500 }
2500 } else if (jiffies - tape->dsc_polling_start > IDETAPE_DSC_MA_THRESHOLD) 2501 } else if (time_after(jiffies, tape->dsc_polling_start + IDETAPE_DSC_MA_THRESHOLD))
2501 tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW; 2502 tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW;
2502 idetape_postpone_request(drive); 2503 idetape_postpone_request(drive);
2503 return ide_stopped; 2504 return ide_stopped;
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 21965e5ef25e..b22ee5462318 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -347,10 +347,8 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
347 break; 347 break;
348 348
349 case AMD_UDMA_66: 349 case AMD_UDMA_66:
350 pci_read_config_dword(dev, AMD_UDMA_TIMING, &u); 350 /* no host side cable detection */
351 for (i = 24; i >= 0; i -= 8) 351 amd_80w = 0x03;
352 if ((u >> i) & 4)
353 amd_80w |= (1 << (1 - (i >> 4)));
354 break; 352 break;
355 } 353 }
356 354
@@ -386,8 +384,6 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
386 if (amd_clock < 20000 || amd_clock > 50000) { 384 if (amd_clock < 20000 || amd_clock > 50000) {
387 printk(KERN_WARNING "%s: User given PCI clock speed impossible (%d), using 33 MHz instead.\n", 385 printk(KERN_WARNING "%s: User given PCI clock speed impossible (%d), using 33 MHz instead.\n",
388 amd_chipset->name, amd_clock); 386 amd_chipset->name, amd_clock);
389 printk(KERN_WARNING "%s: Use ide0=ata66 if you want to assume 80-wire cable\n",
390 amd_chipset->name);
391 amd_clock = 33333; 387 amd_clock = 33333;
392 } 388 }
393 389
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 6e3ab0c38c4d..f82e82109728 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -41,14 +41,15 @@
41 41
42static int ide_generic_all; /* Set to claim all devices */ 42static int ide_generic_all; /* Set to claim all devices */
43 43
44#ifndef MODULE
44static int __init ide_generic_all_on(char *unused) 45static int __init ide_generic_all_on(char *unused)
45{ 46{
46 ide_generic_all = 1; 47 ide_generic_all = 1;
47 printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n"); 48 printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n");
48 return 1; 49 return 1;
49} 50}
50
51__setup("all-generic-ide", ide_generic_all_on); 51__setup("all-generic-ide", ide_generic_all_on);
52#endif
52 53
53static void __devinit init_hwif_generic (ide_hwif_t *hwif) 54static void __devinit init_hwif_generic (ide_hwif_t *hwif)
54{ 55{
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 75a2253a3e68..8e9d87701ce2 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -112,6 +112,7 @@ static const struct {
112 112
113 { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 }, 113 { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 },
114 { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 }, 114 { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 },
115 { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 },
115 { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 }, 116 { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 },
116}; 117};
117 118
@@ -524,6 +525,7 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio)
524 case 3: test1 = 0x30|0x03; break; 525 case 3: test1 = 0x30|0x03; break;
525 case 2: test1 = 0x40|0x04; break; 526 case 2: test1 = 0x40|0x04; break;
526 case 1: test1 = 0x60|0x07; break; 527 case 1: test1 = 0x60|0x07; break;
528 case 0: test1 = 0x00; break;
527 default: break; 529 default: break;
528 } 530 }
529 pci_write_config_byte(dev, drive_pci, test1); 531 pci_write_config_byte(dev, drive_pci, test1);
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 734b121a0554..491e6032bdec 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
306 u64 align_mask = ~(alignment - 1); 306 u64 align_mask = ~(alignment - 1);
307 307
308 if ((alignment & 3) || (alignment > 0x800000000000ULL) || 308 if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
309 ((hweight32(alignment >> 32) + 309 (hweight64(alignment) != 1)) {
310 hweight32(alignment & 0xffffffff) != 1))) {
311 HPSB_ERR("%s called with invalid alignment: 0x%048llx", 310 HPSB_ERR("%s called with invalid alignment: 0x%048llx",
312 __FUNCTION__, (unsigned long long)alignment); 311 __FUNCTION__, (unsigned long long)alignment);
313 return retval; 312 return retval;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 4fe3da3c667a..f8af0945964e 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -923,7 +923,7 @@ void input_unregister_handler(struct input_handler *handler)
923static int input_open_file(struct inode *inode, struct file *file) 923static int input_open_file(struct inode *inode, struct file *file)
924{ 924{
925 struct input_handler *handler = input_table[iminor(inode) >> 5]; 925 struct input_handler *handler = input_table[iminor(inode) >> 5];
926 struct file_operations *old_fops, *new_fops = NULL; 926 const struct file_operations *old_fops, *new_fops = NULL;
927 int err; 927 int err;
928 928
929 /* No load-on-demand here? */ 929 /* No load-on-demand here? */
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index 1c9426fd5205..aa4a8a4ccfdb 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -270,9 +270,10 @@ static void hp_sdc_mlc_out (hil_mlc *mlc) {
270 270
271 do_control: 271 do_control:
272 priv->emtestmode = mlc->opacket & HIL_CTRL_TEST; 272 priv->emtestmode = mlc->opacket & HIL_CTRL_TEST;
273 if ((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE) { 273
274 BUG(); /* we cannot emulate this, it should not be used. */ 274 /* we cannot emulate this, it should not be used. */
275 } 275 BUG_ON((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE);
276
276 if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY) goto control_only; 277 if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY) goto control_only;
277 if (mlc->opacket & HIL_CTRL_APE) { 278 if (mlc->opacket & HIL_CTRL_APE) {
278 BUG(); /* Should not send command/data after engaging APE */ 279 BUG(); /* Should not send command/data after engaging APE */
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a81f987978c8..46d1fec2cfd8 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -23,7 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/spi/ads7846.h> 25#include <linux/spi/ads7846.h>
26#include <linux/interrupt.h> 26#include <asm/irq.h>
27 27
28#ifdef CONFIG_ARM 28#ifdef CONFIG_ARM
29#include <asm/mach-types.h> 29#include <asm/mach-types.h>
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 03d8ccd51955..988142c30a6d 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_ISDN_DRV_SC) += sc/
13obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ 13obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
14obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ 14obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
15obj-$(CONFIG_HYSDN) += hysdn/ 15obj-$(CONFIG_HYSDN) += hysdn/
16obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 623adbb0d13a..9b493f0becc4 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1485,6 +1485,7 @@ static int __init capi_init(void)
1485{ 1485{
1486 char *p; 1486 char *p;
1487 char *compileinfo; 1487 char *compileinfo;
1488 int major_ret;
1488 1489
1489 if ((p = strchr(revision, ':')) != 0 && p[1]) { 1490 if ((p = strchr(revision, ':')) != 0 && p[1]) {
1490 strlcpy(rev, p + 2, sizeof(rev)); 1491 strlcpy(rev, p + 2, sizeof(rev));
@@ -1493,11 +1494,12 @@ static int __init capi_init(void)
1493 } else 1494 } else
1494 strcpy(rev, "1.0"); 1495 strcpy(rev, "1.0");
1495 1496
1496 if (register_chrdev(capi_major, "capi20", &capi_fops)) { 1497 major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
1498 if (major_ret < 0) {
1497 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); 1499 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
1498 return -EIO; 1500 return major_ret;
1499 } 1501 }
1500 1502 capi_major = major_ret;
1501 capi_class = class_create(THIS_MODULE, "capi"); 1503 capi_class = class_create(THIS_MODULE, "capi");
1502 if (IS_ERR(capi_class)) { 1504 if (IS_ERR(capi_class)) {
1503 unregister_chrdev(capi_major, "capi20"); 1505 unregister_chrdev(capi_major, "capi20");
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 2cc8b27e4c3b..ca9dc00a45c4 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -233,7 +233,7 @@ static struct file_operations proc_applstats_ops = {
233}; 233};
234 234
235static void 235static void
236create_seq_entry(char *name, mode_t mode, struct file_operations *f) 236create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
237{ 237{
238 struct proc_dir_entry *entry; 238 struct proc_dir_entry *entry;
239 entry = create_proc_entry(name, mode, NULL); 239 entry = create_proc_entry(name, mode, NULL);
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
new file mode 100644
index 000000000000..53c4fb62ed85
--- /dev/null
+++ b/drivers/isdn/gigaset/Kconfig
@@ -0,0 +1,42 @@
1menu "Siemens Gigaset"
2 depends on ISDN_I4L
3
4config ISDN_DRV_GIGASET
5 tristate "Siemens Gigaset support (isdn)"
6 depends on ISDN_I4L && m
7# depends on ISDN_I4L && MODULES
8 help
9 Say m here if you have a Gigaset or Sinus isdn device.
10
11if ISDN_DRV_GIGASET!=n
12
13config GIGASET_BASE
14 tristate "Gigaset base station support"
15 depends on ISDN_DRV_GIGASET && USB
16 help
17 Say m here if you need to communicate with the base
18 directly via USB.
19
20config GIGASET_M105
21 tristate "Gigaset M105 support"
22 depends on ISDN_DRV_GIGASET && USB
23 help
24 Say m here if you need the driver for the Gigaset M105 device.
25
26config GIGASET_DEBUG
27 bool "Gigaset debugging"
28 help
29 This enables debugging code in the Gigaset drivers.
30 If in doubt, say yes.
31
32config GIGASET_UNDOCREQ
33 bool "Support for undocumented USB requests"
34 help
35 This enables support for USB requests we only know from
36 reverse engineering (currently M105 only). If you need
37 features like configuration mode of M105, say yes. If you
38 care about your device, say no.
39
40endif
41
42endmenu
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
new file mode 100644
index 000000000000..9b9acf1a21ad
--- /dev/null
+++ b/drivers/isdn/gigaset/Makefile
@@ -0,0 +1,6 @@
1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o
2usb_gigaset-y := usb-gigaset.o asyncdata.o
3bas_gigaset-y := bas-gigaset.o isocdata.o
4
5obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
6obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
new file mode 100644
index 000000000000..171f8b703d61
--- /dev/null
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -0,0 +1,597 @@
1/*
2 * Common data handling layer for ser_gigaset and usb_gigaset
3 *
4 * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Stefan Eilers <Eilers.Stefan@epost.de>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: asyncdata.c,v 1.2.2.7 2005/11/13 23:05:18 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/crc-ccitt.h>
22
23//#define GIG_M10x_STUFF_VOICE_DATA
24
25/* check if byte must be stuffed/escaped
26 * I'm not sure which data should be encoded.
27 * Therefore I will go the hard way and decode every value
28 * less than 0x20, the flag sequence and the control escape char.
29 */
30static inline int muststuff(unsigned char c)
31{
32 if (c < PPP_TRANS) return 1;
33 if (c == PPP_FLAG) return 1;
34 if (c == PPP_ESCAPE) return 1;
35 /* other possible candidates: */
36 /* 0x91: XON with parity set */
37 /* 0x93: XOFF with parity set */
38 return 0;
39}
40
41/* == data input =========================================================== */
42
43/* process a block of received bytes in command mode (modem response)
44 * Return value:
45 * number of processed bytes
46 */
47static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
48 struct inbuf_t *inbuf)
49{
50 struct cardstate *cs = inbuf->cs;
51 unsigned cbytes = cs->cbytes;
52 int inputstate = inbuf->inputstate;
53 int startbytes = numbytes;
54
55 for (;;) {
56 cs->respdata[cbytes] = c;
57 if (c == 10 || c == 13) {
58 dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
59 __func__, cbytes);
60 cs->cbytes = cbytes;
61 gigaset_handle_modem_response(cs); /* can change cs->dle */
62 cbytes = 0;
63
64 if (cs->dle &&
65 !(inputstate & INS_DLE_command)) {
66 inputstate &= ~INS_command;
67 break;
68 }
69 } else {
70 /* advance in line buffer, checking for overflow */
71 if (cbytes < MAX_RESP_SIZE - 1)
72 cbytes++;
73 else
74 warn("response too large");
75 }
76
77 if (!numbytes)
78 break;
79 c = *src++;
80 --numbytes;
81 if (c == DLE_FLAG &&
82 (cs->dle || inputstate & INS_DLE_command)) {
83 inputstate |= INS_DLE_char;
84 break;
85 }
86 }
87
88 cs->cbytes = cbytes;
89 inbuf->inputstate = inputstate;
90
91 return startbytes - numbytes;
92}
93
94/* process a block of received bytes in lock mode (tty i/f)
95 * Return value:
96 * number of processed bytes
97 */
98static inline int lock_loop(unsigned char *src, int numbytes,
99 struct inbuf_t *inbuf)
100{
101 struct cardstate *cs = inbuf->cs;
102
103 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src, 0);
104 gigaset_if_receive(cs, src, numbytes);
105
106 return numbytes;
107}
108
109/* process a block of received bytes in HDLC data mode
110 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
111 * When a frame is complete, check the FCS and pass valid frames to the LL.
112 * If DLE is encountered, return immediately to let the caller handle it.
113 * Return value:
114 * number of processed bytes
115 * numbytes (all bytes processed) on error --FIXME
116 */
117static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
118 struct inbuf_t *inbuf)
119{
120 struct cardstate *cs = inbuf->cs;
121 struct bc_state *bcs = inbuf->bcs;
122 int inputstate;
123 __u16 fcs;
124 struct sk_buff *skb;
125 unsigned char error;
126 struct sk_buff *compskb;
127 int startbytes = numbytes;
128 int l;
129
130 IFNULLRETVAL(bcs, numbytes);
131 inputstate = bcs->inputstate;
132 fcs = bcs->fcs;
133 skb = bcs->skb;
134 IFNULLRETVAL(skb, numbytes);
135
136 if (unlikely(inputstate & INS_byte_stuff)) {
137 inputstate &= ~INS_byte_stuff;
138 goto byte_stuff;
139 }
140 for (;;) {
141 if (unlikely(c == PPP_ESCAPE)) {
142 if (unlikely(!numbytes)) {
143 inputstate |= INS_byte_stuff;
144 break;
145 }
146 c = *src++;
147 --numbytes;
148 if (unlikely(c == DLE_FLAG &&
149 (cs->dle ||
150 inbuf->inputstate & INS_DLE_command))) {
151 inbuf->inputstate |= INS_DLE_char;
152 inputstate |= INS_byte_stuff;
153 break;
154 }
155byte_stuff:
156 c ^= PPP_TRANS;
157#ifdef CONFIG_GIGASET_DEBUG
158 if (unlikely(!muststuff(c)))
159 dbg(DEBUG_HDLC,
160 "byte stuffed: 0x%02x", c);
161#endif
162 } else if (unlikely(c == PPP_FLAG)) {
163 if (unlikely(inputstate & INS_skip_frame)) {
164 if (!(inputstate & INS_have_data)) { /* 7E 7E */
165 //dbg(DEBUG_HDLC, "(7e)7e------------------------");
166#ifdef CONFIG_GIGASET_DEBUG
167 ++bcs->emptycount;
168#endif
169 } else
170 dbg(DEBUG_HDLC,
171 "7e----------------------------");
172
173 /* end of frame */
174 error = 1;
175 gigaset_rcv_error(NULL, cs, bcs);
176 } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
177 //dbg(DEBUG_HDLC, "(7e)7e------------------------");
178#ifdef CONFIG_GIGASET_DEBUG
179 ++bcs->emptycount;
180#endif
181 break;
182 } else {
183 dbg(DEBUG_HDLC,
184 "7e----------------------------");
185
186 /* end of frame */
187 error = 0;
188
189 if (unlikely(fcs != PPP_GOODFCS)) {
190 err("Packet checksum at %lu failed, "
191 "packet is corrupted (%u bytes)!",
192 bcs->rcvbytes, skb->len);
193 compskb = NULL;
194 gigaset_rcv_error(compskb, cs, bcs);
195 error = 1;
196 } else {
197 if (likely((l = skb->len) > 2)) {
198 skb->tail -= 2;
199 skb->len -= 2;
200 } else {
201 dev_kfree_skb(skb);
202 skb = NULL;
203 inputstate |= INS_skip_frame;
204 if (l == 1) {
205 err("invalid packet size (1)!");
206 error = 1;
207 gigaset_rcv_error(NULL, cs, bcs);
208 }
209 }
210 if (likely(!(error ||
211 (inputstate &
212 INS_skip_frame)))) {
213 gigaset_rcv_skb(skb, cs, bcs);
214 }
215 }
216 }
217
218 if (unlikely(error))
219 if (skb)
220 dev_kfree_skb(skb);
221
222 fcs = PPP_INITFCS;
223 inputstate &= ~(INS_have_data | INS_skip_frame);
224 if (unlikely(bcs->ignore)) {
225 inputstate |= INS_skip_frame;
226 skb = NULL;
227 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
228 skb_reserve(skb, HW_HDR_LEN);
229 } else {
230 warn("could not allocate new skb");
231 inputstate |= INS_skip_frame;
232 }
233
234 break;
235#ifdef CONFIG_GIGASET_DEBUG
236 } else if (unlikely(muststuff(c))) {
237 /* Should not happen. Possible after ZDLE=1<CR><LF>. */
238 dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
239#endif
240 }
241
242 /* add character */
243
244#ifdef CONFIG_GIGASET_DEBUG
245 if (unlikely(!(inputstate & INS_have_data))) {
246 dbg(DEBUG_HDLC,
247 "7e (%d x) ================", bcs->emptycount);
248 bcs->emptycount = 0;
249 }
250#endif
251
252 inputstate |= INS_have_data;
253
254 if (likely(!(inputstate & INS_skip_frame))) {
255 if (unlikely(skb->len == SBUFSIZE)) {
256 warn("received packet too long");
257 dev_kfree_skb_any(skb);
258 skb = NULL;
259 inputstate |= INS_skip_frame;
260 break;
261 }
262 *gigaset_skb_put_quick(skb, 1) = c;
263 /* *__skb_put (skb, 1) = c; */
264 fcs = crc_ccitt_byte(fcs, c);
265 }
266
267 if (unlikely(!numbytes))
268 break;
269 c = *src++;
270 --numbytes;
271 if (unlikely(c == DLE_FLAG &&
272 (cs->dle ||
273 inbuf->inputstate & INS_DLE_command))) {
274 inbuf->inputstate |= INS_DLE_char;
275 break;
276 }
277 }
278 bcs->inputstate = inputstate;
279 bcs->fcs = fcs;
280 bcs->skb = skb;
281 return startbytes - numbytes;
282}
283
284/* process a block of received bytes in transparent data mode
285 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
286 * If DLE is encountered, return immediately to let the caller handle it.
287 * Return value:
288 * number of processed bytes
289 * numbytes (all bytes processed) on error --FIXME
290 */
static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
                            struct inbuf_t *inbuf)
{
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = inbuf->bcs;
	int inputstate;			/* local copy of bcs->inputstate */
	struct sk_buff *skb;		/* receive buffer being filled */
	int startbytes = numbytes;	/* for computing the return value */

	IFNULLRETVAL(bcs, numbytes);
	inputstate = bcs->inputstate;
	skb = bcs->skb;
	IFNULLRETVAL(skb, numbytes);

	for (;;) {
		/* add character */
		inputstate |= INS_have_data;

		if (likely(!(inputstate & INS_skip_frame))) {
			if (unlikely(skb->len == SBUFSIZE)) {
				//FIXME just pass skb up and allocate a new one
				warn("received packet too long");
				dev_kfree_skb_any(skb);
				skb = NULL;
				inputstate |= INS_skip_frame;
				break;
			}
			/* store the bit-inverted byte in the skb */
			*gigaset_skb_put_quick(skb, 1) = gigaset_invtab[c];
		}

		if (unlikely(!numbytes))
			break;
		c = *src++;
		--numbytes;
		/* DLE escape: stop here and let the caller handle it */
		if (unlikely(c == DLE_FLAG &&
			     (cs->dle ||
			      inbuf->inputstate & INS_DLE_command))) {
			inbuf->inputstate |= INS_DLE_char;
			break;
		}
	}

	/* pass data up */
	if (likely(inputstate & INS_have_data)) {
		if (likely(!(inputstate & INS_skip_frame))) {
			gigaset_rcv_skb(skb, cs, bcs);
		}
		inputstate &= ~(INS_have_data | INS_skip_frame);
		/* set up a fresh receive buffer, unless data on this
		 * channel is to be discarded */
		if (unlikely(bcs->ignore)) {
			inputstate |= INS_skip_frame;
			skb = NULL;
		} else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN))
				  != NULL)) {
			skb_reserve(skb, HW_HDR_LEN);
		} else {
			warn("could not allocate new skb");
			inputstate |= INS_skip_frame;
		}
	}

	/* write updated state back to the B channel structure */
	bcs->inputstate = inputstate;
	bcs->skb = skb;
	return startbytes - numbytes;
}
355
356/* process a block of data received from the device
357 */
void gigaset_m10x_input(struct inbuf_t *inbuf)
{
	struct cardstate *cs;
	unsigned tail, head, numbytes;
	unsigned char *src, c;
	int procbytes;

	head = atomic_read(&inbuf->head);
	tail = atomic_read(&inbuf->tail);
	dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);

	if (head != tail) {
		cs = inbuf->cs;
		src = inbuf->data + head;
		/* process only up to the physical end of the ring buffer;
		 * wraparound is handled at the bottom of the loop */
		numbytes = (head > tail ? RBUFSIZE : tail) - head;
		dbg(DEBUG_INTR, "processing %u bytes", numbytes);

		while (numbytes) {
			if (atomic_read(&cs->mstate) == MS_LOCKED) {
				/* locked mode: pass bytes through unparsed */
				procbytes = lock_loop(src, numbytes, inbuf);
				src += procbytes;
				numbytes -= procbytes;
			} else {
				c = *src++;
				--numbytes;
				if (c == DLE_FLAG && (cs->dle ||
				    inbuf->inputstate & INS_DLE_command)) {
					if (!(inbuf->inputstate & INS_DLE_char)) {
						/* first DLE: remember it and
						 * examine the next byte */
						inbuf->inputstate |= INS_DLE_char;
						goto nextbyte;
					}
					/* <DLE> <DLE> => <DLE> in data stream */
					inbuf->inputstate &= ~INS_DLE_char;
				}

				if (!(inbuf->inputstate & INS_DLE_char)) {

					/* FIXME simply set a function pointer in cs depending on the mode [here+hdlc_loop]? */
					/* FIXME would save the following "if" and allow other protocols */
					if (inbuf->inputstate & INS_command)
						procbytes = cmd_loop(c, src, numbytes, inbuf);
					else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
						procbytes = hdlc_loop(c, src, numbytes, inbuf);
					else
						procbytes = iraw_loop(c, src, numbytes, inbuf);

					src += procbytes;
					numbytes -= procbytes;
				} else { /* DLE-char */
					/* byte following a single <DLE>:
					 * dispatch on the escape code */
					inbuf->inputstate &= ~INS_DLE_char;
					switch (c) {
					case 'X': /*begin of command*/
#ifdef CONFIG_GIGASET_DEBUG
						if (inbuf->inputstate & INS_command)
							err("received <DLE> 'X' in command mode");
#endif
						inbuf->inputstate |=
							INS_command | INS_DLE_command;
						break;
					case '.': /*end of command*/
#ifdef CONFIG_GIGASET_DEBUG
						if (!(inbuf->inputstate & INS_command))
							err("received <DLE> '.' in hdlc mode");
#endif
						inbuf->inputstate &= cs->dle ?
							~(INS_DLE_command|INS_command)
							: ~INS_DLE_command;
						break;
					//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */
					default:
						err("received 0x10 0x%02x!", (int) c);
						/* FIXME: reset driver?? */
					}
				}
			}
nextbyte:
			if (!numbytes) {
				/* end of buffer, check for wrap */
				if (head > tail) {
					head = 0;
					src = inbuf->data;
					numbytes = tail;
				} else {
					head = tail;
					break;
				}
			}
		}

		dbg(DEBUG_INTR, "setting head to %u", head);
		atomic_set(&inbuf->head, head);
	}
}
451
452
453/* == data output ========================================================== */
454
455/* Encoding of a PPP packet into an octet stuffed HDLC frame
456 * with FCS, opening and closing flags.
457 * parameters:
458 * skb skb containing original packet (freed upon return)
459 * head number of headroom bytes to allocate in result skb
460 * tail number of tailroom bytes to allocate in result skb
461 * Return value:
462 * pointer to newly allocated skb containing the result frame
463 */
static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
{
	struct sk_buff *hdlc_skb;
	__u16 fcs;		/* frame check sequence (CRC-CCITT) */
	unsigned char c;
	unsigned char *cp;
	int len;
	unsigned int stuf_cnt;	/* number of payload bytes needing escaping */

	/* first pass: count stuffing bytes and compute the FCS */
	stuf_cnt = 0;
	fcs = PPP_INITFCS;
	cp = skb->data;
	len = skb->len;
	while (len--) {
		if (muststuff(*cp))
			stuf_cnt++;
		fcs = crc_ccitt_byte(fcs, *cp++);
	}
	fcs ^= 0xffff; /* complement */

	/* size of new buffer: original size + number of stuffing bytes
	 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
	 */
	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head);
	if (!hdlc_skb) {
		err("unable to allocate memory for HDLC encoding!");
		dev_kfree_skb(skb);
		return NULL;
	}
	skb_reserve(hdlc_skb, head);

	/* Copy acknowledge request into new skb */
	/* NOTE(review): copies 2 bytes from the old skb's headroom --
	 * presumably an ISDN4Linux acknowledge header; confirm */
	memcpy(hdlc_skb->head, skb->head, 2);

	/* Add flag sequence in front of everything.. */
	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;

	/* Perform byte stuffing while copying data. */
	while (skb->len--) {
		if (muststuff(*skb->data)) {
			*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
			*(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
		} else
			*(skb_put(hdlc_skb, 1)) = *skb->data++;
	}

	/* Finally add FCS (byte stuffed) and flag sequence */
	c = (fcs & 0x00ff); /* least significant byte first */
	if (muststuff(c)) {
		*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
		c ^= PPP_TRANS;
	}
	*(skb_put(hdlc_skb, 1)) = c;

	c = ((fcs >> 8) & 0x00ff);
	if (muststuff(c)) {
		*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
		c ^= PPP_TRANS;
	}
	*(skb_put(hdlc_skb, 1)) = c;

	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;

	/* the original skb has been consumed */
	dev_kfree_skb(skb);
	return hdlc_skb;
}
530
531/* Encoding of a raw packet into an octet stuffed bit inverted frame
532 * parameters:
533 * skb skb containing original packet (freed upon return)
534 * head number of headroom bytes to allocate in result skb
535 * tail number of tailroom bytes to allocate in result skb
536 * Return value:
537 * pointer to newly allocated skb containing the result frame
538 */
539static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
540{
541 struct sk_buff *iraw_skb;
542 unsigned char c;
543 unsigned char *cp;
544 int len;
545
546 /* worst case: every byte must be stuffed */
547 iraw_skb = dev_alloc_skb(2*skb->len + tail + head);
548 if (!iraw_skb) {
549 err("unable to allocate memory for HDLC encoding!");
550 dev_kfree_skb(skb);
551 return NULL;
552 }
553 skb_reserve(iraw_skb, head);
554
555 cp = skb->data;
556 len = skb->len;
557 while (len--) {
558 c = gigaset_invtab[*cp++];
559 if (c == DLE_FLAG)
560 *(skb_put(iraw_skb, 1)) = c;
561 *(skb_put(iraw_skb, 1)) = c;
562 }
563 dev_kfree_skb(skb);
564 return iraw_skb;
565}
566
567/* gigaset_send_skb
568 * called by common.c to queue an skb for sending
569 * and start transmission if necessary
570 * parameters:
571 * B Channel control structure
572 * skb
573 * Return value:
574 * number of bytes accepted for sending
575 * (skb->len if ok, 0 if out of buffer space)
576 * or error code (< 0, eg. -EINVAL)
577 */
578int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
579{
580 unsigned len;
581
582 IFNULLRETVAL(bcs, -EFAULT);
583 IFNULLRETVAL(skb, -EFAULT);
584 len = skb->len;
585
586 if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
587 skb = HDLC_Encode(skb, HW_HDR_LEN, 0);
588 else
589 skb = iraw_encode(skb, HW_HDR_LEN, 0);
590 if (!skb)
591 return -ENOMEM;
592
593 skb_queue_tail(&bcs->squeue, skb);
594 tasklet_schedule(&bcs->cs->write_tasklet);
595
596 return len; /* ok so far */
597}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
new file mode 100644
index 000000000000..31f0f07832bc
--- /dev/null
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -0,0 +1,2365 @@
1/*
2 * USB driver for Gigaset 307x base via direct USB connection.
3 *
4 * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
5 * Tilman Schmidt <tilman@imap.cc>,
6 * Stefan Eilers <Eilers.Stefan@epost.de>.
7 *
8 * Based on usb-gigaset.c.
9 *
10 * =====================================================================
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of
14 * the License, or (at your option) any later version.
15 * =====================================================================
16 * ToDo: ...
17 * =====================================================================
18 * Version: $Id: bas-gigaset.c,v 1.52.4.19 2006/02/04 18:28:16 hjlipp Exp $
19 * =====================================================================
20 */
21
22#include "gigaset.h"
23
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/timer.h>
28#include <linux/usb.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31
32/* Version Information */
33#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
34#define DRIVER_DESC "USB Driver for Gigaset 307x"
35
36
37/* Module parameters */
38
39static int startmode = SM_ISDN;
40static int cidmode = 1;
41
42module_param(startmode, int, S_IRUGO);
43module_param(cidmode, int, S_IRUGO);
44MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
45MODULE_PARM_DESC(cidmode, "Call-ID mode");
46
47#define GIGASET_MINORS 1
48#define GIGASET_MINOR 16
49#define GIGASET_MODULENAME "bas_gigaset"
50#define GIGASET_DEVFSNAME "gig/bas/"
51#define GIGASET_DEVNAME "ttyGB"
52
53#define IF_WRITEBUF 256 //FIXME
54
55/* Values for the Gigaset 307x */
56#define USB_GIGA_VENDOR_ID 0x0681
57#define USB_GIGA_PRODUCT_ID 0x0001
58#define USB_4175_PRODUCT_ID 0x0002
59#define USB_SX303_PRODUCT_ID 0x0021
60#define USB_SX353_PRODUCT_ID 0x0022
61
62/* table of devices that work with this driver */
63static struct usb_device_id gigaset_table [] = {
64 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_GIGA_PRODUCT_ID) },
65 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_4175_PRODUCT_ID) },
66 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
67 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
68 { } /* Terminating entry */
69};
70
71MODULE_DEVICE_TABLE(usb, gigaset_table);
72
73/* Get a minor range for your devices from the usb maintainer */
74#define USB_SKEL_MINOR_BASE 200
75
76/*======================= local function prototypes =============================*/
77
78/* This function is called if a new device is connected to the USB port. It
79 * checks whether this new device belongs to this driver.
80 */
81static int gigaset_probe(struct usb_interface *interface,
82 const struct usb_device_id *id);
83
84/* Function will be called if the device is unplugged */
85static void gigaset_disconnect(struct usb_interface *interface);
86
87
88/*==============================================================================*/
89
/* hardware specific state of a 307x base connected via USB */
struct bas_cardstate {
	struct usb_device	*udev;		/* USB device pointer */
	struct usb_interface	*interface;	/* interface for this device */
	unsigned char		minor;		/* starting minor number */

	struct urb		*urb_ctrl;	/* control pipe default URB */
	struct usb_ctrlrequest	dr_ctrl;	/* setup packet for urb_ctrl */
	struct timer_list	timer_ctrl;	/* control request timeout */

	struct timer_list	timer_atrdy;	/* AT command ready timeout */
	struct urb		*urb_cmd_out;	/* for sending AT commands */
	struct usb_ctrlrequest	dr_cmd_out;	/* setup packet for urb_cmd_out */
	int			retry_cmd_out;	/* send request retry count */

	struct urb		*urb_cmd_in;	/* for receiving AT replies */
	struct usb_ctrlrequest	dr_cmd_in;	/* setup packet for urb_cmd_in */
	struct timer_list	timer_cmd_in;	/* receive request timeout */
	unsigned char		*rcvbuf;	/* AT reply receive buffer */

	struct urb		*urb_int_in;	/* URB for interrupt pipe */
	unsigned char		int_in_buf[3];	/* message type + 16 bit length */

	spinlock_t		lock;		/* locks all following */
	atomic_t		basstate;	/* bitmap (BS_*) */
	int			pending;	/* uncompleted base request */
	int			rcvbuf_size;	/* size of AT receive buffer */
						/* 0: no receive in progress */
	int			retry_cmd_in;	/* receive req retry count */
};
119
120/* status of direct USB connection to 307x base (bits in basstate) */
121#define BS_ATOPEN 0x001
122#define BS_B1OPEN 0x002
123#define BS_B2OPEN 0x004
124#define BS_ATREADY 0x008
125#define BS_INIT 0x010
126#define BS_ATTIMER 0x020
127
128
129static struct gigaset_driver *driver = NULL;
130static struct cardstate *cardstate = NULL;
131
132/* usb specific object needed to register this driver with the usb subsystem */
133static struct usb_driver gigaset_usb_driver = {
134 .name = GIGASET_MODULENAME,
135 .probe = gigaset_probe,
136 .disconnect = gigaset_disconnect,
137 .id_table = gigaset_table,
138};
139
140/* get message text for USB status code
141 */
static char *get_usb_statmsg(int status)
{
	/* table of known USB status codes and their explanatory text */
	static const struct {
		int code;
		const char *text;
	} statmsgs[] = {
		{ 0,		"success" },
		{ -ENOENT,	"canceled" },
		{ -ECONNRESET,	"canceled (async)" },
		{ -EINPROGRESS,	"pending" },
		{ -EPROTO,	"bit stuffing or unknown USB error" },
		{ -EILSEQ,	"Illegal byte sequence (CRC mismatch)" },
		{ -EPIPE,	"babble detect or endpoint stalled" },
		{ -ENOSR,	"buffer error" },
		{ -ETIMEDOUT,	"timed out" },
		{ -ENODEV,	"device not present" },
		{ -EREMOTEIO,	"short packet detected" },
		{ -EXDEV,	"partial isochronous transfer" },
		{ -EINVAL,	"invalid argument" },
		{ -ENXIO,	"URB already queued" },
		{ -EAGAIN,	"isochronous start frame too early or too much scheduled" },
		{ -EFBIG,	"too many isochronous frames requested" },
		{ -EMSGSIZE,	"endpoint message size zero" },
		{ -ESHUTDOWN,	"endpoint shutdown" },
		{ -EBUSY,	"another request pending" },
	};
	/* fallback buffer for unlisted codes (static: not reentrant) */
	static char unkmsg[28];
	size_t i;

	for (i = 0; i < sizeof statmsgs / sizeof statmsgs[0]; i++)
		if (statmsgs[i].code == status)
			return (char *) statmsgs[i].text;

	snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", status);
	return unkmsg;
}
190
191/* usb_pipetype_str
192 * retrieve string representation of USB pipe type
193 */
static inline char *usb_pipetype_str(int pipe)
{
	/* map the pipe type to a short human readable tag;
	 * "?" if none of the four known types matches */
	return usb_pipeisoc(pipe)    ? "Isoc" :
	       usb_pipeint(pipe)     ? "Int"  :
	       usb_pipecontrol(pipe) ? "Ctrl" :
	       usb_pipebulk(pipe)    ? "Bulk" : "?";
}
206
207/* dump_urb
208 * write content of URB to syslog for debugging
209 */
static inline void dump_urb(enum debuglevel level, const char *tag,
                            struct urb *urb)
{
#ifdef CONFIG_GIGASET_DEBUG
	int i;
	IFNULLRET(tag);
	dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
	if (urb) {
		/* general URB fields */
		dbg(level,
		    "  dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
		    "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,",
		    (unsigned long) urb->dev,
		    usb_pipetype_str(urb->pipe),
		    usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
		    usb_pipein(urb->pipe) ? "in" : "out",
		    urb->status, (unsigned long) urb->hcpriv,
		    urb->transfer_flags);
		dbg(level,
		    "  transfer_buffer=0x%08lx[%d], actual_length=%d, "
		    "bandwidth=%d, setup_packet=0x%08lx,",
		    (unsigned long) urb->transfer_buffer,
		    urb->transfer_buffer_length, urb->actual_length,
		    urb->bandwidth, (unsigned long) urb->setup_packet);
		dbg(level,
		    "  start_frame=%d, number_of_packets=%d, interval=%d, "
		    "error_count=%d,",
		    urb->start_frame, urb->number_of_packets, urb->interval,
		    urb->error_count);
		dbg(level,
		    "  context=0x%08lx, complete=0x%08lx, iso_frame_desc[]={",
		    (unsigned long) urb->context,
		    (unsigned long) urb->complete);
		/* per-frame descriptors (isochronous URBs only;
		 * number_of_packets is 0 otherwise) */
		for (i = 0; i < urb->number_of_packets; i++) {
			struct usb_iso_packet_descriptor *pifd = &urb->iso_frame_desc[i];
			dbg(level,
			    "    {offset=%u, length=%u, actual_length=%u, "
			    "status=%u}",
			    pifd->offset, pifd->length, pifd->actual_length,
			    pifd->status);
		}
	}
	dbg(level, "}}");
#endif
}
254
255/* read/set modem control bits etc. (m10x only) */
static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
                                  unsigned new_state)
{
	/* serial line operation not applicable to the USB-connected base */
	return -EINVAL;
}
261
static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
{
	/* serial line operation not applicable to the USB-connected base */
	return -EINVAL;
}
266
static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
{
	/* serial line operation not applicable to the USB-connected base */
	return -EINVAL;
}
271
272/* error_hangup
273 * hang up any existing connection because of an unrecoverable error
274 * This function may be called from any context and takes care of scheduling
275 * the necessary actions for execution outside of interrupt context.
276 * argument:
277 * B channel control structure
278 */
static inline void error_hangup(struct bc_state *bcs)
{
	struct cardstate *cs = bcs->cs;

	dbg(DEBUG_ANY,
	    "%s: scheduling HUP for channel %d", __func__, bcs->channel);

	/* queue an EV_HUP event for the event layer tasklet */
	if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
		//FIXME what should we do?
		return;
	}

	gigaset_schedule_event(cs);
}
293
294/* error_reset
295 * reset Gigaset device because of an unrecoverable error
296 * This function may be called from any context and takes care of scheduling
297 * the necessary actions for execution outside of interrupt context.
298 * argument:
299 * controller state structure
300 */
static inline void error_reset(struct cardstate *cs)
{
	/* no automatic recovery implemented yet; just tell the user */
	//FIXME try to recover without bothering the user
	err("unrecoverable error - please disconnect the Gigaset base to reset");
}
306
307/* check_pending
308 * check for completion of pending control request
309 * parameter:
310 * urb USB request block of completed request
311 * urb->context = hardware specific controller state structure
312 */
static void check_pending(struct bas_cardstate *ucs)
{
	unsigned long flags;

	IFNULLRET(ucs);
	IFNULLRET(cardstate);

	/* ucs->pending and basstate are examined together under the lock;
	 * each open/close request is considered complete once the
	 * corresponding basstate bit has reached the requested value */
	spin_lock_irqsave(&ucs->lock, flags);
	switch (ucs->pending) {
	case 0:
		break;
	case HD_OPEN_ATCHANNEL:
		if (atomic_read(&ucs->basstate) & BS_ATOPEN)
			ucs->pending = 0;
		break;
	case HD_OPEN_B1CHANNEL:
		if (atomic_read(&ucs->basstate) & BS_B1OPEN)
			ucs->pending = 0;
		break;
	case HD_OPEN_B2CHANNEL:
		if (atomic_read(&ucs->basstate) & BS_B2OPEN)
			ucs->pending = 0;
		break;
	case HD_CLOSE_ATCHANNEL:
		if (!(atomic_read(&ucs->basstate) & BS_ATOPEN))
			ucs->pending = 0;
		//wake_up_interruptible(cs->initwait);
		//FIXME need own wait queue?
		break;
	case HD_CLOSE_B1CHANNEL:
		if (!(atomic_read(&ucs->basstate) & BS_B1OPEN))
			ucs->pending = 0;
		break;
	case HD_CLOSE_B2CHANNEL:
		if (!(atomic_read(&ucs->basstate) & BS_B2OPEN))
			ucs->pending = 0;
		break;
	case HD_DEVICE_INIT_ACK:		/* no reply expected */
		ucs->pending = 0;
		break;
	/* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE
	 * are handled separately and should never end up here
	 */
	default:
		warn("unknown pending request 0x%02x cleared", ucs->pending);
		ucs->pending = 0;
	}

	/* request completed: its timeout is no longer needed */
	if (!ucs->pending)
		del_timer(&ucs->timer_ctrl);

	spin_unlock_irqrestore(&ucs->lock, flags);
}
366
367/* cmd_in_timeout
368 * timeout routine for command input request
369 * argument:
370 * controller state structure
371 */
372static void cmd_in_timeout(unsigned long data)
373{
374 struct cardstate *cs = (struct cardstate *) data;
375 struct bas_cardstate *ucs;
376 unsigned long flags;
377
378 IFNULLRET(cs);
379 ucs = cs->hw.bas;
380 IFNULLRET(ucs);
381
382 spin_lock_irqsave(&cs->lock, flags);
383 if (!atomic_read(&cs->connected)) {
384 dbg(DEBUG_USBREQ, "%s: disconnected", __func__);
385 spin_unlock_irqrestore(&cs->lock, flags);
386 return;
387 }
388 if (!ucs->rcvbuf_size) {
389 dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
390 spin_unlock_irqrestore(&cs->lock, flags);
391 return;
392 }
393 spin_unlock_irqrestore(&cs->lock, flags);
394
395 err("timeout reading AT response");
396 error_reset(cs); //FIXME retry?
397}
398
399
400static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs);
401
402/* atread_submit
403 * submit an HD_READ_ATMESSAGE command URB
404 * parameters:
405 * cs controller state structure
406 * timeout timeout in 1/10 sec., 0: none
407 * return value:
408 * 0 on success
409 * -EINVAL if a NULL pointer is encountered somewhere
410 * -EBUSY if another request is pending
411 * any URB submission error code
412 */
static int atread_submit(struct cardstate *cs, int timeout)
{
	struct bas_cardstate *ucs;
	int ret;

	IFNULLRETVAL(cs, -EINVAL);
	ucs = cs->hw.bas;
	IFNULLRETVAL(ucs, -EINVAL);
	IFNULLRETVAL(ucs->urb_cmd_in, -EINVAL);

	dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)", ucs->rcvbuf_size);

	/* refuse if the previous read URB has not completed yet */
	if (ucs->urb_cmd_in->status == -EINPROGRESS) {
		err("could not submit HD_READ_ATMESSAGE: URB busy");
		return -EBUSY;
	}

	/* build the vendor control request asking the base for
	 * ucs->rcvbuf_size bytes of AT response data */
	ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
	ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
	ucs->dr_cmd_in.wValue = 0;
	ucs->dr_cmd_in.wIndex = 0;
	ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
	usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
	                     usb_rcvctrlpipe(ucs->udev, 0),
	                     (unsigned char*) & ucs->dr_cmd_in,
	                     ucs->rcvbuf, ucs->rcvbuf_size,
	                     read_ctrl_callback, cs->inbuf);

	if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
		err("could not submit HD_READ_ATMESSAGE: %s",
		    get_usb_statmsg(ret));
		return ret;
	}

	/* arm the receive timeout; cmd_in_timeout() will fire if the
	 * read does not complete in time */
	if (timeout > 0) {
		dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
		ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10;
		ucs->timer_cmd_in.data = (unsigned long) cs;
		ucs->timer_cmd_in.function = cmd_in_timeout;
		add_timer(&ucs->timer_cmd_in);
	}
	return 0;
}
456
457static void stopurbs(struct bas_bc_state *);
458static int start_cbsend(struct cardstate *);
459
460/* set/clear bits in base connection state
461 */
462inline static void update_basstate(struct bas_cardstate *ucs,
463 int set, int clear)
464{
465 unsigned long flags;
466 int state;
467
468 spin_lock_irqsave(&ucs->lock, flags);
469 state = atomic_read(&ucs->basstate);
470 state &= ~clear;
471 state |= set;
472 atomic_set(&ucs->basstate, state);
473 spin_unlock_irqrestore(&ucs->lock, flags);
474}
475
476
477/* read_int_callback
478 * USB completion handler for interrupt pipe input
479 * called by the USB subsystem in interrupt context
480 * parameter:
481 * urb USB request block
482 * urb->context = controller state structure
483 */
static void read_int_callback(struct urb *urb, struct pt_regs *regs)
{
	struct cardstate *cs;
	struct bas_cardstate *ucs;
	struct bc_state *bcs;
	unsigned long flags;
	int status;
	unsigned l;		/* length field of the interrupt message */
	int channel;

	IFNULLRET(urb);
	cs = (struct cardstate *) urb->context;
	IFNULLRET(cs);
	ucs = cs->hw.bas;
	IFNULLRET(ucs);

	if (unlikely(!atomic_read(&cs->connected))) {
		warn("%s: disconnected", __func__);
		return;
	}

	switch (urb->status) {
	case 0:			/* success */
		break;
	case -ENOENT:		/* canceled */
	case -ECONNRESET:	/* canceled (async) */
	case -EINPROGRESS:	/* pending */
		/* ignore silently */
		dbg(DEBUG_USBREQ,
		    "%s: %s", __func__, get_usb_statmsg(urb->status));
		return;
	default:		/* severe trouble */
		warn("interrupt read: %s", get_usb_statmsg(urb->status));
		//FIXME corrective action? resubmission always ok?
		goto resubmit;
	}

	/* bytes 1 and 2 carry a 16 bit little-endian length/value field */
	l = (unsigned) ucs->int_in_buf[1] +
	    (((unsigned) ucs->int_in_buf[2]) << 8);

	dbg(DEBUG_USBREQ,
	    "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", urb->actual_length,
	    (int)ucs->int_in_buf[0], l,
	    (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);

	channel = 0;

	/* dispatch on the message type in byte 0; the B2 cases bump
	 * 'channel' and fall through to the corresponding B1 case */
	switch (ucs->int_in_buf[0]) {
	case HD_DEVICE_INIT_OK:
		update_basstate(ucs, BS_INIT, 0);
		break;

	case HD_READY_SEND_ATDATA:
		del_timer(&ucs->timer_atrdy);
		update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
		start_cbsend(cs);
		break;

	case HD_OPEN_B2CHANNEL_ACK:
		++channel;
		/* fall through */
	case HD_OPEN_B1CHANNEL_ACK:
		bcs = cs->bcs + channel;
		update_basstate(ucs, BS_B1OPEN << channel, 0);
		gigaset_bchannel_up(bcs);
		break;

	case HD_OPEN_ATCHANNEL_ACK:
		update_basstate(ucs, BS_ATOPEN, 0);
		start_cbsend(cs);
		break;

	case HD_CLOSE_B2CHANNEL_ACK:
		++channel;
		/* fall through */
	case HD_CLOSE_B1CHANNEL_ACK:
		bcs = cs->bcs + channel;
		update_basstate(ucs, 0, BS_B1OPEN << channel);
		stopurbs(bcs->hw.bas);
		gigaset_bchannel_down(bcs);
		break;

	case HD_CLOSE_ATCHANNEL_ACK:
		update_basstate(ucs, 0, BS_ATOPEN);
		break;

	case HD_B2_FLOW_CONTROL:
		++channel;
		/* fall through */
	case HD_B1_FLOW_CONTROL:
		bcs = cs->bcs + channel;
		atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
		           &bcs->hw.bas->corrbytes);
		dbg(DEBUG_ISO,
		    "Flow control (channel %d, sub %d): 0x%02x => %d",
		    channel, bcs->hw.bas->numsub, l,
		    atomic_read(&bcs->hw.bas->corrbytes));
		break;

	case HD_RECEIVEATDATA_ACK:	/* AT response ready to be received */
		if (!l) {
			warn("HD_RECEIVEATDATA_ACK with length 0 ignored");
			break;
		}
		spin_lock_irqsave(&cs->lock, flags);
		/* nonzero rcvbuf_size means a read is already in progress */
		if (ucs->rcvbuf_size) {
			spin_unlock_irqrestore(&cs->lock, flags);
			err("receive AT data overrun, %d bytes lost", l);
			error_reset(cs);	//FIXME reschedule
			break;
		}
		if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) {
			spin_unlock_irqrestore(&cs->lock, flags);
			err("%s: out of memory, %d bytes lost", __func__, l);
			error_reset(cs);	//FIXME reschedule
			break;
		}
		ucs->rcvbuf_size = l;
		ucs->retry_cmd_in = 0;
		/* submit the control read; on failure undo the buffer setup */
		if ((status = atread_submit(cs, BAS_TIMEOUT)) < 0) {
			kfree(ucs->rcvbuf);
			ucs->rcvbuf = NULL;
			ucs->rcvbuf_size = 0;
			error_reset(cs);	//FIXME reschedule
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		break;

	case HD_RESET_INTERRUPT_PIPE_ACK:
		dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK");
		break;

	case HD_SUSPEND_END:
		dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
		break;

	default:
		warn("unknown Gigaset signal 0x%02x (%u) ignored",
		     (int) ucs->int_in_buf[0], l);
	}

	check_pending(ucs);

resubmit:
	/* the interrupt URB must be kept running to receive base events */
	status = usb_submit_urb(urb, SLAB_ATOMIC);
	if (unlikely(status)) {
		err("could not resubmit interrupt URB: %s",
		    get_usb_statmsg(status));
		error_reset(cs);
	}
}
632
633/* read_ctrl_callback
634 * USB completion handler for control pipe input
635 * called by the USB subsystem in interrupt context
636 * parameter:
637 * urb USB request block
638 * urb->context = inbuf structure for controller state
639 */
static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
{
	struct cardstate *cs;
	struct bas_cardstate *ucs;
	unsigned numbytes;
	unsigned long flags;
	struct inbuf_t *inbuf;
	int have_data = 0;	/* nonzero: schedule the event tasklet */

	IFNULLRET(urb);
	inbuf = (struct inbuf_t *) urb->context;
	IFNULLRET(inbuf);
	cs = inbuf->cs;
	IFNULLRET(cs);
	ucs = cs->hw.bas;
	IFNULLRET(ucs);

	spin_lock_irqsave(&cs->lock, flags);
	if (!atomic_read(&cs->connected)) {
		warn("%s: disconnected", __func__);
		spin_unlock_irqrestore(&cs->lock, flags);
		return;
	}

	if (!ucs->rcvbuf_size) {
		warn("%s: no receive in progress", __func__);
		spin_unlock_irqrestore(&cs->lock, flags);
		return;
	}

	/* the read completed one way or another; stop its timeout */
	del_timer(&ucs->timer_cmd_in);

	/* NOTE: the 'retry' label in the default case is also reached by
	 * goto from the success case for short or empty transfers */
	switch (urb->status) {
	case 0:				/* normal completion */
		numbytes = urb->actual_length;
		if (unlikely(numbytes == 0)) {
			warn("control read: empty block received");
			goto retry;
		}
		if (unlikely(numbytes != ucs->rcvbuf_size)) {
			warn("control read: received %d chars, expected %d",
			     numbytes, ucs->rcvbuf_size);
			if (numbytes > ucs->rcvbuf_size)
				numbytes = ucs->rcvbuf_size;
		}

		/* copy received bytes to inbuf */
		have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);

		if (unlikely(numbytes < ucs->rcvbuf_size)) {
			/* incomplete - resubmit for remaining bytes */
			ucs->rcvbuf_size -= numbytes;
			ucs->retry_cmd_in = 0;
			goto retry;
		}
		break;

	case -ENOENT:			/* canceled */
	case -ECONNRESET:		/* canceled (async) */
	case -EINPROGRESS:		/* pending */
		/* no action necessary */
		dbg(DEBUG_USBREQ,
		    "%s: %s", __func__, get_usb_statmsg(urb->status));
		break;

	default:			/* severe trouble */
		warn("control read: %s", get_usb_statmsg(urb->status));
	retry:
		if (ucs->retry_cmd_in++ < BAS_RETRY) {
			notice("control read: retry %d", ucs->retry_cmd_in);
			if (atread_submit(cs, BAS_TIMEOUT) >= 0) {
				/* resubmitted - bypass regular exit block */
				spin_unlock_irqrestore(&cs->lock, flags);
				return;
			}
		} else {
			err("control read: giving up after %d tries",
			    ucs->retry_cmd_in);
		}
		error_reset(cs);
	}

	/* regular exit: release the receive buffer and mark no read
	 * in progress */
	kfree(ucs->rcvbuf);
	ucs->rcvbuf = NULL;
	ucs->rcvbuf_size = 0;
	spin_unlock_irqrestore(&cs->lock, flags);
	if (have_data) {
		dbg(DEBUG_INTR, "%s-->BH", __func__);
		gigaset_schedule_event(cs);
	}
}
731
732/* read_iso_callback
733 * USB completion handler for B channel isochronous input
734 * called by the USB subsystem in interrupt context
735 * parameter:
736 * urb USB request block of completed request
737 * urb->context = bc_state structure
738 */
static void read_iso_callback(struct urb *urb, struct pt_regs *regs)
{
	struct bc_state *bcs;
	struct bas_bc_state *ubc;
	unsigned long flags;
	int i, rc;

	IFNULLRET(urb);
	IFNULLRET(urb->context);
	IFNULLRET(cardstate);

	/* status codes not worth bothering the tasklet with */
	if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
	             urb->status == -EINPROGRESS)) {
		dbg(DEBUG_ISO,
		    "%s: %s", __func__, get_usb_statmsg(urb->status));
		return;
	}

	bcs = (struct bc_state *) urb->context;
	ubc = bcs->hw.bas;
	IFNULLRET(ubc);

	spin_lock_irqsave(&ubc->isoinlock, flags);
	if (likely(ubc->isoindone == NULL)) {
		/* pass URB to tasklet */
		ubc->isoindone = urb;
		tasklet_schedule(&ubc->rcvd_tasklet);
	} else {
		/* tasklet still busy, drop data and resubmit URB */
		ubc->loststatus = urb->status;
		/* account lost bytes and remember the worst frame status,
		 * then reset the frame descriptors for reuse */
		for (i = 0; i < BAS_NUMFRAMES; i++) {
			ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
			if (unlikely(urb->iso_frame_desc[i].status != 0 &&
			             urb->iso_frame_desc[i].status != -EINPROGRESS)) {
				ubc->loststatus = urb->iso_frame_desc[i].status;
			}
			urb->iso_frame_desc[i].status = 0;
			urb->iso_frame_desc[i].actual_length = 0;
		}
		if (likely(atomic_read(&ubc->running))) {
			urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
			urb->transfer_flags = URB_ISO_ASAP;
			urb->number_of_packets = BAS_NUMFRAMES;
			dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__);
			rc = usb_submit_urb(urb, SLAB_ATOMIC);
			if (unlikely(rc != 0)) {
				err("could not resubmit isochronous read URB: %s",
				    get_usb_statmsg(rc));
				dump_urb(DEBUG_ISO, "isoc read", urb);
				error_hangup(bcs);
			}
		}
	}
	spin_unlock_irqrestore(&ubc->isoinlock, flags);
}
795
796/* write_iso_callback
797 * USB completion handler for B channel isochronous output
798 * called by the USB subsystem in interrupt context
799 * parameter:
800 * urb USB request block of completed request
801 * urb->context = isow_urbctx_t structure
802 */
803static void write_iso_callback(struct urb *urb, struct pt_regs *regs)
804{
805 struct isow_urbctx_t *ucx;
806 struct bas_bc_state *ubc;
807 unsigned long flags;
808
809 IFNULLRET(urb);
810 IFNULLRET(urb->context);
811 IFNULLRET(cardstate);
812
813 /* status codes not worth bothering the tasklet with */
814 if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
815 urb->status == -EINPROGRESS)) {
816 dbg(DEBUG_ISO,
817 "%s: %s", __func__, get_usb_statmsg(urb->status));
818 return;
819 }
820
821 /* pass URB context to tasklet */
822 ucx = (struct isow_urbctx_t *) urb->context;
823 IFNULLRET(ucx->bcs);
824 ubc = ucx->bcs->hw.bas;
825 IFNULLRET(ubc);
826
827 spin_lock_irqsave(&ubc->isooutlock, flags);
828 ubc->isooutovfl = ubc->isooutdone;
829 ubc->isooutdone = ucx;
830 spin_unlock_irqrestore(&ubc->isooutlock, flags);
831 tasklet_schedule(&ubc->sent_tasklet);
832}
833
834/* starturbs
835 * prepare and submit USB request blocks for isochronous input and output
836 * argument:
837 * B channel control structure
838 * return value:
839 * 0 on success
840 * < 0 on error (no URBs submitted)
841 */
842static int starturbs(struct bc_state *bcs)
843{
844 struct urb *urb;
845 struct bas_bc_state *ubc;
846 int j, k;
847 int rc;
848
849 IFNULLRETVAL(bcs, -EFAULT);
850 ubc = bcs->hw.bas;
851 IFNULLRETVAL(ubc, -EFAULT);
852
853 /* initialize L2 reception */
854 if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
855 bcs->inputstate |= INS_flag_hunt;
856
857 /* submit all isochronous input URBs */
858 atomic_set(&ubc->running, 1);
859 for (k = 0; k < BAS_INURBS; k++) {
860 urb = ubc->isoinurbs[k];
861 if (!urb) {
862 err("isoinurbs[%d]==NULL", k);
863 rc = -EFAULT;
864 goto error;
865 }
866
867 urb->dev = bcs->cs->hw.bas->udev;
868 urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
869 urb->transfer_flags = URB_ISO_ASAP;
870 urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
871 urb->transfer_buffer_length = BAS_INBUFSIZE;
872 urb->number_of_packets = BAS_NUMFRAMES;
873 urb->interval = BAS_FRAMETIME;
874 urb->complete = read_iso_callback;
875 urb->context = bcs;
876 for (j = 0; j < BAS_NUMFRAMES; j++) {
877 urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
878 urb->iso_frame_desc[j].length = BAS_MAXFRAME;
879 urb->iso_frame_desc[j].status = 0;
880 urb->iso_frame_desc[j].actual_length = 0;
881 }
882
883 dump_urb(DEBUG_ISO, "Initial isoc read", urb);
884 if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
885 err("could not submit isochronous read URB %d: %s",
886 k, get_usb_statmsg(rc));
887 goto error;
888 }
889 }
890
891 /* initialize L2 transmission */
892 gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
893
894 /* set up isochronous output URBs for flag idling */
895 for (k = 0; k < BAS_OUTURBS; ++k) {
896 urb = ubc->isoouturbs[k].urb;
897 if (!urb) {
898 err("isoouturbs[%d].urb==NULL", k);
899 rc = -EFAULT;
900 goto error;
901 }
902 urb->dev = bcs->cs->hw.bas->udev;
903 urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
904 urb->transfer_flags = URB_ISO_ASAP;
905 urb->transfer_buffer = ubc->isooutbuf->data;
906 urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
907 urb->number_of_packets = BAS_NUMFRAMES;
908 urb->interval = BAS_FRAMETIME;
909 urb->complete = write_iso_callback;
910 urb->context = &ubc->isoouturbs[k];
911 for (j = 0; j < BAS_NUMFRAMES; ++j) {
912 urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
913 urb->iso_frame_desc[j].length = BAS_NORMFRAME;
914 urb->iso_frame_desc[j].status = 0;
915 urb->iso_frame_desc[j].actual_length = 0;
916 }
917 ubc->isoouturbs[k].limit = -1;
918 }
919
920 /* submit two URBs, keep third one */
921 for (k = 0; k < 2; ++k) {
922 dump_urb(DEBUG_ISO, "Initial isoc write", urb);
923 rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
924 if (rc != 0) {
925 err("could not submit isochronous write URB %d: %s",
926 k, get_usb_statmsg(rc));
927 goto error;
928 }
929 }
930 dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
931 ubc->isooutfree = &ubc->isoouturbs[2];
932 ubc->isooutdone = ubc->isooutovfl = NULL;
933 return 0;
934 error:
935 stopurbs(ubc);
936 return rc;
937}
938
939/* stopurbs
940 * cancel the USB request blocks for isochronous input and output
941 * errors are silently ignored
942 * argument:
943 * B channel control structure
944 */
945static void stopurbs(struct bas_bc_state *ubc)
946{
947 int k, rc;
948
949 IFNULLRET(ubc);
950
951 atomic_set(&ubc->running, 0);
952
953 for (k = 0; k < BAS_INURBS; ++k) {
954 rc = usb_unlink_urb(ubc->isoinurbs[k]);
955 dbg(DEBUG_ISO, "%s: isoc input URB %d unlinked, result = %d",
956 __func__, k, rc);
957 }
958
959 for (k = 0; k < BAS_OUTURBS; ++k) {
960 rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
961 dbg(DEBUG_ISO, "%s: isoc output URB %d unlinked, result = %d",
962 __func__, k, rc);
963 }
964}
965
966/* Isochronous Write - Bottom Half */
967/* =============================== */
968
/* submit_iso_write_urb
 * fill and submit the next isochronous write URB
 * Frame lengths are adjusted around BAS_NORMFRAME by the corrbytes flow
 * control counter, clamped to [BAS_LOWFRAME, BAS_HIGHFRAME]; the data for
 * each frame is taken from the isooutbuf ring buffer.
 * parameters:
 *	bcs	B channel state structure
 * return value:
 *	number of frames submitted in URB
 *	0 if URB not submitted because no data available (isooutbuf busy)
 *	error code < 0 on error
 */
static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
{
	struct urb *urb;
	struct bas_bc_state *ubc;
	struct usb_iso_packet_descriptor *ifd;
	int corrbytes, nframe, rc;

	IFNULLRETVAL(ucx, -EFAULT);
	urb = ucx->urb;
	IFNULLRETVAL(urb, -EFAULT);
	IFNULLRETVAL(ucx->bcs, -EFAULT);
	ubc = ucx->bcs->hw.bas;
	IFNULLRETVAL(ubc, -EFAULT);

	urb->dev = ucx->bcs->cs->hw.bas->udev;	/* clobbered by USB subsystem */
	urb->transfer_flags = URB_ISO_ASAP;
	urb->transfer_buffer = ubc->isooutbuf->data;
	urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);

	for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
		ifd = &urb->iso_frame_desc[nframe];

		/* compute frame length according to flow control */
		ifd->length = BAS_NORMFRAME;
		if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) {
			dbg(DEBUG_ISO, "%s: corrbytes=%d", __func__, corrbytes);
			/* clamp the correction so the frame length stays
			 * within [BAS_LOWFRAME, BAS_HIGHFRAME] */
			if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
				corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
			else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
				corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
			ifd->length += corrbytes;
			/* consume only the part of the correction applied */
			atomic_add(-corrbytes, &ubc->corrbytes);
		}
		//dbg(DEBUG_ISO, "%s: frame %d length=%d", __func__, nframe, ifd->length);

		/* retrieve block of data to send */
		ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
		if (ifd->offset < 0) {
			if (ifd->offset == -EBUSY) {
				/* no data right now: submit what we have so
				 * far (possibly nothing) */
				dbg(DEBUG_ISO, "%s: buffer busy at frame %d",
				    __func__, nframe);
				/* tasklet will be restarted from gigaset_send_skb() */
			} else {
				err("%s: buffer error %d at frame %d",
				    __func__, ifd->offset, nframe);
				return ifd->offset;
			}
			break;
		}
		/* remember ring buffer position up to which this URB covers,
		 * so the completion path can release it */
		ucx->limit = atomic_read(&ubc->isooutbuf->nextread);
		ifd->status = 0;
		ifd->actual_length = 0;
	}
	/* submit only if at least one frame carries data */
	if ((urb->number_of_packets = nframe) > 0) {
		if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
			err("could not submit isochronous write URB: %s",
			    get_usb_statmsg(rc));
			dump_urb(DEBUG_ISO, "isoc write", urb);
			return rc;
		}
		++ubc->numsub;
	}
	return nframe;
}
1042
/* write_iso_tasklet
 * tasklet scheduled when an isochronous output URB from the Gigaset device
 * has completed
 * Drains completed URBs, keeps the output pipeline filled by resubmitting
 * free URBs, and finally encodes queued SKBs into the output buffer.
 * parameter:
 *	data	B channel state structure
 */
static void write_iso_tasklet(unsigned long data)
{
	struct bc_state *bcs;
	struct bas_bc_state *ubc;
	struct cardstate *cs;
	struct isow_urbctx_t *done, *next, *ovfl;
	struct urb *urb;
	struct usb_iso_packet_descriptor *ifd;
	int offset;
	unsigned long flags;
	int i;
	struct sk_buff *skb;
	int len;

	bcs = (struct bc_state *) data;
	IFNULLRET(bcs);
	ubc = bcs->hw.bas;
	IFNULLRET(ubc);
	cs = bcs->cs;
	IFNULLRET(cs);

	/* loop while completed URBs arrive in time */
	for (;;) {
		if (unlikely(!atomic_read(&cs->connected))) {
			warn("%s: disconnected", __func__);
			return;
		}

		if (unlikely(!(atomic_read(&ubc->running)))) {
			dbg(DEBUG_ISO, "%s: not running", __func__);
			return;
		}

		/* retrieve completed URBs */
		spin_lock_irqsave(&ubc->isooutlock, flags);
		done = ubc->isooutdone;
		ubc->isooutdone = NULL;
		ovfl = ubc->isooutovfl;
		ubc->isooutovfl = NULL;
		spin_unlock_irqrestore(&ubc->isooutlock, flags);
		/* an overflow means a second URB completed before we consumed
		 * the first one - the device outran us */
		if (ovfl) {
			err("isochronous write buffer underrun - buy a faster machine :-)");
			error_hangup(bcs);
			break;
		}
		if (!done)
			break;

		/* submit free URB if available */
		spin_lock_irqsave(&ubc->isooutlock, flags);
		next = ubc->isooutfree;
		ubc->isooutfree = NULL;
		spin_unlock_irqrestore(&ubc->isooutlock, flags);
		if (next) {
			if (submit_iso_write_urb(next) <= 0) {
				/* could not submit URB, put it back */
				spin_lock_irqsave(&ubc->isooutlock, flags);
				if (ubc->isooutfree == NULL) {
					ubc->isooutfree = next;
					next = NULL;
				}
				spin_unlock_irqrestore(&ubc->isooutlock, flags);
				if (next) {
					/* couldn't put it back */
					err("losing isochronous write URB");
					error_hangup(bcs);
				}
			}
		}

		/* process completed URB */
		urb = done->urb;
		switch (urb->status) {
		case 0:				/* normal completion */
			break;
		case -EXDEV:			/* inspect individual frames */
			/* assumptions (for lack of documentation):
			 * - actual_length bytes of the frame in error are successfully sent
			 * - all following frames are not sent at all
			 */
			/* NOTE(review): 'offset' computed below is never read
			 * after the switch - the code frees the buffer up to
			 * done->limit regardless, so partially sent data is
			 * not retransmitted. Confirm whether that is intended. */
			dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
			offset = done->limit;	/* just in case */
			for (i = 0; i < BAS_NUMFRAMES; i++) {
				ifd = &urb->iso_frame_desc[i];
				if (ifd->status ||
				    ifd->actual_length != ifd->length) {
					warn("isochronous write: frame %d: %s, "
					     "only %d of %d bytes sent",
					     i, get_usb_statmsg(ifd->status),
					     ifd->actual_length, ifd->length);
					offset = (ifd->offset +
						  ifd->actual_length)
						 % BAS_OUTBUFSIZE;
					break;
				}
			}
#ifdef CONFIG_GIGASET_DEBUG
			/* check assumption on remaining frames */
			for (; i < BAS_NUMFRAMES; i++) {
				ifd = &urb->iso_frame_desc[i];
				if (ifd->status != -EINPROGRESS
				    || ifd->actual_length != 0) {
					warn("isochronous write: frame %d: %s, "
					     "%d of %d bytes sent",
					     i, get_usb_statmsg(ifd->status),
					     ifd->actual_length, ifd->length);
					offset = (ifd->offset +
						  ifd->actual_length)
						 % BAS_OUTBUFSIZE;
					break;
				}
			}
#endif
			break;
		case -EPIPE:			//FIXME is this the code for "underrun"?
			err("isochronous write stalled");
			error_hangup(bcs);
			break;
		default:			/* severe trouble */
			warn("isochronous write: %s",
			     get_usb_statmsg(urb->status));
		}

		/* mark the write buffer area covered by this URB as free */
		if (done->limit >= 0)
			atomic_set(&ubc->isooutbuf->read, done->limit);

		/* mark URB as free */
		spin_lock_irqsave(&ubc->isooutlock, flags);
		next = ubc->isooutfree;
		ubc->isooutfree = done;
		spin_unlock_irqrestore(&ubc->isooutlock, flags);
		if (next) {
			/* only one URB still active - resubmit one */
			if (submit_iso_write_urb(next) <= 0) {
				/* couldn't submit */
				error_hangup(bcs);
			}
		}
	}

	/* process queued SKBs */
	while ((skb = skb_dequeue(&bcs->squeue))) {
		/* copy to output buffer, doing L2 encapsulation */
		len = skb->len;
		if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
			/* insufficient buffer space, push back onto queue */
			skb_queue_head(&bcs->squeue, skb);
			dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
			    __func__, skb_queue_len(&bcs->squeue));
			break;
		}
		skb_pull(skb, len);
		gigaset_skb_sent(bcs, skb);
		dev_kfree_skb_any(skb);
	}
}
1206
1207/* Isochronous Read - Bottom Half */
1208/* ============================== */
1209
/* read_iso_tasklet
 * tasklet scheduled when an isochronous input URB from the Gigaset device
 * has completed
 * Validates each frame of the URB, passes the received bytes to the
 * isochronous decoder, and resubmits the URB for further input.
 * parameter:
 *	data	B channel state structure
 */
static void read_iso_tasklet(unsigned long data)
{
	struct bc_state *bcs;
	struct bas_bc_state *ubc;
	struct cardstate *cs;
	struct urb *urb;
	char *rcvbuf;
	unsigned long flags;
	int totleft, numbytes, offset, frame, rc;

	bcs = (struct bc_state *) data;
	IFNULLRET(bcs);
	ubc = bcs->hw.bas;
	IFNULLRET(ubc);
	cs = bcs->cs;
	IFNULLRET(cs);

	/* loop while more completed URBs arrive in the meantime */
	for (;;) {
		if (!atomic_read(&cs->connected)) {
			warn("%s: disconnected", __func__);
			return;
		}

		/* retrieve URB */
		spin_lock_irqsave(&ubc->isoinlock, flags);
		if (!(urb = ubc->isoindone)) {
			spin_unlock_irqrestore(&ubc->isoinlock, flags);
			return;
		}
		ubc->isoindone = NULL;
		/* report any loss recorded by the completion handler while
		 * this tasklet was busy */
		if (unlikely(ubc->loststatus != -EINPROGRESS)) {
			warn("isochronous read overrun, dropped URB with status: %s, %d bytes lost",
			     get_usb_statmsg(ubc->loststatus), ubc->isoinlost);
			ubc->loststatus = -EINPROGRESS;
		}
		spin_unlock_irqrestore(&ubc->isoinlock, flags);

		if (unlikely(!(atomic_read(&ubc->running)))) {
			dbg(DEBUG_ISO, "%s: channel not running, dropped URB with status: %s",
			    __func__, get_usb_statmsg(urb->status));
			return;
		}

		switch (urb->status) {
		case 0:				/* normal completion */
			break;
		case -EXDEV:			/* inspect individual frames (we do that anyway) */
			dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
			break;
		case -ENOENT:
		case -ECONNRESET:
			dbg(DEBUG_ISO, "%s: URB canceled", __func__);
			continue;		/* -> skip */
		case -EINPROGRESS:		/* huh? */
			dbg(DEBUG_ISO, "%s: URB still pending", __func__);
			continue;		/* -> skip */
		case -EPIPE:
			err("isochronous read stalled");
			error_hangup(bcs);
			continue;		/* -> skip */
		default:			/* severe trouble */
			warn("isochronous read: %s",
			     get_usb_statmsg(urb->status));
			goto error;
		}

		rcvbuf = urb->transfer_buffer;
		totleft = urb->actual_length;
		for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
			if (unlikely(urb->iso_frame_desc[frame].status)) {
				warn("isochronous read: frame %d: %s",
				     frame, get_usb_statmsg(urb->iso_frame_desc[frame].status));
				break;
			}
			numbytes = urb->iso_frame_desc[frame].actual_length;
			/* sanity checks: frame length and offset must stay
			 * within the buffer and within the URB total */
			if (unlikely(numbytes > BAS_MAXFRAME)) {
				warn("isochronous read: frame %d: numbytes (%d) > BAS_MAXFRAME",
				     frame, numbytes);
				break;
			}
			if (unlikely(numbytes > totleft)) {
				warn("isochronous read: frame %d: numbytes (%d) > totleft (%d)",
				     frame, numbytes, totleft);
				break;
			}
			offset = urb->iso_frame_desc[frame].offset;
			if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
				warn("isochronous read: frame %d: offset (%d) + numbytes (%d) > BAS_INBUFSIZE",
				     frame, offset, numbytes);
				break;
			}
			gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
			totleft -= numbytes;
		}
		if (unlikely(totleft > 0))
			warn("isochronous read: %d data bytes missing",
			     totleft);

	error:
		/* URB processed, resubmit */
		for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
			urb->iso_frame_desc[frame].status = 0;
			urb->iso_frame_desc[frame].actual_length = 0;
		}
		urb->dev = bcs->cs->hw.bas->udev;	/* clobbered by USB subsystem */
		urb->transfer_flags = URB_ISO_ASAP;
		urb->number_of_packets = BAS_NUMFRAMES;
		if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
			err("could not resubmit isochronous read URB: %s",
			    get_usb_statmsg(rc));
			dump_urb(DEBUG_ISO, "resubmit iso read", urb);
			error_hangup(bcs);
		}
	}
}
1332
1333/* Channel Operations */
1334/* ================== */
1335
1336/* req_timeout
1337 * timeout routine for control output request
1338 * argument:
1339 * B channel control structure
1340 */
1341static void req_timeout(unsigned long data)
1342{
1343 struct bc_state *bcs = (struct bc_state *) data;
1344 struct bas_cardstate *ucs;
1345 int pending;
1346 unsigned long flags;
1347
1348 IFNULLRET(bcs);
1349 IFNULLRET(bcs->cs);
1350 ucs = bcs->cs->hw.bas;
1351 IFNULLRET(ucs);
1352
1353 check_pending(ucs);
1354
1355 spin_lock_irqsave(&ucs->lock, flags);
1356 pending = ucs->pending;
1357 ucs->pending = 0;
1358 spin_unlock_irqrestore(&ucs->lock, flags);
1359
1360 switch (pending) {
1361 case 0: /* no pending request */
1362 dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
1363 break;
1364
1365 case HD_OPEN_ATCHANNEL:
1366 err("timeout opening AT channel");
1367 error_reset(bcs->cs);
1368 break;
1369
1370 case HD_OPEN_B2CHANNEL:
1371 case HD_OPEN_B1CHANNEL:
1372 err("timeout opening channel %d", bcs->channel + 1);
1373 error_hangup(bcs);
1374 break;
1375
1376 case HD_CLOSE_ATCHANNEL:
1377 err("timeout closing AT channel");
1378 //wake_up_interruptible(cs->initwait);
1379 //FIXME need own wait queue?
1380 break;
1381
1382 case HD_CLOSE_B2CHANNEL:
1383 case HD_CLOSE_B1CHANNEL:
1384 err("timeout closing channel %d", bcs->channel + 1);
1385 break;
1386
1387 default:
1388 warn("request 0x%02x timed out, clearing", pending);
1389 }
1390}
1391
1392/* write_ctrl_callback
1393 * USB completion handler for control pipe output
1394 * called by the USB subsystem in interrupt context
1395 * parameter:
1396 * urb USB request block of completed request
1397 * urb->context = hardware specific controller state structure
1398 */
1399static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs)
1400{
1401 struct bas_cardstate *ucs;
1402 unsigned long flags;
1403
1404 IFNULLRET(urb);
1405 IFNULLRET(urb->context);
1406 IFNULLRET(cardstate);
1407
1408 ucs = (struct bas_cardstate *) urb->context;
1409 spin_lock_irqsave(&ucs->lock, flags);
1410 if (urb->status && ucs->pending) {
1411 err("control request 0x%02x failed: %s",
1412 ucs->pending, get_usb_statmsg(urb->status));
1413 del_timer(&ucs->timer_ctrl);
1414 ucs->pending = 0;
1415 }
1416 /* individual handling of specific request types */
1417 switch (ucs->pending) {
1418 case HD_DEVICE_INIT_ACK: /* no reply expected */
1419 ucs->pending = 0;
1420 break;
1421 }
1422 spin_unlock_irqrestore(&ucs->lock, flags);
1423}
1424
/* req_submit
 * submit a control output request without message buffer to the Gigaset base
 * and optionally start a timeout
 * parameters:
 *	bcs	B channel control structure
 *	req	control request code (HD_*)
 *	val	control request parameter value (set to 0 if unused)
 *	timeout	timeout in tenths of a second (0: no timeout)
 *		(expires is set to jiffies + timeout * HZ / 10 below)
 * return value:
 *	0 on success
 *	-EINVAL if a NULL pointer is encountered somewhere
 *	-EBUSY if another request is pending
 *	any URB submission error code
 */
static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
{
	struct bas_cardstate *ucs;
	int ret;
	unsigned long flags;

	IFNULLRETVAL(bcs, -EINVAL);
	IFNULLRETVAL(bcs->cs, -EINVAL);
	ucs = bcs->cs->hw.bas;
	IFNULLRETVAL(ucs, -EINVAL);
	IFNULLRETVAL(ucs->urb_ctrl, -EINVAL);

	dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);

	/* ucs->lock serializes access to ucs->pending and the control URB;
	 * it is held across the whole prepare/submit/arm-timer sequence */
	spin_lock_irqsave(&ucs->lock, flags);
	if (ucs->pending) {
		spin_unlock_irqrestore(&ucs->lock, flags);
		/* NOTE(review): ucs->pending is read here after the lock is
		 * dropped, so the logged value may already have changed */
		err("submission of request 0x%02x failed: request 0x%02x still pending",
		    req, ucs->pending);
		return -EBUSY;
	}
	if (ucs->urb_ctrl->status == -EINPROGRESS) {
		spin_unlock_irqrestore(&ucs->lock, flags);
		err("could not submit request 0x%02x: URB busy", req);
		return -EBUSY;
	}

	/* build the vendor-specific control request (no data stage) */
	ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
	ucs->dr_ctrl.bRequest = req;
	ucs->dr_ctrl.wValue = cpu_to_le16(val);
	ucs->dr_ctrl.wIndex = 0;
	ucs->dr_ctrl.wLength = 0;
	usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
	                     usb_sndctrlpipe(ucs->udev, 0),
	                     (unsigned char*) &ucs->dr_ctrl, NULL, 0,
	                     write_ctrl_callback, ucs);
	if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) {
		err("could not submit request 0x%02x: %s",
		    req, get_usb_statmsg(ret));
		spin_unlock_irqrestore(&ucs->lock, flags);
		return ret;
	}
	ucs->pending = req;

	if (timeout > 0) {
		dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
		ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10;
		ucs->timer_ctrl.data = (unsigned long) bcs;
		ucs->timer_ctrl.function = req_timeout;
		add_timer(&ucs->timer_ctrl);
	}

	spin_unlock_irqrestore(&ucs->lock, flags);
	return 0;
}
1494
1495/* gigaset_init_bchannel
1496 * called by common.c to connect a B channel
1497 * initialize isochronous I/O and tell the Gigaset base to open the channel
1498 * argument:
1499 * B channel control structure
1500 * return value:
1501 * 0 on success, error code < 0 on error
1502 */
1503static int gigaset_init_bchannel(struct bc_state *bcs)
1504{
1505 int req, ret;
1506
1507 IFNULLRETVAL(bcs, -EINVAL);
1508
1509 if ((ret = starturbs(bcs)) < 0) {
1510 err("could not start isochronous I/O for channel %d",
1511 bcs->channel + 1);
1512 error_hangup(bcs);
1513 return ret;
1514 }
1515
1516 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
1517 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
1518 err("could not open channel %d: %s",
1519 bcs->channel + 1, get_usb_statmsg(ret));
1520 stopurbs(bcs->hw.bas);
1521 error_hangup(bcs);
1522 }
1523 return ret;
1524}
1525
1526/* gigaset_close_bchannel
1527 * called by common.c to disconnect a B channel
1528 * tell the Gigaset base to close the channel
1529 * stopping isochronous I/O and LL notification will be done when the
1530 * acknowledgement for the close arrives
1531 * argument:
1532 * B channel control structure
1533 * return value:
1534 * 0 on success, error code < 0 on error
1535 */
1536static int gigaset_close_bchannel(struct bc_state *bcs)
1537{
1538 int req, ret;
1539
1540 IFNULLRETVAL(bcs, -EINVAL);
1541
1542 if (!(atomic_read(&bcs->cs->hw.bas->basstate) &
1543 (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
1544 /* channel not running: just signal common.c */
1545 gigaset_bchannel_down(bcs);
1546 return 0;
1547 }
1548
1549 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
1550 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
1551 err("could not submit HD_CLOSE_BxCHANNEL request: %s",
1552 get_usb_statmsg(ret));
1553 return ret;
1554}
1555
1556/* Device Operations */
1557/* ================= */
1558
1559/* complete_cb
1560 * unqueue first command buffer from queue, waking any sleepers
1561 * must be called with cs->cmdlock held
1562 * parameter:
1563 * cs controller state structure
1564 */
1565static void complete_cb(struct cardstate *cs)
1566{
1567 struct cmdbuf_t *cb;
1568
1569 IFNULLRET(cs);
1570 cb = cs->cmdbuf;
1571 IFNULLRET(cb);
1572
1573 /* unqueue completed buffer */
1574 cs->cmdbytes -= cs->curlen;
1575 dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD,
1576 "write_command: sent %u bytes, %u left",
1577 cs->curlen, cs->cmdbytes);
1578 if ((cs->cmdbuf = cb->next) != NULL) {
1579 cs->cmdbuf->prev = NULL;
1580 cs->curlen = cs->cmdbuf->len;
1581 } else {
1582 cs->lastcmdbuf = NULL;
1583 cs->curlen = 0;
1584 }
1585
1586 if (cb->wake_tasklet)
1587 tasklet_schedule(cb->wake_tasklet);
1588
1589 kfree(cb);
1590}
1591
1592static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len);
1593
/* write_command_callback
 * USB completion handler for AT command transmission
 * called by the USB subsystem in interrupt context
 * On failure, retries the transmission up to BAS_RETRY times before
 * completing (and thereby discarding) the command buffer.
 * parameter:
 *	urb	USB request block of completed request
 *		urb->context = controller state structure
 */
static void write_command_callback(struct urb *urb, struct pt_regs *regs)
{
	struct cardstate *cs;
	unsigned long flags;
	struct bas_cardstate *ucs;

	IFNULLRET(urb);
	cs = (struct cardstate *) urb->context;
	IFNULLRET(cs);
	ucs = cs->hw.bas;
	IFNULLRET(ucs);

	/* check status */
	switch (urb->status) {
	case 0:					/* normal completion */
		break;
	case -ENOENT:			/* canceled */
	case -ECONNRESET:		/* canceled (async) */
	case -EINPROGRESS:		/* pending */
		/* ignore silently */
		dbg(DEBUG_USBREQ,
		    "%s: %s", __func__, get_usb_statmsg(urb->status));
		return;
	default:				/* any failure */
		/* give up after BAS_RETRY resubmission attempts */
		if (++ucs->retry_cmd_out > BAS_RETRY) {
			warn("command write: %s, giving up after %d retries",
			     get_usb_statmsg(urb->status), ucs->retry_cmd_out);
			break;
		}
		if (cs->cmdbuf == NULL) {
			warn("command write: %s, cannot retry - cmdbuf gone",
			     get_usb_statmsg(urb->status));
			break;
		}
		notice("command write: %s, retry %d",
		       get_usb_statmsg(urb->status), ucs->retry_cmd_out);
		if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
			/* resubmitted - bypass regular exit block */
			return;
		/* command send failed, assume base still waiting */
		update_basstate(ucs, BS_ATREADY, 0);
	}

	/* complete (and free) the head command buffer, waking any sleeper */
	spin_lock_irqsave(&cs->cmdlock, flags);
	if (cs->cmdbuf != NULL)
		complete_cb(cs);
	spin_unlock_irqrestore(&cs->cmdlock, flags);
}
1649
1650/* atrdy_timeout
1651 * timeout routine for AT command transmission
1652 * argument:
1653 * controller state structure
1654 */
1655static void atrdy_timeout(unsigned long data)
1656{
1657 struct cardstate *cs = (struct cardstate *) data;
1658 struct bas_cardstate *ucs;
1659
1660 IFNULLRET(cs);
1661 ucs = cs->hw.bas;
1662 IFNULLRET(ucs);
1663
1664 warn("timeout waiting for HD_READY_SEND_ATDATA");
1665
1666 /* fake the missing signal - what else can I do? */
1667 update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
1668 start_cbsend(cs);
1669}
1670
/* atwrite_submit
 * submit an HD_WRITE_ATMESSAGE command URB
 * parameters:
 *	cs	controller state structure
 *	buf	buffer containing command to send
 *	len	length of command to send
 * return value:
 *	0 on success
 *	-EFAULT if a NULL pointer is encountered somewhere
 *	-EBUSY if another request is pending
 *	any URB submission error code
 */
static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
{
	struct bas_cardstate *ucs;
	int ret;

	IFNULLRETVAL(cs, -EFAULT);
	ucs = cs->hw.bas;
	IFNULLRETVAL(ucs, -EFAULT);
	IFNULLRETVAL(ucs->urb_cmd_out, -EFAULT);

	dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);

	/* NOTE(review): this status check is not done under a lock, unlike
	 * the equivalent check in req_submit() - confirm callers serialize */
	if (ucs->urb_cmd_out->status == -EINPROGRESS) {
		err("could not submit HD_WRITE_ATMESSAGE: URB busy");
		return -EBUSY;
	}

	/* build vendor-specific control request carrying the AT command */
	ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
	ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
	ucs->dr_cmd_out.wValue = 0;
	ucs->dr_cmd_out.wIndex = 0;
	ucs->dr_cmd_out.wLength = cpu_to_le16(len);
	usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
	                     usb_sndctrlpipe(ucs->udev, 0),
	                     (unsigned char*) &ucs->dr_cmd_out, buf, len,
	                     write_command_callback, cs);

	if ((ret = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC)) != 0) {
		err("could not submit HD_WRITE_ATMESSAGE: %s",
		    get_usb_statmsg(ret));
		return ret;
	}

	/* submitted successfully */
	update_basstate(ucs, 0, BS_ATREADY);

	/* start timeout if necessary: arm the ATREADY watchdog unless one
	 * is already running (BS_ATTIMER set) */
	if (!(atomic_read(&ucs->basstate) & BS_ATTIMER)) {
		dbg(DEBUG_OUTPUT,
		    "setting ATREADY timeout of %d/10 secs", ATRDY_TIMEOUT);
		ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10;
		ucs->timer_atrdy.data = (unsigned long) cs;
		ucs->timer_atrdy.function = atrdy_timeout;
		add_timer(&ucs->timer_atrdy);
		update_basstate(ucs, BS_ATTIMER, 0);
	}
	return 0;
}
1731
/* start_cbsend
 * start transmission of AT command queue if necessary
 * If the AT channel is not yet open, an open request is submitted instead
 * (and the queue flushed on failure); otherwise queued command buffers are
 * sent while the base signals readiness (BS_ATREADY).
 * parameter:
 *	cs	controller state structure
 * return value:
 *	0 on success
 *	error code < 0 on error
 */
static int start_cbsend(struct cardstate *cs)
{
	struct cmdbuf_t *cb;
	struct bas_cardstate *ucs;
	unsigned long flags;
	int rc;
	int retval = 0;

	IFNULLRETVAL(cs, -EFAULT);
	ucs = cs->hw.bas;
	IFNULLRETVAL(ucs, -EFAULT);

	/* check if AT channel is open */
	if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) {
		dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, "AT channel not open");
		rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
		if (rc < 0) {
			err("could not open AT channel");
			/* flush command queue */
			spin_lock_irqsave(&cs->cmdlock, flags);
			while (cs->cmdbuf != NULL)
				complete_cb(cs);
			spin_unlock_irqrestore(&cs->cmdlock, flags);
		}
		return rc;
	}

	/* try to send first command in queue */
	spin_lock_irqsave(&cs->cmdlock, flags);

	/* loop terminates because a successful atwrite_submit() clears
	 * BS_ATREADY, and a failed one dequeues the buffer via complete_cb() */
	while ((cb = cs->cmdbuf) != NULL &&
	       atomic_read(&ucs->basstate) & BS_ATREADY) {
		ucs->retry_cmd_out = 0;
		rc = atwrite_submit(cs, cb->buf, cb->len);
		if (unlikely(rc)) {
			retval = rc;
			complete_cb(cs);
		}
	}

	spin_unlock_irqrestore(&cs->cmdlock, flags);
	return retval;
}
1783
/* gigaset_write_cmd
 * This function is called by the device independent part of the driver
 * to transmit an AT command string to the Gigaset device.
 * It encapsulates the device specific method for transmission over the
 * direct USB connection to the base.
 * The command string is added to the queue of commands to send, and
 * USB transmission is started if necessary.
 * parameters:
 *	cs		controller state structure
 *	buf		command string to send
 *	len		number of bytes to send (max. IF_WRITEBUF)
 *	wake_tasklet	tasklet to run when transmission is completed (NULL if none)
 * return value:
 *	number of bytes queued on success
 *	error code < 0 on error
 */
static int gigaset_write_cmd(struct cardstate *cs,
                             const unsigned char *buf, int len,
                             struct tasklet_struct *wake_tasklet)
{
	struct cmdbuf_t *cb;
	unsigned long flags;
	int status;

	gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
	                     DEBUG_TRANSCMD : DEBUG_LOCKCMD,
	                   "CMD Transmit", len, buf, 0);

	if (!atomic_read(&cs->connected)) {
		err("%s: not connected", __func__);
		return -ENODEV;
	}

	if (len <= 0)
		return 0;			/* nothing to do */

	/* silently truncate overlong commands to the per-write maximum */
	if (len > IF_WRITEBUF)
		len = IF_WRITEBUF;
	/* allocate header and payload in one chunk */
	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
		err("%s: out of memory", __func__);
		return -ENOMEM;
	}

	memcpy(cb->buf, buf, len);
	cb->len = len;
	cb->offset = 0;
	cb->next = NULL;
	cb->wake_tasklet = wake_tasklet;

	/* append to the doubly linked command queue under cmdlock */
	spin_lock_irqsave(&cs->cmdlock, flags);
	cb->prev = cs->lastcmdbuf;
	if (cs->lastcmdbuf)
		cs->lastcmdbuf->next = cb;
	else {
		/* queue was empty: this buffer becomes the current one */
		cs->cmdbuf = cb;
		cs->curlen = len;
	}
	cs->cmdbytes += len;
	cs->lastcmdbuf = cb;
	spin_unlock_irqrestore(&cs->cmdlock, flags);

	status = start_cbsend(cs);

	return status < 0 ? status : len;
}
1849
1850/* gigaset_write_room
1851 * tty_driver.write_room interface routine
1852 * return number of characters the driver will accept to be written via gigaset_write_cmd
1853 * parameter:
1854 * controller state structure
1855 * return value:
1856 * number of characters
1857 */
1858static int gigaset_write_room(struct cardstate *cs)
1859{
1860 return IF_WRITEBUF;
1861}
1862
1863/* gigaset_chars_in_buffer
1864 * tty_driver.chars_in_buffer interface routine
1865 * return number of characters waiting to be sent
1866 * parameter:
1867 * controller state structure
1868 * return value:
1869 * number of characters
1870 */
1871static int gigaset_chars_in_buffer(struct cardstate *cs)
1872{
1873 unsigned long flags;
1874 unsigned bytes;
1875
1876 spin_lock_irqsave(&cs->cmdlock, flags);
1877 bytes = cs->cmdbytes;
1878 spin_unlock_irqrestore(&cs->cmdlock, flags);
1879
1880 return bytes;
1881}
1882
1883/* gigaset_brkchars
1884 * implementation of ioctl(GIGASET_BRKCHARS)
1885 * parameter:
1886 * controller state structure
1887 * return value:
1888 * -EINVAL (unimplemented function)
1889 */
1890static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
1891{
1892 return -EINVAL;
1893}
1894
1895
1896/* Device Initialization/Shutdown */
1897/* ============================== */
1898
1899/* Free hardware dependent part of the B channel structure
1900 * parameter:
1901 * bcs B channel structure
1902 * return value:
1903 * !=0 on success
1904 */
1905static int gigaset_freebcshw(struct bc_state *bcs)
1906{
1907 if (!bcs->hw.bas)
1908 return 0;
1909
1910 if (bcs->hw.bas->isooutbuf)
1911 kfree(bcs->hw.bas->isooutbuf);
1912 kfree(bcs->hw.bas);
1913 bcs->hw.bas = NULL;
1914 return 1;
1915}
1916
/* Initialize hardware dependent part of the B channel structure
 * Allocates the bas_bc_state and its isochronous output buffer, and sets
 * up both the output (sent) and input (rcvd) tasklets.
 * parameter:
 *	bcs	B channel structure
 * return value:
 *	!=0 on success
 */
static int gigaset_initbcshw(struct bc_state *bcs)
{
	int i;
	struct bas_bc_state *ubc;

	bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
	if (!ubc) {
		err("could not allocate bas_bc_state");
		return 0;
	}

	/* isochronous output side */
	atomic_set(&ubc->running, 0);
	atomic_set(&ubc->corrbytes, 0);
	spin_lock_init(&ubc->isooutlock);
	for (i = 0; i < BAS_OUTURBS; ++i) {
		ubc->isoouturbs[i].urb = NULL;	/* URBs allocated later in probe */
		ubc->isoouturbs[i].bcs = bcs;
	}
	ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
	ubc->numsub = 0;
	if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) {
		err("could not allocate isochronous output buffer");
		kfree(ubc);
		bcs->hw.bas = NULL;
		return 0;
	}
	tasklet_init(&ubc->sent_tasklet,
	             &write_iso_tasklet, (unsigned long) bcs);

	/* isochronous input side; counters start at zero */
	spin_lock_init(&ubc->isoinlock);
	for (i = 0; i < BAS_INURBS; ++i)
		ubc->isoinurbs[i] = NULL;
	ubc->isoindone = NULL;
	ubc->loststatus = -EINPROGRESS;
	ubc->isoinlost = 0;
	ubc->seqlen = 0;
	ubc->inbyte = 0;
	ubc->inbits = 0;
	/* statistics counters */
	ubc->goodbytes = 0;
	ubc->alignerrs = 0;
	ubc->fcserrs = 0;
	ubc->frameerrs = 0;
	ubc->giants = 0;
	ubc->runts = 0;
	ubc->aborts = 0;
	ubc->shared0s = 0;
	ubc->stolen0s = 0;
	tasklet_init(&ubc->rcvd_tasklet,
	             &read_iso_tasklet, (unsigned long) bcs);
	return 1;
}
1974
1975static void gigaset_reinitbcshw(struct bc_state *bcs)
1976{
1977 struct bas_bc_state *ubc = bcs->hw.bas;
1978
1979 atomic_set(&bcs->hw.bas->running, 0);
1980 atomic_set(&bcs->hw.bas->corrbytes, 0);
1981 bcs->hw.bas->numsub = 0;
1982 spin_lock_init(&ubc->isooutlock);
1983 spin_lock_init(&ubc->isoinlock);
1984 ubc->loststatus = -EINPROGRESS;
1985}
1986
1987static void gigaset_freecshw(struct cardstate *cs)
1988{
1989 struct bas_cardstate *ucs = cs->hw.bas;
1990
1991 del_timer(&ucs->timer_ctrl);
1992 del_timer(&ucs->timer_atrdy);
1993 del_timer(&ucs->timer_cmd_in);
1994
1995 kfree(cs->hw.bas);
1996}
1997
1998static int gigaset_initcshw(struct cardstate *cs)
1999{
2000 struct bas_cardstate *ucs;
2001
2002 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
2003 if (!ucs)
2004 return 0;
2005
2006 ucs->urb_cmd_in = NULL;
2007 ucs->urb_cmd_out = NULL;
2008 ucs->rcvbuf = NULL;
2009 ucs->rcvbuf_size = 0;
2010
2011 spin_lock_init(&ucs->lock);
2012 ucs->pending = 0;
2013
2014 atomic_set(&ucs->basstate, 0);
2015 init_timer(&ucs->timer_ctrl);
2016 init_timer(&ucs->timer_atrdy);
2017 init_timer(&ucs->timer_cmd_in);
2018
2019 return 1;
2020}
2021
/* freeurbs
 * unlink and deallocate all URBs unconditionally
 * caller must make sure that no commands are still in progress
 * parameter:
 *	cs	controller state structure
 */
static void freeurbs(struct cardstate *cs)
{
	struct bas_cardstate *ucs;
	struct bas_bc_state *ubc;
	int i, j;

	IFNULLRET(cs);
	ucs = cs->hw.bas;
	IFNULLRET(ucs);

	/* isochronous URBs of both B channels */
	for (j = 0; j < 2; ++j) {
		ubc = cs->bcs[j].hw.bas;
		IFNULLCONT(ubc);
		for (i = 0; i < BAS_OUTURBS; ++i)
			if (ubc->isoouturbs[i].urb) {
				/* kill (synchronously cancel) before freeing */
				usb_kill_urb(ubc->isoouturbs[i].urb);
				dbg(DEBUG_INIT,
				    "%s: isoc output URB %d/%d unlinked",
				    __func__, j, i);
				usb_free_urb(ubc->isoouturbs[i].urb);
				ubc->isoouturbs[i].urb = NULL;
			}
		for (i = 0; i < BAS_INURBS; ++i)
			if (ubc->isoinurbs[i]) {
				usb_kill_urb(ubc->isoinurbs[i]);
				dbg(DEBUG_INIT,
				    "%s: isoc input URB %d/%d unlinked",
				    __func__, j, i);
				usb_free_urb(ubc->isoinurbs[i]);
				ubc->isoinurbs[i] = NULL;
			}
	}
	/* interrupt and control pipe URBs of the common part */
	if (ucs->urb_int_in) {
		usb_kill_urb(ucs->urb_int_in);
		dbg(DEBUG_INIT, "%s: interrupt input URB unlinked", __func__);
		usb_free_urb(ucs->urb_int_in);
		ucs->urb_int_in = NULL;
	}
	if (ucs->urb_cmd_out) {
		usb_kill_urb(ucs->urb_cmd_out);
		dbg(DEBUG_INIT, "%s: command output URB unlinked", __func__);
		usb_free_urb(ucs->urb_cmd_out);
		ucs->urb_cmd_out = NULL;
	}
	if (ucs->urb_cmd_in) {
		usb_kill_urb(ucs->urb_cmd_in);
		dbg(DEBUG_INIT, "%s: command input URB unlinked", __func__);
		usb_free_urb(ucs->urb_cmd_in);
		ucs->urb_cmd_in = NULL;
	}
	if (ucs->urb_ctrl) {
		usb_kill_urb(ucs->urb_ctrl);
		dbg(DEBUG_INIT, "%s: control output URB unlinked", __func__);
		usb_free_urb(ucs->urb_ctrl);
		ucs->urb_ctrl = NULL;
	}
}
2085
/* gigaset_probe
 * This function is called when a new USB device is connected.
 * It checks whether the new device is handled by this driver,
 * switches it to the required alternate setting, allocates all URBs,
 * starts the interrupt pipe and announces the device to the common part.
 * return value:
 *	0 on success, -ENODEV otherwise
 */
static int gigaset_probe(struct usb_interface *interface,
                         const struct usb_device_id *id)
{
	struct usb_host_interface *hostif;
	struct usb_device *udev = interface_to_usbdev(interface);
	struct cardstate *cs = NULL;
	struct bas_cardstate *ucs = NULL;
	struct bas_bc_state *ubc;
	struct usb_endpoint_descriptor *endpoint;
	int i, j;
	int ret;

	IFNULLRETVAL(udev, -ENODEV);

	dbg(DEBUG_ANY,
	    "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
	    __func__, le16_to_cpu(udev->descriptor.idVendor),
	    le16_to_cpu(udev->descriptor.idProduct));

	/* See if the device offered us matches what we can accept */
	if ((le16_to_cpu(udev->descriptor.idVendor) != USB_GIGA_VENDOR_ID) ||
	    (le16_to_cpu(udev->descriptor.idProduct) != USB_GIGA_PRODUCT_ID &&
	     le16_to_cpu(udev->descriptor.idProduct) != USB_4175_PRODUCT_ID &&
	     le16_to_cpu(udev->descriptor.idProduct) != USB_SX303_PRODUCT_ID &&
	     le16_to_cpu(udev->descriptor.idProduct) != USB_SX353_PRODUCT_ID)) {
		dbg(DEBUG_ANY, "%s: unmatched ID - exiting", __func__);
		return -ENODEV;
	}

	/* set required alternate setting */
	hostif = interface->cur_altsetting;
	if (hostif->desc.bAlternateSetting != 3) {
		dbg(DEBUG_ANY,
		    "%s: wrong alternate setting %d - trying to switch",
		    __func__, hostif->desc.bAlternateSetting);
		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) {
			warn("usb_set_interface failed, device %d interface %d altsetting %d",
			     udev->devnum, hostif->desc.bInterfaceNumber,
			     hostif->desc.bAlternateSetting);
			return -ENODEV;
		}
		hostif = interface->cur_altsetting;
	}

	/* Reject application specific interfaces
	 */
	if (hostif->desc.bInterfaceClass != 255) {
		warn("%s: bInterfaceClass == %d",
		     __func__, hostif->desc.bInterfaceClass);
		return -ENODEV;
	}

	info("%s: Device matched (Vendor: 0x%x, Product: 0x%x)",
	     __func__, le16_to_cpu(udev->descriptor.idVendor),
	     le16_to_cpu(udev->descriptor.idProduct));

	/* claim an unused controller slot from the common part */
	cs = gigaset_getunassignedcs(driver);
	if (!cs) {
		err("%s: no free cardstate", __func__);
		return -ENODEV;
	}
	ucs = cs->hw.bas;
	ucs->udev = udev;
	ucs->interface = interface;

	/* allocate URBs:
	 * - one for the interrupt pipe
	 * - three for the different uses of the default control pipe
	 * - three for each isochronous pipe
	 */
	ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL);
	if (!ucs->urb_int_in) {
		err("No free urbs available");
		goto error;
	}
	ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL);
	if (!ucs->urb_cmd_in) {
		err("No free urbs available");
		goto error;
	}
	ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL);
	if (!ucs->urb_cmd_out) {
		err("No free urbs available");
		goto error;
	}
	ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL);
	if (!ucs->urb_ctrl) {
		err("No free urbs available");
		goto error;
	}

	for (j = 0; j < 2; ++j) {
		ubc = cs->bcs[j].hw.bas;
		for (i = 0; i < BAS_OUTURBS; ++i) {
			ubc->isoouturbs[i].urb =
				usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
			if (!ubc->isoouturbs[i].urb) {
				err("No free urbs available");
				goto error;
			}
		}
		for (i = 0; i < BAS_INURBS; ++i) {
			ubc->isoinurbs[i] =
				usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
			if (!ubc->isoinurbs[i]) {
				err("No free urbs available");
				goto error;
			}
		}
	}

	ucs->rcvbuf = NULL;
	ucs->rcvbuf_size = 0;

	/* Fill the interrupt urb and send it to the core */
	endpoint = &hostif->endpoint[0].desc;
	usb_fill_int_urb(ucs->urb_int_in, udev,
	                 usb_rcvintpipe(udev,
	                                (endpoint->bEndpointAddress) & 0x0f),
	                 ucs->int_in_buf, 3, read_int_callback, cs,
	                 endpoint->bInterval);
	ret = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL);
	if (ret) {
		err("could not submit interrupt URB: %s", get_usb_statmsg(ret));
		goto error;
	}

	/* tell the device that the driver is ready */
	if ((ret = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0)
		goto error;

	/* tell common part that the device is ready */
	if (startmode == SM_LOCKED)
		atomic_set(&cs->mstate, MS_LOCKED);
	if (!gigaset_start(cs))
		goto error;

	/* save address of controller structure */
	usb_set_intfdata(interface, cs);

	/* set up device sysfs */
	gigaset_init_dev_sysfs(interface);
	return 0;

error:
	/* release any URBs allocated so far and return the slot */
	freeurbs(cs);
	gigaset_unassign(cs);
	return -ENODEV;
}
2239
/* gigaset_disconnect
 * This function is called when the Gigaset base is unplugged.
 * It stops the common part, releases all URBs and buffers, and returns
 * the controller slot to the unassigned pool.
 */
static void gigaset_disconnect(struct usb_interface *interface)
{
	struct cardstate *cs;
	struct bas_cardstate *ucs;

	/* clear device sysfs */
	gigaset_free_dev_sysfs(interface);

	cs = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	IFNULLRET(cs);
	ucs = cs->hw.bas;
	IFNULLRET(ucs);

	info("disconnecting GigaSet base");
	/* stop common part first so no new transfers are started */
	gigaset_stop(cs);
	freeurbs(cs);
	kfree(ucs->rcvbuf);
	ucs->rcvbuf = NULL;
	ucs->rcvbuf_size = 0;
	atomic_set(&ucs->basstate, 0);
	gigaset_unassign(cs);
}
2267
/* hardware operations table handed to the common module;
 * entries are positional and must match the member order of
 * struct gigaset_ops */
static struct gigaset_ops gigops = {
	gigaset_write_cmd,
	gigaset_write_room,
	gigaset_chars_in_buffer,
	gigaset_brkchars,
	gigaset_init_bchannel,
	gigaset_close_bchannel,
	gigaset_initbcshw,
	gigaset_freebcshw,
	gigaset_reinitbcshw,
	gigaset_initcshw,
	gigaset_freecshw,
	gigaset_set_modem_ctrl,
	gigaset_baud_rate,
	gigaset_set_line_ctrl,
	gigaset_isoc_send_skb,
	gigaset_isoc_input,
};
2286
2287/* bas_gigaset_init
2288 * This function is called after the kernel module is loaded.
2289 */
2290static int __init bas_gigaset_init(void)
2291{
2292 int result;
2293
2294 /* allocate memory for our driver state and intialize it */
2295 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2296 GIGASET_MODULENAME, GIGASET_DEVNAME,
2297 GIGASET_DEVFSNAME, &gigops,
2298 THIS_MODULE)) == NULL)
2299 goto error;
2300
2301 /* allocate memory for our device state and intialize it */
2302 cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode, GIGASET_MODULENAME);
2303 if (!cardstate)
2304 goto error;
2305
2306 /* register this driver with the USB subsystem */
2307 result = usb_register(&gigaset_usb_driver);
2308 if (result < 0) {
2309 err("usb_register failed (error %d)", -result);
2310 goto error;
2311 }
2312
2313 info(DRIVER_AUTHOR);
2314 info(DRIVER_DESC);
2315 return 0;
2316
2317error: if (cardstate)
2318 gigaset_freecs(cardstate);
2319 cardstate = NULL;
2320 if (driver)
2321 gigaset_freedriver(driver);
2322 driver = NULL;
2323 return -1;
2324}
2325
/* bas_gigaset_exit
 * This function is called before the kernel module is unloaded.
 * It blocks further probes, shuts the device down, tries to close the
 * AT channel, deregisters from USB, and frees the module-global state.
 */
static void __exit bas_gigaset_exit(void)
{
	gigaset_blockdriver(driver); /* => probe will fail
	                              * => no gigaset_start any more
	                              */

	gigaset_shutdown(cardstate);
	/* from now on, no isdn callback should be possible */

	/* try to close the AT channel if it is still open */
	if (atomic_read(&cardstate->hw.bas->basstate) & BS_ATOPEN) {
		dbg(DEBUG_ANY, "closing AT channel");
		if (req_submit(cardstate->bcs,
		               HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT) >= 0) {
			/* successfully submitted - wait for completion */
			//wait_event_interruptible(cs->initwait, !cs->hw.bas->pending);
			//FIXME need own wait queue? wakeup?
		}
	}

	/* deregister this driver with the USB subsystem */
	usb_deregister(&gigaset_usb_driver);
	/* this will call the disconnect-callback */
	/* from now on, no disconnect/probe callback should be running */

	gigaset_freecs(cardstate);
	cardstate = NULL;
	gigaset_freedriver(driver);
	driver = NULL;
}
2358
2359
/* module entry/exit points and metadata */
module_init(bas_gigaset_init);
module_exit(bas_gigaset_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
new file mode 100644
index 000000000000..64371995c1a9
--- /dev/null
+++ b/drivers/isdn/gigaset/common.c
@@ -0,0 +1,1203 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: common.c,v 1.104.4.22 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/ctype.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24
/* Version Information */
#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers <Eilers.Stefan@epost.de>"
#define DRIVER_DESC "Driver for Gigaset 307x"

/* Module parameters */
/* debug level, runtime adjustable via the "debug" module parameter */
int gigaset_debuglevel = DEBUG_DEFAULT;
EXPORT_SYMBOL_GPL(gigaset_debuglevel);
module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, "debug level");
34
/*======================================================================
  Prototypes of internal functions
 */

//static void gigaset_process_response(int resp_code, int parameter,
//                                     struct at_state_t *at_state,
//                                     unsigned char ** pstring);
static struct cardstate *alloc_cs(struct gigaset_driver *drv);
static void free_cs(struct cardstate *cs);
static void make_valid(struct cardstate *cs, unsigned mask);
static void make_invalid(struct cardstate *cs, unsigned mask);

/* flag bits for the per-slot validity mask (see make_valid/make_invalid) */
#define VALID_MINOR 0x01
#define VALID_ID 0x02
#define ASSIGNED 0x04
50
/* bitwise byte inversion table:
 * gigaset_invtab[b] is b with its bit order reversed (MSB<->LSB) */
__u8 gigaset_invtab[256] = {
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
};
EXPORT_SYMBOL_GPL(gigaset_invtab);
87
/* gigaset_dbg_buffer
 * dump a data buffer to the debug log in printable form
 * Control characters (< 32) are rendered in caret notation ('^' + '@'+c);
 * output is limited to the size of a 80 byte line buffer.
 * parameters:
 *	level		debug level to log at
 *	msg		label to prefix the dump with
 *	len		number of bytes in buf
 *	buf		data to dump
 *	from_user	!=0: buf is a userspace pointer and is copied in first
 */
void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
                        size_t len, const unsigned char *buf, int from_user)
{
	unsigned char outbuf[80];
	unsigned char inbuf[80 - 1];
	size_t numin;
	const unsigned char *in;
	size_t space = sizeof outbuf - 1;	/* reserve room for NUL */
	unsigned char *out = outbuf;

	if (!from_user) {
		in = buf;
		numin = len;
	} else {
		/* copy (a truncated view of) the user buffer;
		 * on fault, substitute a "<FAULT>" marker */
		numin = len < sizeof inbuf ? len : sizeof inbuf;
		in = inbuf;
		if (copy_from_user(inbuf, (const unsigned char __user *) buf, numin)) {
			strncpy(inbuf, "<FAULT>", sizeof inbuf);
			numin = sizeof "<FAULT>" - 1;
		}
	}

	for (; numin && space; --numin, ++in) {
		--space;
		if (*in >= 32)
			*out++ = *in;
		else {
			/* caret notation, e.g. 0x0d -> "^M" */
			*out++ = '^';
			if (space) {
				*out++ = '@' + *in;
				--space;
			}
		}
	}
	*out = 0;

	dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
}
EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
127
128static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
129{
130 int r;
131
132 r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
133 cs->control_state = flags;
134 if (r < 0)
135 return r;
136
137 if (delay) {
138 set_current_state(TASK_INTERRUPTIBLE);
139 schedule_timeout(delay * HZ / 1000);
140 }
141
142 return 0;
143}
144
/* gigaset_enterconfigmode
 * switch the device into configuration mode by toggling the DTR/RTS
 * modem control lines in a fixed timed sequence
 * NOTE(review): the exact sequence/timing appears to be a device-specific
 * magic handshake - confirm against device documentation before changing.
 * parameter:
 *	cs	controller state structure
 * return value:
 *	0 on success, -1 on error
 */
int gigaset_enterconfigmode(struct cardstate *cs)
{
	int i, r;

	if (!atomic_read(&cs->connected)) {
		err("not connected!");
		return -1;
	}

	cs->control_state = TIOCM_RTS; //FIXME

	r = setflags(cs, TIOCM_DTR, 200);
	if (r < 0)
		goto error;
	r = setflags(cs, 0, 200);
	if (r < 0)
		goto error;
	/* pulse RTS five times */
	for (i = 0; i < 5; ++i) {
		r = setflags(cs, TIOCM_RTS, 100);
		if (r < 0)
			goto error;
		r = setflags(cs, 0, 100);
		if (r < 0)
			goto error;
	}
	r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800);
	if (r < 0)
		goto error;

	return 0;

error:
	err("error %d on setuartbits!\n", -r);
	/* try to restore both lines to a sane state */
	cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value?
	cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);

	return -1; //r
}
183
/* test_timeout
 * decrement the tick countdown of one AT state and queue an EV_TIMEOUT
 * event when it reaches zero
 * parameter:
 *	at_state	AT state structure to check
 * return value:
 *	1 if a timeout fired, 0 otherwise
 */
static int test_timeout(struct at_state_t *at_state)
{
	/* 0 means no timeout armed */
	if (!at_state->timer_expires)
		return 0;

	if (--at_state->timer_expires) {
		dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
		    at_state, at_state->timer_expires);
		return 0;
	}

	/* countdown hit zero: report it to the event queue */
	if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
	                       atomic_read(&at_state->timer_index), NULL)) {
		//FIXME what should we do?
	}

	return 1;
}
202
/* timer_tick
 * timer callback, called once per GIG_TICK while the card is running:
 * advances all AT state timeouts, re-arms itself and schedules the event
 * tasklet if any timeout fired
 * parameter:
 *	data	controller state structure, cast to unsigned long
 */
static void timer_tick(unsigned long data)
{
	struct cardstate *cs = (struct cardstate *) data;
	unsigned long flags;
	unsigned channel;
	struct at_state_t *at_state;
	int timeout = 0;

	spin_lock_irqsave(&cs->lock, flags);

	/* per-channel, global, and temporary AT states all get a tick */
	for (channel = 0; channel < cs->channels; ++channel)
		if (test_timeout(&cs->bcs[channel].at_state))
			timeout = 1;

	if (test_timeout(&cs->at_state))
		timeout = 1;

	list_for_each_entry(at_state, &cs->temp_at_states, list)
		if (test_timeout(at_state))
			timeout = 1;

	/* re-arm only while the card is running; stopping the card
	 * therefore lets the timer expire for good */
	if (atomic_read(&cs->running)) {
		mod_timer(&cs->timer, jiffies + GIG_TICK);
		if (timeout) {
			dbg(DEBUG_CMD, "scheduling timeout");
			tasklet_schedule(&cs->event_tasklet);
		}
	}

	spin_unlock_irqrestore(&cs->lock, flags);
}
234
235int gigaset_get_channel(struct bc_state *bcs)
236{
237 unsigned long flags;
238
239 spin_lock_irqsave(&bcs->cs->lock, flags);
240 if (bcs->use_count) {
241 dbg(DEBUG_ANY, "could not allocate channel %d", bcs->channel);
242 spin_unlock_irqrestore(&bcs->cs->lock, flags);
243 return 0;
244 }
245 ++bcs->use_count;
246 bcs->busy = 1;
247 dbg(DEBUG_ANY, "allocated channel %d", bcs->channel);
248 spin_unlock_irqrestore(&bcs->cs->lock, flags);
249 return 1;
250}
251
252void gigaset_free_channel(struct bc_state *bcs)
253{
254 unsigned long flags;
255
256 spin_lock_irqsave(&bcs->cs->lock, flags);
257 if (!bcs->busy) {
258 dbg(DEBUG_ANY, "could not free channel %d", bcs->channel);
259 spin_unlock_irqrestore(&bcs->cs->lock, flags);
260 return;
261 }
262 --bcs->use_count;
263 bcs->busy = 0;
264 dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
265 spin_unlock_irqrestore(&bcs->cs->lock, flags);
266}
267
268int gigaset_get_channels(struct cardstate *cs)
269{
270 unsigned long flags;
271 int i;
272
273 spin_lock_irqsave(&cs->lock, flags);
274 for (i = 0; i < cs->channels; ++i)
275 if (cs->bcs[i].use_count) {
276 spin_unlock_irqrestore(&cs->lock, flags);
277 dbg(DEBUG_ANY, "could not allocated all channels");
278 return 0;
279 }
280 for (i = 0; i < cs->channels; ++i)
281 ++cs->bcs[i].use_count;
282 spin_unlock_irqrestore(&cs->lock, flags);
283
284 dbg(DEBUG_ANY, "allocated all channels");
285
286 return 1;
287}
288
289void gigaset_free_channels(struct cardstate *cs)
290{
291 unsigned long flags;
292 int i;
293
294 dbg(DEBUG_ANY, "unblocking all channels");
295 spin_lock_irqsave(&cs->lock, flags);
296 for (i = 0; i < cs->channels; ++i)
297 --cs->bcs[i].use_count;
298 spin_unlock_irqrestore(&cs->lock, flags);
299}
300
301void gigaset_block_channels(struct cardstate *cs)
302{
303 unsigned long flags;
304 int i;
305
306 dbg(DEBUG_ANY, "blocking all channels");
307 spin_lock_irqsave(&cs->lock, flags);
308 for (i = 0; i < cs->channels; ++i)
309 ++cs->bcs[i].use_count;
310 spin_unlock_irqrestore(&cs->lock, flags);
311}
312
313static void clear_events(struct cardstate *cs)
314{
315 struct event_t *ev;
316 unsigned head, tail;
317
318 /* no locking needed (no reader/writer allowed) */
319
320 head = atomic_read(&cs->ev_head);
321 tail = atomic_read(&cs->ev_tail);
322
323 while (tail != head) {
324 ev = cs->events + head;
325 kfree(ev->ptr);
326
327 head = (head + 1) % MAX_EVENTS;
328 }
329
330 atomic_set(&cs->ev_head, tail);
331}
332
/* gigaset_add_event
 * add an event to the controller's event ring buffer
 * parameters:
 *	cs		controller state structure
 *	at_state	AT state the event refers to
 *	type		event type (EV_* constant)
 *	ptr, parameter, arg	event payload
 * return value:
 *	pointer to the queued event, or NULL if the ring buffer is full
 */
struct event_t *gigaset_add_event(struct cardstate *cs,
                                  struct at_state_t *at_state, int type,
                                  void *ptr, int parameter, void *arg)
{
	unsigned long flags;
	unsigned next, tail;
	struct event_t *event = NULL;

	spin_lock_irqsave(&cs->ev_lock, flags);

	/* one slot is kept free so that head == tail means "empty" */
	tail = atomic_read(&cs->ev_tail);
	next = (tail + 1) % MAX_EVENTS;
	if (unlikely(next == atomic_read(&cs->ev_head)))
		err("event queue full");
	else {
		event = cs->events + tail;
		event->type = type;
		event->at_state = at_state;
		event->cid = -1;
		event->ptr = ptr;
		event->arg = arg;
		event->parameter = parameter;
		atomic_set(&cs->ev_tail, next);
	}

	spin_unlock_irqrestore(&cs->ev_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(gigaset_add_event);
363
364static void free_strings(struct at_state_t *at_state)
365{
366 int i;
367
368 for (i = 0; i < STR_NUM; ++i) {
369 kfree(at_state->str_var[i]);
370 at_state->str_var[i] = NULL;
371 }
372}
373
/* release all dynamically allocated resources of an AT state;
 * currently only the string variables need freeing */
static void clear_at_state(struct at_state_t *at_state)
{
	free_strings(at_state);
}
378
/* free all temporary AT states of the controller, including the
 * structures themselves (they are individually kmalloc'ed, unlike the
 * per-channel and global AT states embedded in other structures) */
static void dealloc_at_states(struct cardstate *cs)
{
	struct at_state_t *cur, *next;

	/* _safe variant: entries are removed while iterating */
	list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
		list_del(&cur->list);
		free_strings(cur);
		kfree(cur);
	}
}
389
/* free all resources of one B channel structure:
 * hardware dependent part, AT state, receive skb and command strings */
static void gigaset_freebcs(struct bc_state *bcs)
{
	int i;

	dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
	if (!bcs->cs->ops->freebcshw(bcs)) {
		dbg(DEBUG_INIT, "failed");
	}

	dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
	clear_at_state(&bcs->at_state);
	dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);

	if (bcs->skb)
		dev_kfree_skb(bcs->skb);
	/* release any pending AT command strings */
	for (i = 0; i < AT_NUM; ++i) {
		kfree(bcs->commands[i]);
		bcs->commands[i] = NULL;
	}
}
410
/* gigaset_freecs
 * release a controller state structure and everything it owns
 * The switch on cs->cs_init unwinds initialization in reverse: each case
 * falls through to the cleanup steps of all earlier init stages.
 * parameter:
 *	cs	controller state structure (NULL is allowed)
 */
void gigaset_freecs(struct cardstate *cs)
{
	int i;
	unsigned long flags;

	if (!cs)
		return;

	down(&cs->sem);

	if (!cs->bcs)
		goto f_cs;
	if (!cs->inbuf)
		goto f_bcs;

	spin_lock_irqsave(&cs->lock, flags);
	atomic_set(&cs->running, 0);
	spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */

	tasklet_kill(&cs->event_tasklet);
	del_timer_sync(&cs->timer);

	switch (cs->cs_init) {
	default:
		gigaset_if_free(cs);

		dbg(DEBUG_INIT, "clearing hw");
		cs->ops->freecshw(cs);

		//FIXME cmdbuf

		/* fall through */
	case 2: /* error in initcshw */
		/* Deregister from LL */
		make_invalid(cs, VALID_ID);
		dbg(DEBUG_INIT, "clearing iif");
		gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);

		/* fall through */
	case 1: /* error when registering to LL */
		dbg(DEBUG_INIT, "clearing at_state");
		clear_at_state(&cs->at_state);
		dealloc_at_states(cs);

		/* fall through */
	case 0: /* error in one call to initbcs */
		for (i = 0; i < cs->channels; ++i) {
			dbg(DEBUG_INIT, "clearing bcs[%d]", i);
			gigaset_freebcs(cs->bcs + i);
		}

		clear_events(cs);
		dbg(DEBUG_INIT, "freeing inbuf");
		kfree(cs->inbuf);
	}
f_bcs:	dbg(DEBUG_INIT, "freeing bcs[]");
	kfree(cs->bcs);
f_cs:	dbg(DEBUG_INIT, "freeing cs");
	up(&cs->sem);
	free_cs(cs);
}
EXPORT_SYMBOL_GPL(gigaset_freecs);
473
/* gigaset_at_init
 * initialize an AT state structure
 * parameters:
 *	at_state	structure to initialize
 *	bcs		associated B channel structure (may be NULL)
 *	cs		controller state structure
 *	cid		call ID; 0 selects the no-CID reply table
 */
void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
                     struct cardstate *cs, int cid)
{
	int i;

	INIT_LIST_HEAD(&at_state->list);
	at_state->waiting = 0;
	at_state->getstring = 0;
	at_state->pending_commands = 0;
	at_state->timer_expires = 0;	/* no timeout armed */
	at_state->timer_active = 0;
	atomic_set(&at_state->timer_index, 0);
	atomic_set(&at_state->seq_index, 0);
	at_state->ConState = 0;
	for (i = 0; i < STR_NUM; ++i)
		at_state->str_var[i] = NULL;
	at_state->int_var[VAR_ZDLE] = 0;
	at_state->int_var[VAR_ZCTP] = -1;
	at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
	at_state->cs = cs;
	at_state->bcs = bcs;
	at_state->cid = cid;
	/* pick the response table matching the CID mode */
	if (!cid)
		at_state->replystruct = cs->tabnocid;
	else
		at_state->replystruct = cs->tabcid;
}
501
502
503static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
504 struct cardstate *cs, int inputstate)
505/* inbuf->read must be allocated before! */
506{
507 atomic_set(&inbuf->head, 0);
508 atomic_set(&inbuf->tail, 0);
509 inbuf->cs = cs;
510 inbuf->bcs = bcs; /*base driver: NULL*/
511 inbuf->rcvbuf = NULL; //FIXME
512 inbuf->inputstate = inputstate;
513}
514
/* Initialize the b-channel structure
 * Sets up queues, AT state, receive skb and the hardware dependent part.
 * parameters:
 *	bcs	B channel structure to initialize
 *	cs	controller state structure
 *	channel	channel number
 * return value:
 *	bcs on success, NULL if the hardware dependent init failed
 */
static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
                                        struct cardstate *cs, int channel)
{
	int i;

	bcs->tx_skb = NULL; //FIXME -> hw part

	skb_queue_head_init(&bcs->squeue);

	bcs->corrupted = 0;
	bcs->trans_down = 0;
	bcs->trans_up = 0;

	dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
	gigaset_at_init(&bcs->at_state, bcs, cs, -1);

	bcs->rcvbytes = 0;

#ifdef CONFIG_GIGASET_DEBUG
	bcs->emptycount = 0;
#endif

	dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel);
	bcs->fcs = PPP_INITFCS;
	bcs->inputstate = 0;
	/* when frames are to be skipped, no receive skb is needed;
	 * a failed skb allocation also falls back to skipping */
	if (cs->ignoreframes) {
		bcs->inputstate |= INS_skip_frame;
		bcs->skb = NULL;
	} else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
		skb_reserve(bcs->skb, HW_HDR_LEN);
	else {
		warn("could not allocate skb");
		bcs->inputstate |= INS_skip_frame;
	}

	bcs->channel = channel;
	bcs->cs = cs;

	bcs->chstate = 0;
	bcs->use_count = 1;	/* channel starts out blocked */
	bcs->busy = 0;
	bcs->ignore = cs->ignoreframes;

	for (i = 0; i < AT_NUM; ++i)
		bcs->commands[i] = NULL;

	dbg(DEBUG_INIT, "  setting up bcs[%d]->hw", channel);
	if (cs->ops->initbcshw(bcs))
		return bcs;

//error:
	dbg(DEBUG_INIT, "  failed");

	dbg(DEBUG_INIT, "  freeing bcs[%d]->skb", channel);
	if (bcs->skb)
		dev_kfree_skb(bcs->skb);

	return NULL;
}
575
576/* gigaset_initcs
577 * Allocate and initialize cardstate structure for Gigaset driver
578 * Calls hardware dependent gigaset_initcshw() function
579 * Calls B channel initialization function gigaset_initbcs() for each B channel
580 * parameters:
581 * drv hardware driver the device belongs to
582 * channels number of B channels supported by device
583 * onechannel !=0: B channel data and AT commands share one communication channel
584 * ==0: B channels have separate communication channels
585 * ignoreframes number of frames to ignore after setting up B channel
586 * cidmode !=0: start in CallID mode
587 * modulename name of driver module (used for I4L registration)
588 * return value:
589 * pointer to cardstate structure
590 */
struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
				 int onechannel, int ignoreframes,
				 int cidmode, const char *modulename)
{
	struct cardstate *cs = NULL;
	int i;

	dbg(DEBUG_INIT, "allocating cs");
	cs = alloc_cs(drv);
	if (!cs)
		goto error;
	dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
	cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
	if (!cs->bcs)
		goto error;
	dbg(DEBUG_INIT, "allocating inbuf");
	cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
	if (!cs->inbuf)
		goto error;

	/* cs_init counts how far initialization got; presumably consulted by
	 * gigaset_freecs() to know how much to undo -- TODO confirm */
	cs->cs_init = 0;
	cs->channels = channels;
	cs->onechannel = onechannel;
	cs->ignoreframes = ignoreframes;
	INIT_LIST_HEAD(&cs->temp_at_states);
	atomic_set(&cs->running, 0);
	init_timer(&cs->timer); /* clear next & prev */
	spin_lock_init(&cs->ev_lock);
	atomic_set(&cs->ev_tail, 0);
	atomic_set(&cs->ev_head, 0);
	/* sem starts locked; released by up() at the end of this function */
	init_MUTEX_LOCKED(&cs->sem);
	tasklet_init(&cs->event_tasklet, &gigaset_handle_event, (unsigned long) cs);
	atomic_set(&cs->commands_pending, 0);
	cs->cur_at_seq = 0;
	cs->gotfwver = -1;
	cs->open_count = 0;
	cs->tty = NULL;
	atomic_set(&cs->cidmode, cidmode != 0);

	//if(onechannel) { //FIXME
	cs->tabnocid = gigaset_tab_nocid_m10x;
	cs->tabcid = gigaset_tab_cid_m10x;
	//} else {
	//	cs->tabnocid = gigaset_tab_nocid;
	//	cs->tabcid = gigaset_tab_cid;
	//}

	init_waitqueue_head(&cs->waitqueue);
	cs->waiting = 0;

	atomic_set(&cs->mode, M_UNKNOWN);
	atomic_set(&cs->mstate, MS_UNINITIALIZED);

	/* set up the B channel structures */
	for (i = 0; i < channels; ++i) {
		dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
		if (!gigaset_initbcs(cs->bcs + i, cs, i))
			goto error;
	}

	++cs->cs_init;

	dbg(DEBUG_INIT, "setting up at_state");
	spin_lock_init(&cs->lock);
	gigaset_at_init(&cs->at_state, NULL, cs, 0);
	cs->dle = 0;
	cs->cbytes = 0;

	dbg(DEBUG_INIT, "setting up inbuf");
	if (onechannel) {			//FIXME distinction necessary?
		gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
	} else
		gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);

	atomic_set(&cs->connected, 0);

	dbg(DEBUG_INIT, "setting up cmdbuf");
	cs->cmdbuf = cs->lastcmdbuf = NULL;
	spin_lock_init(&cs->cmdlock);
	cs->curlen = 0;
	cs->cmdbytes = 0;

	/*
	 * Tell the ISDN4Linux subsystem (the LL) that
	 * a driver for a USB-Device is available !
	 * If this is done, "isdnctrl" is able to bind a device for this driver even
	 * if no physical usb-device is currently connected.
	 * But this device will just be accessable if a physical USB device is connected
	 * (via "gigaset_probe") .
	 */
	dbg(DEBUG_INIT, "setting up iif");
	if (!gigaset_register_to_LL(cs, modulename)) {
		err("register_isdn=>error");
		goto error;
	}

	make_valid(cs, VALID_ID);
	++cs->cs_init;
	dbg(DEBUG_INIT, "setting up hw");
	if (!cs->ops->initcshw(cs))
		goto error;

	++cs->cs_init;

	gigaset_if_init(cs);

	/* start the timer tick */
	atomic_set(&cs->running, 1);
	cs->timer.data = (unsigned long) cs;
	cs->timer.function = timer_tick;
	cs->timer.expires = jiffies + GIG_TICK;
	/* FIXME: can jiffies increase too much until the timer is added?
	 * Same problem(?) with mod_timer() in timer_tick(). */
	add_timer(&cs->timer);

	dbg(DEBUG_INIT, "cs initialized!");
	up(&cs->sem);
	return cs;

	/* NOTE(review): an early goto error (from the kmalloc failures above)
	 * reaches this up() before init_MUTEX_LOCKED() has run, i.e. on an
	 * uninitialized semaphore -- confirm and restructure */
error:	if (cs)
		up(&cs->sem);
	dbg(DEBUG_INIT, "failed");
	gigaset_freecs(cs);
	return NULL;
}
714EXPORT_SYMBOL_GPL(gigaset_initcs);
715
/* ReInitialize the b-channel structure */ /* e.g. called on hangup, disconnect */
void gigaset_bcs_reinit(struct bc_state *bcs)
{
	struct sk_buff *skb;
	struct cardstate *cs = bcs->cs;
	unsigned long flags;

	/* discard all not yet transmitted packets */
	while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
		dev_kfree_skb(skb);

	/* reset the per-channel AT state under the cardstate lock */
	spin_lock_irqsave(&cs->lock, flags); //FIXME
	clear_at_state(&bcs->at_state);
	bcs->at_state.ConState = 0;
	bcs->at_state.timer_active = 0;
	bcs->at_state.timer_expires = 0;
	bcs->at_state.cid = -1;			/* No CID defined */
	spin_unlock_irqrestore(&cs->lock, flags);

	bcs->inputstate = 0;

#ifdef CONFIG_GIGASET_DEBUG
	bcs->emptycount = 0;
#endif

	bcs->fcs = PPP_INITFCS;
	bcs->chstate = 0;

	/* skip the configured number of initial frames again */
	bcs->ignore = cs->ignoreframes;
	if (bcs->ignore)
		bcs->inputstate |= INS_skip_frame;


	/* hardware specific part of the reinitialization */
	cs->ops->reinitbcshw(bcs);
}
750
/* cleanup_cs
 * Reset the cardstate to its pristine, unconfigured state: clear the AT
 * state, receive buffer, command queue and counters under cs->lock, then
 * reinitialize all B channels and wake up any synchronous waiter with
 * -ENODEV.
 */
static void cleanup_cs(struct cardstate *cs)
{
	struct cmdbuf_t *cb, *tcb;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cs->lock, flags);

	atomic_set(&cs->mode, M_UNKNOWN);
	atomic_set(&cs->mstate, MS_UNINITIALIZED);

	/* reset the device-level AT state machine */
	clear_at_state(&cs->at_state);
	dealloc_at_states(cs);
	free_strings(&cs->at_state);
	gigaset_at_init(&cs->at_state, NULL, cs, 0);

	/* reset the input buffer */
	kfree(cs->inbuf->rcvbuf);
	cs->inbuf->rcvbuf = NULL;
	cs->inbuf->inputstate = INS_command;
	atomic_set(&cs->inbuf->head, 0);
	atomic_set(&cs->inbuf->tail, 0);

	/* free all queued command buffers */
	cb = cs->cmdbuf;
	while (cb) {
		tcb = cb;
		cb = cb->next;
		kfree(tcb);
	}
	cs->cmdbuf = cs->lastcmdbuf = NULL;
	cs->curlen = 0;
	cs->cmdbytes = 0;
	cs->gotfwver = -1;
	cs->dle = 0;
	cs->cur_at_seq = 0;
	atomic_set(&cs->commands_pending, 0);
	cs->cbytes = 0;

	spin_unlock_irqrestore(&cs->lock, flags);

	/* tear down and rebuild all B channel structures */
	for (i = 0; i < cs->channels; ++i) {
		gigaset_freebcs(cs->bcs + i);
		if (!gigaset_initbcs(cs->bcs + i, cs, i))
			break;			//FIXME error handling
	}

	/* abort any pending synchronous command wait */
	if (cs->waiting) {
		cs->cmd_result = -ENODEV;
		cs->waiting = 0;
		wake_up_interruptible(&cs->waitqueue);
	}
}
802
803
/* gigaset_start
 * Called when the physical device becomes available: marks the device as
 * connected, restores the default line settings (unless in locked mode),
 * queues an EV_START event and waits until the event layer has processed
 * it.
 * return value:
 *	1 on success, 0 on failure (interrupted while acquiring cs->sem,
 *	or the event could not be queued)
 */
int gigaset_start(struct cardstate *cs)
{
	if (down_interruptible(&cs->sem))
		return 0;
	//info("USB device for Gigaset 307x now attached to Dev %d", ucs->minor);

	atomic_set(&cs->connected, 1);

	if (atomic_read(&cs->mstate) != MS_LOCKED) {
		/* restore default serial line parameters */
		cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
		cs->ops->baud_rate(cs, B115200);
		cs->ops->set_line_ctrl(cs, CS8);
		cs->control_state = TIOCM_DTR|TIOCM_RTS;
	} else {
		//FIXME use some saved values?
	}

	cs->waiting = 1;

	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
		cs->waiting = 0;
		//FIXME what should we do?
		goto error;
	}

	dbg(DEBUG_CMD, "scheduling START");
	gigaset_schedule_event(cs);

	/* block until the event layer clears cs->waiting */
	wait_event(cs->waitqueue, !cs->waiting);

	up(&cs->sem);
	return 1;

error:
	up(&cs->sem);
	return 0;
}
841EXPORT_SYMBOL_GPL(gigaset_start);
842
/* gigaset_shutdown
 * Shut the device down: queue an EV_SHUTDOWN event, wait for the event
 * layer to process it, then reset the cardstate via cleanup_cs().
 */
void gigaset_shutdown(struct cardstate *cs)
{
	down(&cs->sem);

	cs->waiting = 1;

	if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
		//FIXME what should we do?
		goto exit;
	}

	dbg(DEBUG_CMD, "scheduling SHUTDOWN");
	gigaset_schedule_event(cs);

	/* wait for completion; may be interrupted by a signal */
	if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
		warn("aborted");
		//FIXME
	}

	if (atomic_read(&cs->mstate) != MS_LOCKED) {
		//FIXME?
		//gigaset_baud_rate(cs, B115200);
		//gigaset_set_line_ctrl(cs, CS8);
		//gigaset_set_modem_ctrl(cs, TIOCM_DTR|TIOCM_RTS, 0);
		//cs->control_state = 0;
	} else {
		//FIXME use some saved values?
	}

	cleanup_cs(cs);

exit:
	up(&cs->sem);
}
877EXPORT_SYMBOL_GPL(gigaset_shutdown);
878
/* gigaset_stop
 * Called when the physical device is disconnected: marks the device as no
 * longer connected, queues an EV_STOP event and waits for its processing,
 * informs the ISDN4Linux subsystem, then resets the cardstate.
 */
void gigaset_stop(struct cardstate *cs)
{
	down(&cs->sem);

	atomic_set(&cs->connected, 0);

	cs->waiting = 1;

	if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
		//FIXME what should we do?
		goto exit;
	}

	dbg(DEBUG_CMD, "scheduling STOP");
	gigaset_schedule_event(cs);

	/* wait for completion; may be interrupted by a signal */
	if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
		warn("aborted");
		//FIXME
	}

	/* Tell the LL that the device is not available .. */
	gigaset_i4l_cmd(cs, ISDN_STAT_STOP); // FIXME move to event layer?

	cleanup_cs(cs);

exit:
	up(&cs->sem);
}
908EXPORT_SYMBOL_GPL(gigaset_stop);
909
/* global list of all registered gigaset drivers, protected by driver_lock */
static LIST_HEAD(drivers);
static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
912
913struct cardstate *gigaset_get_cs_by_id(int id)
914{
915 unsigned long flags;
916 static struct cardstate *ret = NULL;
917 static struct cardstate *cs;
918 struct gigaset_driver *drv;
919 unsigned i;
920
921 spin_lock_irqsave(&driver_lock, flags);
922 list_for_each_entry(drv, &drivers, list) {
923 spin_lock(&drv->lock);
924 for (i = 0; i < drv->minors; ++i) {
925 if (drv->flags[i] & VALID_ID) {
926 cs = drv->cs + i;
927 if (cs->myid == id)
928 ret = cs;
929 }
930 if (ret)
931 break;
932 }
933 spin_unlock(&drv->lock);
934 if (ret)
935 break;
936 }
937 spin_unlock_irqrestore(&driver_lock, flags);
938 return ret;
939}
940
941void gigaset_debugdrivers(void)
942{
943 unsigned long flags;
944 static struct cardstate *cs;
945 struct gigaset_driver *drv;
946 unsigned i;
947
948 spin_lock_irqsave(&driver_lock, flags);
949 list_for_each_entry(drv, &drivers, list) {
950 dbg(DEBUG_DRIVER, "driver %p", drv);
951 spin_lock(&drv->lock);
952 for (i = 0; i < drv->minors; ++i) {
953 dbg(DEBUG_DRIVER, " index %u", i);
954 dbg(DEBUG_DRIVER, " flags 0x%02x", drv->flags[i]);
955 cs = drv->cs + i;
956 dbg(DEBUG_DRIVER, " cardstate %p", cs);
957 dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index);
958 dbg(DEBUG_DRIVER, " driver %p", cs->driver);
959 dbg(DEBUG_DRIVER, " i4l id %d", cs->myid);
960 }
961 spin_unlock(&drv->lock);
962 }
963 spin_unlock_irqrestore(&driver_lock, flags);
964}
965EXPORT_SYMBOL_GPL(gigaset_debugdrivers);
966
967struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
968{
969 if (tty->index < 0 || tty->index >= tty->driver->num)
970 return NULL;
971 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
972}
973
974struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
975{
976 unsigned long flags;
977 static struct cardstate *ret = NULL;
978 struct gigaset_driver *drv;
979 unsigned index;
980
981 spin_lock_irqsave(&driver_lock, flags);
982 list_for_each_entry(drv, &drivers, list) {
983 if (minor < drv->minor || minor >= drv->minor + drv->minors)
984 continue;
985 index = minor - drv->minor;
986 spin_lock(&drv->lock);
987 if (drv->flags[index] & VALID_MINOR)
988 ret = drv->cs + index;
989 spin_unlock(&drv->lock);
990 if (ret)
991 break;
992 }
993 spin_unlock_irqrestore(&driver_lock, flags);
994 return ret;
995}
996
997void gigaset_freedriver(struct gigaset_driver *drv)
998{
999 unsigned long flags;
1000
1001 spin_lock_irqsave(&driver_lock, flags);
1002 list_del(&drv->list);
1003 spin_unlock_irqrestore(&driver_lock, flags);
1004
1005 gigaset_if_freedriver(drv);
1006 module_put(drv->owner);
1007
1008 kfree(drv->cs);
1009 kfree(drv->flags);
1010 kfree(drv);
1011}
1012EXPORT_SYMBOL_GPL(gigaset_freedriver);
1013
1014/* gigaset_initdriver
1015 * Allocate and initialize gigaset_driver structure. Initialize interface.
1016 * parameters:
1017 * minor First minor number
1018 * minors Number of minors this driver can handle
1019 * procname Name of the driver (e.g. for /proc/tty/drivers, path in /proc/driver)
1020 * devname Name of the device files (prefix without minor number)
1021 * devfsname Devfs name of the device files without %d
1022 * return value:
1023 * Pointer to the gigaset_driver structure on success, NULL on failure.
1024 */
1025struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
1026 const char *procname,
1027 const char *devname,
1028 const char *devfsname,
1029 const struct gigaset_ops *ops,
1030 struct module *owner)
1031{
1032 struct gigaset_driver *drv;
1033 unsigned long flags;
1034 unsigned i;
1035
1036 drv = kmalloc(sizeof *drv, GFP_KERNEL);
1037 if (!drv)
1038 return NULL;
1039 if (!try_module_get(owner))
1040 return NULL;
1041
1042 drv->cs = NULL;
1043 drv->have_tty = 0;
1044 drv->minor = minor;
1045 drv->minors = minors;
1046 spin_lock_init(&drv->lock);
1047 drv->blocked = 0;
1048 drv->ops = ops;
1049 drv->owner = owner;
1050 INIT_LIST_HEAD(&drv->list);
1051
1052 drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL);
1053 if (!drv->cs)
1054 goto out1;
1055 drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL);
1056 if (!drv->flags)
1057 goto out2;
1058
1059 for (i = 0; i < minors; ++i) {
1060 drv->flags[i] = 0;
1061 drv->cs[i].driver = drv;
1062 drv->cs[i].ops = drv->ops;
1063 drv->cs[i].minor_index = i;
1064 }
1065
1066 gigaset_if_initdriver(drv, procname, devname, devfsname);
1067
1068 spin_lock_irqsave(&driver_lock, flags);
1069 list_add(&drv->list, &drivers);
1070 spin_unlock_irqrestore(&driver_lock, flags);
1071
1072 return drv;
1073
1074out2:
1075 kfree(drv->cs);
1076out1:
1077 kfree(drv);
1078 module_put(owner);
1079 return NULL;
1080}
1081EXPORT_SYMBOL_GPL(gigaset_initdriver);
1082
1083static struct cardstate *alloc_cs(struct gigaset_driver *drv)
1084{
1085 unsigned long flags;
1086 unsigned i;
1087 static struct cardstate *ret = NULL;
1088
1089 spin_lock_irqsave(&drv->lock, flags);
1090 for (i = 0; i < drv->minors; ++i) {
1091 if (!(drv->flags[i] & VALID_MINOR)) {
1092 drv->flags[i] = VALID_MINOR;
1093 ret = drv->cs + i;
1094 }
1095 if (ret)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&drv->lock, flags);
1099 return ret;
1100}
1101
1102static void free_cs(struct cardstate *cs)
1103{
1104 unsigned long flags;
1105 struct gigaset_driver *drv = cs->driver;
1106 spin_lock_irqsave(&drv->lock, flags);
1107 drv->flags[cs->minor_index] = 0;
1108 spin_unlock_irqrestore(&drv->lock, flags);
1109}
1110
1111static void make_valid(struct cardstate *cs, unsigned mask)
1112{
1113 unsigned long flags;
1114 struct gigaset_driver *drv = cs->driver;
1115 spin_lock_irqsave(&drv->lock, flags);
1116 drv->flags[cs->minor_index] |= mask;
1117 spin_unlock_irqrestore(&drv->lock, flags);
1118}
1119
1120static void make_invalid(struct cardstate *cs, unsigned mask)
1121{
1122 unsigned long flags;
1123 struct gigaset_driver *drv = cs->driver;
1124 spin_lock_irqsave(&drv->lock, flags);
1125 drv->flags[cs->minor_index] &= ~mask;
1126 spin_unlock_irqrestore(&drv->lock, flags);
1127}
1128
1129/* For drivers without fixed assignment device<->cardstate (usb) */
1130struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv)
1131{
1132 unsigned long flags;
1133 struct cardstate *cs = NULL;
1134 unsigned i;
1135
1136 spin_lock_irqsave(&drv->lock, flags);
1137 if (drv->blocked)
1138 goto exit;
1139 for (i = 0; i < drv->minors; ++i) {
1140 if ((drv->flags[i] & VALID_MINOR) &&
1141 !(drv->flags[i] & ASSIGNED)) {
1142 drv->flags[i] |= ASSIGNED;
1143 cs = drv->cs + i;
1144 break;
1145 }
1146 }
1147exit:
1148 spin_unlock_irqrestore(&drv->lock, flags);
1149 return cs;
1150}
1151EXPORT_SYMBOL_GPL(gigaset_getunassignedcs);
1152
1153void gigaset_unassign(struct cardstate *cs)
1154{
1155 unsigned long flags;
1156 unsigned *minor_flags;
1157 struct gigaset_driver *drv;
1158
1159 if (!cs)
1160 return;
1161 drv = cs->driver;
1162 spin_lock_irqsave(&drv->lock, flags);
1163 minor_flags = drv->flags + cs->minor_index;
1164 if (*minor_flags & VALID_MINOR)
1165 *minor_flags &= ~ASSIGNED;
1166 spin_unlock_irqrestore(&drv->lock, flags);
1167}
1168EXPORT_SYMBOL_GPL(gigaset_unassign);
1169
1170void gigaset_blockdriver(struct gigaset_driver *drv)
1171{
1172 unsigned long flags;
1173 spin_lock_irqsave(&drv->lock, flags);
1174 drv->blocked = 1;
1175 spin_unlock_irqrestore(&drv->lock, flags);
1176}
1177EXPORT_SYMBOL_GPL(gigaset_blockdriver);
1178
1179static int __init gigaset_init_module(void)
1180{
1181 /* in accordance with the principle of least astonishment,
1182 * setting the 'debug' parameter to 1 activates a sensible
1183 * set of default debug levels
1184 */
1185 if (gigaset_debuglevel == 1)
1186 gigaset_debuglevel = DEBUG_DEFAULT;
1187
1188 info(DRIVER_AUTHOR);
1189 info(DRIVER_DESC);
1190 return 0;
1191}
1192
static void __exit gigaset_exit_module(void)
{
	/* intentionally empty; cleanup is presumably performed by the
	 * hardware specific submodules via gigaset_freedriver() -- verify */
}
1196
/* module entry/exit points and metadata */
module_init(gigaset_init_module);
module_exit(gigaset_exit_module);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);

MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
new file mode 100644
index 000000000000..fdcb80bb21c7
--- /dev/null
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -0,0 +1,1983 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: ev-layer.c,v 1.4.2.18 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21
22/* ========================================================== */
23/* bit masks for pending commands */
24#define PC_INIT 0x004
25#define PC_DLE0 0x008
26#define PC_DLE1 0x010
27#define PC_CID 0x080
28#define PC_NOCID 0x100
29#define PC_HUP 0x002
30#define PC_DIAL 0x001
31#define PC_ACCEPT 0x040
32#define PC_SHUTDOWN 0x020
33#define PC_CIDMODE 0x200
34#define PC_UMMODE 0x400
35
36/* types of modem responses */
37#define RT_NOTHING 0
38#define RT_ZSAU 1
39#define RT_RING 2
40#define RT_NUMBER 3
41#define RT_STRING 4
42#define RT_HEX 5
43#define RT_ZCAU 6
44
45/* Possible ASCII responses */
46#define RSP_OK 0
47//#define RSP_BUSY 1
48//#define RSP_CONNECT 2
49#define RSP_ZGCI 3
50#define RSP_RING 4
51#define RSP_ZAOC 5
52#define RSP_ZCSTR 6
53#define RSP_ZCFGT 7
54#define RSP_ZCFG 8
55#define RSP_ZCCR 9
56#define RSP_EMPTY 10
57#define RSP_ZLOG 11
58#define RSP_ZCAU 12
59#define RSP_ZMWI 13
60#define RSP_ZABINFO 14
61#define RSP_ZSMLSTCHG 15
62#define RSP_VAR 100
63#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
64#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
65#define RSP_ZVLS (RSP_VAR + VAR_ZVLS)
66#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
67#define RSP_STR (RSP_VAR + VAR_NUM)
68#define RSP_NMBR (RSP_STR + STR_NMBR)
69#define RSP_ZCPN (RSP_STR + STR_ZCPN)
70#define RSP_ZCON (RSP_STR + STR_ZCON)
71#define RSP_ZBC (RSP_STR + STR_ZBC)
72#define RSP_ZHLC (RSP_STR + STR_ZHLC)
73#define RSP_ERROR -1 /* ERROR */
74#define RSP_WRONG_CID -2 /* unknown cid in cmd */
75//#define RSP_EMPTY -3
76#define RSP_UNKNOWN -4 /* unknown response */
77#define RSP_FAIL -5 /* internal error */
78#define RSP_INVAL -6 /* invalid response */
79
80#define RSP_NONE -19
81#define RSP_STRING -20
82#define RSP_NULL -21
83//#define RSP_RETRYFAIL -22
84//#define RSP_RETRY -23
85//#define RSP_SKIP -24
86#define RSP_INIT -27
87#define RSP_ANY -26
88#define RSP_LAST -28
89#define RSP_NODEV -9
90
91/* actions for process_response */
92#define ACT_NOTHING 0
93#define ACT_SETDLE1 1
94#define ACT_SETDLE0 2
95#define ACT_FAILINIT 3
96#define ACT_HUPMODEM 4
97#define ACT_CONFIGMODE 5
98#define ACT_INIT 6
99#define ACT_DLE0 7
100#define ACT_DLE1 8
101#define ACT_FAILDLE0 9
102#define ACT_FAILDLE1 10
103#define ACT_RING 11
104#define ACT_CID 12
105#define ACT_FAILCID 13
106#define ACT_SDOWN 14
107#define ACT_FAILSDOWN 15
108#define ACT_DEBUG 16
109#define ACT_WARN 17
110#define ACT_DIALING 18
111#define ACT_ABORTDIAL 19
112#define ACT_DISCONNECT 20
113#define ACT_CONNECT 21
114#define ACT_REMOTEREJECT 22
115#define ACT_CONNTIMEOUT 23
116#define ACT_REMOTEHUP 24
117#define ACT_ABORTHUP 25
118#define ACT_ICALL 26
119#define ACT_ACCEPTED 27
120#define ACT_ABORTACCEPT 28
121#define ACT_TIMEOUT 29
122#define ACT_GETSTRING 30
123#define ACT_SETVER 31
124#define ACT_FAILVER 32
125#define ACT_GOTVER 33
126#define ACT_TEST 34
127#define ACT_ERROR 35
128#define ACT_ABORTCID 36
129#define ACT_ZCAU 37
130#define ACT_NOTIFY_BC_DOWN 38
131#define ACT_NOTIFY_BC_UP 39
132#define ACT_DIAL 40
133#define ACT_ACCEPT 41
134#define ACT_PROTO_L2 42
135#define ACT_HUP 43
136#define ACT_IF_LOCK 44
137#define ACT_START 45
138#define ACT_STOP 46
139#define ACT_FAKEDLE0 47
140#define ACT_FAKEHUP 48
141#define ACT_FAKESDOWN 49
142#define ACT_SHUTDOWN 50
143#define ACT_PROC_CIDMODE 51
144#define ACT_UMODESET 52
145#define ACT_FAILUMODE 53
146#define ACT_CMODESET 54
147#define ACT_FAILCMODE 55
148#define ACT_IF_VER 56
149#define ACT_CMD 100
150
151/* at command sequences */
152#define SEQ_NONE 0
153#define SEQ_INIT 100
154#define SEQ_DLE0 200
155#define SEQ_DLE1 250
156#define SEQ_CID 300
157#define SEQ_NOCID 350
158#define SEQ_HUP 400
159#define SEQ_DIAL 600
160#define SEQ_ACCEPT 720
161#define SEQ_SHUTDOWN 500
162#define SEQ_CIDMODE 10
163#define SEQ_UMMODE 11
164
165
// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
/*
 * State table for the AT state machine while no connection (no CID) is
 * active, for M10x devices (DLE mode).  An entry matches on the response
 * code, the current ConState range and an optional parameter; it then
 * specifies the new ConState, a timeout value (-1: none), a list of
 * actions, and optionally an AT command string to send.
 */
struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */
{
	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */

	/* initialize device, set cid mode if possible */
	//{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}},
	//{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}},
	//{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT,
	// {ACT_TIMEOUT}},

	{RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT,
	 {ACT_TIMEOUT}}, /* wait until device is ready */

	{EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */
	{RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */

	{EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */
	{RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */

	{EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */
	{RSP_OK, 108,108, -1, 104,-1},
	{RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"},
	{EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}},
	{RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}},

	{EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0,
	 ACT_HUPMODEM,
	 ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */
	{EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"},

	{RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */
	{RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}},
	{RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}},
	{EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}},

	{RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}},
	{EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}},

	{RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}},

	{EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
	{RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
	{RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}},
#if 0
	{EV_TIMEOUT, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
	{RSP_ERROR, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
	{RSP_OK, 121,121, -1, 130, 5, {ACT_GOTVER}, "^SGCI=1\r"},

	{RSP_OK, 130,130, -1, 0, 0, {ACT_INIT}},
	{RSP_ERROR, 130,130, -1, 0, 0, {ACT_FAILINIT}},
	{EV_TIMEOUT, 130,130, -1, 0, 0, {ACT_FAILINIT}},
#endif

	/* leave dle mode */
	{RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
	{RSP_OK, 201,201, -1, 202,-1},
	//{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE
	{RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}},
	{RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}},
	{RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
	{EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}},

	/* enter dle mode */
	{RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
	{RSP_OK, 251,251, -1, 252,-1},
	{RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}},
	{RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}},
	{EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}},

	/* incoming call */
	{RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}},

	/* get cid */
	//{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}},
	//{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}},
	//{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"},

	{RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
	{RSP_OK, 301,301, -1, 302,-1},
	{RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}},
	{RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}},
	{EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}},

	/* enter cid mode */
	{RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
	{RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}},
	{RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}},
	{EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}},

	/* leave cid mode */
	//{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"},
	{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"},
	{RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}},
	{RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}},
	{EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}},

	/* abort getting cid */
	{RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}},

	/* reset */
#if 0
	{RSP_INIT, 0, 0,SEQ_SHUTDOWN, 503, 5, {0}, "^SGCI=0\r"},
	{RSP_OK, 503,503, -1, 504, 5, {0}, "Z\r"},
#endif
	{RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
	{RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}},
	{RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
	{EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
	{RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}},

	{EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME
	{EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME
	{EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME
	{EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME
	{EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME
	{EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME

	/* misc. */
	{RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME

	{RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
	{RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
	{RSP_LAST}
};
298
// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
/*
 * State table for the AT state machine while a connection (CID) exists,
 * for M10x devices.  Entry layout is identical to gigaset_tab_nocid_m10x;
 * ACT_CMD+AT_xxx entries send the corresponding prepared AT command.
 */
struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */
{
	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */

	/* dial */
	{EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME
	{RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}},
	{RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}},
	{RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
	{RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
	{RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}},
	{RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
	{RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
	{RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
	{RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
	{RSP_OK, 607,607, -1, 608,-1},
	//{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
	{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
	{RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},

	{RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
	{RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
	{EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},

	/* dialing */
	{RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */

	/* connection established */
	{RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
	{RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1

	{EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout

	/* remote hangup */
	{RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
	{RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
	{RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},

	/* hangup */
	{EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME
	{RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1?
	{RSP_OK, 401,401, -1, 402, 5},
	{RSP_ZVLS, 402,402, 0, 403, 5},
	{RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */
	//{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
	{RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
	{RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
	{RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}},
	{EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}},

	{EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout

	/* ring */
	{RSP_ZBC, 700,700, -1, -1,-1, {0}},
	{RSP_ZHLC, 700,700, -1, -1,-1, {0}},
	{RSP_NMBR, 700,700, -1, -1,-1, {0}},
	{RSP_ZCPN, 700,700, -1, -1,-1, {0}},
	{RSP_ZCTP, 700,700, -1, -1,-1, {0}},
	{EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}},
	{EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}},

	/*accept icall*/
	{EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME
	{RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}},
	{RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}},
	{RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
	{RSP_OK, 723,723, -1, 724, 5, {0}},
	{RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}},
	{RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
	{EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
	{RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}},
	{RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
	{RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},

	{EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},

	/* misc. */
	{EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME

	{RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
	{RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME

	{RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
	{RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
	{RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
	{RSP_LAST}
};
393
394
#if 0
/* disabled skeleton tables for non-DLE operation, kept for reference only */
static struct reply_t tab_nocid[]= /* no dle mode */ //FIXME carry over changes from the tables above
{
	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */

	{RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
	{RSP_LAST,0,0,0,0,0,0}
};

static struct reply_t tab_cid[] = /* no dle mode */ //FIXME carry over changes from the tables above
{
	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */

	{RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
	{RSP_LAST,0,0,0,0,0,0}
};
#endif
412
/* table mapping modem response keywords to response codes and to the
 * type of parameter that follows them; searched linearly by
 * gigaset_handle_modem_response(), terminated by a NULL entry
 */
static struct resp_type_t resp_type[]=
{
	/*{"", RSP_EMPTY, RT_NOTHING},*/
	{"OK", RSP_OK, RT_NOTHING},
	{"ERROR", RSP_ERROR, RT_NOTHING},
	{"ZSAU", RSP_ZSAU, RT_ZSAU},
	{"ZCAU", RSP_ZCAU, RT_ZCAU},
	{"RING", RSP_RING, RT_RING},
	{"ZGCI", RSP_ZGCI, RT_NUMBER},
	{"ZVLS", RSP_ZVLS, RT_NUMBER},
	{"ZCTP", RSP_ZCTP, RT_NUMBER},
	{"ZDLE", RSP_ZDLE, RT_NUMBER},
	{"ZCFGT", RSP_ZCFGT, RT_NUMBER},
	{"ZCCR", RSP_ZCCR, RT_NUMBER},
	{"ZMWI", RSP_ZMWI, RT_NUMBER},
	{"ZHLC", RSP_ZHLC, RT_STRING},
	{"ZBC", RSP_ZBC, RT_STRING},
	{"NMBR", RSP_NMBR, RT_STRING},
	{"ZCPN", RSP_ZCPN, RT_STRING},
	{"ZCON", RSP_ZCON, RT_STRING},
	{"ZAOC", RSP_ZAOC, RT_STRING},
	{"ZCSTR", RSP_ZCSTR, RT_STRING},
	{"ZCFG", RSP_ZCFG, RT_HEX},
	{"ZLOG", RSP_ZLOG, RT_NOTHING},
	{"ZABINFO", RSP_ZABINFO, RT_NOTHING},
	{"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
	{NULL,0,0}
};
441
442/*
443 * Get integer from char-pointer
444 */
445static int isdn_getnum(char *p)
446{
447 int v = -1;
448
449 IFNULLRETVAL(p, -1);
450
451 dbg(DEBUG_TRANSCMD, "string: %s", p);
452
453 while (*p >= '0' && *p <= '9')
454 v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0');
455 if (*p)
456 v = -1; /* invalid Character */
457 return v;
458}
459
460/*
461 * Get integer from char-pointer
462 */
463static int isdn_gethex(char *p)
464{
465 int v = 0;
466 int c;
467
468 IFNULLRETVAL(p, -1);
469
470 dbg(DEBUG_TRANSCMD, "string: %s", p);
471
472 if (!*p)
473 return -1;
474
475 do {
476 if (v > (INT_MAX - 15) / 16)
477 return -1;
478 c = *p;
479 if (c >= '0' && c <= '9')
480 c -= '0';
481 else if (c >= 'a' && c <= 'f')
482 c -= 'a' - 10;
483 else if (c >= 'A' && c <= 'F')
484 c -= 'A' - 10;
485 else
486 return -1;
487 v = v * 16 + c;
488 } while (*++p);
489
490 return v;
491}
492
/* Advance a sequence/ring index, wrapping from max back to 0.
 * NOTE(review): the read followed by set/inc is not atomic as a whole;
 * two concurrent callers could race (hence the FIXME below) — confirm
 * that all callers are serialized before relying on this.
 */
static inline void new_index(atomic_t *index, int max)
{
	if (atomic_read(index) == max) //FIXME race?
		atomic_set(index, 0);
	else
		atomic_inc(index);
}
500
/* retrieve CID from parsed response
 * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
 * note: s must point one past a parameter separator inside the
 * response buffer, so that s[-1] is a valid access
 */
static int cid_of_response(char *s)
{
	int value;

	if (s[-1] != ';')
		return 0;	/* no CID separator */

	value = isdn_getnum(s);
	if (value < 0)
		return 0;	/* CID not numeric */

	/* reject out-of-range CIDs (valid range is 1..65535) */
	return (value >= 1 && value <= 65535) ? value : -1;
	//FIXME is ;<digit>+ at end of non-CID response really impossible?
}
518
/* This function will be called via task queue from the callback handler.
 * We received a modem response and have to handle it..
 *
 * The raw line accumulated in cs->respdata (cs->cbytes bytes) is split
 * at ';', ',' and '=' separators, a trailing CID (if any) is extracted,
 * and one event per recognized response keyword is queued on the
 * cs->events ring buffer under cs->ev_lock.
 */
void gigaset_handle_modem_response(struct cardstate *cs)
{
	unsigned char *argv[MAX_REC_PARAMS + 1];	/* parsed fields */
	int params;		/* number of entries in argv[] */
	int i, j;
	struct resp_type_t *rt;	/* matching entry of resp_type[] */
	int curarg;		/* index of next field to process */
	unsigned long flags;
	unsigned next, tail, head;	/* event ring buffer positions */
	struct event_t *event;
	int resp_code;
	int param_type;
	int abort;		/* stop queuing after the current event? */
	size_t len;
	int cid;
	int rawstring;		/* pass line through verbatim (getstring)? */

	IFNULLRET(cs);

	len = cs->cbytes;
	if (!len) {
		/* ignore additional LFs/CRs (M10x config mode or cx100) */
		dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]);
		return;
	}
	/* NUL-terminate so the buffer can be handled as a C string */
	cs->respdata[len] = 0;
	dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata);
	argv[0] = cs->respdata;
	params = 1;
	if (cs->at_state.getstring) {
		/* getstring only allowed without cid at the moment */
		cs->at_state.getstring = 0;
		rawstring = 1;
		cid = 0;
	} else {
		/* parse line: record the start of each field following a
		 * separator character */
		for (i = 0; i < len; i++)
			switch (cs->respdata[i]) {
			case ';':
			case ',':
			case '=':
				if (params > MAX_REC_PARAMS) {
					warn("too many parameters in response");
					/* need last parameter (might be CID) */
					params--;
				}
				argv[params++] = cs->respdata + i + 1;
			}

		rawstring = 0;
		cid = params > 1 ? cid_of_response(argv[params-1]) : 0;
		if (cid < 0) {
			/* last field looked like a CID but was invalid */
			gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
					  NULL, 0, NULL);
			return;
		}

		/* replace each separator with a NUL terminator */
		for (j = 1; j < params; ++j)
			argv[j][-1] = 0;

		dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]);
		if (cid) {
			/* drop the CID field from the parameter list */
			--params;
			dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]);
		}
		dbg(DEBUG_TRANSCMD, "available params: %d", params - 1);
		for (j = 1; j < params; j++)
			dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]);
	}

	spin_lock_irqsave(&cs->ev_lock, flags);
	head = atomic_read(&cs->ev_head);
	tail = atomic_read(&cs->ev_tail);

	abort = 1;
	curarg = 0;
	while (curarg < params) {
		next = (tail + 1) % MAX_EVENTS;
		if (unlikely(next == head)) {
			err("event queue full");
			break;
		}

		/* claim and initialize the next event slot */
		event = cs->events + tail;
		event->at_state = NULL;
		event->cid = cid;
		event->ptr = NULL;
		event->arg = NULL;
		tail = next;

		if (rawstring) {
			resp_code = RSP_STRING;
			param_type = RT_STRING;
		} else {
			/* look up the response keyword in the table */
			for (rt = resp_type; rt->response; ++rt)
				if (!strcmp(argv[curarg], rt->response))
					break;

			if (!rt->response) {
				event->type = RSP_UNKNOWN;
				warn("unknown modem response: %s",
				     argv[curarg]);
				break;
			}

			resp_code = rt->resp_code;
			param_type = rt->type;
			++curarg;
		}

		event->type = resp_code;

		/* decode the parameter field(s) according to the
		 * parameter type of the matched response */
		switch (param_type) {
		case RT_NOTHING:
			break;
		case RT_RING:
			if (!cid) {
				err("received RING without CID!");
				event->type = RSP_INVAL;
				abort = 1;
			} else {
				event->cid = 0;
				event->parameter = cid;
				/* RING is the only response that allows
				 * further events from the same line */
				abort = 0;
			}
			break;
		case RT_ZSAU:
			if (curarg >= params) {
				event->parameter = ZSAU_NONE;
				break;
			}
			if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING"))
				event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING;
			else if (!strcmp(argv[curarg], "CALL_DELIVERED"))
				event->parameter = ZSAU_CALL_DELIVERED;
			else if (!strcmp(argv[curarg], "ACTIVE"))
				event->parameter = ZSAU_ACTIVE;
			else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
				event->parameter = ZSAU_DISCONNECT_IND;
			else if (!strcmp(argv[curarg], "NULL"))
				event->parameter = ZSAU_NULL;
			else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
				event->parameter = ZSAU_DISCONNECT_REQ;
			else {
				event->parameter = ZSAU_UNKNOWN;
				warn("%s: unknown parameter %s after ZSAU",
				     __func__, argv[curarg]);
			}
			++curarg;
			break;
		case RT_STRING:
			if (curarg < params) {
				/* copy the string; the event owns the copy */
				len = strlen(argv[curarg]) + 1;
				event->ptr = kmalloc(len, GFP_ATOMIC);
				if (event->ptr)
					memcpy(event->ptr, argv[curarg], len);
				else
					err("no memory for string!");
				++curarg;
			}
#ifdef CONFIG_GIGASET_DEBUG
			if (!event->ptr)
				dbg(DEBUG_CMD, "string==NULL");
			else
				dbg(DEBUG_CMD,
				    "string==%s", (char *) event->ptr);
#endif
			break;
		case RT_ZCAU:
			/* two hex bytes expected: combined into one value */
			event->parameter = -1;
			if (curarg + 1 < params) {
				i = isdn_gethex(argv[curarg]);
				j = isdn_gethex(argv[curarg + 1]);
				if (i >= 0 && i < 256 && j >= 0 && j < 256)
					event->parameter = (unsigned) i << 8
							   | j;
				curarg += 2;
			} else
				curarg = params - 1;
			break;
		case RT_NUMBER:
		case RT_HEX:
			if (curarg < params) {
				if (param_type == RT_HEX)
					event->parameter =
						isdn_gethex(argv[curarg]);
				else
					event->parameter =
						isdn_getnum(argv[curarg]);
				++curarg;
			} else
				event->parameter = -1;
#ifdef CONFIG_GIGASET_DEBUG
			dbg(DEBUG_CMD, "parameter==%d", event->parameter);
#endif
			break;
		}

		/* track DLE mode changes announced by the device */
		if (resp_code == RSP_ZDLE)
			cs->dle = event->parameter;

		if (abort)
			break;
	}

	atomic_set(&cs->ev_tail, tail);
	spin_unlock_irqrestore(&cs->ev_lock, flags);

	if (curarg != params)
		dbg(DEBUG_ANY, "invalid number of processed parameters: %d/%d",
		    curarg, params);
}
EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
735
/* disconnect
 * process closing of connection associated with given AT state structure
 *
 * Advances the sequence index, schedules a return to unimodem mode if
 * CID mode is not selected, and either hands the B channel back to the
 * hardware driver or (for a temporary AT state without a B channel)
 * unlinks and frees the structure, clearing *at_state_p.
 */
static void disconnect(struct at_state_t **at_state_p)
{
	unsigned long flags;
	struct bc_state *bcs;	/* B channel of the AT state, may be NULL */
	struct cardstate *cs;

	IFNULLRET(at_state_p);
	IFNULLRET(*at_state_p);
	bcs = (*at_state_p)->bcs;
	cs = (*at_state_p)->cs;
	IFNULLRET(cs);

	/* invalidate outstanding timeouts/commands for this connection */
	new_index(&(*at_state_p)->seq_index, MAX_SEQ_INDEX);

	/* revert to selected idle mode */
	if (!atomic_read(&cs->cidmode)) {
		cs->at_state.pending_commands |= PC_UMMODE;
		atomic_set(&cs->commands_pending, 1); //FIXME
		dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
	}

	if (bcs) {
		/* B channel assigned: invoke hardware specific handler */
		cs->ops->close_bchannel(bcs);
	} else {
		/* no B channel assigned: just deallocate */
		spin_lock_irqsave(&cs->lock, flags);
		list_del(&(*at_state_p)->list);
		kfree(*at_state_p);
		*at_state_p = NULL;
		spin_unlock_irqrestore(&cs->lock, flags);
	}
}
772
/* get_free_channel
 * get a free AT state structure: either one of those associated with the
 * B channels of the Gigaset device, or if none of those is available,
 * a newly allocated one with bcs=NULL
 * The structure should be freed by calling disconnect() after use.
 * Returns NULL if no channel is free and allocation fails.
 */
static inline struct at_state_t *get_free_channel(struct cardstate *cs,
						  int cid)
/* cids: >0: siemens-cid
	  0: without cid
	 -1: no cid assigned yet
*/
{
	unsigned long flags;
	int i;
	struct at_state_t *ret;

	/* prefer the AT state of a free B channel */
	for (i = 0; i < cs->channels; ++i)
		if (gigaset_get_channel(cs->bcs + i)) {
			ret = &cs->bcs[i].at_state;
			ret->cid = cid;
			return ret;
		}

	/* fall back to a temporary AT state without a B channel,
	 * linked into cs->temp_at_states under cs->lock */
	spin_lock_irqsave(&cs->lock, flags);
	ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
	if (ret) {
		gigaset_at_init(ret, NULL, cs, cid);
		list_add(&ret->list, &cs->temp_at_states);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return ret;
}
806
807static void init_failed(struct cardstate *cs, int mode)
808{
809 int i;
810 struct at_state_t *at_state;
811
812 cs->at_state.pending_commands &= ~PC_INIT;
813 atomic_set(&cs->mode, mode);
814 atomic_set(&cs->mstate, MS_UNINITIALIZED);
815 gigaset_free_channels(cs);
816 for (i = 0; i < cs->channels; ++i) {
817 at_state = &cs->bcs[i].at_state;
818 if (at_state->pending_commands & PC_CID) {
819 at_state->pending_commands &= ~PC_CID;
820 at_state->pending_commands |= PC_NOCID;
821 atomic_set(&cs->commands_pending, 1);
822 }
823 }
824}
825
/* schedule_init
 * Request (re)initialization of the device: set the modem state to
 * 'state', block all channels, and schedule the PC_INIT command.
 * Does nothing if an init is already pending.
 */
static void schedule_init(struct cardstate *cs, int state)
{
	if (cs->at_state.pending_commands & PC_INIT) {
		dbg(DEBUG_CMD, "not scheduling PC_INIT again");
		return;
	}
	atomic_set(&cs->mstate, state);
	atomic_set(&cs->mode, M_UNKNOWN);
	gigaset_block_channels(cs);
	cs->at_state.pending_commands |= PC_INIT;
	atomic_set(&cs->commands_pending, 1);
	dbg(DEBUG_CMD, "Scheduling PC_INIT");
}
839
840/* Add "AT" to a command, add the cid, dle encode it, send the result to the hardware. */
841static void send_command(struct cardstate *cs, const char *cmd, int cid,
842 int dle, gfp_t kmallocflags)
843{
844 size_t cmdlen, buflen;
845 char *cmdpos, *cmdbuf, *cmdtail;
846
847 cmdlen = strlen(cmd);
848 buflen = 11 + cmdlen;
849
850 if (likely(buflen > cmdlen)) {
851 cmdbuf = kmalloc(buflen, kmallocflags);
852 if (likely(cmdbuf != NULL)) {
853 cmdpos = cmdbuf + 9;
854 cmdtail = cmdpos + cmdlen;
855 memcpy(cmdpos, cmd, cmdlen);
856
857 if (cid > 0 && cid <= 65535) {
858 do {
859 *--cmdpos = '0' + cid % 10;
860 cid /= 10;
861 ++cmdlen;
862 } while (cid);
863 }
864
865 cmdlen += 2;
866 *--cmdpos = 'T';
867 *--cmdpos = 'A';
868
869 if (dle) {
870 cmdlen += 4;
871 *--cmdpos = '(';
872 *--cmdpos = 0x10;
873 *cmdtail++ = 0x10;
874 *cmdtail++ = ')';
875 }
876
877 cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL);
878 kfree(cmdbuf);
879 } else
880 err("no memory for command buffer");
881 } else
882 err("overflow in buflen");
883}
884
/* at_state_from_cid
 * Find the AT state structure currently associated with the given CID:
 * CID 0 maps to the global cs->at_state, otherwise the B channel AT
 * states and then the temporary AT states (under cs->lock) are searched.
 * Returns NULL if no matching AT state exists.
 */
static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
{
	struct at_state_t *at_state;
	int i;
	unsigned long flags;

	if (cid == 0)
		return &cs->at_state;

	for (i = 0; i < cs->channels; ++i)
		if (cid == cs->bcs[i].at_state.cid)
			return &cs->bcs[i].at_state;

	spin_lock_irqsave(&cs->lock, flags);

	list_for_each_entry(at_state, &cs->temp_at_states, list)
		if (cid == at_state->cid) {
			spin_unlock_irqrestore(&cs->lock, flags);
			return at_state;
		}

	spin_unlock_irqrestore(&cs->lock, flags);

	return NULL;
}
910
/* bchannel_down
 * Take the B channel down: notify the ISDN subsystem of B and D channel
 * hangup as required by the channel state flags, release the channel,
 * and reinitialize its state.
 */
static void bchannel_down(struct bc_state *bcs)
{
	IFNULLRET(bcs);
	IFNULLRET(bcs->cs);

	if (bcs->chstate & CHS_B_UP) {
		bcs->chstate &= ~CHS_B_UP;
		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
	}

	if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
		bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
	}

	gigaset_free_channel(bcs);

	gigaset_bcs_reinit(bcs);
}
930
/* bchannel_up
 * Bring the B channel up and notify the ISDN subsystem; brings the
 * D channel up first if it unexpectedly isn't. Does nothing (beyond a
 * notice) if the B channel is already up.
 */
static void bchannel_up(struct bc_state *bcs)
{
	IFNULLRET(bcs);

	if (!(bcs->chstate & CHS_D_UP)) {
		notice("%s: D channel not up", __func__);
		bcs->chstate |= CHS_D_UP;
		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
	}

	if (bcs->chstate & CHS_B_UP) {
		notice("%s: B channel already up", __func__);
		return;
	}

	bcs->chstate |= CHS_B_UP;
	gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
}
949
/* start_dial
 * Begin an outgoing call on the given AT state: set up the ISDN dial
 * parameters and schedule the PC_CID command to obtain a call id.
 * On a stale sequence index or setup failure, schedule PC_NOCID to
 * report failure instead.
 */
static void start_dial(struct at_state_t *at_state, void *data, int seq_index)
{
	struct bc_state *bcs = at_state->bcs;
	struct cardstate *cs = at_state->cs;
	int retval;

	bcs->chstate |= CHS_NOTIFY_LL;
	//atomic_set(&bcs->status, BCS_INIT);

	/* the request is stale if the sequence index changed meanwhile */
	if (atomic_read(&at_state->seq_index) != seq_index)
		goto error;

	retval = gigaset_isdn_setup_dial(at_state, data);
	if (retval != 0)
		goto error;


	at_state->pending_commands |= PC_CID;
	dbg(DEBUG_CMD, "Scheduling PC_CID");
//#ifdef GIG_MAYINITONDIAL
//	if (atomic_read(&cs->MState) == MS_UNKNOWN) {
//		cs->at_state.pending_commands |= PC_INIT;
//		dbg(DEBUG_CMD, "Scheduling PC_INIT");
//	}
//#endif
	atomic_set(&cs->commands_pending, 1); //FIXME
	return;

error:
	at_state->pending_commands |= PC_NOCID;
	dbg(DEBUG_CMD, "Scheduling PC_NOCID");
	atomic_set(&cs->commands_pending, 1); //FIXME
	return;
}
984
985static void start_accept(struct at_state_t *at_state)
986{
987 struct cardstate *cs = at_state->cs;
988 int retval;
989
990 retval = gigaset_isdn_setup_accept(at_state);
991
992 if (retval == 0) {
993 at_state->pending_commands |= PC_ACCEPT;
994 dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
995 atomic_set(&cs->commands_pending, 1); //FIXME
996 } else {
997 //FIXME
998 at_state->pending_commands |= PC_HUP;
999 dbg(DEBUG_CMD, "Scheduling PC_HUP");
1000 atomic_set(&cs->commands_pending, 1); //FIXME
1001 }
1002}
1003
/* do_start
 * Handle device startup: free the channels, schedule initialization
 * (unless the device is locked), announce the device to the ISDN
 * subsystem, and wake up anyone waiting for startup to complete.
 */
static void do_start(struct cardstate *cs)
{
	gigaset_free_channels(cs);

	if (atomic_read(&cs->mstate) != MS_LOCKED)
		schedule_init(cs, MS_INIT);

	gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
	// FIXME: not in locked mode
	// FIXME 2: only after init sequence

	cs->waiting = 0;
	wake_up(&cs->waitqueue);
}
1018
/* finish_shutdown
 * Complete device shutdown: mark the device uninitialized (unless
 * locked) and wake up the waiting process with -ENODEV.
 */
static void finish_shutdown(struct cardstate *cs)
{
	if (atomic_read(&cs->mstate) != MS_LOCKED) {
		atomic_set(&cs->mstate, MS_UNINITIALIZED);
		atomic_set(&cs->mode, M_UNKNOWN);
	}

	/* The rest is done by cleanup_cs () in user mode. */

	cs->cmd_result = -ENODEV;
	cs->waiting = 0;
	wake_up_interruptible(&cs->waitqueue);
}
1032
1033static void do_shutdown(struct cardstate *cs)
1034{
1035 gigaset_block_channels(cs);
1036
1037 if (atomic_read(&cs->mstate) == MS_READY) {
1038 atomic_set(&cs->mstate, MS_SHUTDOWN);
1039 cs->at_state.pending_commands |= PC_SHUTDOWN;
1040 atomic_set(&cs->commands_pending, 1); //FIXME
1041 dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN"); //FIXME
1042 //gigaset_schedule_event(cs); //FIXME
1043 } else
1044 finish_shutdown(cs);
1045}
1046
/* do_stop
 * Handle a stop request; currently identical to a shutdown.
 */
static void do_stop(struct cardstate *cs)
{
	do_shutdown(cs);
}
1051
1052/* Entering cid mode or getting a cid failed:
1053 * try to initialize the device and try again.
1054 *
1055 * channel >= 0: getting cid for the channel failed
1056 * channel < 0: entering cid mode failed
1057 *
1058 * returns 0 on failure
1059 */
1060static int reinit_and_retry(struct cardstate *cs, int channel)
1061{
1062 int i;
1063
1064 if (--cs->retry_count <= 0)
1065 return 0;
1066
1067 for (i = 0; i < cs->channels; ++i)
1068 if (cs->bcs[i].at_state.cid > 0)
1069 return 0;
1070
1071 if (channel < 0)
1072 warn("Could not enter cid mode. Reinit device and try again.");
1073 else {
1074 warn("Could not get a call id. Reinit device and try again.");
1075 cs->bcs[channel].at_state.pending_commands |= PC_CID;
1076 }
1077 schedule_init(cs, MS_INIT);
1078 return 1;
1079}
1080
/* at_state_invalid
 * Check whether test_ptr still refers to a live AT state structure of
 * this card (the global one, a temporary one, or a B channel one).
 * Returns nonzero if the pointer is not found, i.e. is invalid/stale.
 */
static int at_state_invalid(struct cardstate *cs,
			    struct at_state_t *test_ptr)
{
	unsigned long flags;
	unsigned channel;
	struct at_state_t *at_state;
	int retval = 0;

	spin_lock_irqsave(&cs->lock, flags);

	if (test_ptr == &cs->at_state)
		goto exit;

	list_for_each_entry(at_state, &cs->temp_at_states, list)
		if (at_state == test_ptr)
			goto exit;

	for (channel = 0; channel < cs->channels; ++channel)
		if (&cs->bcs[channel].at_state == test_ptr)
			goto exit;

	/* not found anywhere: pointer is invalid */
	retval = 1;
exit:
	spin_unlock_irqrestore(&cs->lock, flags);
	return retval;
}
1107
/* handle_icall
 * Pass an incoming call to the ISDN subsystem for disposition.
 * Unless the call is accepted, schedule an active hangup (PC_HUP),
 * which is also the fallback for unexpected disposition codes.
 */
static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
			 struct at_state_t **p_at_state)
{
	int retval;
	struct at_state_t *at_state = *p_at_state;

	retval = gigaset_isdn_icall(at_state);
	switch (retval) {
	case ICALL_ACCEPT:
		break;
	default:
		err("internal error: disposition=%d", retval);
		/* --v-- fall through --v-- */
	case ICALL_IGNORE:
	case ICALL_REJECT:
		/* hang up actively
		 * Device doc says that would reject the call.
		 * In fact it doesn't.
		 */
		at_state->pending_commands |= PC_HUP;
		atomic_set(&cs->commands_pending, 1);
		break;
	}
}
1132
/* do_lock
 * Put the device into locked mode (raw access for userspace tools).
 * Refuses with -EBUSY if AT commands are in progress or pending, or
 * if the channels cannot be reserved.
 * Returns the previous operating mode on success.
 */
static int do_lock(struct cardstate *cs)
{
	int mode;
	int i;

	switch (atomic_read(&cs->mstate)) {
	case MS_UNINITIALIZED:
	case MS_READY:
		/* reject if any AT activity is in progress or pending */
		if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
		    cs->at_state.pending_commands)
			return -EBUSY;

		for (i = 0; i < cs->channels; ++i)
			if (cs->bcs[i].at_state.pending_commands)
				return -EBUSY;

		if (!gigaset_get_channels(cs))
			return -EBUSY;

		break;
	case MS_LOCKED:
		/* already locked: allowed, just report the mode */
		//retval = -EACCES;
		break;
	default:
		return -EBUSY;
	}

	mode = atomic_read(&cs->mode);
	atomic_set(&cs->mstate, MS_LOCKED);
	atomic_set(&cs->mode, M_UNKNOWN);
	//FIXME reset card state / at states / bcs states

	return mode;
}
1167
/* do_unlock
 * Leave locked mode: mark the device uninitialized, release the
 * channels, and schedule reinitialization if the device is connected.
 * Returns -EINVAL if the device was not locked, 0 otherwise.
 */
static int do_unlock(struct cardstate *cs)
{
	if (atomic_read(&cs->mstate) != MS_LOCKED)
		return -EINVAL;

	atomic_set(&cs->mstate, MS_UNINITIALIZED);
	atomic_set(&cs->mode, M_UNKNOWN);
	gigaset_free_channels(cs);
	//FIXME reset card state / at states / bcs states
	if (atomic_read(&cs->connected))
		schedule_init(cs, MS_INIT);

	return 0;
}
1182
/* do_action
 * Execute a single state machine action code for event 'ev' in the
 * context of AT state *p_at_state (and optionally B channel bcs).
 * Side channels back to the caller:
 *  - *p_at_state may be updated when the AT state is freed/replaced,
 *  - *pp_command is set when an AT command string should be sent,
 *  - *p_genresp / *p_resp_code request a locally generated response.
 */
static void do_action(int action, struct cardstate *cs,
		      struct bc_state *bcs,
		      struct at_state_t **p_at_state, char **pp_command,
		      int *p_genresp, int *p_resp_code,
		      struct event_t *ev)
{
	struct at_state_t *at_state = *p_at_state;
	struct at_state_t *at_state2;
	unsigned long flags;

	int channel;

	unsigned char *s, *e;	/* scan pointers for version parsing */
	int i;
	unsigned long val;

	switch (action) {
	case ACT_NOTHING:
		break;
	case ACT_TIMEOUT:
		at_state->waiting = 1;
		break;
	case ACT_INIT:
		//FIXME setup everything
		cs->at_state.pending_commands &= ~PC_INIT;
		cs->cur_at_seq = SEQ_NONE;
		atomic_set(&cs->mode, M_UNIMODEM);
		if (!atomic_read(&cs->cidmode)) {
			gigaset_free_channels(cs);
			atomic_set(&cs->mstate, MS_READY);
			break;
		}
		/* CID mode selected: schedule switching into it */
		cs->at_state.pending_commands |= PC_CIDMODE;
		atomic_set(&cs->commands_pending, 1); //FIXME
		dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
		break;
	case ACT_FAILINIT:
		warn("Could not initialize the device.");
		cs->dle = 0;
		init_failed(cs, M_UNKNOWN);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_CONFIGMODE:
		init_failed(cs, M_CONFIG);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_SETDLE1:
		cs->dle = 1;
		/* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
		cs->inbuf[0].inputstate &=
			~(INS_command | INS_DLE_command);
		break;
	case ACT_SETDLE0:
		cs->dle = 0;
		cs->inbuf[0].inputstate =
			(cs->inbuf[0].inputstate & ~INS_DLE_command)
			| INS_command;
		break;
	case ACT_CMODESET:
		if (atomic_read(&cs->mstate) == MS_INIT ||
		    atomic_read(&cs->mstate) == MS_RECOVER) {
			gigaset_free_channels(cs);
			atomic_set(&cs->mstate, MS_READY);
		}
		atomic_set(&cs->mode, M_CID);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_UMODESET:
		atomic_set(&cs->mode, M_UNIMODEM);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_FAILCMODE:
		cs->cur_at_seq = SEQ_NONE;
		if (atomic_read(&cs->mstate) == MS_INIT ||
		    atomic_read(&cs->mstate) == MS_RECOVER) {
			init_failed(cs, M_UNKNOWN);
			break;
		}
		if (!reinit_and_retry(cs, -1))
			schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILUMODE:
		cs->cur_at_seq = SEQ_NONE;
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_HUPMODEM:
		/* send "+++" (hangup in unimodem mode) */
		cs->ops->write_cmd(cs, "+++", 3, NULL);
		break;
	case ACT_RING:
		/* get fresh AT state structure for new CID */
		at_state2 = get_free_channel(cs, ev->parameter);
		if (!at_state2) {
			warn("RING ignored: "
			     "could not allocate channel structure");
			break;
		}

		/* initialize AT state structure
		 * note that bcs may be NULL if no B channel is free
		 */
		at_state2->ConState = 700;
		kfree(at_state2->str_var[STR_NMBR]);
		at_state2->str_var[STR_NMBR] = NULL;
		kfree(at_state2->str_var[STR_ZCPN]);
		at_state2->str_var[STR_ZCPN] = NULL;
		kfree(at_state2->str_var[STR_ZBC]);
		at_state2->str_var[STR_ZBC] = NULL;
		kfree(at_state2->str_var[STR_ZHLC]);
		at_state2->str_var[STR_ZHLC] = NULL;
		at_state2->int_var[VAR_ZCTP] = -1;

		/* start the ring timeout */
		spin_lock_irqsave(&cs->lock, flags);
		at_state2->timer_expires = RING_TIMEOUT;
		at_state2->timer_active = 1;
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case ACT_ICALL:
		handle_icall(cs, bcs, p_at_state);
		at_state = *p_at_state;
		break;
	case ACT_FAILSDOWN:
		warn("Could not shut down the device.");
		/* fall through */
	case ACT_FAKESDOWN:
	case ACT_SDOWN:
		cs->cur_at_seq = SEQ_NONE;
		finish_shutdown(cs);
		break;
	case ACT_CONNECT:
		if (cs->onechannel) {
			/* single-channel device: enter DLE mode first */
			at_state->pending_commands |= PC_DLE1;
			atomic_set(&cs->commands_pending, 1);
			break;
		}
		bcs->chstate |= CHS_D_UP;
		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_DLE1:
		cs->cur_at_seq = SEQ_NONE;
		bcs = cs->bcs + cs->curchannel;

		bcs->chstate |= CHS_D_UP;
		gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_FAKEHUP:
		at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
		/* fall through */
	case ACT_DISCONNECT:
		cs->cur_at_seq = SEQ_NONE;
		at_state->cid = -1;
		if (bcs && cs->onechannel && cs->dle) {
			/* Check for other open channels not needed:
			 * DLE only used for M10x with one B channel.
			 */
			at_state->pending_commands |= PC_DLE0;
			atomic_set(&cs->commands_pending, 1);
		} else {
			disconnect(p_at_state);
			at_state = *p_at_state;
		}
		break;
	case ACT_FAKEDLE0:
		at_state->int_var[VAR_ZDLE] = 0;
		cs->dle = 0;
		/* fall through */
	case ACT_DLE0:
		cs->cur_at_seq = SEQ_NONE;
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		break;
	case ACT_ABORTHUP:
		cs->cur_at_seq = SEQ_NONE;
		warn("Could not hang up.");
		at_state->cid = -1;
		if (bcs && cs->onechannel)
			at_state->pending_commands |= PC_DLE0;
		else {
			disconnect(p_at_state);
			at_state = *p_at_state;
		}
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE0:
		cs->cur_at_seq = SEQ_NONE;
		warn("Could not leave DLE mode.");
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE1:
		cs->cur_at_seq = SEQ_NONE;
		warn("Could not enter DLE mode. Try to hang up.");
		channel = cs->curchannel;
		cs->bcs[channel].at_state.pending_commands |= PC_HUP;
		atomic_set(&cs->commands_pending, 1);
		break;

	case ACT_CID: /* got cid; start dialing */
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (ev->parameter > 0 && ev->parameter <= 65535) {
			cs->bcs[channel].at_state.cid = ev->parameter;
			cs->bcs[channel].at_state.pending_commands |=
				PC_DIAL;
			atomic_set(&cs->commands_pending, 1);
			break;
		}
		/* bad cid: treat like failure */
		/* fall through */
	case ACT_FAILCID:
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (!reinit_and_retry(cs, channel)) {
			warn("Could not get a call id. Dialing not possible");
			at_state2 = &cs->bcs[channel].at_state;
			disconnect(&at_state2);
		}
		break;
	case ACT_ABORTCID:
		cs->cur_at_seq = SEQ_NONE;
		at_state2 = &cs->bcs[cs->curchannel].at_state;
		disconnect(&at_state2);
		break;

	case ACT_DIALING:
	case ACT_ACCEPTED:
		cs->cur_at_seq = SEQ_NONE;
		break;

	case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
		disconnect(p_at_state);
		at_state = *p_at_state;
		break;

	case ACT_ABORTDIAL: /* error/timeout during dial preparation */
		cs->cur_at_seq = SEQ_NONE;
		at_state->pending_commands |= PC_HUP;
		atomic_set(&cs->commands_pending, 1);
		break;

	case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
	case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
	case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
		at_state->pending_commands |= PC_HUP;
		atomic_set(&cs->commands_pending, 1);
		break;
	case ACT_GETSTRING: /* warning: RING, ZDLE, ... are not handled properly any more */
		at_state->getstring = 1;
		break;
	case ACT_SETVER:
		/* parse a "aa.bbb.cc.dd" firmware version string */
		if (!ev->ptr) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		s = ev->ptr;

		if (!strcmp(s, "OK")) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}

		for (i = 0; i < 4; ++i) {
			val = simple_strtoul(s, (char **) &e, 10);
			if (val > INT_MAX || e == s)
				break;
			if (i == 3) {
				if (*e)
					break;
			} else if (*e != '.')
				break;
			else
				s = e + 1;
			cs->fwver[i] = val;
		}
		if (i != 4) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		/*at_state->getstring = 1;*/
		cs->gotfwver = 0;
		break;
	case ACT_GOTVER:
		if (cs->gotfwver == 0) {
			cs->gotfwver = 1;
			dbg(DEBUG_ANY,
			    "firmware version %02d.%03d.%02d.%02d",
			    cs->fwver[0], cs->fwver[1],
			    cs->fwver[2], cs->fwver[3]);
			break;
		}
		/* fall through */
	case ACT_FAILVER:
		cs->gotfwver = -1;
		err("could not read firmware version.");
		break;
#ifdef CONFIG_GIGASET_DEBUG
	case ACT_ERROR:
		*p_genresp = 1;
		*p_resp_code = RSP_ERROR;
		break;
	case ACT_TEST:
		{
			static int count = 3; //2; //1;
			*p_genresp = 1;
			*p_resp_code = count ? RSP_ERROR : RSP_OK;
			if (count > 0)
				--count;
		}
		break;
#endif
	case ACT_DEBUG:
		dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
		    __func__, ev->type, at_state->ConState);
		break;
	case ACT_WARN:
		warn("%s: resp_code %d in ConState %d!",
		     __func__, ev->type, at_state->ConState);
		break;
	case ACT_ZCAU:
		warn("cause code %04x in connection state %d.",
		     ev->parameter, at_state->ConState);
		break;

	/* events from the LL */
	case ACT_DIAL:
		start_dial(at_state, ev->ptr, ev->parameter);
		break;
	case ACT_ACCEPT:
		start_accept(at_state);
		break;
	case ACT_PROTO_L2:
		dbg(DEBUG_CMD,
		    "set protocol to %u", (unsigned) ev->parameter);
		at_state->bcs->proto2 = ev->parameter;
		break;
	case ACT_HUP:
		at_state->pending_commands |= PC_HUP;
		atomic_set(&cs->commands_pending, 1); //FIXME
		dbg(DEBUG_CMD, "Scheduling PC_HUP");
		break;

	/* hotplug events */
	case ACT_STOP:
		do_stop(cs);
		break;
	case ACT_START:
		do_start(cs);
		break;

	/* events from the interface */ // FIXME without ACT_xxxx?
	case ACT_IF_LOCK:
		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;
	case ACT_IF_VER:
		if (ev->parameter != 0)
			cs->cmd_result = -EINVAL;
		else if (cs->gotfwver != 1) {
			cs->cmd_result = -ENOENT;
		} else {
			memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
			cs->cmd_result = 0;
		}
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the proc file system */ // FIXME without ACT_xxxx?
	case ACT_PROC_CIDMODE:
		if (ev->parameter != atomic_read(&cs->cidmode)) {
			atomic_set(&cs->cidmode, ev->parameter);
			if (ev->parameter) {
				cs->at_state.pending_commands |= PC_CIDMODE;
				dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
			} else {
				cs->at_state.pending_commands |= PC_UMMODE;
				dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
			}
			atomic_set(&cs->commands_pending, 1);
		}
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the hardware drivers */
	case ACT_NOTIFY_BC_DOWN:
		bchannel_down(bcs);
		break;
	case ACT_NOTIFY_BC_UP:
		bchannel_up(bcs);
		break;
	case ACT_SHUTDOWN:
		do_shutdown(cs);
		break;


	default:
		/* ACT_CMD+i encodes "send the i-th stored AT command" */
		if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
			*pp_command = at_state->bcs->commands[action - ACT_CMD];
			if (!*pp_command) {
				*p_genresp = 1;
				*p_resp_code = RSP_NULL;
			}
		} else
			err("%s: action==%d!", __func__, action);
	}
}
1596
/* State machine to do the calling and hangup procedure
 * Looks up the row of the response table (at_state->replystruct) matching
 * the event's type, the current connection state and (optionally) its
 * parameter, executes the row's actions via do_action(), then advances
 * ConState, emits either a generated response event or the row's AT
 * command, and (re)arms the response timeout.
 * parameters:
 *	cs	controller state structure
 *	ev	event to process (ownership of ev->ptr may be taken over)
 */
static void process_event(struct cardstate *cs, struct event_t *ev)
{
	struct bc_state *bcs;
	char *p_command = NULL;		/* AT command to send, if any */
	struct reply_t *rep;		/* matched row of the response table */
	int rcode;
	int genresp = 0;		/* do_action() may request a generated response */
	int resp_code = RSP_ERROR;	/* code for that generated response */
	int sendcid;
	struct at_state_t *at_state;
	int index;
	int curact;
	unsigned long flags;

	IFNULLRET(cs);
	IFNULLRET(ev);

	/* resolve the AT state the event belongs to: by call ID if given ... */
	if (ev->cid >= 0) {
		at_state = at_state_from_cid(cs, ev->cid);
		if (!at_state) {
			gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
					  NULL, 0, NULL);
			return;
		}
	} else {
		/* ... otherwise by the pointer stored in the event itself */
		at_state = ev->at_state;
		if (at_state_invalid(cs, at_state)) {
			dbg(DEBUG_ANY,
			    "event for invalid at_state %p", at_state);
			return;
		}
	}

	dbg(DEBUG_CMD,
	    "connection state %d, event %d", at_state->ConState, ev->type);

	bcs = at_state->bcs;
	sendcid = at_state->cid;

	/* Setting the pointer to the dial array */
	rep = at_state->replystruct;
	IFNULLRET(rep);

	/* discard timeouts whose timer was restarted or stopped since they fired */
	if (ev->type == EV_TIMEOUT) {
		if (ev->parameter != atomic_read(&at_state->timer_index)
		    || !at_state->timer_active) {
			ev->type = RSP_NONE; /* old timeout */
			dbg(DEBUG_ANY, "old timeout");
		} else if (!at_state->waiting)
			dbg(DEBUG_ANY, "timeout occured");
		else
			dbg(DEBUG_ANY, "stopped waiting");
	}

	/* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
	 * or at_state->str_var[STR_XXXX], set it */
	if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
		index = ev->type - RSP_VAR;
		at_state->int_var[index] = ev->parameter;
	} else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
		index = ev->type - RSP_STR;
		kfree(at_state->str_var[index]);
		at_state->str_var[index] = ev->ptr; /* take over ownership */
		ev->ptr = NULL; /* prevent process_events() from deallocating ptr */
	}

	if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
		at_state->getstring = 0;

	/* Search row in dial array which matches modem response and current constate */
	for (;; rep++) {
		rcode = rep->resp_code;
		/* dbg (DEBUG_ANY, "rcode %d", rcode); */
		if (rcode == RSP_LAST) {
			/* found nothing...*/
			warn("%s: rcode=RSP_LAST: resp_code %d in ConState %d!",
			     __func__, ev->type, at_state->ConState);
			return;
		}
		if ((rcode == RSP_ANY || rcode == ev->type)
		    && ((int) at_state->ConState >= rep->min_ConState)
		    && (rep->max_ConState < 0
			|| (int) at_state->ConState <= rep->max_ConState)
		    && (rep->parameter < 0 || rep->parameter == ev->parameter))
			break;
	}

	p_command = rep->command;

	at_state->waiting = 0;
	/* execute up to MAXACT actions listed in the matched row */
	for (curact = 0; curact < MAXACT; ++curact) {
		/* The row tells us what we should do ..
		 */
		do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev);
		if (!at_state)
			break; /* may be freed after disconnect */
	}

	if (at_state) {
		/* Jump to the next con-state regarding the array */
		if (rep->new_ConState >= 0)
			at_state->ConState = rep->new_ConState;

		if (genresp) {
			/* queue a synthetic response instead of sending a command */
			spin_lock_irqsave(&cs->lock, flags);
			at_state->timer_expires = 0; //FIXME
			at_state->timer_active = 0; //FIXME
			spin_unlock_irqrestore(&cs->lock, flags);
			gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
		} else {
			/* Send command to modem if not NULL... */
			if (p_command/*rep->command*/) {
				if (atomic_read(&cs->connected))
					send_command(cs, p_command,
						     sendcid, cs->dle,
						     GFP_ATOMIC);
				else
					gigaset_add_event(cs, at_state,
							  RSP_NODEV,
							  NULL, 0, NULL);
			}

			/* (re)arm or stop the response timeout per table entry */
			spin_lock_irqsave(&cs->lock, flags);
			if (!rep->timeout) {
				at_state->timer_expires = 0;
				at_state->timer_active = 0;
			} else if (rep->timeout > 0) { /* new timeout */
				at_state->timer_expires = rep->timeout * 10;
				at_state->timer_active = 1;
				new_index(&at_state->timer_index,
					  MAX_TIMER_INDEX);
			}
			spin_unlock_irqrestore(&cs->lock, flags);
		}
	}
}
1733
/* Mark an AT command sequence (SEQ_XXXX) as the one in progress and
 * start it by queueing an RSP_INIT event carrying the sequence number.
 * cur_at_seq is set before the event is queued so the state machine
 * already sees the controller as busy when the event is processed. */
static void schedule_sequence(struct cardstate *cs,
			      struct at_state_t *at_state, int sequence)
{
	cs->cur_at_seq = sequence;
	gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
}
1740
/* Scan all pending_commands bitmaps (per-channel and global) and schedule
 * the highest-priority AT command sequence that is due.
 * Called from process_events() once the event queue is drained and
 * commands_pending is set; does nothing while another sequence
 * (cs->cur_at_seq) is still in progress.  At most one sequence is
 * scheduled per call (the function returns right after scheduling). */
static void process_command_flags(struct cardstate *cs)
{
	struct at_state_t *at_state = NULL;
	struct bc_state *bcs;
	int i;
	int sequence;

	IFNULLRET(cs);

	atomic_set(&cs->commands_pending, 0);

	if (cs->cur_at_seq) {
		dbg(DEBUG_CMD, "not searching scheduled commands: busy");
		return;
	}

	dbg(DEBUG_CMD, "searching scheduled commands");

	sequence = SEQ_NONE;

	/* clear pending_commands and hangup channels on shutdown */
	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			at_state = &bcs->at_state;
			at_state->pending_commands &=
				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
			if (at_state->cid > 0)
				at_state->pending_commands |= PC_HUP;
			if (at_state->pending_commands & PC_CID) {
				/* connection not yet up: cancel instead of hanging up */
				at_state->pending_commands |= PC_NOCID;
				at_state->pending_commands &= ~PC_CID;
			}
		}
	}

	/* clear pending_commands and hangup channels on reset */
	if (cs->at_state.pending_commands & PC_INIT) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			at_state = &bcs->at_state;
			at_state->pending_commands &=
				~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
			if (at_state->cid > 0)
				at_state->pending_commands |= PC_HUP;
			if (atomic_read(&cs->mstate) == MS_RECOVER) {
				if (at_state->pending_commands & PC_CID) {
					at_state->pending_commands |= PC_NOCID;
					at_state->pending_commands &= ~PC_CID;
				}
			}
		}
	}

	/* only switch back to unimodem mode, if no commands are pending and
	 * no channels are up */
	if (cs->at_state.pending_commands == PC_UMMODE
	    && !atomic_read(&cs->cidmode)
	    && list_empty(&cs->temp_at_states)
	    && atomic_read(&cs->mode) == M_CID) {
		sequence = SEQ_UMMODE;
		at_state = &cs->at_state;
		for (i = 0; i < cs->channels; ++i) {
			bcs = cs->bcs + i;
			if (bcs->at_state.pending_commands ||
			    bcs->at_state.cid > 0) {
				sequence = SEQ_NONE;
				break;
			}
		}
	}
	/* PC_UMMODE is consumed (or dropped) in any case */
	cs->at_state.pending_commands &= ~PC_UMMODE;
	if (sequence != SEQ_NONE) {
		schedule_sequence(cs, at_state, sequence);
		return;
	}

	/* hangups and dial cancellations have priority over everything else */
	for (i = 0; i < cs->channels; ++i) {
		bcs = cs->bcs + i;
		if (bcs->at_state.pending_commands & PC_HUP) {
			bcs->at_state.pending_commands &= ~PC_HUP;
			if (bcs->at_state.pending_commands & PC_CID) {
				/* not yet dialing: PC_NOCID is sufficient */
				bcs->at_state.pending_commands |= PC_NOCID;
				bcs->at_state.pending_commands &= ~PC_CID;
			} else {
				schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
				return;
			}
		}
		if (bcs->at_state.pending_commands & PC_NOCID) {
			bcs->at_state.pending_commands &= ~PC_NOCID;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
			return;
		} else if (bcs->at_state.pending_commands & PC_DLE0) {
			bcs->at_state.pending_commands &= ~PC_DLE0;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
			return;
		}
	}

	/* hangups on temporary (channel-less) AT states */
	list_for_each_entry(at_state, &cs->temp_at_states, list)
		if (at_state->pending_commands & PC_HUP) {
			at_state->pending_commands &= ~PC_HUP;
			schedule_sequence(cs, at_state, SEQ_HUP);
			return;
		}

	if (cs->at_state.pending_commands & PC_INIT) {
		cs->at_state.pending_commands &= ~PC_INIT;
		cs->dle = 0; //FIXME
		cs->inbuf->inputstate = INS_command;
		//FIXME reset card state (or -> LOCK0)?
		schedule_sequence(cs, &cs->at_state, SEQ_INIT);
		return;
	}
	if (cs->at_state.pending_commands & PC_SHUTDOWN) {
		cs->at_state.pending_commands &= ~PC_SHUTDOWN;
		schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
		return;
	}
	if (cs->at_state.pending_commands & PC_CIDMODE) {
		cs->at_state.pending_commands &= ~PC_CIDMODE;
		/* only acted upon when currently in unimodem mode */
		if (atomic_read(&cs->mode) == M_UNIMODEM) {
#if 0
			cs->retry_count = 2;
#else
			cs->retry_count = 1;
#endif
			schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
			return;
		}
	}

	/* finally, per-channel connection setup commands */
	for (i = 0; i < cs->channels; ++i) {
		bcs = cs->bcs + i;
		if (bcs->at_state.pending_commands & PC_DLE1) {
			bcs->at_state.pending_commands &= ~PC_DLE1;
			cs->curchannel = bcs->channel;
			schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
			return;
		}
		if (bcs->at_state.pending_commands & PC_ACCEPT) {
			bcs->at_state.pending_commands &= ~PC_ACCEPT;
			schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
			return;
		}
		if (bcs->at_state.pending_commands & PC_DIAL) {
			bcs->at_state.pending_commands &= ~PC_DIAL;
			schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
			return;
		}
		if (bcs->at_state.pending_commands & PC_CID) {
			switch (atomic_read(&cs->mode)) {
			case M_UNIMODEM:
				/* need to switch to CID mode first; retry later */
				cs->at_state.pending_commands |= PC_CIDMODE;
				dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
				atomic_set(&cs->commands_pending, 1);
				return;
#ifdef GIG_MAYINITONDIAL
			case M_UNKNOWN:
				schedule_init(cs, MS_INIT);
				return;
#endif
			}
			bcs->at_state.pending_commands &= ~PC_CID;
			cs->curchannel = bcs->channel;
#ifdef GIG_RETRYCID
			cs->retry_count = 2;
#else
			cs->retry_count = 1;
#endif
			schedule_sequence(cs, &cs->at_state, SEQ_CID);
			return;
		}
	}
}
1921
/* Drain the cardstate event ring buffer.
 * Single consumer: producers advance ev_tail, only this function advances
 * ev_head, hence no locking on the consumer side.  Whenever the queue runs
 * empty (or a busy AT sequence has just completed), process_command_flags()
 * gets a chance to schedule further work, which may in turn queue new
 * events.  The loop is bounded by 2 * MAX_EVENTS as a livelock guard. */
static void process_events(struct cardstate *cs)
{
	struct event_t *ev;
	unsigned head, tail;
	int i;
	int check_flags = 0;	/* re-run process_command_flags() before stopping */
	int was_busy;

	/* no locking needed (only one reader) */
	head = atomic_read(&cs->ev_head);

	for (i = 0; i < 2 * MAX_EVENTS; ++i) {
		tail = atomic_read(&cs->ev_tail);
		if (tail == head) {
			/* queue empty: look for scheduled commands, then recheck */
			if (!check_flags && !atomic_read(&cs->commands_pending))
				break;
			check_flags = 0;
			process_command_flags(cs);
			tail = atomic_read(&cs->ev_tail);
			if (tail == head) {
				if (!atomic_read(&cs->commands_pending))
					break;
				continue;
			}
		}

		ev = cs->events + head;
		was_busy = cs->cur_at_seq != SEQ_NONE;
		process_event(cs, ev);
		/* free event payload unless process_event() took it (set ptr NULL) */
		kfree(ev->ptr);
		ev->ptr = NULL;
		if (was_busy && cs->cur_at_seq == SEQ_NONE)
			check_flags = 1;	/* a sequence just finished */

		head = (head + 1) % MAX_EVENTS;
		atomic_set(&cs->ev_head, head);
	}

	if (i == 2 * MAX_EVENTS) {
		err("infinite loop in process_events; aborting.");
	}
}
1964
1965/* tasklet scheduled on any event received from the Gigaset device
1966 * parameter:
1967 * data ISDN controller state structure
1968 */
1969void gigaset_handle_event(unsigned long data)
1970{
1971 struct cardstate *cs = (struct cardstate *) data;
1972
1973 IFNULLRET(cs);
1974 IFNULLRET(cs->inbuf);
1975
1976 /* handle incoming data on control/common channel */
1977 if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) {
1978 dbg(DEBUG_INTR, "processing new data");
1979 cs->ops->handle_input(cs->inbuf);
1980 }
1981
1982 process_events(cs);
1983}
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
new file mode 100644
index 000000000000..729edcdb6dac
--- /dev/null
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -0,0 +1,938 @@
1/* Siemens Gigaset 307x driver
2 * Common header file for all connection variants
3 *
4 * Written by Stefan Eilers <Eilers.Stefan@epost.de>
5 * and Hansjoerg Lipp <hjlipp@web.de>
6 *
7 * Version: $Id: gigaset.h,v 1.97.4.26 2006/02/04 18:28:16 hjlipp Exp $
8 * ===========================================================================
9 */
10
11#ifndef GIGASET_H
12#define GIGASET_H
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/compiler.h>
17#include <linux/types.h>
18#include <asm/atomic.h>
19#include <linux/spinlock.h>
20#include <linux/isdnif.h>
21#include <linux/usb.h>
22#include <linux/skbuff.h>
23#include <linux/netdevice.h>
24#include <linux/ppp_defs.h>
25#include <linux/timer.h>
26#include <linux/interrupt.h>
27#include <linux/tty.h>
28#include <linux/tty_driver.h>
29#include <linux/list.h>
30
31#define GIG_VERSION {0,5,0,0}
32#define GIG_COMPAT {0,4,0,0}
33
34#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
35#define MAX_RESP_SIZE 512 /* Max. size of a response string */
36#define HW_HDR_LEN 2 /* Header size used to store ack info */
37
38#define MAX_EVENTS 64 /* size of event queue */
39
40#define RBUFSIZE 8192
41#define SBUFSIZE 4096 /* sk_buff payload size */
42
43#define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */
44#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
45
46/* compile time options */
47#define GIG_MAJOR 0
48
49#define GIG_MAYINITONDIAL
50#define GIG_RETRYCID
51#define GIG_X75
52
53#define MAX_TIMER_INDEX 1000
54#define MAX_SEQ_INDEX 1000
55
56#define GIG_TICK (HZ / 10)
57
58/* timeout values (unit: 1 sec) */
59#define INIT_TIMEOUT 1
60
61/* timeout values (unit: 0.1 sec) */
62#define RING_TIMEOUT 3 /* for additional parameters to RING */
63#define BAS_TIMEOUT 20 /* for response to Base USB ops */
64#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
65
66#define BAS_RETRY 3 /* max. retries for base USB ops */
67
68#define MAXACT 3
69
70#define IFNULL(a) if (unlikely(!(a)))
71#define IFNULLRET(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return; }
72#define IFNULLRETVAL(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return (b); }
73#define IFNULLCONT(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); continue; }
74#define IFNULLGOTO(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); goto b; }
75
76extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
77
78/* any combination of these can be given with the 'debug=' parameter to insmod, e.g.
79 * 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and DEBUG_INTR. */
/* Debug message categories; any combination may be OR-ed together in
 * gigaset_debuglevel (see the 'debug=' module parameter note above). */
enum debuglevel { /* up to 24 bits (atomic_t) */
	DEBUG_REG	  = 0x0002,	/* serial port I/O register operations */
	DEBUG_OPEN	  = 0x0004,	/* open/close serial port */
	DEBUG_INTR	  = 0x0008,	/* interrupt processing */
	DEBUG_INTR_DUMP	  = 0x0010,	/* Activating hexdump debug output on interrupt
					   requests, not available as run-time option */
	DEBUG_CMD	  = 0x00020,	/* sent/received LL commands */
	DEBUG_STREAM	  = 0x00040,	/* application data stream I/O events */
	DEBUG_STREAM_DUMP = 0x00080,	/* application data stream content */
	DEBUG_LLDATA	  = 0x00100,	/* sent/received LL data */
	DEBUG_INTR_0	  = 0x00200,	/* serial port output interrupt processing */
	DEBUG_DRIVER	  = 0x00400,	/* driver structure */
	DEBUG_HDLC	  = 0x00800,	/* M10x HDLC processing */
	DEBUG_WRITE	  = 0x01000,	/* M105 data write */
	DEBUG_TRANSCMD	  = 0x02000,	/* AT commands and responses */
	DEBUG_MCMD	  = 0x04000,	/* commands that are sent very often */
	DEBUG_INIT	  = 0x08000,	/* (de)allocation+initialization of data structures */
	DEBUG_LOCK	  = 0x10000,	/* semaphore operations */
	DEBUG_OUTPUT	  = 0x20000,	/* output to device */
	DEBUG_ISO	  = 0x40000,	/* isochronous transfers */
	DEBUG_IF	  = 0x80000,	/* character device operations */
	DEBUG_USBREQ	  = 0x100000,	/* USB communication (except payload data) */
	DEBUG_LOCKCMD	  = 0x200000,	/* AT commands and responses when MS_LOCKED */

	DEBUG_ANY	  = 0x3fffff,	/* print message if any of the others is activated */
};
106
107#ifdef CONFIG_GIGASET_DEBUG
108#define DEBUG_DEFAULT (DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
109//#define DEBUG_DEFAULT (DEBUG_LOCK | DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUF_IF | DEBUG_DRIVER | DEBUG_OUTPUT | DEBUG_INTR)
110#else
111#define DEBUG_DEFAULT 0
112#endif
113
114/* redefine syslog macros to prepend module name instead of entire source path */
115/* The space before the comma in ", ##" is needed by gcc 2.95 */
116#undef info
117#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
118
119#undef notice
120#define notice(format, arg...) printk(KERN_NOTICE "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
121
122#undef warn
123#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
124
125#undef err
126#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
127
128#undef dbg
129#ifdef CONFIG_GIGASET_DEBUG
130#define dbg(level, format, arg...) do { if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
131 printk(KERN_DEBUG "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg); } while (0)
132#else
133#define dbg(level, format, arg...) do {} while (0)
134#endif
135
136void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
137 size_t len, const unsigned char *buf, int from_user);
138
139/* connection state */
140#define ZSAU_NONE 0
141#define ZSAU_DISCONNECT_IND 4
142#define ZSAU_OUTGOING_CALL_PROCEEDING 1
143#define ZSAU_PROCEEDING 1
144#define ZSAU_CALL_DELIVERED 2
145#define ZSAU_ACTIVE 3
146#define ZSAU_NULL 5
147#define ZSAU_DISCONNECT_REQ 6
148#define ZSAU_UNKNOWN -1
149
150/* USB control transfer requests */
151#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
152#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
153
154/* int-in-events 3070 */
155#define HD_B1_FLOW_CONTROL 0x80
156#define HD_B2_FLOW_CONTROL 0x81
157#define HD_RECEIVEATDATA_ACK (0x35) // 3070 // att: HD_RECEIVE>>AT<<DATA_ACK
158#define HD_READY_SEND_ATDATA (0x36) // 3070
159#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070
160#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070
161#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070
162#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070
163#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070
164#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070
165#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070
// Power management
167#define HD_SUSPEND_END (0x61) // ISurf USB
168// Configuration
169#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070
170
171/* control requests 3070 */
172#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070
173#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070
174#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070
175#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070
176#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070
177#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070
178#define HD_WRITE_ATMESSAGE (0x12) // 3070
179#define HD_READ_ATMESSAGE (0x13) // 3070
180#define HD_OPEN_ATCHANNEL (0x28) // 3070
181#define HD_CLOSE_ATCHANNEL (0x29) // 3070
182
183/* USB frames for isochronous transfer */
184#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
185#define BAS_NUMFRAMES 8 /* number of frames per URB */
186#define BAS_MAXFRAME 16 /* allocated bytes per frame */
187#define BAS_NORMFRAME 8 /* send size without flow control */
188#define BAS_HIGHFRAME 10 /* " " with positive flow control */
189#define BAS_LOWFRAME 5 /* " " with negative flow control */
190#define BAS_CORRFRAMES 4 /* flow control multiplicator */
191
192#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isochronous input buffer per URB */
193#define BAS_OUTBUFSIZE 4096 /* size of common isochronous output buffer */
194#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isochronous output buffer */
195
196#define BAS_INURBS 3
197#define BAS_OUTURBS 3
198
199/* variable commands in struct bc_state */
200#define AT_ISO 0
201#define AT_DIAL 1
202#define AT_MSN 2
203#define AT_BC 3
204#define AT_PROTO 4
205#define AT_TYPE 5
206#define AT_HLC 6
207#define AT_NUM 7
208
209/* variables in struct at_state_t */
210#define VAR_ZSAU 0
211#define VAR_ZDLE 1
212#define VAR_ZVLS 2
213#define VAR_ZCTP 3
214#define VAR_NUM 4
215
216#define STR_NMBR 0
217#define STR_ZCPN 1
218#define STR_ZCON 2
219#define STR_ZBC 3
220#define STR_ZHLC 4
221#define STR_NUM 5
222
223#define EV_TIMEOUT -105
224#define EV_IF_VER -106
225#define EV_PROC_CIDMODE -107
226#define EV_SHUTDOWN -108
227#define EV_START -110
228#define EV_STOP -111
229#define EV_IF_LOCK -112
230#define EV_PROTO_L2 -113
231#define EV_ACCEPT -114
232#define EV_DIAL -115
233#define EV_HUP -116
234#define EV_BC_OPEN -117
235#define EV_BC_CLOSED -118
236
237/* input state */
238#define INS_command 0x0001
239#define INS_DLE_char 0x0002
240#define INS_byte_stuff 0x0004
241#define INS_have_data 0x0008
242#define INS_skip_frame 0x0010
243#define INS_DLE_command 0x0020
244#define INS_flag_hunt 0x0040
245
246/* channel state */
247#define CHS_D_UP 0x01
248#define CHS_B_UP 0x02
249#define CHS_NOTIFY_LL 0x04
250
251#define ICALL_REJECT 0
252#define ICALL_ACCEPT 1
253#define ICALL_IGNORE 2
254
255/* device state */
256#define MS_UNINITIALIZED 0
257#define MS_INIT 1
258#define MS_LOCKED 2
259#define MS_SHUTDOWN 3
260#define MS_RECOVER 4
261#define MS_READY 5
262
263/* mode */
264#define M_UNKNOWN 0
265#define M_CONFIG 1
266#define M_UNIMODEM 2
267#define M_CID 3
268
269/* start mode */
270#define SM_LOCKED 0
271#define SM_ISDN 1 /* default */
272
273struct gigaset_ops;
274struct gigaset_driver;
275
276struct usb_cardstate;
277struct ser_cardstate;
278struct bas_cardstate;
279
280struct bc_state;
281struct usb_bc_state;
282struct ser_bc_state;
283struct bas_bc_state;
284
/* One row of a modem response table (gigaset_tab_cid_m10x etc.),
 * matched by the event layer against the incoming response code,
 * the current connection state and an optional parameter value. */
struct reply_t {
	int resp_code;		/* RSP_XXXX */
	int min_ConState;	/* <0 => ignore */
	int max_ConState;	/* <0 => ignore */
	int parameter;		/* e.g. ZSAU_XXXX <0: ignore*/
	int new_ConState;	/* <0 => ignore */
	int timeout;		/* >0 => *HZ; <=0 => TOUT_XXXX*/
	int action[MAXACT];	/* ACT_XXXX */
	char *command;		/* NULL==none */
};
295
296extern struct reply_t gigaset_tab_cid_m10x[];
297extern struct reply_t gigaset_tab_nocid_m10x[];
298
/* Input buffer for bytes received on the control/common channel;
 * drained via the hardware driver's handle_input() operation. */
struct inbuf_t {
	unsigned char *rcvbuf;		/* usb-gigaset receive buffer */
	struct bc_state *bcs;
	struct cardstate *cs;
	int inputstate;			/* see INS_XXXX */

	atomic_t head, tail;		/* ring indices into data[] */
	unsigned char data[RBUFSIZE];
};
308
/* isochronous write buffer structure
 * circular buffer with pad area for extraction of complete USB frames
 * - data[read..nextread-1] is valid data already submitted to the USB subsystem
 * - data[nextread..write-1] is valid data yet to be sent
 * - data[write] is the next byte to write to
 *   - in byte-oriented L2 protocols, it is completely free
 *   - in bit-oriented L2 protocols, it may contain a partial byte of valid data
 * - data[write+1..read-1] is free
 * - wbits is the number of valid data bits in data[write], starting at the LSB
 * - writesem is the semaphore for writing to the buffer:
 *   if writesem <= 0, data[write..read-1] is currently being written to
 * - idle contains the byte value to repeat when the end of valid data is
 *   reached; if nextread==write (buffer contains no data to send), either the
 *   BAS_OUTBUFPAD bytes immediately before data[write] (if write>=BAS_OUTBUFPAD)
 *   or those of the pad area (if write<BAS_OUTBUFPAD) are also filled with that
 *   value
 * - optionally, the following statistics on the buffer's usage can be collected:
 *   maxfill: maximum number of bytes occupied
 *   idlefills: number of times a frame of idle bytes is prepared
 *   emptygets: number of times the buffer was empty when a data frame was requested
 *   backtoback: number of times two data packets were entered into the buffer
 *               without intervening idle flags
 *   nakedback: set if no idle flags have been inserted since the last data packet
 */
struct isowbuf_t {
	atomic_t read;			/* start of submitted data */
	atomic_t nextread;		/* start of data yet to be sent */
	atomic_t write;			/* next byte to write to */
	atomic_t writesem;		/* write access token, see above */
	int wbits;			/* valid bits in data[write] */
	unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
	unsigned char idle;		/* filler byte value */
};
342
/* isochronous write URB context structure
 * data to be stored along with the URB and retrieved when it is returned
 * as completed by the USB subsystem
 * - urb: pointer to the URB itself
 * - bcs: pointer to the B Channel control structure
 * - limit: end of write buffer area covered by this URB
 */
struct isow_urbctx_t {
	struct urb *urb;
	struct bc_state *bcs;
	int limit;
};
355
/* AT state structure
 * data associated with the state of an ISDN connection, whether or not
 * it is currently assigned a B channel
 */
struct at_state_t {
	struct list_head list;		/* entry in cs->temp_at_states */
	int waiting;
	int getstring;
	atomic_t timer_index;		/* matched against EV_TIMEOUT parameter
					 * to detect stale timeouts */
	unsigned long timer_expires;
	int timer_active;
	unsigned int ConState;		/* State of connection */
	struct reply_t *replystruct;	/* response table for process_event() */
	int cid;			/* call ID; <= 0 if none assigned */
	int int_var[VAR_NUM];		/* see VAR_XXXX */
	char *str_var[STR_NUM];		/* see STR_XXXX (kmalloc'ed) */
	unsigned pending_commands;	/* see PC_XXXX */
	atomic_t seq_index;

	struct cardstate *cs;
	struct bc_state *bcs;		/* assigned B channel, if any */
};
378
/* Mapping of a literal modem response string to its response code and
 * parser type. */
struct resp_type_t {
	unsigned char *response;
	int resp_code;		/* RSP_XXXX */
	int type;		/* RT_XXXX */
};
384
/* sk_buff with an "empty" flag and an associated semaphore.
 * NOTE(review): no user of this structure is visible in this chunk —
 * purpose assumed from the field names; confirm against the rest of
 * the driver. */
struct prot_skb {
	atomic_t empty;
	struct semaphore *sem;
	struct sk_buff *skb;
};
390
/* Entry of the cardstate event queue, consumed by process_events(). */
struct event_t {
	int type;		/* RSP_XXXX / EV_XXXX */
	void *ptr, *arg;	/* ptr: payload owned by the queue; freed by
				 * process_events() unless process_event()
				 * takes it over (sets it to NULL) */
	int parameter;
	int cid;		/* call ID; <0: use at_state pointer instead */
	struct at_state_t *at_state;
};
398
/* This buffer holds all information about the used B-Channel */
struct bc_state {
	struct sk_buff *tx_skb;		/* Current transfer buffer to modem */
	struct sk_buff_head squeue;	/* B-Channel send Queue */

	/* Variables for debugging .. */
	int corrupted;			/* Counter for corrupted packages */
	int trans_down;			/* Counter of packages (downstream) */
	int trans_up;			/* Counter of packages (upstream) */

	struct at_state_t at_state;	/* connection state of this channel */
	unsigned long rcvbytes;

	__u16 fcs;			/* frame check sequence — presumably the
					 * running checksum of the frame being
					 * received; confirm in HDLC code */
	struct sk_buff *skb;		/* receive buffer under construction */
	int inputstate;			/* see INS_XXXX */

	int channel;

	struct cardstate *cs;

	unsigned chstate;		/* bitmap (CHS_*) */
	int ignore;
	unsigned proto2;		/* Layer 2 protocol (ISDN_PROTO_L2_*) */
	char *commands[AT_NUM];		/* see AT_XXXX */

#ifdef CONFIG_GIGASET_DEBUG
	int emptycount;
#endif
	int busy;
	int use_count;

	/* hardware drivers */
	union {
		struct ser_bc_state *ser;	/* private data of serial hardware driver */
		struct usb_bc_state *usb;	/* private data of usb hardware driver */
		struct bas_bc_state *bas;	/* private data of base hardware driver */
	} hw;
};
438
/* Overall state of one Gigaset device. */
struct cardstate {
	struct gigaset_driver *driver;
	unsigned minor_index;

	const struct gigaset_ops *ops;

	/* Stuff to handle communication */
	//wait_queue_head_t initwait;
	wait_queue_head_t waitqueue;
	int waiting;
	atomic_t mode;			/* see M_XXXX */
	atomic_t mstate;		/* Modem state: see MS_XXXX */
					/* only changed by the event layer */
	int cmd_result;

	int channels;
	struct bc_state *bcs;		/* Array of struct bc_state */

	int onechannel;			/* data and commands transmitted in one stream (M10x) */

	spinlock_t lock;
	struct at_state_t at_state;	/* at_state_t for cid == 0 */
	struct list_head temp_at_states;/* list of temporary "struct at_state_t"s without B channel */

	struct inbuf_t *inbuf;

	struct cmdbuf_t *cmdbuf, *lastcmdbuf;	/* command output queue */
	spinlock_t cmdlock;
	unsigned curlen, cmdbytes;

	unsigned open_count;
	struct tty_struct *tty;
	struct tasklet_struct if_wake_tasklet;
	unsigned control_state;

	unsigned fwver[4];		/* firmware version; valid iff gotfwver == 1 */
	int gotfwver;

	atomic_t running;		/* !=0 if events are handled */
	atomic_t connected;		/* !=0 if hardware is connected */

	atomic_t cidmode;

	int myid;			/* id for communication with LL */
	isdn_if iif;

	struct reply_t *tabnocid;	/* response table outside CID mode */
	struct reply_t *tabcid;		/* response table in CID mode */
	int cs_init;
	int ignoreframes;		/* frames to ignore after setting up the B channel */
	struct semaphore sem;		/* locks this structure: */
					/* connected is not changed, */
					/* hardware_up is not changed, */
					/* MState is not changed to or from MS_LOCKED */

	struct timer_list timer;
	int retry_count;
	int dle;			/* !=0 if modem commands/responses are dle encoded */
	int cur_at_seq;			/* sequence of AT commands being processed */
	int curchannel;			/* channel, those commands are meant for */
	atomic_t commands_pending;	/* flag(s) in xxx.commands_pending have been set */
	struct tasklet_struct event_tasklet;	/* tasklet for serializing AT commands. Scheduled
						 * -> for modem responses (and incoming data for M10x)
						 * -> on timeout
						 * -> after setting bits in xxx.at_state.pending_command
						 *    (e.g. command from LL) */
	struct tasklet_struct write_tasklet;	/* tasklet for serial output
						 * (not used in base driver) */

	/* event queue */
	struct event_t events[MAX_EVENTS];
	atomic_t ev_tail, ev_head;	/* ring indices: producers advance ev_tail,
					 * the single consumer advances ev_head */
	spinlock_t ev_lock;

	/* current modem response */
	unsigned char respdata[MAX_RESP_SIZE];
	unsigned cbytes;

	/* hardware drivers */
	union {
		struct usb_cardstate *usb;	/* private data of USB hardware driver */
		struct ser_cardstate *ser;	/* private data of serial hardware driver */
		struct bas_cardstate *bas;	/* private data of base hardware driver */
	} hw;
};
524
/* Bookkeeping for one hardware driver variant: its minor number range,
 * tty driver and the cardstates it manages. */
struct gigaset_driver {
	struct list_head list;
	spinlock_t lock;		/* locks minor tables and blocked */
	//struct semaphore sem;		/* locks this structure */
	struct tty_driver *tty;
	unsigned have_tty;
	unsigned minor;			/* first minor number */
	unsigned minors;		/* number of minors */
	struct cardstate *cs;		/* presumably one entry per minor — verify */
	unsigned *flags;		/* presumably one entry per minor — verify */
	int blocked;

	const struct gigaset_ops *ops;
	struct module *owner;
};
540
/* Node of the doubly linked command output queue (cs->cmdbuf/lastcmdbuf);
 * buf[] is a trailing payload allocated together with the node. */
struct cmdbuf_t {
	struct cmdbuf_t *next, *prev;
	int len, offset;		/* payload length / bytes already sent */
	struct tasklet_struct *wake_tasklet;	/* scheduled after transmission,
						 * if set — TODO confirm */
	unsigned char buf[0];
};
547
/* Base-station (BAS) driver per-B-channel state: isochronous USB
 * input/output bookkeeping and error statistics. */
struct bas_bc_state {
	/* isochronous output state */
	atomic_t running;
	atomic_t corrbytes;
	spinlock_t isooutlock;
	struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
	struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
	struct isowbuf_t *isooutbuf;
	unsigned numsub;		/* submitted URB counter (for diagnostic messages only) */
	struct tasklet_struct sent_tasklet;

	/* isochronous input state */
	spinlock_t isoinlock;
	struct urb *isoinurbs[BAS_INURBS];
	unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
	struct urb *isoindone;		/* completed isoc read URB */
	int loststatus;			/* status of dropped URB */
	unsigned isoinlost;		/* number of bytes lost */
	/* state of bit unstuffing algorithm (in addition to BC_state.inputstate) */
	unsigned seqlen;		/* number of '1' bits not yet unstuffed */
	unsigned inbyte, inbits;	/* collected bits for next byte */
	/* statistics */
	unsigned goodbytes;		/* bytes correctly received */
	unsigned alignerrs;		/* frames with incomplete byte at end */
	unsigned fcserrs;		/* FCS errors */
	unsigned frameerrs;		/* framing errors */
	unsigned giants;		/* long frames */
	unsigned runts;			/* short frames */
	unsigned aborts;		/* HDLC aborts */
	unsigned shared0s;		/* '0' bits shared between flags */
	unsigned stolen0s;		/* '0' stuff bits also serving as leading flag bits */
	struct tasklet_struct rcvd_tasklet;
};
581
/* Operations implemented by each hardware driver variant (serial, USB,
 * base); called by the common layers as documented per member. */
struct gigaset_ops {
	/* Called from ev-layer.c/interface.c for sending AT commands to the device */
	int (*write_cmd)(struct cardstate *cs,
			 const unsigned char *buf, int len,
			 struct tasklet_struct *wake_tasklet);

	/* Called from interface.c for additional device control */
	int (*write_room)(struct cardstate *cs);
	int (*chars_in_buffer)(struct cardstate *cs);
	int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);

	/* Called from ev-layer.c after setting up connection
	 * Should call gigaset_bchannel_up(), when finished. */
	int (*init_bchannel)(struct bc_state *bcs);

	/* Called from ev-layer.c after hanging up
	 * Should call gigaset_bchannel_down(), when finished. */
	int (*close_bchannel)(struct bc_state *bcs);

	/* Called by gigaset_initcs() for setting up bcs->hw.xxx */
	int (*initbcshw)(struct bc_state *bcs);

	/* Called by gigaset_freecs() for freeing bcs->hw.xxx */
	int (*freebcshw)(struct bc_state *bcs);

	/* Called by gigaset_stop() or gigaset_bchannel_down() for resetting bcs->hw.xxx */
	void (*reinitbcshw)(struct bc_state *bcs);

	/* Called by gigaset_initcs() for setting up cs->hw.xxx */
	int (*initcshw)(struct cardstate *cs);

	/* Called by gigaset_freecs() for freeing cs->hw.xxx */
	void (*freecshw)(struct cardstate *cs);

	///* Called by gigaset_stop() for killing URBs, shutting down the device, ...
	//   hardwareup: ==0: don't try to shut down the device, hardware is really not accessible
	//               !=0: hardware still up */
	//void (*stophw)(struct cardstate *cs, int hardwareup);

	/* Called from common.c/interface.c for additional serial port control */
	int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, unsigned new_state);
	int (*baud_rate)(struct cardstate *cs, unsigned cflag);
	int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);

	/* Called from i4l.c to put an skb into the send-queue. */
	int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);

	/* Called from ev-layer.c to process a block of data
	 * received through the common/control channel. */
	void (*handle_input)(struct inbuf_t *inbuf);

};
634
635/* = Common structures and definitions ======================================= */
636
637/* Parser states for DLE-Event:
638 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
639 * <DLE_FLAG>: 0x10
640 * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+
641 */
642#define DLE_FLAG 0x10
643
644/* ===========================================================================
645 * Functions implemented in asyncdata.c
646 */
647
648/* Called from i4l.c to put an skb into the send-queue.
649 * After sending gigaset_skb_sent() should be called. */
650int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
651
652/* Called from ev-layer.c to process a block of data
653 * received through the common/control channel. */
654void gigaset_m10x_input(struct inbuf_t *inbuf);
655
656/* ===========================================================================
657 * Functions implemented in isocdata.c
658 */
659
660/* Called from i4l.c to put an skb into the send-queue.
661 * After sending gigaset_skb_sent() should be called. */
662int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
663
664/* Called from ev-layer.c to process a block of data
665 * received through the common/control channel. */
666void gigaset_isoc_input(struct inbuf_t *inbuf);
667
668/* Called from bas-gigaset.c to process a block of data
669 * received through the isochronous channel */
670void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs);
671
672/* Called from bas-gigaset.c to put a block of data
673 * into the isochronous output buffer */
674int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
675
676/* Called from bas-gigaset.c to initialize the isochronous output buffer */
677void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
678
679/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
680int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
681
682/* ===========================================================================
683 * Functions implemented in i4l.c/gigaset.h
684 */
685
686/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */
687int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid);
688
689/* Called from xxx-gigaset.c to indicate completion of sending an skb */
690void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
691
692/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
693int gigaset_isdn_icall(struct at_state_t *at_state);
694int gigaset_isdn_setup_accept(struct at_state_t *at_state);
695int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data);
696
697void gigaset_i4l_cmd(struct cardstate *cs, int cmd);
698void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
699
700
701static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
702{
703 isdn_ctrl response;
704
705 /* error -> LL */
706 dbg(DEBUG_CMD, "sending L1ERR");
707 response.driver = bcs->cs->myid;
708 response.command = ISDN_STAT_L1ERR;
709 response.arg = bcs->channel;
710 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
711 bcs->cs->iif.statcallb(&response);
712}
713
714/* ===========================================================================
715 * Functions implemented in ev-layer.c
716 */
717
718/* tasklet called from common.c to process queued events */
719void gigaset_handle_event(unsigned long data);
720
721/* called from isocdata.c / asyncdata.c
722 * when a complete modem response line has been received */
723void gigaset_handle_modem_response(struct cardstate *cs);
724
725/* ===========================================================================
726 * Functions implemented in proc.c
727 */
728
729/* initialize sysfs for device */
730void gigaset_init_dev_sysfs(struct usb_interface *interface);
731void gigaset_free_dev_sysfs(struct usb_interface *interface);
732
733/* ===========================================================================
734 * Functions implemented in common.c/gigaset.h
735 */
736
737void gigaset_bcs_reinit(struct bc_state *bcs);
738void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
739 struct cardstate *cs, int cid);
740int gigaset_get_channel(struct bc_state *bcs);
741void gigaset_free_channel(struct bc_state *bcs);
742int gigaset_get_channels(struct cardstate *cs);
743void gigaset_free_channels(struct cardstate *cs);
744void gigaset_block_channels(struct cardstate *cs);
745
746/* Allocate and initialize driver structure. */
747struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
748 const char *procname,
749 const char *devname,
750 const char *devfsname,
751 const struct gigaset_ops *ops,
752 struct module *owner);
753
754/* Deallocate driver structure. */
755void gigaset_freedriver(struct gigaset_driver *drv);
756void gigaset_debugdrivers(void);
757struct cardstate *gigaset_get_cs_by_minor(unsigned minor);
758struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
759struct cardstate *gigaset_get_cs_by_id(int id);
760
761/* For drivers without fixed assignment device<->cardstate (usb) */
762struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv);
763void gigaset_unassign(struct cardstate *cs);
764void gigaset_blockdriver(struct gigaset_driver *drv);
765
766/* Allocate and initialize card state. Calls hardware dependent gigaset_init[b]cs(). */
767struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
768 int onechannel, int ignoreframes,
769 int cidmode, const char *modulename);
770
771/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
772void gigaset_freecs(struct cardstate *cs);
773
774/* Tell common.c that hardware and driver are ready. */
775int gigaset_start(struct cardstate *cs);
776
777/* Tell common.c that the device is not present any more. */
778void gigaset_stop(struct cardstate *cs);
779
780/* Tell common.c that the driver is being unloaded. */
781void gigaset_shutdown(struct cardstate *cs);
782
783/* Tell common.c that an skb has been sent. */
784void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
785
786/* Append event to the queue.
787 * Returns NULL on failure or a pointer to the event on success.
788 * ptr must be kmalloc()ed (and not be freed by the caller).
789 */
790struct event_t *gigaset_add_event(struct cardstate *cs,
791 struct at_state_t *at_state, int type,
792 void *ptr, int parameter, void *arg);
793
794/* Called on CONFIG1 command from frontend. */
795int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode
796
797/* cs->lock must not be locked */
798static inline void gigaset_schedule_event(struct cardstate *cs)
799{
800 unsigned long flags;
801 spin_lock_irqsave(&cs->lock, flags);
802 if (atomic_read(&cs->running))
803 tasklet_schedule(&cs->event_tasklet);
804 spin_unlock_irqrestore(&cs->lock, flags);
805}
806
807/* Tell common.c that B channel has been closed. */
808/* cs->lock must not be locked */
809static inline void gigaset_bchannel_down(struct bc_state *bcs)
810{
811 gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
812
813 dbg(DEBUG_CMD, "scheduling BC_CLOSED");
814 gigaset_schedule_event(bcs->cs);
815}
816
817/* Tell common.c that B channel has been opened. */
818/* cs->lock must not be locked */
819static inline void gigaset_bchannel_up(struct bc_state *bcs)
820{
821 gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
822
823 dbg(DEBUG_CMD, "scheduling BC_OPEN");
824 gigaset_schedule_event(bcs->cs);
825}
826
827/* handling routines for sk_buff */
828/* ============================= */
829
830/* private version of __skb_put()
831 * append 'len' bytes to the content of 'skb', already knowing that the
832 * existing buffer can accomodate them
833 * returns a pointer to the location where the new bytes should be copied to
834 * This function does not take any locks so it must be called with the
835 * appropriate locks held only.
836 */
837static inline unsigned char *gigaset_skb_put_quick(struct sk_buff *skb,
838 unsigned int len)
839{
840 unsigned char *tmp = skb->tail;
841 /*SKB_LINEAR_ASSERT(skb);*/ /* not needed here */
842 skb->tail += len;
843 skb->len += len;
844 return tmp;
845}
846
847/* pass received skb to LL
848 * Warning: skb must not be accessed anymore!
849 */
850static inline void gigaset_rcv_skb(struct sk_buff *skb,
851 struct cardstate *cs,
852 struct bc_state *bcs)
853{
854 cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
855 bcs->trans_down++;
856}
857
858/* handle reception of corrupted skb
859 * Warning: skb must not be accessed anymore!
860 */
861static inline void gigaset_rcv_error(struct sk_buff *procskb,
862 struct cardstate *cs,
863 struct bc_state *bcs)
864{
865 if (procskb)
866 dev_kfree_skb(procskb);
867
868 if (bcs->ignore)
869 --bcs->ignore;
870 else {
871 ++bcs->corrupted;
872 gigaset_isdn_rcv_err(bcs);
873 }
874}
875
876
877/* bitwise byte inversion table */
878extern __u8 gigaset_invtab[]; /* in common.c */
879
880
/* append received bytes to inbuf
 * inbuf is a ring buffer of RBUFSIZE bytes with atomic head/tail indices;
 * one slot is always kept free so head == tail means "empty".
 * Returns nonzero if at least one byte was stored, 0 otherwise
 * (numbytes == 0 or buffer completely full).
 */
static inline int gigaset_fill_inbuf(struct inbuf_t *inbuf,
                                     const unsigned char *src,
                                     unsigned numbytes)
{
	unsigned n, head, tail, bytesleft;

	dbg(DEBUG_INTR, "received %u bytes", numbytes);

	if (!numbytes)
		return 0;

	bytesleft = numbytes;
	tail = atomic_read(&inbuf->tail);
	head = atomic_read(&inbuf->head);
	dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);

	while (bytesleft) {
		/* length of the contiguous free region starting at tail:
		 * up to one slot before head, or up to the physical end of
		 * the buffer (reserving the last slot if head is at 0) */
		if (head > tail)
			n = head - 1 - tail;
		else if (head == 0)
			n = (RBUFSIZE-1) - tail;
		else
			n = RBUFSIZE - tail;
		if (!n) {
			/* buffer full: remaining input is discarded */
			err("buffer overflow (%u bytes lost)", bytesleft);
			break;
		}
		if (n > bytesleft)
			n = bytesleft;
		memcpy(inbuf->data + tail, src, n);
		bytesleft -= n;
		tail = (tail + n) % RBUFSIZE;	/* wrap around */
		src += n;
	}
	dbg(DEBUG_INTR, "setting tail to %u", tail);
	atomic_set(&inbuf->tail, tail);
	return numbytes != bytesleft;
}
920
921/* ===========================================================================
922 * Functions implemented in interface.c
923 */
924
925/* initialize interface */
926void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
927 const char *devname, const char *devfsname);
928/* release interface */
929void gigaset_if_freedriver(struct gigaset_driver *drv);
930/* add minor */
931void gigaset_if_init(struct cardstate *cs);
932/* remove minor */
933void gigaset_if_free(struct cardstate *cs);
934/* device received data */
935void gigaset_if_receive(struct cardstate *cs,
936 unsigned char *buffer, size_t len);
937
938#endif
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
new file mode 100644
index 000000000000..731a675f21b0
--- /dev/null
+++ b/drivers/isdn/gigaset/i4l.c
@@ -0,0 +1,567 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers (Eilers.Stefan@epost.de),
5 * Hansjoerg Lipp (hjlipp@web.de),
6 * Tilman Schmidt (tilman@imap.cc).
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: i4l.c,v 1.3.2.9 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21
22/* == Handling of I4L IO ============================================================================*/
23
24/* writebuf_from_LL
25 * called by LL to transmit data on an open channel
26 * inserts the buffer data into the send queue and starts the transmission
27 * Note that this operation must not sleep!
28 * When the buffer is processed completely, gigaset_skb_sent() should be called.
29 * parameters:
30 * driverID driver ID as assigned by LL
31 * channel channel number
32 * ack if != 0 LL wants to be notified on completion via statcallb(ISDN_STAT_BSENT)
33 * skb skb containing data to send
34 * return value:
35 * number of accepted bytes
36 * 0 if temporarily unable to accept data (out of buffer space)
37 * <0 on error (eg. -EINVAL)
38 */
39static int writebuf_from_LL(int driverID, int channel, int ack, struct sk_buff *skb)
40{
41 struct cardstate *cs;
42 struct bc_state *bcs;
43 unsigned len;
44 unsigned skblen;
45
46 if (!(cs = gigaset_get_cs_by_id(driverID))) {
47 err("%s: invalid driver ID (%d)", __func__, driverID);
48 return -ENODEV;
49 }
50 if (channel < 0 || channel >= cs->channels) {
51 err("%s: invalid channel ID (%d)", __func__, channel);
52 return -ENODEV;
53 }
54 bcs = &cs->bcs[channel];
55 len = skb->len;
56
57 dbg(DEBUG_LLDATA,
58 "Receiving data from LL (id: %d, channel: %d, ack: %d, size: %d)",
59 driverID, channel, ack, len);
60
61 if (!len) {
62 if (ack)
63 warn("not ACKing empty packet from LL");
64 return 0;
65 }
66 if (len > MAX_BUF_SIZE) {
67 err("%s: packet too large (%d bytes)", __func__, channel);
68 return -EINVAL;
69 }
70
71 if (!atomic_read(&cs->connected))
72 return -ENODEV;
73
74 skblen = ack ? len : 0;
75 skb->head[0] = skblen & 0xff;
76 skb->head[1] = skblen >> 8;
77 dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", len, skblen,
78 (unsigned) skb->head[0], (unsigned) skb->head[1]);
79
80 /* pass to device-specific module */
81 return cs->ops->send_skb(bcs, skb);
82}
83
84void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
85{
86 unsigned len;
87 isdn_ctrl response;
88
89 ++bcs->trans_up;
90
91 if (skb->len)
92 warn("%s: skb->len==%d", __func__, skb->len);
93
94 len = (unsigned char) skb->head[0] |
95 (unsigned) (unsigned char) skb->head[1] << 8;
96 if (len) {
97 dbg(DEBUG_MCMD,
98 "Acknowledge sending to LL (id: %d, channel: %d size: %u)",
99 bcs->cs->myid, bcs->channel, len);
100
101 response.driver = bcs->cs->myid;
102 response.command = ISDN_STAT_BSENT;
103 response.arg = bcs->channel;
104 response.parm.length = len;
105 bcs->cs->iif.statcallb(&response);
106 }
107}
108EXPORT_SYMBOL_GPL(gigaset_skb_sent);
109
/* This function will be called by LL to send commands
 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
 * so don't put too much effort into it.
 * Commands that need device interaction (DIAL, ACCEPTD, HANGUP, SETL2) are
 * translated into events for the event layer; the rest are answered
 * directly or merely logged.
 */
static int command_from_LL(isdn_ctrl *cntrl)
{
	struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver);
	struct bc_state *bcs;
	int retval = 0;
	struct setup_parm *sp;

	gigaset_debugdrivers();

	/* Terminate this call if no device is present. But if the command is
	 * "ISDN_CMD_LOCK" or "ISDN_CMD_UNLOCK" then execute it due to the
	 * fact that they are device independent !
	 */
	//FIXME "remove test for &connected"
	if ((!cs || !atomic_read(&cs->connected))) {
		warn("LL tried to access unknown device with nr. %d",
		     cntrl->driver);
		return -ENODEV;
	}

	switch (cntrl->command) {
	case ISDN_CMD_IOCTL:

		dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver:%d,arg: %ld)",
		    cntrl->driver, cntrl->arg);

		warn("ISDN_CMD_IOCTL is not supported.");
		return -EINVAL;

	case ISDN_CMD_DIAL:
		dbg(DEBUG_ANY, "ISDN_CMD_DIAL (driver: %d, channel: %ld, "
		    "phone: %s,ownmsn: %s, si1: %d, si2: %d)",
		    cntrl->driver, cntrl->arg,
		    cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
		    cntrl->parm.setup.si1, cntrl->parm.setup.si2);

		if (cntrl->arg >= cs->channels) {
			err("invalid channel (%d)", (int) cntrl->arg);
			return -EINVAL;
		}

		bcs = cs->bcs + cntrl->arg;

		/* reserve the B channel for this call */
		if (!gigaset_get_channel(bcs)) {
			err("channel not free");
			return -EBUSY;
		}

		/* copy the setup parameters; ownership of sp passes to the
		 * event layer on successful gigaset_add_event() */
		sp = kmalloc(sizeof *sp, GFP_ATOMIC);
		if (!sp) {
			gigaset_free_channel(bcs);
			err("ISDN_CMD_DIAL: out of memory");
			return -ENOMEM;
		}
		*sp = cntrl->parm.setup;

		if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp,
				       atomic_read(&bcs->at_state.seq_index),
				       NULL)) {
			//FIXME what should we do?
			kfree(sp);
			gigaset_free_channel(bcs);
			return -ENOMEM;
		}

		dbg(DEBUG_CMD, "scheduling DIAL");
		gigaset_schedule_event(cs);
		break;
	case ISDN_CMD_ACCEPTD: //FIXME
		dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");

		if (cntrl->arg >= cs->channels) {
			err("invalid channel (%d)", (int) cntrl->arg);
			return -EINVAL;
		}

		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
				       EV_ACCEPT, NULL, 0, NULL)) {
			//FIXME what should we do?
			return -ENOMEM;
		}

		dbg(DEBUG_CMD, "scheduling ACCEPT");
		gigaset_schedule_event(cs);

		break;
	case ISDN_CMD_ACCEPTB:
		dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
		break;
	case ISDN_CMD_HANGUP:
		dbg(DEBUG_ANY,
		    "ISDN_CMD_HANGUP (channel: %d)", (int) cntrl->arg);

		if (cntrl->arg >= cs->channels) {
			err("ISDN_CMD_HANGUP: invalid channel (%u)",
			    (unsigned) cntrl->arg);
			return -EINVAL;
		}

		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
				       EV_HUP, NULL, 0, NULL)) {
			//FIXME what should we do?
			return -ENOMEM;
		}

		dbg(DEBUG_CMD, "scheduling HUP");
		gigaset_schedule_event(cs);

		break;
	case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME
		dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ");
		break;
	case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME
		dbg(DEBUG_ANY,
		    "ISDN_CMD_SETEAZ (id:%d, channel: %ld, number: %s)",
		    cntrl->driver, cntrl->arg, cntrl->parm.num);
		break;
	case ISDN_CMD_SETL2: /* Set L2 to given protocol */
		/* channel in the low byte of arg, protocol in the rest */
		dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (Channel: %ld, Proto: %lx)",
		    cntrl->arg & 0xff, (cntrl->arg >> 8));

		if ((cntrl->arg & 0xff) >= cs->channels) {
			err("invalid channel (%u)",
			    (unsigned) cntrl->arg & 0xff);
			return -EINVAL;
		}

		if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state,
				       EV_PROTO_L2, NULL, cntrl->arg >> 8,
				       NULL)) {
			//FIXME what should we do?
			return -ENOMEM;
		}

		dbg(DEBUG_CMD, "scheduling PROTO_L2");
		gigaset_schedule_event(cs);
		break;
	case ISDN_CMD_SETL3: /* Set L3 to given protocol */
		/* only transparent L3 is supported; anything else is an error */
		dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (Channel: %ld, Proto: %lx)",
		    cntrl->arg & 0xff, (cntrl->arg >> 8));

		if ((cntrl->arg & 0xff) >= cs->channels) {
			err("invalid channel (%u)",
			    (unsigned) cntrl->arg & 0xff);
			return -EINVAL;
		}

		if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
			err("invalid protocol %lu", cntrl->arg >> 8);
			return -EINVAL;
		}

		break;
	case ISDN_CMD_PROCEED:
		dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME
		break;
	case ISDN_CMD_ALERT:
		dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
		if (cntrl->arg >= cs->channels) {
			err("invalid channel (%d)", (int) cntrl->arg);
			return -EINVAL;
		}
		//bcs = cs->bcs + cntrl->arg;
		//bcs->proto2 = -1;
		// FIXME
		break;
	case ISDN_CMD_REDIR:
		dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME
		break;
	case ISDN_CMD_PROT_IO:
		dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
		break;
	case ISDN_CMD_FAXCMD:
		dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD");
		break;
	case ISDN_CMD_GETL2:
		dbg(DEBUG_ANY, "ISDN_CMD_GETL2");
		break;
	case ISDN_CMD_GETL3:
		dbg(DEBUG_ANY, "ISDN_CMD_GETL3");
		break;
	case ISDN_CMD_GETEAZ:
		dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ");
		break;
	case ISDN_CMD_SETSIL:
		dbg(DEBUG_ANY, "ISDN_CMD_SETSIL");
		break;
	case ISDN_CMD_GETSIL:
		dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
		break;
	default:
		err("unknown command %d from LL",
		    cntrl->command);
		return -EINVAL;
	}

	return retval;
}
314
315void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
316{
317 isdn_ctrl command;
318
319 command.driver = cs->myid;
320 command.command = cmd;
321 command.arg = 0;
322 cs->iif.statcallb(&command);
323}
324
325void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
326{
327 isdn_ctrl command;
328
329 command.driver = bcs->cs->myid;
330 command.command = cmd;
331 command.arg = bcs->channel;
332 bcs->cs->iif.statcallb(&command);
333}
334
/* Build the AT command strings for dialling out (dial string, ^SMSN,
 * ^SBC, ^SBPR, ^SISO, ^SCTP) from the setup parameters passed down by
 * the LL. data points to a struct setup_parm (allocated in
 * command_from_LL(), ISDN_CMD_DIAL case).
 * Returns 0 on success, -EINVAL for an unsupported L2 protocol,
 * -ENOMEM if a command buffer cannot be allocated.
 */
int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
{
	struct bc_state *bcs = at_state->bcs;
	unsigned proto;
	const char *bc;
	size_t length[AT_NUM];	/* buffer size per command; 0 = no command */
	size_t l;
	int i;
	struct setup_parm *sp = data;

	/* map the L2 protocol to the device's ^SBPR parameter value */
	switch (bcs->proto2) {
	case ISDN_PROTO_L2_HDLC:
		proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
		break;
	case ISDN_PROTO_L2_TRANS:
		proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
		break;
	default:
		err("invalid protocol: %u", bcs->proto2);
		return -EINVAL;
	}

	/* choose the bearer capability string from the service indicator */
	switch (sp->si1) {
	case 1: /* audio */
		bc = "9090A3"; /* 3.1 kHz audio, A-law */
		break;
	case 7: /* data */
	default: /* hope the app knows what it is doing */
		bc = "8890"; /* unrestricted digital information */
	}
	//FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC

	/* buffer sizes derived from the snprintf formats used below */
	length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;	/* "D" + number + CR + NUL */
	l = strlen(sp->eazmsn);
	length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0;	/* "^SMSN=" + MSN + CR + NUL */
	length[AT_BC ] = 5 + strlen(bc) + 1 + 1;	/* "^SBC=" + bc + CR + NUL */
	length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
	length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
	length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
	length[AT_HLC ] = 0;

	/* free stale command strings, allocate buffers for the new ones */
	for (i = 0; i < AT_NUM; ++i) {
		kfree(bcs->commands[i]);
		bcs->commands[i] = NULL;
		if (length[i] &&
		    !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
			err("out of memory");
			return -ENOMEM;
		}
	}

	/* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */
	if (sp->phone[0] == '*' && sp->phone[1] == '*') {
		/* internal call: translate ** prefix to CTP value */
		snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
			 "D%s\r", sp->phone+2);
		strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
	} else {
		snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
			 "D%s\r", sp->phone);
		strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
	}

	if (bcs->commands[AT_MSN])
		snprintf(bcs->commands[AT_MSN], length[AT_MSN], "^SMSN=%s\r", sp->eazmsn);
	snprintf(bcs->commands[AT_BC ], length[AT_BC ], "^SBC=%s\r", bc);
	snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
	snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned)bcs->channel + 1);

	return 0;
}
406
407int gigaset_isdn_setup_accept(struct at_state_t *at_state)
408{
409 unsigned proto;
410 size_t length[AT_NUM];
411 int i;
412 struct bc_state *bcs = at_state->bcs;
413
414 switch (bcs->proto2) {
415 case ISDN_PROTO_L2_HDLC:
416 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
417 break;
418 case ISDN_PROTO_L2_TRANS:
419 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
420 break;
421 default:
422 err("invalid protocol: %u", bcs->proto2);
423 return -EINVAL;
424 }
425
426 length[AT_DIAL ] = 0;
427 length[AT_MSN ] = 0;
428 length[AT_BC ] = 0;
429 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
430 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
431 length[AT_TYPE ] = 0;
432 length[AT_HLC ] = 0;
433
434 for (i = 0; i < AT_NUM; ++i) {
435 kfree(bcs->commands[i]);
436 bcs->commands[i] = NULL;
437 if (length[i] &&
438 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
439 err("out of memory");
440 return -ENOMEM;
441 }
442 }
443
444 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
445 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned) bcs->channel + 1);
446
447 return 0;
448}
449
450int gigaset_isdn_icall(struct at_state_t *at_state)
451{
452 struct cardstate *cs = at_state->cs;
453 struct bc_state *bcs = at_state->bcs;
454 isdn_ctrl response;
455 int retval;
456
457 /* fill ICALL structure */
458 response.parm.setup.si1 = 0; /* default: unknown */
459 response.parm.setup.si2 = 0;
460 response.parm.setup.screen = 0; //FIXME how to set these?
461 response.parm.setup.plan = 0;
462 if (!at_state->str_var[STR_ZBC]) {
463 /* no BC (internal call): assume speech, A-law */
464 response.parm.setup.si1 = 1;
465 } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) {
466 /* unrestricted digital information */
467 response.parm.setup.si1 = 7;
468 } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) {
469 /* speech, A-law */
470 response.parm.setup.si1 = 1;
471 } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) {
472 /* 3,1 kHz audio, A-law */
473 response.parm.setup.si1 = 1;
474 response.parm.setup.si2 = 2;
475 } else {
476 warn("RING ignored - unsupported BC %s",
477 at_state->str_var[STR_ZBC]);
478 return ICALL_IGNORE;
479 }
480 if (at_state->str_var[STR_NMBR]) {
481 strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
482 sizeof response.parm.setup.phone - 1);
483 response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
484 } else
485 response.parm.setup.phone[0] = 0;
486 if (at_state->str_var[STR_ZCPN]) {
487 strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
488 sizeof response.parm.setup.eazmsn - 1);
489 response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
490 } else
491 response.parm.setup.eazmsn[0] = 0;
492
493 if (!bcs) {
494 notice("no channel for incoming call");
495 dbg(DEBUG_CMD, "Sending ICALLW");
496 response.command = ISDN_STAT_ICALLW;
497 response.arg = 0; //FIXME
498 } else {
499 dbg(DEBUG_CMD, "Sending ICALL");
500 response.command = ISDN_STAT_ICALL;
501 response.arg = bcs->channel; //FIXME
502 }
503 response.driver = cs->myid;
504 retval = cs->iif.statcallb(&response);
505 dbg(DEBUG_CMD, "Response: %d", retval);
506 switch (retval) {
507 case 0: /* no takers */
508 return ICALL_IGNORE;
509 case 1: /* alerting */
510 bcs->chstate |= CHS_NOTIFY_LL;
511 return ICALL_ACCEPT;
512 case 2: /* reject */
513 return ICALL_REJECT;
514 case 3: /* incomplete */
515 warn("LL requested unsupported feature: Incomplete Number");
516 return ICALL_IGNORE;
517 case 4: /* proceeding */
518 /* Gigaset will send ALERTING anyway.
519 * There doesn't seem to be a way to avoid this.
520 */
521 return ICALL_ACCEPT;
522 case 5: /* deflect */
523 warn("LL requested unsupported feature: Call Deflection");
524 return ICALL_IGNORE;
525 default:
526 err("LL error %d on ICALL", retval);
527 return ICALL_IGNORE;
528 }
529}
530
/* Set Callback function pointer */
/* Register the driver with the isdn4linux subsystem: fill in the isdn_if
 * structure (id, capabilities, callback pointers) and call register_isdn().
 * Returns 1 on success, 0 if register_isdn() fails,
 * -ENOMEM if the composed interface id does not fit into iif->id.
 * NOTE(review): mixing 0/1 results with a negative errno is ambiguous for
 * callers that test for nonzero success - worth unifying.
 */
int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
{
	isdn_if *iif = &cs->iif;

	dbg(DEBUG_ANY, "Register driver capabilities to LL");

	/* compose the interface id "<isdnid>_<minor>"; fail on truncation */
	if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
	    >= sizeof iif->id)
		return -ENOMEM; //FIXME EINVAL/...??

	iif->owner = THIS_MODULE;
	iif->channels = cs->channels;
	iif->maxbufsize = MAX_BUF_SIZE;
	iif->features = ISDN_FEATURE_L2_TRANS |
			ISDN_FEATURE_L2_HDLC |
#ifdef GIG_X75
			ISDN_FEATURE_L2_X75I |
#endif
			ISDN_FEATURE_L3_TRANS |
			ISDN_FEATURE_P_EURO;
	iif->hl_hdrlen = HW_HDR_LEN;	/* Area for storing ack */
	iif->command = command_from_LL;
	iif->writebuf_skb = writebuf_from_LL;
	iif->writecmd = NULL;		/* Don't support isdnctrl */
	iif->readstat = NULL;		/* Don't support isdnctrl */
	iif->rcvcallb_skb = NULL;	/* Will be set by LL */
	iif->statcallb = NULL;		/* Will be set by LL */

	if (!register_isdn(iif))
		return 0;

	/* presumably register_isdn() stored the assigned driver id in
	 * iif->channels - TODO confirm against isdn_common.c */
	cs->myid = iif->channels;	/* Set my device id */
	return 1;
}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
new file mode 100644
index 000000000000..3a81d9c65141
--- /dev/null
+++ b/drivers/isdn/gigaset/interface.c
@@ -0,0 +1,718 @@
1/*
2 * interface to user space for the gigaset driver
3 *
4 * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 * Version: $Id: interface.c,v 1.14.4.15 2006/02/04 18:28:16 hjlipp Exp $
13 * =====================================================================
14 */
15
16#include "gigaset.h"
17#include <linux/gigaset_dev.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20
21/*** our ioctls ***/
22
/* if_lock
 * handle the GIGASET_REDIR ioctl payload:
 *   *arg < 0 : query - writes back 1 if the device is in locked mode, else 0
 *   *arg == 0: unlock, *arg == 1: lock; other values are rejected
 * On unlock of a connected device the serial line is re-initialized
 * (DTR/RTS asserted, 115200 8N) before the state machine event is queued.
 * Blocks until the state machine has processed the EV_IF_LOCK event;
 * on success the command result is written back through *arg.
 * returns 0 on success, -EINVAL/-ENOMEM or a negative cmd_result on failure
 */
static int if_lock(struct cardstate *cs, int *arg)
{
	int cmd = *arg;

	dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);

	if (cmd > 1)
		return -EINVAL;

	if (cmd < 0) {
		/* query only: report whether the device is locked */
		*arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove?
		return 0;
	}

	/* leaving locked mode: restore the line settings the driver
	 * expects before handing control back to the state machine */
	if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED
	    && atomic_read(&cs->connected)) {
		cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
		cs->ops->baud_rate(cs, B115200);
		cs->ops->set_line_ctrl(cs, CS8);
		cs->control_state = TIOCM_DTR|TIOCM_RTS;
	}

	/* queue the lock/unlock event and wait for the state machine
	 * to clear cs->waiting when it is done */
	cs->waiting = 1;
	if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
	                       NULL, cmd, NULL)) {
		cs->waiting = 0;
		return -ENOMEM;
	}

	dbg(DEBUG_CMD, "scheduling IF_LOCK");
	gigaset_schedule_event(cs);

	wait_event(cs->waitqueue, !cs->waiting);

	if (cs->cmd_result >= 0) {
		*arg = cs->cmd_result;
		return 0;
	}

	return cs->cmd_result;
}
64
65static int if_version(struct cardstate *cs, unsigned arg[4])
66{
67 static const unsigned version[4] = GIG_VERSION;
68 static const unsigned compat[4] = GIG_COMPAT;
69 unsigned cmd = arg[0];
70
71 dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
72
73 switch (cmd) {
74 case GIGVER_DRIVER:
75 memcpy(arg, version, sizeof version);
76 return 0;
77 case GIGVER_COMPAT:
78 memcpy(arg, compat, sizeof compat);
79 return 0;
80 case GIGVER_FWBASE:
81 cs->waiting = 1;
82 if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
83 NULL, 0, arg)) {
84 cs->waiting = 0;
85 return -ENOMEM;
86 }
87
88 dbg(DEBUG_CMD, "scheduling IF_VER");
89 gigaset_schedule_event(cs);
90
91 wait_event(cs->waitqueue, !cs->waiting);
92
93 if (cs->cmd_result >= 0)
94 return 0;
95
96 return cs->cmd_result;
97 default:
98 return -EINVAL;
99 }
100}
101
102static int if_config(struct cardstate *cs, int *arg)
103{
104 dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
105
106 if (*arg != 1)
107 return -EINVAL;
108
109 if (atomic_read(&cs->mstate) != MS_LOCKED)
110 return -EBUSY;
111
112 *arg = 0;
113 return gigaset_enterconfigmode(cs);
114}
115
/*** the terminal driver ***/
/* stolen from usbserial and some other tty drivers */

/* forward declarations of the tty operation handlers defined below */
static int if_open(struct tty_struct *tty, struct file *filp);
static void if_close(struct tty_struct *tty, struct file *filp);
static int if_ioctl(struct tty_struct *tty, struct file *file,
                    unsigned int cmd, unsigned long arg);
static int if_write_room(struct tty_struct *tty);
static int if_chars_in_buffer(struct tty_struct *tty);
static void if_throttle(struct tty_struct *tty);
static void if_unthrottle(struct tty_struct *tty);
static void if_set_termios(struct tty_struct *tty, struct termios *old);
static int if_tiocmget(struct tty_struct *tty, struct file *file);
static int if_tiocmset(struct tty_struct *tty, struct file *file,
                       unsigned int set, unsigned int clear);
static int if_write(struct tty_struct *tty,
                    const unsigned char *buf, int count);

/* tty operations table installed by gigaset_if_initdriver() below */
static struct tty_operations if_ops = {
	.open =			if_open,
	.close =		if_close,
	.ioctl =		if_ioctl,
	.write =		if_write,
	.write_room =		if_write_room,
	.chars_in_buffer =	if_chars_in_buffer,
	.set_termios =		if_set_termios,
	.throttle =		if_throttle,
	.unthrottle =		if_unthrottle,
#if 0
	.break_ctl =		serial_break,
#endif
	.tiocmget =		if_tiocmget,
	.tiocmset =		if_tiocmset,
};
150
151static int if_open(struct tty_struct *tty, struct file *filp)
152{
153 struct cardstate *cs;
154 unsigned long flags;
155
156 dbg(DEBUG_IF, "%d+%d: %s()", tty->driver->minor_start, tty->index,
157 __FUNCTION__);
158
159 tty->driver_data = NULL;
160
161 cs = gigaset_get_cs_by_tty(tty);
162 if (!cs)
163 return -ENODEV;
164
165 if (down_interruptible(&cs->sem))
166 return -ERESTARTSYS; // FIXME -EINTR?
167 tty->driver_data = cs;
168
169 ++cs->open_count;
170
171 if (cs->open_count == 1) {
172 spin_lock_irqsave(&cs->lock, flags);
173 cs->tty = tty;
174 spin_unlock_irqrestore(&cs->lock, flags);
175 tty->low_latency = 1; //FIXME test
176 //FIXME
177 }
178
179 up(&cs->sem);
180 return 0;
181}
182
183static void if_close(struct tty_struct *tty, struct file *filp)
184{
185 struct cardstate *cs;
186 unsigned long flags;
187
188 cs = (struct cardstate *) tty->driver_data;
189 if (!cs) {
190 err("cs==NULL in %s", __FUNCTION__);
191 return;
192 }
193
194 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
195
196 down(&cs->sem);
197
198 if (!cs->open_count)
199 warn("%s: device not opened", __FUNCTION__);
200 else {
201 if (!--cs->open_count) {
202 spin_lock_irqsave(&cs->lock, flags);
203 cs->tty = NULL;
204 spin_unlock_irqrestore(&cs->lock, flags);
205 //FIXME
206 }
207 }
208
209 up(&cs->sem);
210}
211
/* if_ioctl
 * tty ioctl handler: dispatches the driver-private GIGASET_* ioctls
 * (redirect/lock, config mode, break characters, version query)
 * returns 0 on success, negative errno on failure,
 * -ENOIOCTLCMD for unknown commands
 */
static int if_ioctl(struct tty_struct *tty, struct file *file,
                    unsigned int cmd, unsigned long arg)
{
	struct cardstate *cs;
	int retval = -ENODEV;
	int int_arg;
	unsigned char buf[6];
	unsigned version[4];

	cs = (struct cardstate *) tty->driver_data;
	if (!cs) {
		err("cs==NULL in %s", __FUNCTION__);
		return -ENODEV;
	}

	dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __FUNCTION__, cmd);

	if (down_interruptible(&cs->sem))
		return -ERESTARTSYS; // FIXME -EINTR?

	if (!cs->open_count)
		warn("%s: device not opened", __FUNCTION__);
	else {
		retval = 0;
		switch (cmd) {
		case GIGASET_REDIR:
			/* get_user/put_user return 0 or -EFAULT, so the
			 * ">= 0" tests effectively mean "no error so far" */
			retval = get_user(int_arg, (int __user *) arg);
			if (retval >= 0)
				retval = if_lock(cs, &int_arg);
			if (retval >= 0)
				retval = put_user(int_arg, (int __user *) arg);
			break;
		case GIGASET_CONFIG:
			retval = get_user(int_arg, (int __user *) arg);
			if (retval >= 0)
				retval = if_config(cs, &int_arg);
			if (retval >= 0)
				retval = put_user(int_arg, (int __user *) arg);
			break;
		case GIGASET_BRKCHARS:
			//FIXME test if MS_LOCKED
			/* NOTE(review): this dumps memory at the raw
			 * userspace pointer before copy_from_user() runs;
			 * presumably the trailing '1' tells
			 * gigaset_dbg_buffer() to treat it as a __user
			 * pointer -- confirm, otherwise move this call
			 * after the copy and dump 'buf' instead */
			gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
			                6, (const unsigned char *) arg, 1);
			if (!atomic_read(&cs->connected)) {
				dbg(DEBUG_ANY, "can't communicate with unplugged device");
				retval = -ENODEV;
				break;
			}
			retval = copy_from_user(&buf,
					(const unsigned char __user *) arg, 6)
				? -EFAULT : 0;
			if (retval >= 0)
				retval = cs->ops->brkchars(cs, buf);
			break;
		case GIGASET_VERSION:
			retval = copy_from_user(version, (unsigned __user *) arg,
					sizeof version) ? -EFAULT : 0;
			if (retval >= 0)
				retval = if_version(cs, version);
			if (retval >= 0)
				retval = copy_to_user((unsigned __user *) arg, version,
					sizeof version)
					? -EFAULT : 0;
			break;
		default:
			dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x",
			    __FUNCTION__, cmd);
			retval = -ENOIOCTLCMD;
		}
	}

	up(&cs->sem);

	return retval;
}
287
288static int if_tiocmget(struct tty_struct *tty, struct file *file)
289{
290 struct cardstate *cs;
291 int retval;
292
293 cs = (struct cardstate *) tty->driver_data;
294 if (!cs) {
295 err("cs==NULL in %s", __FUNCTION__);
296 return -ENODEV;
297 }
298
299 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
300
301 if (down_interruptible(&cs->sem))
302 return -ERESTARTSYS; // FIXME -EINTR?
303
304 // FIXME read from device?
305 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
306
307 up(&cs->sem);
308
309 return retval;
310}
311
312static int if_tiocmset(struct tty_struct *tty, struct file *file,
313 unsigned int set, unsigned int clear)
314{
315 struct cardstate *cs;
316 int retval;
317 unsigned mc;
318
319 cs = (struct cardstate *) tty->driver_data;
320 if (!cs) {
321 err("cs==NULL in %s", __FUNCTION__);
322 return -ENODEV;
323 }
324
325 dbg(DEBUG_IF,
326 "%u: %s(0x%x, 0x%x)", cs->minor_index, __FUNCTION__, set, clear);
327
328 if (down_interruptible(&cs->sem))
329 return -ERESTARTSYS; // FIXME -EINTR?
330
331 if (!atomic_read(&cs->connected)) {
332 dbg(DEBUG_ANY, "can't communicate with unplugged device");
333 retval = -ENODEV;
334 } else {
335 mc = (cs->control_state | set) & ~clear & (TIOCM_RTS|TIOCM_DTR);
336 retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
337 cs->control_state = mc;
338 }
339
340 up(&cs->sem);
341
342 return retval;
343}
344
345static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
346{
347 struct cardstate *cs;
348 int retval = -ENODEV;
349
350 cs = (struct cardstate *) tty->driver_data;
351 if (!cs) {
352 err("cs==NULL in %s", __FUNCTION__);
353 return -ENODEV;
354 }
355
356 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
357
358 if (down_interruptible(&cs->sem))
359 return -ERESTARTSYS; // FIXME -EINTR?
360
361 if (!cs->open_count)
362 warn("%s: device not opened", __FUNCTION__);
363 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
364 warn("can't write to unlocked device");
365 retval = -EBUSY;
366 } else if (!atomic_read(&cs->connected)) {
367 dbg(DEBUG_ANY, "can't write to unplugged device");
368 retval = -EBUSY; //FIXME
369 } else {
370 retval = cs->ops->write_cmd(cs, buf, count,
371 &cs->if_wake_tasklet);
372 }
373
374 up(&cs->sem);
375
376 return retval;
377}
378
379static int if_write_room(struct tty_struct *tty)
380{
381 struct cardstate *cs;
382 int retval = -ENODEV;
383
384 cs = (struct cardstate *) tty->driver_data;
385 if (!cs) {
386 err("cs==NULL in %s", __FUNCTION__);
387 return -ENODEV;
388 }
389
390 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
391
392 if (down_interruptible(&cs->sem))
393 return -ERESTARTSYS; // FIXME -EINTR?
394
395 if (!cs->open_count)
396 warn("%s: device not opened", __FUNCTION__);
397 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
398 warn("can't write to unlocked device");
399 retval = -EBUSY; //FIXME
400 } else if (!atomic_read(&cs->connected)) {
401 dbg(DEBUG_ANY, "can't write to unplugged device");
402 retval = -EBUSY; //FIXME
403 } else
404 retval = cs->ops->write_room(cs);
405
406 up(&cs->sem);
407
408 return retval;
409}
410
411static int if_chars_in_buffer(struct tty_struct *tty)
412{
413 struct cardstate *cs;
414 int retval = -ENODEV;
415
416 cs = (struct cardstate *) tty->driver_data;
417 if (!cs) {
418 err("cs==NULL in %s", __FUNCTION__);
419 return -ENODEV;
420 }
421
422 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
423
424 if (down_interruptible(&cs->sem))
425 return -ERESTARTSYS; // FIXME -EINTR?
426
427 if (!cs->open_count)
428 warn("%s: device not opened", __FUNCTION__);
429 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
430 warn("can't write to unlocked device");
431 retval = -EBUSY;
432 } else if (!atomic_read(&cs->connected)) {
433 dbg(DEBUG_ANY, "can't write to unplugged device");
434 retval = -EBUSY; //FIXME
435 } else
436 retval = cs->ops->chars_in_buffer(cs);
437
438 up(&cs->sem);
439
440 return retval;
441}
442
443static void if_throttle(struct tty_struct *tty)
444{
445 struct cardstate *cs;
446
447 cs = (struct cardstate *) tty->driver_data;
448 if (!cs) {
449 err("cs==NULL in %s", __FUNCTION__);
450 return;
451 }
452
453 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
454
455 down(&cs->sem);
456
457 if (!cs->open_count)
458 warn("%s: device not opened", __FUNCTION__);
459 else {
460 //FIXME
461 }
462
463 up(&cs->sem);
464}
465
466static void if_unthrottle(struct tty_struct *tty)
467{
468 struct cardstate *cs;
469
470 cs = (struct cardstate *) tty->driver_data;
471 if (!cs) {
472 err("cs==NULL in %s", __FUNCTION__);
473 return;
474 }
475
476 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
477
478 down(&cs->sem);
479
480 if (!cs->open_count)
481 warn("%s: device not opened", __FUNCTION__);
482 else {
483 //FIXME
484 }
485
486 up(&cs->sem);
487}
488
/* if_set_termios
 * tty set_termios handler: translate termios changes into modem
 * control (DTR/RTS), baud rate and line control updates on the device
 * (pattern borrowed from mct_u232.c)
 */
static void if_set_termios(struct tty_struct *tty, struct termios *old)
{
	struct cardstate *cs;
	unsigned int iflag;
	unsigned int cflag;
	unsigned int old_cflag;
	unsigned int control_state, new_state;

	cs = (struct cardstate *) tty->driver_data;
	if (!cs) {
		err("cs==NULL in %s", __FUNCTION__);
		return;
	}

	dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);

	down(&cs->sem);

	if (!cs->open_count) {
		warn("%s: device not opened", __FUNCTION__);
		goto out;
	}

	if (!atomic_read(&cs->connected)) {
		dbg(DEBUG_ANY, "can't communicate with unplugged device");
		goto out;
	}

	// stolen from mct_u232.c
	iflag = tty->termios->c_iflag;
	cflag = tty->termios->c_cflag;
	/* no previous termios on the first call: treat as unchanged */
	old_cflag = old ? old->c_cflag : cflag; //FIXME?
	dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", cs->minor_index,
	    iflag, cflag, old_cflag);

	/* get a local copy of the current port settings */
	control_state = cs->control_state;

	/*
	 * Update baud rate.
	 * Do not attempt to cache old rates and skip settings,
	 * disconnects screw such tricks up completely.
	 * Premature optimization is the root of all evil.
	 */

	/* reassert DTR and (maybe) RTS on transition from B0 */
	if ((old_cflag & CBAUD) == B0) {
		new_state = control_state | TIOCM_DTR;
		/* don't set RTS if using hardware flow control */
		if (!(old_cflag & CRTSCTS))
			new_state |= TIOCM_RTS;
		dbg(DEBUG_IF, "%u: from B0 - set DTR%s", cs->minor_index,
		    (new_state & TIOCM_RTS) ? " only" : "/RTS");
		cs->ops->set_modem_ctrl(cs, control_state, new_state);
		control_state = new_state;
	}

	cs->ops->baud_rate(cs, cflag & CBAUD);

	if ((cflag & CBAUD) == B0) {
		/* Drop RTS and DTR */
		dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
		new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
		cs->ops->set_modem_ctrl(cs, control_state, new_state);
		control_state = new_state;
	}

	/*
	 * Update line control register (LCR)
	 */

	cs->ops->set_line_ctrl(cs, cflag);

#if 0
	//FIXME this hangs M101 [ts 2005-03-09]
	//FIXME do we need this?
	/*
	 * Set flow control: well, I do not really now how to handle DTR/RTS.
	 * Just do what we have seen with SniffUSB on Win98.
	 */
	/* Drop DTR/RTS if no flow control otherwise assert */
	dbg(DEBUG_IF, "%u: control_state %x", cs->minor_index, control_state);
	new_state = control_state;
	if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS))
		new_state |= TIOCM_DTR | TIOCM_RTS;
	else
		new_state &= ~(TIOCM_DTR | TIOCM_RTS);
	if (new_state != control_state) {
		dbg(DEBUG_IF, "%u: new_state %x", cs->minor_index, new_state);
		gigaset_set_modem_ctrl(cs, control_state, new_state); // FIXME: mct_u232.c sets the old state here. is this a bug?
		control_state = new_state;
	}
#endif

	/* save off the modified port settings */
	cs->control_state = control_state;

out:
	up(&cs->sem);
}
589
590
591/* wakeup tasklet for the write operation */
592static void if_wake(unsigned long data)
593{
594 struct cardstate *cs = (struct cardstate *) data;
595 struct tty_struct *tty;
596
597 tty = cs->tty;
598 if (!tty)
599 return;
600
601 if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
602 tty->ldisc.write_wakeup) {
603 dbg(DEBUG_IF, "write wakeup call");
604 tty->ldisc.write_wakeup(tty);
605 }
606
607 wake_up_interruptible(&tty->write_wait);
608}
609
610/*** interface to common ***/
611
612void gigaset_if_init(struct cardstate *cs)
613{
614 struct gigaset_driver *drv;
615
616 drv = cs->driver;
617 if (!drv->have_tty)
618 return;
619
620 tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs);
621 tty_register_device(drv->tty, cs->minor_index, NULL);
622}
623
624void gigaset_if_free(struct cardstate *cs)
625{
626 struct gigaset_driver *drv;
627
628 drv = cs->driver;
629 if (!drv->have_tty)
630 return;
631
632 tasklet_disable(&cs->if_wake_tasklet);
633 tasklet_kill(&cs->if_wake_tasklet);
634 tty_unregister_device(drv->tty, cs->minor_index);
635}
636
637void gigaset_if_receive(struct cardstate *cs,
638 unsigned char *buffer, size_t len)
639{
640 unsigned long flags;
641 struct tty_struct *tty;
642
643 spin_lock_irqsave(&cs->lock, flags);
644 if ((tty = cs->tty) == NULL)
645 dbg(DEBUG_ANY, "receive on closed device");
646 else {
647 tty_buffer_request_room(tty, len);
648 tty_insert_flip_string(tty, buffer, len);
649 tty_flip_buffer_push(tty);
650 }
651 spin_unlock_irqrestore(&cs->lock, flags);
652}
653EXPORT_SYMBOL_GPL(gigaset_if_receive);
654
655/* gigaset_if_initdriver
656 * Initialize tty interface.
657 * parameters:
658 * drv Driver
659 * procname Name of the driver (e.g. for /proc/tty/drivers)
660 * devname Name of the device files (prefix without minor number)
661 * devfsname Devfs name of the device files without %d
662 */
663void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
664 const char *devname, const char *devfsname)
665{
666 unsigned minors = drv->minors;
667 int ret;
668 struct tty_driver *tty;
669
670 drv->have_tty = 0;
671
672 if ((drv->tty = alloc_tty_driver(minors)) == NULL)
673 goto enomem;
674 tty = drv->tty;
675
676 tty->magic = TTY_DRIVER_MAGIC,
677 tty->major = GIG_MAJOR,
678 tty->type = TTY_DRIVER_TYPE_SERIAL,
679 tty->subtype = SERIAL_TYPE_NORMAL,
680 tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS,
681
682 tty->driver_name = procname;
683 tty->name = devname;
684 tty->minor_start = drv->minor;
685 tty->num = drv->minors;
686
687 tty->owner = THIS_MODULE;
688 tty->devfs_name = devfsname;
689
690 tty->init_termios = tty_std_termios; //FIXME
691 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME
692 tty_set_operations(tty, &if_ops);
693
694 ret = tty_register_driver(tty);
695 if (ret < 0) {
696 warn("failed to register tty driver (error %d)", ret);
697 goto error;
698 }
699 dbg(DEBUG_IF, "tty driver initialized");
700 drv->have_tty = 1;
701 return;
702
703enomem:
704 warn("could not allocate tty structures");
705error:
706 if (drv->tty)
707 put_tty_driver(drv->tty);
708}
709
710void gigaset_if_freedriver(struct gigaset_driver *drv)
711{
712 if (!drv->have_tty)
713 return;
714
715 drv->have_tty = 0;
716 tty_unregister_driver(drv->tty);
717 put_tty_driver(drv->tty);
718}
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
new file mode 100644
index 000000000000..5744eb91b315
--- /dev/null
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -0,0 +1,1009 @@
1/*
2 * Common data handling layer for bas_gigaset
3 *
4 * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
5 * Hansjoerg Lipp <hjlipp@web.de>.
6 *
7 * =====================================================================
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 * =====================================================================
13 * ToDo: ...
14 * =====================================================================
15 * Version: $Id: isocdata.c,v 1.2.2.5 2005/11/13 23:05:19 hjlipp Exp $
16 * =====================================================================
17 */
18
19#include "gigaset.h"
20#include <linux/crc-ccitt.h>
21
22/* access methods for isowbuf_t */
23/* ============================ */
24
/* initialize buffer structure
 * resets all positions, opens the write semaphore and fills the pad
 * area beyond the regular buffer with the given idle byte
 */
void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
{
	atomic_set(&iwb->read, 0);	/* consumer position */
	atomic_set(&iwb->nextread, 0);	/* next frame start for the consumer */
	atomic_set(&iwb->write, 0);	/* producer position */
	atomic_set(&iwb->writesem, 1);	/* 1 == write "semaphore" free */
	iwb->wbits = 0;			/* no partial byte pending */
	iwb->idle = idle;
	/* pre-fill the pad area so idle frames can be served from it */
	memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
}
37
38/* compute number of bytes which can be appended to buffer
39 * so that there is still room to append a maximum frame of flags
40 */
41static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
42{
43 int read, write, freebytes;
44
45 read = atomic_read(&iwb->read);
46 write = atomic_read(&iwb->write);
47 if ((freebytes = read - write) > 0) {
48 /* no wraparound: need padding space within regular area */
49 return freebytes - BAS_OUTBUFPAD;
50 } else if (read < BAS_OUTBUFPAD) {
51 /* wraparound: can use space up to end of regular area */
52 return BAS_OUTBUFSIZE - write;
53 } else {
54 /* following the wraparound yields more space */
55 return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
56 }
57}
58
59/* compare two offsets within the buffer
60 * The buffer is seen as circular, with the read position as start
61 * returns -1/0/1 if position a </=/> position b without crossing 'read'
62 */
63static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
64{
65 int read;
66 if (a == b)
67 return 0;
68 read = atomic_read(&iwb->read);
69 if (a < b) {
70 if (a < read && read <= b)
71 return +1;
72 else
73 return -1;
74 } else {
75 if (b < read && read <= a)
76 return -1;
77 else
78 return +1;
79 }
80}
81
/* start writing
 * acquire the write semaphore
 * return true if acquired, false if busy
 * The "semaphore" is an atomic counter used as a trylock: decrementing
 * from 1 to 0 acquires it; any other result means it was already held,
 * so the decrement is undone and failure reported.
 */
static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
{
	if (!atomic_dec_and_test(&iwb->writesem)) {
		atomic_inc(&iwb->writesem);	/* undo, someone else holds it */
		dbg(DEBUG_ISO,
		    "%s: couldn't acquire iso write semaphore", __func__);
		return 0;
	}
#ifdef CONFIG_GIGASET_DEBUG
	dbg(DEBUG_ISO,
	    "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
	    __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits);
#endif
	return 1;
}
101
/* finish writing
 * release the write semaphore
 * returns the current write position
 * (an earlier comment also claimed this updates a maximum buffer fill
 * level; the code below does not do that)
 */
static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
{
	int write = atomic_read(&iwb->write);
	atomic_inc(&iwb->writesem);	/* release: counter back to 1 */
	return write;
}
112
/* append bits to buffer without any checks
 * - data contains bits to append, starting at LSB
 * - nbits is number of bits to append (0..24)
 * must be called with the write semaphore held
 * If more than nbits bits are set in data, the extraneous bits are set in the
 * buffer too, but the write position is only advanced by nbits.
 */
static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
{
	int write = atomic_read(&iwb->write);
	/* merge the new bits above the partial bits already stored
	 * in the current (not yet complete) buffer byte */
	data <<= iwb->wbits;
	data |= iwb->data[write];
	nbits += iwb->wbits;
	/* flush complete bytes, wrapping within the regular area */
	while (nbits >= 8) {
		iwb->data[write++] = data & 0xff;
		write %= BAS_OUTBUFSIZE;
		data >>= 8;
		nbits -= 8;
	}
	/* keep the remaining 0..7 bits as the new partial byte */
	iwb->wbits = nbits;
	iwb->data[write] = data & 0xff;
	atomic_set(&iwb->write, write);
}
136
/* put final flag on HDLC bitstream
 * also sets the idle fill byte to the correspondingly shifted flag pattern
 * must be called with the write semaphore held
 */
static inline void isowbuf_putflag(struct isowbuf_t *iwb)
{
	int write;

	/* add two flags, thus reliably covering one byte */
	isowbuf_putbits(iwb, 0x7e7e, 8);
	/* recover the idle flag byte */
	write = atomic_read(&iwb->write);
	iwb->idle = iwb->data[write];
	dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
	/* mask extraneous bits in buffer: only the wbits partial bits
	 * of the current byte are valid payload */
	iwb->data[write] &= (1 << iwb->wbits) - 1;
}
154
/* retrieve a block of bytes for sending
 * The requested number of bytes is provided as a contiguous block.
 * If necessary, the frame is filled to the requested number of bytes
 * with the idle value.
 * returns offset to frame, < 0 on busy or error
 */
int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
{
	int read, write, limit, src, dst;
	unsigned char pbyte;

	read = atomic_read(&iwb->nextread);
	write = atomic_read(&iwb->write);
	if (likely(read == write)) {
		//dbg(DEBUG_STREAM, "%s: send buffer empty", __func__);
		/* return idle frame: served entirely from the pre-filled
		 * pad area (or the bytes immediately before 'read') */
		return read < BAS_OUTBUFPAD ?
			BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
	}

	limit = read + size;
	dbg(DEBUG_STREAM,
	    "%s: read=%d write=%d limit=%d", __func__, read, write, limit);
#ifdef CONFIG_GIGASET_DEBUG
	if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
		err("invalid size %d", size);
		return -EINVAL;
	}
	/* sanity check: the requested frame must not overrun the pad
	 * area or overtake the consumer's read position */
	src = atomic_read(&iwb->read);
	if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
		     (read < src && limit >= src))) {
		err("isoc write buffer frame reservation violated");
		return -EFAULT;
	}
#endif

	if (read < write) {
		/* no wraparound in valid data */
		if (limit >= write) {
			/* not enough data: append idle frame */
			if (!isowbuf_startwrite(iwb))
				return -EBUSY;
			/* write position could have changed */
			if (limit >= (write = atomic_read(&iwb->write))) {
				pbyte = iwb->data[write]; /* save partial byte */
				limit = write + BAS_OUTBUFPAD;
				dbg(DEBUG_STREAM,
				    "%s: filling %d->%d with %02x",
				    __func__, write, limit, iwb->idle);
				if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
					memset(iwb->data + write, iwb->idle,
					       BAS_OUTBUFPAD);
				else {
					/* wraparound, fill entire pad area */
					memset(iwb->data + write, iwb->idle,
					       BAS_OUTBUFSIZE + BAS_OUTBUFPAD
					       - write);
					limit = 0;
				}
				dbg(DEBUG_STREAM, "%s: restoring %02x at %d",
				    __func__, pbyte, limit);
				iwb->data[limit] = pbyte; /* restore partial byte */
				atomic_set(&iwb->write, limit);
			}
			isowbuf_donewrite(iwb);
		}
	} else {
		/* valid data wraparound */
		if (limit >= BAS_OUTBUFSIZE) {
			/* copy wrapped part into pad area so the frame
			 * can still be handed out as one contiguous block */
			src = 0;
			dst = BAS_OUTBUFSIZE;
			while (dst < limit && src < write)
				iwb->data[dst++] = iwb->data[src++];
			if (dst <= limit) {
				/* fill pad area with idle byte */
				memset(iwb->data + dst, iwb->idle,
				       BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
			}
			limit = src;
		}
	}
	atomic_set(&iwb->nextread, limit);
	return read;
}
240
241/* dump_bytes
242 * write hex bytes to syslog for debugging
243 */
244static inline void dump_bytes(enum debuglevel level, const char *tag,
245 unsigned char *bytes, int count)
246{
247#ifdef CONFIG_GIGASET_DEBUG
248 unsigned char c;
249 static char dbgline[3 * 32 + 1];
250 static const char hexdigit[] = "0123456789abcdef";
251 int i = 0;
252 IFNULLRET(tag);
253 IFNULLRET(bytes);
254 while (count-- > 0) {
255 if (i > sizeof(dbgline) - 4) {
256 dbgline[i] = '\0';
257 dbg(level, "%s:%s", tag, dbgline);
258 i = 0;
259 }
260 c = *bytes++;
261 dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
262 i++;
263 dbgline[i++] = hexdigit[(c >> 4) & 0x0f];
264 dbgline[i++] = hexdigit[c & 0x0f];
265 }
266 dbgline[i] = '\0';
267 dbg(level, "%s:%s", tag, dbgline);
268#endif
269}
270
271/*============================================================================*/
272
273/* bytewise HDLC bitstuffing via table lookup
274 * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
275 * index: 256*(number of preceding '1' bits) + (next byte to stuff)
276 * value: bit 9.. 0 = result bits
277 * bit 12..10 = number of trailing '1' bits in result
278 * bit 14..13 = number of bits added by stuffing
279 */
280static u16 stufftab[5 * 256] = {
281// previous 1s = 0:
282 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
283 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
284 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
285 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
286 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
287 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
288 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
289 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
290 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
291 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
292 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
293 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
294 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
295 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
296 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
297 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
298
299// previous 1s = 1:
300 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
301 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
302 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
303 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
304 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
305 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
306 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
307 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
308 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
309 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
310 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
311 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
312 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
313 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
314 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
315 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
316
317// previous 1s = 2:
318 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
319 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
320 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
321 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
322 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
323 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
324 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
325 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
326 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
327 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
328 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
329 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
330 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
331 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
332 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
333 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
334
335// previous 1s = 3:
336 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
337 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
338 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
339 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
340 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
341 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
342 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
343 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
344 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
345 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
346 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
347 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
348 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
349 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
350 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
351 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
352
353// previous 1s = 4:
354 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
355 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
356 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
357 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
358 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
359 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
360 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
361 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
362 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
363 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
364 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
365 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
366 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
367 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
368 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
369 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
370};
371
372/* hdlc_bitstuff_byte
373 * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
374 * parameters:
375 * cin input byte
376 * ones number of trailing '1' bits in result before this step
377 * iwb pointer to output buffer structure (write semaphore must be held)
378 * return value:
379 * number of trailing '1' bits in result after this step
380 */
381
382static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
383 int ones)
384{
385 u16 stuff;
386 int shiftinc, newones;
387
388 /* get stuffing information for input byte
389 * value: bit 9.. 0 = result bits
390 * bit 12..10 = number of trailing '1' bits in result
391 * bit 14..13 = number of bits added by stuffing
392 */
393 stuff = stufftab[256 * ones + cin];
394 shiftinc = (stuff >> 13) & 3;
395 newones = (stuff >> 10) & 7;
396 stuff &= 0x3ff;
397
398 /* append stuffed byte to output stream */
399 isowbuf_putbits(iwb, stuff, 8 + shiftinc);
400 return newones;
401}
402
403/* hdlc_buildframe
404 * Perform HDLC framing with bitstuffing on a byte buffer
405 * The input buffer is regarded as a sequence of bits, starting with the least
406 * significant bit of the first byte and ending with the most significant bit
407 * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
408 * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
409 * '0' bit is inserted after them.
410 * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
411 * are appended to the output buffer starting at the given bit position, which
412 * is assumed to already contain a leading flag.
413 * The output buffer must have sufficient length; count + count/5 + 6 bytes
414 * starting at *out are safe and are verified to be present.
415 * parameters:
416 * in input buffer
417 * count number of bytes in input buffer
418 * iwb pointer to output buffer structure (write semaphore must be held)
419 * return value:
420 * position of end of packet in output buffer on success,
421 * -EAGAIN if write semaphore busy or buffer full
422 */
423
424static inline int hdlc_buildframe(struct isowbuf_t *iwb,
425 unsigned char *in, int count)
426{
427 int ones;
428 u16 fcs;
429 int end;
430 unsigned char c;
431
432 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
433 !isowbuf_startwrite(iwb)) {
434 dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
435 __func__, isowbuf_freebytes(iwb));
436 return -EAGAIN;
437 }
438
439 dump_bytes(DEBUG_STREAM, "snd data", in, count);
440
441 /* bitstuff and checksum input data */
442 fcs = PPP_INITFCS;
443 ones = 0;
444 while (count-- > 0) {
445 c = *in++;
446 ones = hdlc_bitstuff_byte(iwb, c, ones);
447 fcs = crc_ccitt_byte(fcs, c);
448 }
449
450 /* bitstuff and append FCS (complemented, least significant byte first) */
451 fcs ^= 0xffff;
452 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
453 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
454
455 /* put closing flag and repeat byte for flag idle */
456 isowbuf_putflag(iwb);
457 end = isowbuf_donewrite(iwb);
458 dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
459 return end;
460}
461
462/* trans_buildframe
463 * Append a block of 'transparent' data to the output buffer,
464 * inverting the bytes.
465 * The output buffer must have sufficient length; count bytes
466 * starting at *out are safe and are verified to be present.
467 * parameters:
468 * in input buffer
469 * count number of bytes in input buffer
470 * iwb pointer to output buffer structure (write semaphore must be held)
471 * return value:
472 * position of end of packet in output buffer on success,
473 * -EAGAIN if write semaphore busy or buffer full
474 */
475
476static inline int trans_buildframe(struct isowbuf_t *iwb,
477 unsigned char *in, int count)
478{
479 int write;
480 unsigned char c;
481
482 if (unlikely(count <= 0))
483 return atomic_read(&iwb->write); /* better ideas? */
484
485 if (isowbuf_freebytes(iwb) < count ||
486 !isowbuf_startwrite(iwb)) {
487 dbg(DEBUG_ISO, "can't put %d bytes", count);
488 return -EAGAIN;
489 }
490
491 dbg(DEBUG_STREAM, "put %d bytes", count);
492 write = atomic_read(&iwb->write);
493 do {
494 c = gigaset_invtab[*in++];
495 iwb->data[write++] = c;
496 write %= BAS_OUTBUFSIZE;
497 } while (--count > 0);
498 atomic_set(&iwb->write, write);
499 iwb->idle = c;
500
501 return isowbuf_donewrite(iwb);
502}
503
504int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
505{
506 int result;
507
508 switch (bcs->proto2) {
509 case ISDN_PROTO_L2_HDLC:
510 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
511 dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", __func__, len, result);
512 break;
513 default: /* assume transparent */
514 result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
515 dbg(DEBUG_ISO, "%s: %d bytes trans -> %d", __func__, len, result);
516 }
517 return result;
518}
519
520/* hdlc_putbyte
521 * append byte c to current skb of B channel structure *bcs, updating fcs
522 */
523static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
524{
525 bcs->fcs = crc_ccitt_byte(bcs->fcs, c);
526 if (unlikely(bcs->skb == NULL)) {
527 /* skipping */
528 return;
529 }
530 if (unlikely(bcs->skb->len == SBUFSIZE)) {
531 warn("received oversized packet discarded");
532 bcs->hw.bas->giants++;
533 dev_kfree_skb_any(bcs->skb);
534 bcs->skb = NULL;
535 return;
536 }
537 *gigaset_skb_put_quick(bcs->skb, 1) = c;
538}
539
540/* hdlc_flush
541 * drop partial HDLC data packet
542 */
543static inline void hdlc_flush(struct bc_state *bcs)
544{
545 /* clear skb or allocate new if not skipping */
546 if (likely(bcs->skb != NULL))
547 skb_trim(bcs->skb, 0);
548 else if (!bcs->ignore) {
549 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
550 skb_reserve(bcs->skb, HW_HDR_LEN);
551 else
552 err("could not allocate skb");
553 }
554
555 /* reset packet state */
556 bcs->fcs = PPP_INITFCS;
557}
558
559/* hdlc_done
560 * process completed HDLC data packet
561 */
static inline void hdlc_done(struct bc_state *bcs)
{
	struct sk_buff *procskb;

	if (unlikely(bcs->ignore)) {
		/* frame is to be discarded (e.g. during connection setup);
		 * count it down and reset the receive state */
		bcs->ignore--;
		hdlc_flush(bcs);
		return;
	}

	if ((procskb = bcs->skb) == NULL) {
		/* previous error */
		dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
		gigaset_rcv_error(NULL, bcs->cs, bcs);
	} else if (procskb->len < 2) {
		/* too short to even contain the 16 bit FCS */
		notice("received short frame (%d octets)", procskb->len);
		bcs->hw.bas->runts++;
		gigaset_rcv_error(procskb, bcs->cs, bcs);
	} else if (bcs->fcs != PPP_GOODFCS) {
		/* checksum over data+FCS must yield the PPP "good" residue */
		notice("frame check error (0x%04x)", bcs->fcs);
		bcs->hw.bas->fcserrs++;
		gigaset_rcv_error(procskb, bcs->cs, bcs);
	} else {
		/* good frame: strip the trailing FCS by adjusting both the
		 * length and the tail pointer of the skb directly */
		procskb->len -= 2;		/* subtract FCS */
		procskb->tail -= 2;
		dbg(DEBUG_ISO,
		    "%s: good frame (%d octets)", __func__, procskb->len);
		dump_bytes(DEBUG_STREAM,
			   "rcv data", procskb->data, procskb->len);
		bcs->hw.bas->goodbytes += procskb->len;
		gigaset_rcv_skb(procskb, bcs->cs, bcs);
	}

	/* prepare a fresh skb and FCS for the next frame; on allocation
	 * failure bcs->skb stays NULL and later bytes are skipped */
	if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
		skb_reserve(bcs->skb, HW_HDR_LEN);
	else
		err("could not allocate skb");
	bcs->fcs = PPP_INITFCS;
}
601
602/* hdlc_frag
603 * drop HDLC data packet with non-integral last byte
604 */
605static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
606{
607 if (unlikely(bcs->ignore)) {
608 bcs->ignore--;
609 hdlc_flush(bcs);
610 return;
611 }
612
613 notice("received partial byte (%d bits)", inbits);
614 bcs->hw.bas->alignerrs++;
615 gigaset_rcv_error(bcs->skb, bcs->cs, bcs);
616
617 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
618 skb_reserve(bcs->skb, HW_HDR_LEN);
619 else
620 err("could not allocate skb");
621 bcs->fcs = PPP_INITFCS;
622}
623
/* bit counts lookup table for HDLC bit unstuffing
 * index: input byte
 * value: bit 0..3 = number of consecutive '1' bits starting from LSB
 *        bit 4..6 = number of consecutive '1' bits starting from MSB
 *                   (replacing 8 by 7 to make it fit; the algorithm won't care)
 *        bit 7 set if there are 5 or more "interior" consecutive '1' bits
 *              (i.e. a stuffed '0' must be removed inside the byte)
 */
static unsigned char bitcounts[256] = {
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
	0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
	0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
	0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
	0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
	0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
	0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
};
649
650/* hdlc_unpack
651 * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation)
652 * on a sequence of received data bytes (8 bits each, LSB first)
653 * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb
654 * notify of errors via gigaset_rcv_error
655 * tally frames, errors etc. in BC structure counters
656 * parameters:
657 * src received data
658 * count number of received bytes
659 * bcs receiving B channel structure
660 */
static inline void hdlc_unpack(unsigned char *src, unsigned count,
			       struct bc_state *bcs)
{
	struct bas_bc_state *ubc;
	int inputstate;
	unsigned seqlen, inbyte, inbits;

	IFNULLRET(bcs);
	ubc = bcs->hw.bas;
	IFNULLRET(ubc);

	/* load previous state:
	 * inputstate = set of flag bits:
	 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort
	 * - INS_have_data: at least one complete data byte received since last flag
	 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7)
	 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
	 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
	 */
	inputstate = bcs->inputstate;
	seqlen = ubc->seqlen;
	inbyte = ubc->inbyte;
	inbits = ubc->inbits;

	/* bit unstuffing a byte a time
	 * Take your time to understand this; it's straightforward but tedious.
	 * The "bitcounts" lookup table is used to speed up the counting of
	 * leading and trailing '1' bits.
	 */
	while (count--) {
		unsigned char c = *src++;
		unsigned char tabentry = bitcounts[c];
		/* lead1: '1' bits at LSB end; trail1: '1' bits at MSB end,
		 * with bit 3 (value 8) set for interior stuffing cases */
		unsigned lead1 = tabentry & 0x0f;
		unsigned trail1 = (tabentry >> 4) & 0x0f;

		/* total run of '1's continuing from the previous byte */
		seqlen += lead1;

		if (unlikely(inputstate & INS_flag_hunt)) {
			if (c == PPP_FLAG) {
				/* flag-in-one */
				inputstate &= ~(INS_flag_hunt | INS_have_data);
				inbyte = 0;
				inbits = 0;
			} else if (seqlen == 6 && trail1 != 7) {
				/* flag completed & not followed by abort */
				inputstate &= ~(INS_flag_hunt | INS_have_data);
				/* bits after the flag's closing '0' start the
				 * first data byte */
				inbyte = c >> (lead1 + 1);
				inbits = 7 - lead1;
				if (trail1 >= 8) {
					/* interior stuffing: omitting the MSB handles most cases */
					inbits--;
					/* correct the incorrectly handled cases individually */
					switch (c) {
					case 0xbe:
						inbyte = 0x3f;
						break;
					}
				}
			}
			/* else: continue flag-hunting */
		} else if (likely(seqlen < 5 && trail1 < 7)) {
			/* streamlined case: 8 data bits, no stuffing */
			inbyte |= c << inbits;
			hdlc_putbyte(inbyte & 0xff, bcs);
			inputstate |= INS_have_data;
			inbyte >>= 8;
			/* inbits unchanged */
		} else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
				  trail1 + 1 == inbits &&
				  !(inputstate & INS_have_data))) {
			/* streamlined case: flag idle - state unchanged */
		} else if (unlikely(seqlen > 6)) {
			/* abort sequence */
			ubc->aborts++;
			hdlc_flush(bcs);
			inputstate |= INS_flag_hunt;
		} else if (seqlen == 6) {
			/* closing flag, including (6 - lead1) '1's and one '0' from inbits */
			if (inbits > 7 - lead1) {
				/* frame ended on a non-byte boundary */
				hdlc_frag(bcs, inbits + lead1 - 7);
				inputstate &= ~INS_have_data;
			} else {
				if (inbits < 7 - lead1)
					ubc->stolen0s ++;
				if (inputstate & INS_have_data) {
					hdlc_done(bcs);
					inputstate &= ~INS_have_data;
				}
			}

			if (c == PPP_FLAG) {
				/* complete flag, LSB overlaps preceding flag */
				ubc->shared0s ++;
				inbits = 0;
				inbyte = 0;
			} else if (trail1 != 7) {
				/* remaining bits */
				inbyte = c >> (lead1 + 1);
				inbits = 7 - lead1;
				if (trail1 >= 8) {
					/* interior stuffing: omitting the MSB handles most cases */
					inbits--;
					/* correct the incorrectly handled cases individually */
					switch (c) {
					case 0xbe:
						inbyte = 0x3f;
						break;
					}
				}
			} else {
				/* abort sequence follows, skb already empty anyway */
				ubc->aborts++;
				inputstate |= INS_flag_hunt;
			}
		} else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */

			if (c == PPP_FLAG) {
				/* complete flag */
				if (seqlen == 5)
					ubc->stolen0s++;
				if (inbits) {
					/* flag not on a byte boundary */
					hdlc_frag(bcs, inbits);
					inbits = 0;
					inbyte = 0;
				} else if (inputstate & INS_have_data)
					hdlc_done(bcs);
				inputstate &= ~INS_have_data;
			} else if (trail1 == 7) {
				/* abort sequence */
				ubc->aborts++;
				hdlc_flush(bcs);
				inputstate |= INS_flag_hunt;
			} else {
				/* stuffed data */
				if (trail1 < 7) { /* => seqlen == 5 */
					/* stuff bit at position lead1, no interior stuffing */
					unsigned char mask = (1 << lead1) - 1;
					c = (c & mask) | ((c & ~mask) >> 1);
					inbyte |= c << inbits;
					inbits += 7;
				} else if (seqlen < 5) { /* trail1 >= 8 */
					/* interior stuffing: omitting the MSB handles most cases */
					/* correct the incorrectly handled cases individually */
					switch (c) {
					case 0xbe:
						c = 0x7e;
						break;
					}
					inbyte |= c << inbits;
					inbits += 7;
				} else { /* seqlen == 5 && trail1 >= 8 */

					/* stuff bit at lead1 *and* interior stuffing */
					switch (c) { /* unstuff individually */
					case 0x7d:
						c = 0x3f;
						break;
					case 0xbe:
						c = 0x3f;
						break;
					case 0x3e:
						c = 0x1f;
						break;
					case 0x7c:
						c = 0x3e;
						break;
					}
					inbyte |= c << inbits;
					inbits += 6;
				}
				/* emit a completed data byte, keep remainder */
				if (inbits >= 8) {
					inbits -= 8;
					hdlc_putbyte(inbyte & 0xff, bcs);
					inputstate |= INS_have_data;
					inbyte >>= 8;
				}
			}
		}
		/* carry the trailing '1' count into the next byte;
		 * mask off the interior-stuffing marker bit */
		seqlen = trail1 & 7;
	}

	/* save new state */
	bcs->inputstate = inputstate;
	ubc->seqlen = seqlen;
	ubc->inbyte = inbyte;
	ubc->inbits = inbits;
}
848
849/* trans_receive
850 * pass on received USB frame transparently as SKB via gigaset_rcv_skb
851 * invert bytes
852 * tally frames, errors etc. in BC structure counters
853 * parameters:
854 * src received data
855 * count number of received bytes
856 * bcs receiving B channel structure
857 */
static inline void trans_receive(unsigned char *src, unsigned count,
				 struct bc_state *bcs)
{
	struct sk_buff *skb;
	int dobytes;
	unsigned char *dst;

	if (unlikely(bcs->ignore)) {
		/* data is to be discarded (e.g. during connection setup) */
		bcs->ignore--;
		hdlc_flush(bcs);
		return;
	}
	if (unlikely((skb = bcs->skb) == NULL)) {
		/* no receive skb (e.g. a previous allocation failed) */
		bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
		if (!skb) {
			err("could not allocate skb");
			return;
		}
		skb_reserve(skb, HW_HDR_LEN);
	}
	/* NOTE(review): this adds the bytes already accumulated in the skb,
	 * not the newly received count — confirm the intended accounting */
	bcs->hw.bas->goodbytes += skb->len;
	dobytes = TRANSBUFSIZE - skb->len;	/* room left in current skb */
	while (count > 0) {
		/* reserve space for as much as fits, then copy it in,
		 * inverting each byte via the lookup table */
		dst = skb_put(skb, count < dobytes ? count : dobytes);
		while (count > 0 && dobytes > 0) {
			*dst++ = gigaset_invtab[*src++];
			count--;
			dobytes--;
		}
		if (dobytes == 0) {
			/* skb full: pass it on and start a fresh one */
			gigaset_rcv_skb(skb, bcs->cs, bcs);
			bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
			if (!skb) {
				err("could not allocate skb");
				return;
			}
			skb_reserve(bcs->skb, HW_HDR_LEN);
			dobytes = TRANSBUFSIZE;
		}
	}
}
899
900void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs)
901{
902 switch (bcs->proto2) {
903 case ISDN_PROTO_L2_HDLC:
904 hdlc_unpack(src, count, bcs);
905 break;
906 default: /* assume transparent */
907 trans_receive(src, count, bcs);
908 }
909}
910
911/* == data input =========================================================== */
912
913static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
914{
915 struct cardstate *cs = inbuf->cs;
916 unsigned cbytes = cs->cbytes;
917
918 while (numbytes--) {
919 /* copy next character, check for end of line */
920 switch (cs->respdata[cbytes] = *src++) {
921 case '\r':
922 case '\n':
923 /* end of line */
924 dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
925 __func__, cbytes);
926 cs->cbytes = cbytes;
927 gigaset_handle_modem_response(cs);
928 cbytes = 0;
929 break;
930 default:
931 /* advance in line buffer, checking for overflow */
932 if (cbytes < MAX_RESP_SIZE - 1)
933 cbytes++;
934 else
935 warn("response too large");
936 }
937 }
938
939 /* save state */
940 cs->cbytes = cbytes;
941}
942
943
944/* process a block of data received through the control channel
945 */
void gigaset_isoc_input(struct inbuf_t *inbuf)
{
	struct cardstate *cs = inbuf->cs;
	unsigned tail, head, numbytes;
	unsigned char *src;

	head = atomic_read(&inbuf->head);
	while (head != (tail = atomic_read(&inbuf->tail))) {
		dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
		/* if the data wraps around the ring buffer, process only
		 * the contiguous part up to the buffer end this pass */
		if (head > tail)
			tail = RBUFSIZE;
		src = inbuf->data + head;
		numbytes = tail - head;
		dbg(DEBUG_INTR, "processing %u bytes", numbytes);

		if (atomic_read(&cs->mstate) == MS_LOCKED) {
			/* locked mode: pass raw bytes to the tty interface */
			gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
					   numbytes, src, 0);
			gigaset_if_receive(inbuf->cs, src, numbytes);
		} else {
			/* normal mode: parse as AT response lines */
			gigaset_dbg_buffer(DEBUG_CMD, "received response",
					   numbytes, src, 0);
			cmd_loop(src, numbytes, inbuf);
		}

		/* consume the processed bytes; only the head index is
		 * written back, the producer owns the tail */
		head += numbytes;
		if (head == RBUFSIZE)
			head = 0;
		dbg(DEBUG_INTR, "setting head to %u", head);
		atomic_set(&inbuf->head, head);
	}
}
978
979
980/* == data output ========================================================== */
981
982/* gigaset_send_skb
983 * called by common.c to queue an skb for sending
984 * and start transmission if necessary
985 * parameters:
986 * B Channel control structure
987 * skb
988 * return value:
989 * number of bytes accepted for sending
990 * (skb->len if ok, 0 if out of buffer space)
991 * or error code (< 0, eg. -EINVAL)
992 */
int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
{
	int len;

	IFNULLRETVAL(bcs, -EFAULT);
	IFNULLRETVAL(skb, -EFAULT);
	/* read the length before queueing: once on the queue, the skb belongs
	 * to the transmit tasklet and may be consumed/freed at any time */
	len = skb->len;

	skb_queue_tail(&bcs->squeue, skb);
	dbg(DEBUG_ISO,
	    "%s: skb queued, qlen=%d", __func__, skb_queue_len(&bcs->squeue));

	/* tasklet submits URB if necessary */
	tasklet_schedule(&bcs->hw.bas->sent_tasklet);

	return len;	/* ok so far */
}
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
new file mode 100644
index 000000000000..c6915fa2be6c
--- /dev/null
+++ b/drivers/isdn/gigaset/proc.c
@@ -0,0 +1,81 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: proc.c,v 1.5.2.13 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/ctype.h>
22
23static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf)
24{
25 struct usb_interface *intf = to_usb_interface(dev);
26 struct cardstate *cs = usb_get_intfdata(intf);
27 return sprintf(buf, "%d\n", atomic_read(&cs->cidmode)); // FIXME use scnprintf for 13607 bit architectures (if PAGE_SIZE==4096)
28}
29
30static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
31{
32 struct usb_interface *intf = to_usb_interface(dev);
33 struct cardstate *cs = usb_get_intfdata(intf);
34 long int value;
35 char *end;
36
37 value = simple_strtol(buf, &end, 0);
38 while (*end)
39 if (!isspace(*end++))
40 return -EINVAL;
41 if (value < 0 || value > 1)
42 return -EINVAL;
43
44 if (down_interruptible(&cs->sem))
45 return -ERESTARTSYS; // FIXME -EINTR?
46
47 cs->waiting = 1;
48 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
49 NULL, value, NULL)) {
50 cs->waiting = 0;
51 up(&cs->sem);
52 return -ENOMEM;
53 }
54
55 dbg(DEBUG_CMD, "scheduling PROC_CIDMODE");
56 gigaset_schedule_event(cs);
57
58 wait_event(cs->waitqueue, !cs->waiting);
59
60 up(&cs->sem);
61
62 return count;
63}
64
65static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
66
/* free sysfs for device */
void gigaset_free_dev_sysfs(struct usb_interface *interface)
{
	dbg(DEBUG_INIT, "removing sysfs entries");
	/* remove the "cidmode" attribute created at device setup */
	device_remove_file(&interface->dev, &dev_attr_cidmode);
}
EXPORT_SYMBOL_GPL(gigaset_free_dev_sysfs);
74
75/* initialize sysfs for device */
76void gigaset_init_dev_sysfs(struct usb_interface *interface)
77{
78 dbg(DEBUG_INIT, "setting up sysfs");
79 device_create_file(&interface->dev, &dev_attr_cidmode);
80}
81EXPORT_SYMBOL_GPL(gigaset_init_dev_sysfs);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
new file mode 100644
index 000000000000..323fc7349dec
--- /dev/null
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -0,0 +1,1008 @@
1/*
2 * USB driver for Gigaset 307x directly or using M105 Data.
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>
5 * and Hansjoerg Lipp <hjlipp@web.de>.
6 *
7 * This driver was derived from the USB skeleton driver by
8 * Greg Kroah-Hartman <greg@kroah.com>
9 *
10 * =====================================================================
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of
14 * the License, or (at your option) any later version.
15 * =====================================================================
16 * ToDo: ...
17 * =====================================================================
18 * Version: $Id: usb-gigaset.c,v 1.85.4.18 2006/02/04 18:28:16 hjlipp Exp $
19 * =====================================================================
20 */
21
22#include "gigaset.h"
23
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/usb.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30
31/* Version Information */
32#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
33#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
34
35/* Module parameters */
36
37static int startmode = SM_ISDN;
38static int cidmode = 1;
39
40module_param(startmode, int, S_IRUGO);
41module_param(cidmode, int, S_IRUGO);
42MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
43MODULE_PARM_DESC(cidmode, "Call-ID mode");
44
45#define GIGASET_MINORS 1
46#define GIGASET_MINOR 8
47#define GIGASET_MODULENAME "usb_gigaset"
48#define GIGASET_DEVFSNAME "gig/usb/"
49#define GIGASET_DEVNAME "ttyGU"
50
51#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256
52
53/* Values for the Gigaset M105 Data */
54#define USB_M105_VENDOR_ID 0x0681
55#define USB_M105_PRODUCT_ID 0x0009
56
57/* table of devices that work with this driver */
58static struct usb_device_id gigaset_table [] = {
59 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
60 { } /* Terminating entry */
61};
62
63MODULE_DEVICE_TABLE(usb, gigaset_table);
64
65/* Get a minor range for your devices from the usb maintainer */
66#define USB_SKEL_MINOR_BASE 200
67
68
69/*
70 * Control requests (empty fields: 00)
71 *
72 * RT|RQ|VALUE|INDEX|LEN |DATA
73 * In:
74 * C1 08 01
75 * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
76 * C1 0F ll ll
77 * Get device information/status (llll: 0x200 and 0x40 seen).
78 * Real size: I only saw MIN(llll,0x64).
79 * Contents: seems to be always the same...
80 * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
81 * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
82 * rest: ?
83 * Out:
84 * 41 11
85 * Initialize/reset device ?
86 * 41 00 xx 00
87 * ? (xx=00 or 01; 01 on start, 00 on close)
88 * 41 07 vv mm
89 * Set/clear flags vv=value, mm=mask (see RQ 08)
90 * 41 12 xx
91 * Used before the following configuration requests are issued
92 * (with xx=0x0f). I've seen other values<0xf, though.
93 * 41 01 xx xx
94 * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
95 * 41 03 ps bb
96 * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
97 * [ 0x30: m, 0x40: s ]
98 * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
99 * bb: bits/byte (seen 7 and 8)
100 * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
101 * ??
102 * Initialization: 01, 40, 00, 00
103 * Open device: 00 40, 00, 00
104 * yy and zz seem to be equal, either 0x00 or 0x0a
105 * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
106 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
107 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
108 * xx is usually 0x00 but was 0x7e before starting data transfer
109 * in unimodem mode. So, this might be an array of characters that need
 * special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
111 *
112 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
113 * flags per packet.
114 */
115
116static int gigaset_probe(struct usb_interface *interface,
117 const struct usb_device_id *id);
118static void gigaset_disconnect(struct usb_interface *interface);
119
120static struct gigaset_driver *driver = NULL;
121static struct cardstate *cardstate = NULL;
122
123/* usb specific object needed to register this driver with the usb subsystem */
124static struct usb_driver gigaset_usb_driver = {
125 .name = GIGASET_MODULENAME,
126 .probe = gigaset_probe,
127 .disconnect = gigaset_disconnect,
128 .id_table = gigaset_table,
129};
130
131struct usb_cardstate {
132 struct usb_device *udev; /* save off the usb device pointer */
133 struct usb_interface *interface; /* the interface for this device */
134 atomic_t busy; /* bulk output in progress */
135
136 /* Output buffer for commands (M105: and data)*/
137 unsigned char *bulk_out_buffer; /* the buffer to send data */
138 int bulk_out_size; /* the size of the send buffer */
139 __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
140 struct urb *bulk_out_urb; /* the urb used to transmit data */
141
142 /* Input buffer for command responses (M105: and data)*/
143 int rcvbuf_size; /* the size of the receive buffer */
144 struct urb *read_urb; /* the urb used to receive data */
145 __u8 int_in_endpointAddr; /* the address of the bulk in endpoint */
146
147 char bchars[6]; /* req. 0x19 */
148};
149
150struct usb_bc_state {};
151
/* Translate TTY modem-control bits into the device's flag layout:
 * TIOCM_DTR maps to bit 0, TIOCM_RTS to bit 1; all other bits drop out. */
static inline unsigned tiocm_to_gigaset(unsigned state)
{
	unsigned flags = 0;

	if (state & TIOCM_DTR)
		flags |= 0x01;
	if (state & TIOCM_RTS)
		flags |= 0x02;
	return flags;
}
156
157#ifdef CONFIG_GIGASET_UNDOCREQ
/* WARNING: EXPERIMENTAL! */
/* Set the modem control lines (DTR/RTS) via the undocumented control
 * request 0x07: new flag values in the low byte, a change mask
 * (old XOR new) in the high byte. Synchronous USB I/O — must not be
 * called from interrupt/BH context.
 * Returns 0 on success or the negative usb_control_msg() error. */
static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
                                  unsigned new_state)
{
	unsigned mask, val;
	int r;

	/* only bits that actually change are masked in */
	mask = tiocm_to_gigaset(old_state ^ new_state);
	val = tiocm_to_gigaset(new_state);

	dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
	r = usb_control_msg(cs->hw.usb->udev,
	                    usb_sndctrlpipe(cs->hw.usb->udev, 0), 7, 0x41,
	                    (val & 0xff) | ((mask & 0xff) << 8), 0,
	                    NULL, 0, 2000 /*timeout??*/); // don't use this in an interrupt/BH
	if (r < 0)
		return r;
	//..
	return 0;
}
178
/* Send the 16-bit value @val to the device using undocumented control
 * request @req. The request is preceded by request 0x12 (always sent
 * with value 0x0f before configuration requests; exact purpose unknown)
 * and followed by request 0x19, which re-sends the six stored "break
 * characters" (cs->hw.usb->bchars).
 * Returns 0 on success, otherwise the first negative error seen. */
static int set_value(struct cardstate *cs, u8 req, u16 val)
{
	int r, r2;

	dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val);
	r = usb_control_msg(cs->hw.usb->udev,
	                    usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x12, 0x41,
	                    0xf /*?*/, 0,
	                    NULL, 0, 2000 /*?*/); /* no idea, what this does */
	if (r < 0) {
		err("error %d on request 0x12", -r);
		return r;
	}

	r = usb_control_msg(cs->hw.usb->udev,
	                    usb_sndctrlpipe(cs->hw.usb->udev, 0), req, 0x41,
	                    val, 0,
	                    NULL, 0, 2000 /*?*/);
	if (r < 0)
		err("error %d on request 0x%02x", -r, (unsigned)req);

	/* the trailing 0x19 request is sent even if @req failed */
	r2 = usb_control_msg(cs->hw.usb->udev,
	                     usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
	                     0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
	if (r2 < 0)
		err("error %d on request 0x19", -r2);

	return r < 0 ? r : (r2 < 0 ? r2 : 0);
}
208
/* WARNING: HIGHLY EXPERIMENTAL! */
// don't use this in an interrupt/BH
/* Set the device's baud rate from the CBAUD bits of termios flags
 * @cflag. The divisor sent with request 0x01 is trunc(0x383fff/rate)+1
 * (see the protocol notes above). Unsupported rates fall back to 9600.
 * Returns 0 on success or a negative error from set_value(). */
static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
{
	u16 val;
	u32 rate;

	cflag &= CBAUD;

	switch (cflag) {
	//FIXME more values?
	case B300: rate = 300; break;
	case B600: rate = 600; break;
	case B1200: rate = 1200; break;
	case B2400: rate = 2400; break;
	case B4800: rate = 4800; break;
	case B9600: rate = 9600; break;
	case B19200: rate = 19200; break;
	case B38400: rate = 38400; break;
	case B57600: rate = 57600; break;
	case B115200: rate = 115200; break;
	default:
		rate = 9600;
		err("unsupported baudrate request 0x%x,"
		    " using default of B9600", cflag);
	}

	val = 0x383fff / rate + 1;

	return set_value(cs, 1, val);
}
240
241/* WARNING: HIGHLY EXPERIMENTAL! */
242// don't use this in an interrupt/BH
243static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
244{
245 u16 val = 0;
246
247 /* set the parity */
248 if (cflag & PARENB)
249 val |= (cflag & PARODD) ? 0x10 : 0x20;
250
251 /* set the number of data bits */
252 switch (cflag & CSIZE) {
253 case CS5:
254 val |= 5 << 8; break;
255 case CS6:
256 val |= 6 << 8; break;
257 case CS7:
258 val |= 7 << 8; break;
259 case CS8:
260 val |= 8 << 8; break;
261 default:
262 err("CSIZE was not CS5-CS8, using default of 8");
263 val |= 8 << 8;
264 break;
265 }
266
267 /* set the number of stop bits */
268 if (cflag & CSTOPB) {
269 if ((cflag & CSIZE) == CS5)
270 val |= 1; /* 1.5 stop bits */ //FIXME is this okay?
271 else
272 val |= 2; /* 2 stop bits */
273 }
274
275 return set_value(cs, 3, val);
276}
277
#else
/* Stubs used when CONFIG_GIGASET_UNDOCREQ is not set: the undocumented
 * vendor control requests are never issued, so modem-control, line
 * settings and baud rate changes are reported as unsupported. */
static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
                                  unsigned new_state)
{
	return -EINVAL;
}

static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
{
	return -EINVAL;
}

static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
{
	return -EINVAL;
}
#endif
295
296
297 /*================================================================================================================*/
/* Open a B-channel. The M10x has no per-channel hardware setup, so the
 * channel is reported up immediately. Always returns 0. */
static int gigaset_init_bchannel(struct bc_state *bcs)
{
	/* nothing to do for M10x */
	gigaset_bchannel_up(bcs);
	return 0;
}
304
/* Close a B-channel. No per-channel hardware teardown is needed for the
 * M10x; the channel is reported down immediately. Always returns 0. */
static int gigaset_close_bchannel(struct bc_state *bcs)
{
	/* nothing to do for M10x */
	gigaset_bchannel_down(bcs);
	return 0;
}
311
312//void send_ack_to_LL(void *data);
313static int write_modem(struct cardstate *cs);
314static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb);
315
316
/* Handling of the send queue (tasklet function, cs->write_tasklet).
 * If an skb is currently being transmitted, continue it by calling
 * write_modem(); otherwise drain queued command buffers via send_cb()
 * first, then dequeue the next skb. Scheduled from the bulk-write
 * completion callback or when the LL submits new data.
 */
static void gigaset_modem_fill(unsigned long data)
{
	struct cardstate *cs = (struct cardstate *) data;
	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
	struct cmdbuf_t *cb;
	unsigned long flags;
	int again;

	dbg(DEBUG_OUTPUT, "modem_fill");

	/* a bulk URB is still in flight; the completion callback will
	 * reschedule this tasklet when it finishes */
	if (atomic_read(&cs->hw.usb->busy)) {
		dbg(DEBUG_OUTPUT, "modem_fill: busy");
		return;
	}

	do {
		again = 0;
		if (!bcs->tx_skb) { /* no skb is being sent */
			spin_lock_irqsave(&cs->cmdlock, flags);
			cb = cs->cmdbuf;
			spin_unlock_irqrestore(&cs->cmdlock, flags);
			if (cb) { /* commands to send? */
				dbg(DEBUG_OUTPUT, "modem_fill: cb");
				if (send_cb(cs, cb) < 0) {
					dbg(DEBUG_OUTPUT,
					    "modem_fill: send_cb failed");
					again = 1; /* no callback will be called! */
				}
			} else { /* skbs to send? */
				bcs->tx_skb = skb_dequeue(&bcs->squeue);
				if (bcs->tx_skb)
					dbg(DEBUG_INTR,
					    "Dequeued skb (Adr: %lx)!",
					    (unsigned long) bcs->tx_skb);
			}
		}

		if (bcs->tx_skb) {
			dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
			if (write_modem(cs) < 0) {
				dbg(DEBUG_OUTPUT,
				    "modem_fill: write_modem failed");
				// FIXME should we tell the LL?
				again = 1; /* no callback will be called! */
			}
		}
	} while (again);
}
370
/**
 * gigaset_read_int_callback
 *
 * Completion handler for the interrupt-in URB: hands received data to
 * the common input buffer (after skipping the device's leading 0x00
 * byte) and schedules the event tasklet, then resubmits the URB unless
 * it was killed. Roughly the counterpart of the receive interrupt
 * service routine in the serial variant of this driver.
 */
static void gigaset_read_int_callback(struct urb *urb, struct pt_regs *regs)
{
	int resubmit = 0;
	int r;
	struct cardstate *cs;
	unsigned numbytes;
	unsigned char *src;
	struct inbuf_t *inbuf;

	IFNULLRET(urb);
	inbuf = (struct inbuf_t *) urb->context;
	IFNULLRET(inbuf);
	cs = inbuf->cs;
	IFNULLGOTO(cs, exit);
	IFNULLGOTO(cardstate, exit);

	if (!atomic_read(&cs->connected)) {
		err("%s: disconnected", __func__);
		goto exit;
	}

	if (!urb->status) {
		numbytes = urb->actual_length;

		if (numbytes) {
			/* each block is expected to start with a 0x00 byte */
			src = inbuf->rcvbuf;
			if (unlikely(*src))
				warn("%s: There was no leading 0, but 0x%02x!",
				     __func__, (unsigned) *src);
			++src; /* skip leading 0x00 */
			--numbytes;
			if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
				dbg(DEBUG_INTR, "%s-->BH", __func__);
				gigaset_schedule_event(inbuf->cs);
			}
		} else
			dbg(DEBUG_INTR, "Received zero block length");
		resubmit = 1;
	} else {
		/* The urb might have been killed. */
		dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d",
		    __func__, urb->status);
		if (urb->status != -ENOENT) /* not killed */
			resubmit = 1;
	}
exit:
	if (resubmit) {
		r = usb_submit_urb(urb, SLAB_ATOMIC);
		if (r)
			err("error %d when resubmitting urb.", -r);
	}
}
432
433
/* Completion handler for bulk-out transfers (commands and B-channel
 * data). Clears the busy flag and reschedules the write tasklet
 * (gigaset_modem_fill) so any remaining queued data gets sent.
 */
static void gigaset_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
{
	struct cardstate *cs = (struct cardstate *) urb->context;

	IFNULLRET(cs);
#ifdef CONFIG_GIGASET_DEBUG
	if (!atomic_read(&cs->connected)) {
		err("%s:not connected", __func__);
		return;
	}
#endif
	if (urb->status)
		err("bulk transfer failed (status %d)", -urb->status);
		/* That's all we can do here; communication problems are
		 * handled by timeouts or network protocols. */

	atomic_set(&cs->hw.usb->busy, 0);
	tasklet_schedule(&cs->write_tasklet);
}
457
/* Send the next chunk of the command queue to the device.
 * Fully transmitted buffers are unlinked from the queue (under
 * cs->cmdlock), their wake tasklet is scheduled and they are freed;
 * for the first buffer with remaining data a bulk-out URB of up to
 * bulk_out_size bytes is submitted and ucs->busy is set.
 * Returns 0 once a URB was submitted successfully, otherwise the last
 * submission error (-ENOENT if the queue was already empty). */
static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
{
	struct cmdbuf_t *tcb;
	unsigned long flags;
	int count;
	int status = -ENOENT; // FIXME
	struct usb_cardstate *ucs = cs->hw.usb;

	do {
		if (!cb->len) { /* buffer fully sent: unlink and free it */
			tcb = cb;

			spin_lock_irqsave(&cs->cmdlock, flags);
			cs->cmdbytes -= cs->curlen;
			dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
			    cs->curlen, cs->cmdbytes);
			cs->cmdbuf = cb = cb->next;
			if (cb) {
				cb->prev = NULL;
				cs->curlen = cb->len;
			} else {
				cs->lastcmdbuf = NULL;
				cs->curlen = 0;
			}
			spin_unlock_irqrestore(&cs->cmdlock, flags);

			if (tcb->wake_tasklet)
				tasklet_schedule(tcb->wake_tasklet);
			kfree(tcb);
		}
		if (cb) {
			count = min(cb->len, ucs->bulk_out_size);
			usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
			                  usb_sndbulkpipe(ucs->udev,
			                                  ucs->bulk_out_endpointAddr & 0x0f),
			                  cb->buf + cb->offset, count,
			                  gigaset_write_bulk_callback, cs);

			cb->offset += count;
			cb->len -= count;
			atomic_set(&ucs->busy, 1);
			dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);

			status = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
			if (status) {
				atomic_set(&ucs->busy, 0);
				err("could not submit urb (error %d).",
				    -status);
				cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */
			}
		}
	} while (cb && status); /* on error, continue with next command //FIXME: is that OK? */

	return status;
}
513
/* Queue a command string for transmission to the modem.
 * Allocates a cmdbuf_t, copies @buf into it, links it at the tail of
 * the command queue (under cs->cmdlock) and kicks the write tasklet.
 * @wake_tasklet, if non-NULL, is scheduled once the buffer has been
 * fully sent (see send_cb).
 * Returns @len on success, 0 if len <= 0, -ENODEV if the device is
 * gone, -ENOMEM if the buffer cannot be allocated. */
static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
                             int len, struct tasklet_struct *wake_tasklet)
{
	struct cmdbuf_t *cb;
	unsigned long flags;

	gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
	                     DEBUG_TRANSCMD : DEBUG_LOCKCMD,
	                   "CMD Transmit", len, buf, 0);

	if (!atomic_read(&cs->connected)) {
		err("%s: not connected", __func__);
		return -ENODEV;
	}

	if (len <= 0)
		return 0; /* nothing to do */

	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
		err("%s: out of memory", __func__);
		return -ENOMEM;
	}

	memcpy(cb->buf, buf, len);
	cb->len = len;
	cb->offset = 0;
	cb->next = NULL;
	cb->wake_tasklet = wake_tasklet;

	/* append the new buffer at the tail of the command queue */
	spin_lock_irqsave(&cs->cmdlock, flags);
	cb->prev = cs->lastcmdbuf;
	if (cs->lastcmdbuf)
		cs->lastcmdbuf->next = cb;
	else {
		cs->cmdbuf = cb;
		cs->curlen = len;
	}
	cs->cmdbytes += len;
	cs->lastcmdbuf = cb;
	spin_unlock_irqrestore(&cs->cmdlock, flags);

	tasklet_schedule(&cs->write_tasklet);
	return len;
}
560
561static int gigaset_write_room(struct cardstate *cs)
562{
563 unsigned long flags;
564 unsigned bytes;
565
566 spin_lock_irqsave(&cs->cmdlock, flags);
567 bytes = cs->cmdbytes;
568 spin_unlock_irqrestore(&cs->cmdlock, flags);
569
570 return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
571}
572
573static int gigaset_chars_in_buffer(struct cardstate *cs)
574{
575 return cs->cmdbytes;
576}
577
578static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
579{
580#ifdef CONFIG_GIGASET_UNDOCREQ
581 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf, 0);
582 memcpy(cs->hw.usb->bchars, buf, 6);
583 return usb_control_msg(cs->hw.usb->udev,
584 usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
585 0, 0, &buf, 6, 2000);
586#else
587 return -EINVAL;
588#endif
589}
590
591static int gigaset_freebcshw(struct bc_state *bcs)
592{
593 if (!bcs->hw.usb)
594 return 0;
595 //FIXME
596 kfree(bcs->hw.usb);
597 return 1;
598}
599
600/* Initialize the b-channel structure */
601static int gigaset_initbcshw(struct bc_state *bcs)
602{
603 bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL);
604 if (!bcs->hw.usb)
605 return 0;
606
607 //bcs->hw.usb->trans_flg = READY_TO_TRNSMIT; /* B-Channel ready to transmit */
608 return 1;
609}
610
/* Reset the USB-specific B-channel state for reuse.
 * Intentionally empty: struct usb_bc_state has no members. */
static void gigaset_reinitbcshw(struct bc_state *bcs)
{
}
614
/* Free the USB-specific part of the cardstate. The write tasklet is
 * killed first so gigaset_modem_fill cannot run against freed state. */
static void gigaset_freecshw(struct cardstate *cs)
{
	//FIXME
	tasklet_kill(&cs->write_tasklet);
	kfree(cs->hw.usb);
}
621
622static int gigaset_initcshw(struct cardstate *cs)
623{
624 struct usb_cardstate *ucs;
625
626 cs->hw.usb = ucs =
627 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
628 if (!ucs)
629 return 0;
630
631 ucs->bchars[0] = 0;
632 ucs->bchars[1] = 0;
633 ucs->bchars[2] = 0;
634 ucs->bchars[3] = 0;
635 ucs->bchars[4] = 0x11;
636 ucs->bchars[5] = 0x13;
637 ucs->bulk_out_buffer = NULL;
638 ucs->bulk_out_urb = NULL;
639 //ucs->urb_cmd_out = NULL;
640 ucs->read_urb = NULL;
641 tasklet_init(&cs->write_tasklet,
642 &gigaset_modem_fill, (unsigned long) cs);
643
644 return 1;
645}
646
/* Push data from the current tx_skb to the modem.
 * Copies up to one bulk buffer's worth of skb data into the bulk-out
 * buffer and submits a bulk-out URB. Since only one transfer may be in
 * flight on the endpoint, exclusive use is ensured via the atomic
 * ucs->busy flag, cleared again in gigaset_write_bulk_callback.
 * (The historical comment referred to a "trans_flg"; the code uses
 * ucs->busy for this.)
 * Returns 0 on successful submission, a negative errno otherwise; the
 * skb is freed once fully consumed or on error. */
static int write_modem(struct cardstate *cs)
{
	int ret;
	int count;
	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
	struct usb_cardstate *ucs = cs->hw.usb;

	IFNULLRETVAL(bcs->tx_skb, -EINVAL);

	dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len);

	ret = -ENODEV;
	IFNULLGOTO(ucs->bulk_out_buffer, error);
	IFNULLGOTO(ucs->bulk_out_urb, error);
	ret = 0;

	if (!bcs->tx_skb->len) {
		dev_kfree_skb_any(bcs->tx_skb);
		bcs->tx_skb = NULL;
		return -EINVAL;
	}

	/* Copy data to bulk out buffer and // FIXME copying not necessary
	 * transmit data
	 */
	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
	memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
	skb_pull(bcs->tx_skb, count);

	usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
	                  usb_sndbulkpipe(ucs->udev,
	                                  ucs->bulk_out_endpointAddr & 0x0f),
	                  ucs->bulk_out_buffer, count,
	                  gigaset_write_bulk_callback, cs);
	atomic_set(&ucs->busy, 1);
	dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);

	ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
	if (ret) {
		err("could not submit urb (error %d).", -ret);
		atomic_set(&ucs->busy, 0);
	}
	if (!bcs->tx_skb->len) {
		/* skb sent completely */
		gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0?

		dbg(DEBUG_INTR,
		    "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb);
		dev_kfree_skb_any(bcs->tx_skb);
		bcs->tx_skb = NULL;
	}

	return ret;
error:
	dev_kfree_skb_any(bcs->tx_skb);
	bcs->tx_skb = NULL;
	return ret;

}
713
714static int gigaset_probe(struct usb_interface *interface,
715 const struct usb_device_id *id)
716{
717 int retval;
718 struct usb_device *udev = interface_to_usbdev(interface);
719 unsigned int ifnum;
720 struct usb_host_interface *hostif;
721 struct cardstate *cs = NULL;
722 struct usb_cardstate *ucs = NULL;
723 //struct usb_interface_descriptor *iface_desc;
724 struct usb_endpoint_descriptor *endpoint;
725 //isdn_ctrl command;
726 int buffer_size;
727 int alt;
728 //unsigned long flags;
729
730 info("%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
731 __func__, le16_to_cpu(udev->descriptor.idVendor),
732 le16_to_cpu(udev->descriptor.idProduct));
733
734 retval = -ENODEV; //FIXME
735
736 /* See if the device offered us matches what we can accept */
737 if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) ||
738 (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID)))
739 return -ENODEV;
740
741 /* this starts to become ascii art... */
742 hostif = interface->cur_altsetting;
743 alt = hostif->desc.bAlternateSetting;
744 ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
745
746 if (alt != 0 || ifnum != 0) {
747 warn("ifnum %d, alt %d", ifnum, alt);
748 return -ENODEV;
749 }
750
751 /* Reject application specific intefaces
752 *
753 */
754 if (hostif->desc.bInterfaceClass != 255) {
755 info("%s: Device matched, but iface_desc[%d]->bInterfaceClass==%d !",
756 __func__, ifnum, hostif->desc.bInterfaceClass);
757 return -ENODEV;
758 }
759
760 info("%s: Device matched ... !", __func__);
761
762 cs = gigaset_getunassignedcs(driver);
763 if (!cs) {
764 warn("No free cardstate!");
765 return -ENODEV;
766 }
767 ucs = cs->hw.usb;
768
769#if 0
770 if (usb_set_configuration(udev, udev->config[0].desc.bConfigurationValue) < 0) {
771 warn("set_configuration failed");
772 goto error;
773 }
774
775
776 if (usb_set_interface(udev, ifnum/*==0*/, alt/*==0*/) < 0) {
777 warn("usb_set_interface failed, device %d interface %d altsetting %d",
778 udev->devnum, ifnum, alt);
779 goto error;
780 }
781#endif
782
783 /* set up the endpoint information */
784 /* check out the endpoints */
785 /* We will get 2 endpoints: One for sending commands to the device (bulk out) and one to
786 * poll messages from the device(int in).
787 * Therefore we will have an almost similiar situation as with our serial port handler.
788 * If an connection will be established, we will have to create data in/out pipes
789 * dynamically...
790 */
791
792 endpoint = &hostif->endpoint[0].desc;
793
794 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
795 ucs->bulk_out_size = buffer_size;
796 ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress;
797 ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
798 if (!ucs->bulk_out_buffer) {
799 err("Couldn't allocate bulk_out_buffer");
800 retval = -ENOMEM;
801 goto error;
802 }
803
804 ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
805 if (!ucs->bulk_out_urb) {
806 err("Couldn't allocate bulk_out_buffer");
807 retval = -ENOMEM;
808 goto error;
809 }
810
811 endpoint = &hostif->endpoint[1].desc;
812
813 atomic_set(&ucs->busy, 0);
814 ucs->udev = udev;
815 ucs->interface = interface;
816
817 ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
818 if (!ucs->read_urb) {
819 err("No free urbs available");
820 retval = -ENOMEM;
821 goto error;
822 }
823 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
824 ucs->rcvbuf_size = buffer_size;
825 ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
826 cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
827 if (!cs->inbuf[0].rcvbuf) {
828 err("Couldn't allocate rcvbuf");
829 retval = -ENOMEM;
830 goto error;
831 }
832 /* Fill the interrupt urb and send it to the core */
833 usb_fill_int_urb(ucs->read_urb, udev,
834 usb_rcvintpipe(udev,
835 endpoint->bEndpointAddress & 0x0f),
836 cs->inbuf[0].rcvbuf, buffer_size,
837 gigaset_read_int_callback,
838 cs->inbuf + 0, endpoint->bInterval);
839
840 retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
841 if (retval) {
842 err("Could not submit URB!");
843 goto error;
844 }
845
846 /* tell common part that the device is ready */
847 if (startmode == SM_LOCKED)
848 atomic_set(&cs->mstate, MS_LOCKED);
849 if (!gigaset_start(cs)) {
850 tasklet_kill(&cs->write_tasklet);
851 retval = -ENODEV; //FIXME
852 goto error;
853 }
854
855 /* save address of controller structure */
856 usb_set_intfdata(interface, cs);
857
858 /* set up device sysfs */
859 gigaset_init_dev_sysfs(interface);
860 return 0;
861
862error:
863 if (ucs->read_urb)
864 usb_kill_urb(ucs->read_urb);
865 kfree(ucs->bulk_out_buffer);
866 if (ucs->bulk_out_urb != NULL)
867 usb_free_urb(ucs->bulk_out_urb);
868 kfree(cs->inbuf[0].rcvbuf);
869 if (ucs->read_urb != NULL)
870 usb_free_urb(ucs->read_urb);
871 ucs->read_urb = ucs->bulk_out_urb = NULL;
872 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
873 gigaset_unassign(cs);
874 return retval;
875}
876
/**
 * gigaset_disconnect
 *
 * USB disconnect callback: detach the device from the common code and
 * release all USB resources in roughly the reverse order of
 * gigaset_probe().
 */
static void gigaset_disconnect(struct usb_interface *interface)
{
	struct cardstate *cs;
	struct usb_cardstate *ucs;

	cs = usb_get_intfdata(interface);

	/* clear device sysfs */
	gigaset_free_dev_sysfs(interface);

	usb_set_intfdata(interface, NULL);
	ucs = cs->hw.usb;
	/* stop the receive path before shutting down the common state */
	usb_kill_urb(ucs->read_urb);

	gigaset_stop(cs);

	tasklet_kill(&cs->write_tasklet);

	usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if necessary */

	kfree(ucs->bulk_out_buffer);
	if (ucs->bulk_out_urb != NULL)
		usb_free_urb(ucs->bulk_out_urb);
	kfree(cs->inbuf[0].rcvbuf);
	if (ucs->read_urb != NULL)
		usb_free_urb(ucs->read_urb);
	ucs->read_urb = ucs->bulk_out_urb = NULL;
	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;

	/* return the cardstate to the pool of unassigned devices */
	gigaset_unassign(cs);
}
915
/* Device-operations table handed to the gigaset common module.
 * NOTE(review): positional initializer — the entry order must match the
 * member order of struct gigaset_ops in gigaset.h exactly. */
static struct gigaset_ops ops = {
	gigaset_write_cmd,
	gigaset_write_room,
	gigaset_chars_in_buffer,
	gigaset_brkchars,
	gigaset_init_bchannel,
	gigaset_close_bchannel,
	gigaset_initbcshw,
	gigaset_freebcshw,
	gigaset_reinitbcshw,
	gigaset_initcshw,
	gigaset_freecshw,
	gigaset_set_modem_ctrl,
	gigaset_baud_rate,
	gigaset_set_line_ctrl,
	gigaset_m10x_send_skb,
	gigaset_m10x_input,
};
934
935/**
936 * usb_gigaset_init
937 * This function is called while kernel-module is loaded
938 */
939static int __init usb_gigaset_init(void)
940{
941 int result;
942
943 /* allocate memory for our driver state and intialize it */
944 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
945 GIGASET_MODULENAME, GIGASET_DEVNAME,
946 GIGASET_DEVFSNAME, &ops,
947 THIS_MODULE)) == NULL)
948 goto error;
949
950 /* allocate memory for our device state and intialize it */
951 cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
952 if (!cardstate)
953 goto error;
954
955 /* register this driver with the USB subsystem */
956 result = usb_register(&gigaset_usb_driver);
957 if (result < 0) {
958 err("usb_gigaset: usb_register failed (error %d)",
959 -result);
960 goto error;
961 }
962
963 info(DRIVER_AUTHOR);
964 info(DRIVER_DESC);
965 return 0;
966
967error: if (cardstate)
968 gigaset_freecs(cardstate);
969 cardstate = NULL;
970 if (driver)
971 gigaset_freedriver(driver);
972 driver = NULL;
973 return -1;
974}
975
976
/**
 * usb_gigaset_exit
 *
 * Module exit: shut the device down and unregister from the USB core,
 * in an order that guarantees no callbacks run against freed state.
 */
static void __exit usb_gigaset_exit(void)
{
	gigaset_blockdriver(driver); /* => probe will fail
	                              * => no gigaset_start any more
	                              */

	gigaset_shutdown(cardstate);
	/* from now on, no isdn callback should be possible */

	/* deregister this driver with the USB subsystem */
	usb_deregister(&gigaset_usb_driver);
	/* this will call the disconnect-callback */
	/* from now on, no disconnect/probe callback should be running */

	gigaset_freecs(cardstate);
	cardstate = NULL;
	gigaset_freedriver(driver);
	driver = NULL;
}
1000
1001
1002module_init(usb_gigaset_init);
1003module_exit(usb_gigaset_exit);
1004
1005MODULE_AUTHOR(DRIVER_AUTHOR);
1006MODULE_DESCRIPTION(DRIVER_DESC);
1007
1008MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index 296d6a6f749f..3b431723c7cb 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -437,9 +437,7 @@ static inline unsigned int t1_get_slice(unsigned int base,
437#endif 437#endif
438 dp += i; 438 dp += i;
439 i = 0; 439 i = 0;
440 if (i == 0) 440 break;
441 break;
442 /* fall through */
443 default: 441 default:
444 *dp++ = b1_get_byte(base); 442 *dp++ = b1_get_byte(base);
445 i--; 443 i--;
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index dc7ef957e897..dbcca287ee2c 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -387,8 +387,7 @@ static void hdlc_fill_fifo(struct fritz_bcs *bcs)
387 387
388 DBG(0x40, "hdlc_fill_fifo"); 388 DBG(0x40, "hdlc_fill_fifo");
389 389
390 if (skb->len == 0) 390 BUG_ON(skb->len == 0);
391 BUG();
392 391
393 bcs->ctrl.sr.cmd &= ~HDLC_CMD_XME; 392 bcs->ctrl.sr.cmd &= ~HDLC_CMD_XME;
394 if (bcs->tx_skb->len > bcs->fifo_size) { 393 if (bcs->tx_skb->len > bcs->fifo_size) {
@@ -630,9 +629,7 @@ static void fritz_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
630 629
631 switch (pr) { 630 switch (pr) {
632 case PH_DATA | REQUEST: 631 case PH_DATA | REQUEST:
633 if (bcs->tx_skb) 632 BUG_ON(bcs->tx_skb);
634 BUG();
635
636 bcs->tx_skb = skb; 633 bcs->tx_skb = skb;
637 DBG_SKB(1, skb); 634 DBG_SKB(1, skb);
638 hdlc_fill_fifo(bcs); 635 hdlc_fill_fifo(bcs);
diff --git a/drivers/isdn/hisax/hisax_isac.c b/drivers/isdn/hisax/hisax_isac.c
index f4972f6c1f5d..81eac344bb03 100644
--- a/drivers/isdn/hisax/hisax_isac.c
+++ b/drivers/isdn/hisax/hisax_isac.c
@@ -476,12 +476,10 @@ static void isac_fill_fifo(struct isac *isac)
476 unsigned char cmd; 476 unsigned char cmd;
477 u_char *ptr; 477 u_char *ptr;
478 478
479 if (!isac->tx_skb) 479 BUG_ON(!isac->tx_skb);
480 BUG();
481 480
482 count = isac->tx_skb->len; 481 count = isac->tx_skb->len;
483 if (count <= 0) 482 BUG_ON(count <= 0);
484 BUG();
485 483
486 DBG(DBG_IRQ, "count %d", count); 484 DBG(DBG_IRQ, "count %d", count);
487 485
@@ -859,8 +857,7 @@ void isac_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
859 dev_kfree_skb(skb); 857 dev_kfree_skb(skb);
860 break; 858 break;
861 } 859 }
862 if (isac->tx_skb) 860 BUG_ON(isac->tx_skb);
863 BUG();
864 861
865 isac->tx_skb = skb; 862 isac->tx_skb = skb;
866 isac_fill_fifo(isac); 863 isac_fill_fifo(isac);
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 657817a591fe..22fd5db18d48 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -356,9 +356,7 @@ void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
356 356
357 switch (pr) { 357 switch (pr) {
358 case PH_DATA | REQUEST: 358 case PH_DATA | REQUEST:
359 if (bcs->b_out.tx_skb) 359 BUG_ON(bcs->b_out.tx_skb);
360 BUG();
361
362 bcs->b_out.tx_skb = skb; 360 bcs->b_out.tx_skb = skb;
363 break; 361 break;
364 case PH_ACTIVATE | REQUEST: 362 case PH_ACTIVATE | REQUEST:
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index 941f7022ada1..493dc94992e5 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -596,9 +596,7 @@ void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
596 break; 596 break;
597 case PH_DATA | REQUEST: 597 case PH_DATA | REQUEST:
598 DBG(2, "PH_DATA REQUEST len %d", skb->len); 598 DBG(2, "PH_DATA REQUEST len %d", skb->len);
599 if (adapter->d_out.tx_skb) 599 BUG_ON(adapter->d_out.tx_skb);
600 BUG();
601
602 adapter->d_out.tx_skb = skb; 600 adapter->d_out.tx_skb = skb;
603 FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL); 601 FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL);
604 break; 602 break;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 1789b607f090..a4f7288a1fc8 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -139,3 +139,4 @@ source "drivers/isdn/hysdn/Kconfig"
139 139
140endmenu 140endmenu
141 141
142source "drivers/isdn/gigaset/Kconfig"
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index b9fed8a3bcc6..a0927d1b7a0c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -974,8 +974,7 @@ void isdn_ppp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buf
974 int slot; 974 int slot;
975 int proto; 975 int proto;
976 976
977 if (net_dev->local->master) 977 BUG_ON(net_dev->local->master); // we're called with the master device always
978 BUG(); // we're called with the master device always
979 978
980 slot = lp->ppp_slot; 979 slot = lp->ppp_slot;
981 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { 980 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
@@ -2527,8 +2526,7 @@ static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,struct ippp_struc
2527 printk(KERN_DEBUG "ippp: no decompressor defined!\n"); 2526 printk(KERN_DEBUG "ippp: no decompressor defined!\n");
2528 return skb; 2527 return skb;
2529 } 2528 }
2530 if (!stat) // if we have a compressor, stat has been set as well 2529 BUG_ON(!stat); // if we have a compressor, stat has been set as well
2531 BUG();
2532 2530
2533 if((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG) ) { 2531 if((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG) ) {
2534 // compressed packets are compressed by their protocol type 2532 // compressed packets are compressed by their protocol type
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index d2ead1776c16..34fcabac5fdb 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -80,7 +80,7 @@ static struct adb_driver *adb_driver_list[] = {
80static struct class *adb_dev_class; 80static struct class *adb_dev_class;
81 81
82struct adb_driver *adb_controller; 82struct adb_driver *adb_controller;
83struct notifier_block *adb_client_list = NULL; 83BLOCKING_NOTIFIER_HEAD(adb_client_list);
84static int adb_got_sleep; 84static int adb_got_sleep;
85static int adb_inited; 85static int adb_inited;
86static pid_t adb_probe_task_pid; 86static pid_t adb_probe_task_pid;
@@ -354,7 +354,8 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when)
354 /* Stop autopoll */ 354 /* Stop autopoll */
355 if (adb_controller->autopoll) 355 if (adb_controller->autopoll)
356 adb_controller->autopoll(0); 356 adb_controller->autopoll(0);
357 ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL); 357 ret = blocking_notifier_call_chain(&adb_client_list,
358 ADB_MSG_POWERDOWN, NULL);
358 if (ret & NOTIFY_STOP_MASK) { 359 if (ret & NOTIFY_STOP_MASK) {
359 up(&adb_probe_mutex); 360 up(&adb_probe_mutex);
360 return PBOOK_SLEEP_REFUSE; 361 return PBOOK_SLEEP_REFUSE;
@@ -391,7 +392,8 @@ do_adb_reset_bus(void)
391 if (adb_controller->autopoll) 392 if (adb_controller->autopoll)
392 adb_controller->autopoll(0); 393 adb_controller->autopoll(0);
393 394
394 nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL); 395 nret = blocking_notifier_call_chain(&adb_client_list,
396 ADB_MSG_PRE_RESET, NULL);
395 if (nret & NOTIFY_STOP_MASK) { 397 if (nret & NOTIFY_STOP_MASK) {
396 if (adb_controller->autopoll) 398 if (adb_controller->autopoll)
397 adb_controller->autopoll(autopoll_devs); 399 adb_controller->autopoll(autopoll_devs);
@@ -426,7 +428,8 @@ do_adb_reset_bus(void)
426 } 428 }
427 up(&adb_handler_sem); 429 up(&adb_handler_sem);
428 430
429 nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL); 431 nret = blocking_notifier_call_chain(&adb_client_list,
432 ADB_MSG_POST_RESET, NULL);
430 if (nret & NOTIFY_STOP_MASK) 433 if (nret & NOTIFY_STOP_MASK)
431 return -EBUSY; 434 return -EBUSY;
432 435
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c0b46bceb5df..f5779a73184d 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -1214,7 +1214,8 @@ static int __init adbhid_init(void)
1214 1214
1215 adbhid_probe(); 1215 adbhid_probe();
1216 1216
1217 notifier_chain_register(&adb_client_list, &adbhid_adb_notifier); 1217 blocking_notifier_chain_register(&adb_client_list,
1218 &adbhid_adb_notifier);
1218 1219
1219 return 0; 1220 return 0;
1220} 1221}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4eb05d7143d8..f4516ca7aa3a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sysdev.h> 36#include <linux/sysdev.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/mutex.h>
38 39
39#include <asm/byteorder.h> 40#include <asm/byteorder.h>
40#include <asm/io.h> 41#include <asm/io.h>
@@ -92,7 +93,7 @@ struct smu_device {
92 * for now, just hard code that 93 * for now, just hard code that
93 */ 94 */
94static struct smu_device *smu; 95static struct smu_device *smu;
95static DECLARE_MUTEX(smu_part_access); 96static DEFINE_MUTEX(smu_part_access);
96 97
97static void smu_i2c_retry(unsigned long data); 98static void smu_i2c_retry(unsigned long data);
98 99
@@ -976,11 +977,11 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
976 977
977 if (interruptible) { 978 if (interruptible) {
978 int rc; 979 int rc;
979 rc = down_interruptible(&smu_part_access); 980 rc = mutex_lock_interruptible(&smu_part_access);
980 if (rc) 981 if (rc)
981 return ERR_PTR(rc); 982 return ERR_PTR(rc);
982 } else 983 } else
983 down(&smu_part_access); 984 mutex_lock(&smu_part_access);
984 985
985 part = (struct smu_sdbp_header *)get_property(smu->of_node, 986 part = (struct smu_sdbp_header *)get_property(smu->of_node,
986 pname, size); 987 pname, size);
@@ -990,7 +991,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
990 if (part != NULL && size) 991 if (part != NULL && size)
991 *size = part->len << 2; 992 *size = part->len << 2;
992 } 993 }
993 up(&smu_part_access); 994 mutex_unlock(&smu_part_access);
994 return part; 995 return part;
995} 996}
996 997
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4f5f3abc9cb3..0b5ff553e39a 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -187,7 +187,7 @@ extern int disable_kernel_backlight;
187 187
188int __fake_sleep; 188int __fake_sleep;
189int asleep; 189int asleep;
190struct notifier_block *sleep_notifier_list; 190BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
191 191
192#ifdef CONFIG_ADB 192#ifdef CONFIG_ADB
193static int adb_dev_map = 0; 193static int adb_dev_map = 0;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index f08e52f2107b..35b70323e7e3 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -102,7 +102,7 @@ static int pmu_kind = PMU_UNKNOWN;
102static int pmu_fully_inited = 0; 102static int pmu_fully_inited = 0;
103 103
104int asleep; 104int asleep;
105struct notifier_block *sleep_notifier_list; 105BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
106 106
107static int pmu_probe(void); 107static int pmu_probe(void);
108static int pmu_init(void); 108static int pmu_init(void);
@@ -913,7 +913,8 @@ int powerbook_sleep(void)
913 struct adb_request sleep_req; 913 struct adb_request sleep_req;
914 914
915 /* Notify device drivers */ 915 /* Notify device drivers */
916 ret = notifier_call_chain(&sleep_notifier_list, PBOOK_SLEEP, NULL); 916 ret = blocking_notifier_call_chain(&sleep_notifier_list,
917 PBOOK_SLEEP, NULL);
917 if (ret & NOTIFY_STOP_MASK) 918 if (ret & NOTIFY_STOP_MASK)
918 return -EBUSY; 919 return -EBUSY;
919 920
@@ -984,7 +985,7 @@ int powerbook_sleep(void)
984 enable_irq(i); 985 enable_irq(i);
985 986
986 /* Notify drivers */ 987 /* Notify drivers */
987 notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL); 988 blocking_notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL);
988 989
989 /* reenable ADB autopoll */ 990 /* reenable ADB autopoll */
990 pmu_adb_autopoll(adb_dev_map); 991 pmu_adb_autopoll(adb_dev_map);
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 6c0ba04bc57a..ab3faa702d58 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -52,7 +52,7 @@
52static LIST_HEAD(wf_controls); 52static LIST_HEAD(wf_controls);
53static LIST_HEAD(wf_sensors); 53static LIST_HEAD(wf_sensors);
54static DEFINE_MUTEX(wf_lock); 54static DEFINE_MUTEX(wf_lock);
55static struct notifier_block *wf_client_list; 55static BLOCKING_NOTIFIER_HEAD(wf_client_list);
56static int wf_client_count; 56static int wf_client_count;
57static unsigned int wf_overtemp; 57static unsigned int wf_overtemp;
58static unsigned int wf_overtemp_counter; 58static unsigned int wf_overtemp_counter;
@@ -68,7 +68,7 @@ static struct platform_device wf_platform_device = {
68 68
69static inline void wf_notify(int event, void *param) 69static inline void wf_notify(int event, void *param)
70{ 70{
71 notifier_call_chain(&wf_client_list, event, param); 71 blocking_notifier_call_chain(&wf_client_list, event, param);
72} 72}
73 73
74int wf_critical_overtemp(void) 74int wf_critical_overtemp(void)
@@ -398,7 +398,7 @@ int wf_register_client(struct notifier_block *nb)
398 struct wf_sensor *sr; 398 struct wf_sensor *sr;
399 399
400 mutex_lock(&wf_lock); 400 mutex_lock(&wf_lock);
401 rc = notifier_chain_register(&wf_client_list, nb); 401 rc = blocking_notifier_chain_register(&wf_client_list, nb);
402 if (rc != 0) 402 if (rc != 0)
403 goto bail; 403 goto bail;
404 wf_client_count++; 404 wf_client_count++;
@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(wf_register_client);
417int wf_unregister_client(struct notifier_block *nb) 417int wf_unregister_client(struct notifier_block *nb)
418{ 418{
419 mutex_lock(&wf_lock); 419 mutex_lock(&wf_lock);
420 notifier_chain_unregister(&wf_client_list, nb); 420 blocking_notifier_chain_unregister(&wf_client_list, nb);
421 wf_client_count++; 421 wf_client_count++;
422 if (wf_client_count == 0) 422 if (wf_client_count == 0)
423 wf_stop_thread(); 423 wf_stop_thread();
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ac43f98062fd..fd2aae150ccc 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -127,6 +127,32 @@ config MD_RAID5
127 127
128 If unsure, say Y. 128 If unsure, say Y.
129 129
130config MD_RAID5_RESHAPE
131 bool "Support adding drives to a raid-5 array (experimental)"
132 depends on MD_RAID5 && EXPERIMENTAL
133 ---help---
134 A RAID-5 set can be expanded by adding extra drives. This
135 requires "restriping" the array which means (almost) every
136 block must be written to a different place.
137
138 This option allows such restriping to be done while the array
139 is online. However it is still EXPERIMENTAL code. It should
140 work, but please be sure that you have backups.
141
142 You will need a version of mdadm newer than 2.3.1. During the
143 early stage of reshape there is a critical section where live data
144 is being over-written. A crash during this time needs extra care
145 for recovery. The newer mdadm takes a copy of the data in the
146 critical section and will restore it, if necessary, after a crash.
147
148 The mdadm usage is e.g.
149 mdadm --grow /dev/md1 --raid-disks=6
150 to grow '/dev/md1' to having 6 disks.
151
152 Note: The array can only be expanded, not contracted.
153 There should be enough spares already present to make the new
154 array workable.
155
130config MD_RAID6 156config MD_RAID6
131 tristate "RAID-6 mode" 157 tristate "RAID-6 mode"
132 depends on BLK_DEV_MD 158 depends on BLK_DEV_MD
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e1c18aa1d712..f8ffaee20ff8 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
89} 89}
90 90
91#define WRITE_POOL_SIZE 256 91#define WRITE_POOL_SIZE 256
92/* mempool for queueing pending writes on the bitmap file */
93static void *write_pool_alloc(gfp_t gfp_flags, void *data)
94{
95 return kmalloc(sizeof(struct page_list), gfp_flags);
96}
97
98static void write_pool_free(void *ptr, void *data)
99{
100 kfree(ptr);
101}
102 92
103/* 93/*
104 * just a placeholder - calls kmalloc for bitmap pages 94 * just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
1564 spin_lock_init(&bitmap->write_lock); 1554 spin_lock_init(&bitmap->write_lock);
1565 INIT_LIST_HEAD(&bitmap->complete_pages); 1555 INIT_LIST_HEAD(&bitmap->complete_pages);
1566 init_waitqueue_head(&bitmap->write_wait); 1556 init_waitqueue_head(&bitmap->write_wait);
1567 bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc, 1557 bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
1568 write_pool_free, NULL); 1558 sizeof(struct page_list));
1569 err = -ENOMEM; 1559 err = -ENOMEM;
1570 if (!bitmap->write_pool) 1560 if (!bitmap->write_pool)
1571 goto error; 1561 goto error;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e7a650f9ca07..61a590bb6241 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -94,20 +94,6 @@ struct crypt_config {
94static kmem_cache_t *_crypt_io_pool; 94static kmem_cache_t *_crypt_io_pool;
95 95
96/* 96/*
97 * Mempool alloc and free functions for the page
98 */
99static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
100{
101 return alloc_page(gfp_mask);
102}
103
104static void mempool_free_page(void *page, void *data)
105{
106 __free_page(page);
107}
108
109
110/*
111 * Different IV generation algorithms: 97 * Different IV generation algorithms:
112 * 98 *
113 * plain: the initial vector is the 32-bit low-endian version of the sector 99 * plain: the initial vector is the 32-bit low-endian version of the sector
@@ -532,6 +518,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
532 char *ivopts; 518 char *ivopts;
533 unsigned int crypto_flags; 519 unsigned int crypto_flags;
534 unsigned int key_size; 520 unsigned int key_size;
521 unsigned long long tmpll;
535 522
536 if (argc != 5) { 523 if (argc != 5) {
537 ti->error = PFX "Not enough arguments"; 524 ti->error = PFX "Not enough arguments";
@@ -630,15 +617,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
630 } 617 }
631 } 618 }
632 619
633 cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 620 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
634 mempool_free_slab, _crypt_io_pool);
635 if (!cc->io_pool) { 621 if (!cc->io_pool) {
636 ti->error = PFX "Cannot allocate crypt io mempool"; 622 ti->error = PFX "Cannot allocate crypt io mempool";
637 goto bad3; 623 goto bad3;
638 } 624 }
639 625
640 cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, 626 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
641 mempool_free_page, NULL);
642 if (!cc->page_pool) { 627 if (!cc->page_pool) {
643 ti->error = PFX "Cannot allocate page mempool"; 628 ti->error = PFX "Cannot allocate page mempool";
644 goto bad4; 629 goto bad4;
@@ -649,15 +634,17 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
649 goto bad5; 634 goto bad5;
650 } 635 }
651 636
652 if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) { 637 if (sscanf(argv[2], "%llu", &tmpll) != 1) {
653 ti->error = PFX "Invalid iv_offset sector"; 638 ti->error = PFX "Invalid iv_offset sector";
654 goto bad5; 639 goto bad5;
655 } 640 }
641 cc->iv_offset = tmpll;
656 642
657 if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) { 643 if (sscanf(argv[4], "%llu", &tmpll) != 1) {
658 ti->error = PFX "Invalid device sector"; 644 ti->error = PFX "Invalid device sector";
659 goto bad5; 645 goto bad5;
660 } 646 }
647 cc->start = tmpll;
661 648
662 if (dm_get_device(ti, argv[3], cc->start, ti->len, 649 if (dm_get_device(ti, argv[3], cc->start, ti->len,
663 dm_table_get_mode(ti->table), &cc->dev)) { 650 dm_table_get_mode(ti->table), &cc->dev)) {
@@ -901,8 +888,8 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
901 result[sz++] = '-'; 888 result[sz++] = '-';
902 } 889 }
903 890
904 DMEMIT(" " SECTOR_FORMAT " %s " SECTOR_FORMAT, 891 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
905 cc->iv_offset, cc->dev->name, cc->start); 892 cc->dev->name, (unsigned long long)cc->start);
906 break; 893 break;
907 } 894 }
908 return 0; 895 return 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4809b209fbb1..da663d2ff552 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -32,16 +32,6 @@ struct io {
32static unsigned _num_ios; 32static unsigned _num_ios;
33static mempool_t *_io_pool; 33static mempool_t *_io_pool;
34 34
35static void *alloc_io(gfp_t gfp_mask, void *pool_data)
36{
37 return kmalloc(sizeof(struct io), gfp_mask);
38}
39
40static void free_io(void *element, void *pool_data)
41{
42 kfree(element);
43}
44
45static unsigned int pages_to_ios(unsigned int pages) 35static unsigned int pages_to_ios(unsigned int pages)
46{ 36{
47 return 4 * pages; /* too many ? */ 37 return 4 * pages; /* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios)
65 55
66 } else { 56 } else {
67 /* create new pool */ 57 /* create new pool */
68 _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL); 58 _io_pool = mempool_create_kmalloc_pool(new_ios,
59 sizeof(struct io));
69 if (!_io_pool) 60 if (!_io_pool)
70 return -ENOMEM; 61 return -ENOMEM;
71 62
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 442e2be6052e..8edd6435414d 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -15,6 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/devfs_fs_kernel.h> 16#include <linux/devfs_fs_kernel.h>
17#include <linux/dm-ioctl.h> 17#include <linux/dm-ioctl.h>
18#include <linux/hdreg.h>
18 19
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20 21
@@ -244,9 +245,9 @@ static void __hash_remove(struct hash_cell *hc)
244 dm_table_put(table); 245 dm_table_put(table);
245 } 246 }
246 247
247 dm_put(hc->md);
248 if (hc->new_map) 248 if (hc->new_map)
249 dm_table_put(hc->new_map); 249 dm_table_put(hc->new_map);
250 dm_put(hc->md);
250 free_cell(hc); 251 free_cell(hc);
251} 252}
252 253
@@ -600,12 +601,22 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
600 */ 601 */
601static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) 602static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
602{ 603{
604 struct mapped_device *md;
605 void *mdptr = NULL;
606
603 if (*param->uuid) 607 if (*param->uuid)
604 return __get_uuid_cell(param->uuid); 608 return __get_uuid_cell(param->uuid);
605 else if (*param->name) 609
610 if (*param->name)
606 return __get_name_cell(param->name); 611 return __get_name_cell(param->name);
607 else 612
608 return dm_get_mdptr(huge_decode_dev(param->dev)); 613 md = dm_get_md(huge_decode_dev(param->dev));
614 if (md) {
615 mdptr = dm_get_mdptr(md);
616 dm_put(md);
617 }
618
619 return mdptr;
609} 620}
610 621
611static struct mapped_device *find_device(struct dm_ioctl *param) 622static struct mapped_device *find_device(struct dm_ioctl *param)
@@ -690,6 +701,54 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
690 return dm_hash_rename(param->name, new_name); 701 return dm_hash_rename(param->name, new_name);
691} 702}
692 703
704static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
705{
706 int r = -EINVAL, x;
707 struct mapped_device *md;
708 struct hd_geometry geometry;
709 unsigned long indata[4];
710 char *geostr = (char *) param + param->data_start;
711
712 md = find_device(param);
713 if (!md)
714 return -ENXIO;
715
716 if (geostr < (char *) (param + 1) ||
717 invalid_str(geostr, (void *) param + param_size)) {
718 DMWARN("Invalid geometry supplied.");
719 goto out;
720 }
721
722 x = sscanf(geostr, "%lu %lu %lu %lu", indata,
723 indata + 1, indata + 2, indata + 3);
724
725 if (x != 4) {
726 DMWARN("Unable to interpret geometry settings.");
727 goto out;
728 }
729
730 if (indata[0] > 65535 || indata[1] > 255 ||
731 indata[2] > 255 || indata[3] > ULONG_MAX) {
732 DMWARN("Geometry exceeds range limits.");
733 goto out;
734 }
735
736 geometry.cylinders = indata[0];
737 geometry.heads = indata[1];
738 geometry.sectors = indata[2];
739 geometry.start = indata[3];
740
741 r = dm_set_geometry(md, &geometry);
742 if (!r)
743 r = __dev_status(md, param);
744
745 param->data_size = 0;
746
747out:
748 dm_put(md);
749 return r;
750}
751
693static int do_suspend(struct dm_ioctl *param) 752static int do_suspend(struct dm_ioctl *param)
694{ 753{
695 int r = 0; 754 int r = 0;
@@ -975,33 +1034,43 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
975 int r; 1034 int r;
976 struct hash_cell *hc; 1035 struct hash_cell *hc;
977 struct dm_table *t; 1036 struct dm_table *t;
1037 struct mapped_device *md;
978 1038
979 r = dm_table_create(&t, get_mode(param), param->target_count); 1039 md = find_device(param);
1040 if (!md)
1041 return -ENXIO;
1042
1043 r = dm_table_create(&t, get_mode(param), param->target_count, md);
980 if (r) 1044 if (r)
981 return r; 1045 goto out;
982 1046
983 r = populate_table(t, param, param_size); 1047 r = populate_table(t, param, param_size);
984 if (r) { 1048 if (r) {
985 dm_table_put(t); 1049 dm_table_put(t);
986 return r; 1050 goto out;
987 } 1051 }
988 1052
989 down_write(&_hash_lock); 1053 down_write(&_hash_lock);
990 hc = __find_device_hash_cell(param); 1054 hc = dm_get_mdptr(md);
991 if (!hc) { 1055 if (!hc || hc->md != md) {
992 DMWARN("device doesn't appear to be in the dev hash table."); 1056 DMWARN("device has been removed from the dev hash table.");
993 up_write(&_hash_lock);
994 dm_table_put(t); 1057 dm_table_put(t);
995 return -ENXIO; 1058 up_write(&_hash_lock);
1059 r = -ENXIO;
1060 goto out;
996 } 1061 }
997 1062
998 if (hc->new_map) 1063 if (hc->new_map)
999 dm_table_put(hc->new_map); 1064 dm_table_put(hc->new_map);
1000 hc->new_map = t; 1065 hc->new_map = t;
1066 up_write(&_hash_lock);
1067
1001 param->flags |= DM_INACTIVE_PRESENT_FLAG; 1068 param->flags |= DM_INACTIVE_PRESENT_FLAG;
1069 r = __dev_status(md, param);
1070
1071out:
1072 dm_put(md);
1002 1073
1003 r = __dev_status(hc->md, param);
1004 up_write(&_hash_lock);
1005 return r; 1074 return r;
1006} 1075}
1007 1076
@@ -1214,7 +1283,8 @@ static ioctl_fn lookup_ioctl(unsigned int cmd)
1214 1283
1215 {DM_LIST_VERSIONS_CMD, list_versions}, 1284 {DM_LIST_VERSIONS_CMD, list_versions},
1216 1285
1217 {DM_TARGET_MSG_CMD, target_message} 1286 {DM_TARGET_MSG_CMD, target_message},
1287 {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
1218 }; 1288 };
1219 1289
1220 return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn; 1290 return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 6a2cd5dc8a63..daf586c0898d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -26,6 +26,7 @@ struct linear_c {
26static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) 26static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
27{ 27{
28 struct linear_c *lc; 28 struct linear_c *lc;
29 unsigned long long tmp;
29 30
30 if (argc != 2) { 31 if (argc != 2) {
31 ti->error = "dm-linear: Invalid argument count"; 32 ti->error = "dm-linear: Invalid argument count";
@@ -38,10 +39,11 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38 return -ENOMEM; 39 return -ENOMEM;
39 } 40 }
40 41
41 if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) { 42 if (sscanf(argv[1], "%llu", &tmp) != 1) {
42 ti->error = "dm-linear: Invalid device sector"; 43 ti->error = "dm-linear: Invalid device sector";
43 goto bad; 44 goto bad;
44 } 45 }
46 lc->start = tmp;
45 47
46 if (dm_get_device(ti, argv[0], lc->start, ti->len, 48 if (dm_get_device(ti, argv[0], lc->start, ti->len,
47 dm_table_get_mode(ti->table), &lc->dev)) { 49 dm_table_get_mode(ti->table), &lc->dev)) {
@@ -87,8 +89,8 @@ static int linear_status(struct dm_target *ti, status_type_t type,
87 break; 89 break;
88 90
89 case STATUSTYPE_TABLE: 91 case STATUSTYPE_TABLE:
90 snprintf(result, maxlen, "%s " SECTOR_FORMAT, lc->dev->name, 92 snprintf(result, maxlen, "%s %llu", lc->dev->name,
91 lc->start); 93 (unsigned long long)lc->start);
92 break; 94 break;
93 } 95 }
94 return 0; 96 return 0;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f72a82fb9434..1816f30678ed 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -179,8 +179,7 @@ static struct multipath *alloc_multipath(void)
179 m->queue_io = 1; 179 m->queue_io = 1;
180 INIT_WORK(&m->process_queued_ios, process_queued_ios, m); 180 INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
181 INIT_WORK(&m->trigger_event, trigger_event, m); 181 INIT_WORK(&m->trigger_event, trigger_event, m);
182 m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 182 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
183 mempool_free_slab, _mpio_cache);
184 if (!m->mpio_pool) { 183 if (!m->mpio_pool) {
185 kfree(m); 184 kfree(m);
186 return NULL; 185 return NULL;
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index a28c1c2b4ef5..f10a0c89b3f4 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -86,8 +86,7 @@ void dm_put_path_selector(struct path_selector_type *pst)
86 if (--psi->use == 0) 86 if (--psi->use == 0)
87 module_put(psi->pst.module); 87 module_put(psi->pst.module);
88 88
89 if (psi->use < 0) 89 BUG_ON(psi->use < 0);
90 BUG();
91 90
92out: 91out:
93 up_read(&_ps_lock); 92 up_read(&_ps_lock);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6cfa8d435d55..d12cf3e5e076 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
122/* FIXME move this */ 122/* FIXME move this */
123static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 123static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
124 124
125static void *region_alloc(gfp_t gfp_mask, void *pool_data)
126{
127 return kmalloc(sizeof(struct region), gfp_mask);
128}
129
130static void region_free(void *element, void *pool_data)
131{
132 kfree(element);
133}
134
135#define MIN_REGIONS 64 125#define MIN_REGIONS 64
136#define MAX_RECOVERY 1 126#define MAX_RECOVERY 1
137static int rh_init(struct region_hash *rh, struct mirror_set *ms, 127static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
173 INIT_LIST_HEAD(&rh->quiesced_regions); 163 INIT_LIST_HEAD(&rh->quiesced_regions);
174 INIT_LIST_HEAD(&rh->recovered_regions); 164 INIT_LIST_HEAD(&rh->recovered_regions);
175 165
176 rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, 166 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
177 region_free, NULL); 167 sizeof(struct region));
178 if (!rh->region_pool) { 168 if (!rh->region_pool) {
179 vfree(rh->buckets); 169 vfree(rh->buckets);
180 rh->buckets = NULL; 170 rh->buckets = NULL;
@@ -412,9 +402,21 @@ static void rh_dec(struct region_hash *rh, region_t region)
412 402
413 spin_lock_irqsave(&rh->region_lock, flags); 403 spin_lock_irqsave(&rh->region_lock, flags);
414 if (atomic_dec_and_test(&reg->pending)) { 404 if (atomic_dec_and_test(&reg->pending)) {
405 /*
406 * There is no pending I/O for this region.
407 * We can move the region to corresponding list for next action.
408 * At this point, the region is not yet connected to any list.
409 *
410 * If the state is RH_NOSYNC, the region should be kept off
411 * from clean list.
412 * The hash entry for RH_NOSYNC will remain in memory
413 * until the region is recovered or the map is reloaded.
414 */
415
416 /* do nothing for RH_NOSYNC */
415 if (reg->state == RH_RECOVERING) { 417 if (reg->state == RH_RECOVERING) {
416 list_add_tail(&reg->list, &rh->quiesced_regions); 418 list_add_tail(&reg->list, &rh->quiesced_regions);
417 } else { 419 } else if (reg->state == RH_DIRTY) {
418 reg->state = RH_CLEAN; 420 reg->state = RH_CLEAN;
419 list_add(&reg->list, &rh->clean_regions); 421 list_add(&reg->list, &rh->clean_regions);
420 } 422 }
@@ -932,9 +934,9 @@ static inline int _check_region_size(struct dm_target *ti, uint32_t size)
932static int get_mirror(struct mirror_set *ms, struct dm_target *ti, 934static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
933 unsigned int mirror, char **argv) 935 unsigned int mirror, char **argv)
934{ 936{
935 sector_t offset; 937 unsigned long long offset;
936 938
937 if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) { 939 if (sscanf(argv[1], "%llu", &offset) != 1) {
938 ti->error = "dm-mirror: Invalid offset"; 940 ti->error = "dm-mirror: Invalid offset";
939 return -EINVAL; 941 return -EINVAL;
940 } 942 }
@@ -1201,16 +1203,17 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1201 for (m = 0; m < ms->nr_mirrors; m++) 1203 for (m = 0; m < ms->nr_mirrors; m++)
1202 DMEMIT("%s ", ms->mirror[m].dev->name); 1204 DMEMIT("%s ", ms->mirror[m].dev->name);
1203 1205
1204 DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT, 1206 DMEMIT("%llu/%llu",
1205 ms->rh.log->type->get_sync_count(ms->rh.log), 1207 (unsigned long long)ms->rh.log->type->
1206 ms->nr_regions); 1208 get_sync_count(ms->rh.log),
1209 (unsigned long long)ms->nr_regions);
1207 break; 1210 break;
1208 1211
1209 case STATUSTYPE_TABLE: 1212 case STATUSTYPE_TABLE:
1210 DMEMIT("%d ", ms->nr_mirrors); 1213 DMEMIT("%d ", ms->nr_mirrors);
1211 for (m = 0; m < ms->nr_mirrors; m++) 1214 for (m = 0; m < ms->nr_mirrors; m++)
1212 DMEMIT("%s " SECTOR_FORMAT " ", 1215 DMEMIT("%s %llu ", ms->mirror[m].dev->name,
1213 ms->mirror[m].dev->name, ms->mirror[m].offset); 1216 (unsigned long long)ms->mirror[m].offset);
1214 } 1217 }
1215 1218
1216 return 0; 1219 return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f3759dd7828e..08312b46463a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -49,11 +49,26 @@ struct pending_exception {
49 struct bio_list snapshot_bios; 49 struct bio_list snapshot_bios;
50 50
51 /* 51 /*
52 * Other pending_exceptions that are processing this 52 * Short-term queue of pending exceptions prior to submission.
53 * chunk. When this list is empty, we know we can
54 * complete the origins.
55 */ 53 */
56 struct list_head siblings; 54 struct list_head list;
55
56 /*
57 * The primary pending_exception is the one that holds
58 * the sibling_count and the list of origin_bios for a
59 * group of pending_exceptions. It is always last to get freed.
60 * These fields get set up when writing to the origin.
61 */
62 struct pending_exception *primary_pe;
63
64 /*
65 * Number of pending_exceptions processing this chunk.
66 * When this drops to zero we must complete the origin bios.
67 * If incrementing or decrementing this, hold pe->snap->lock for
68 * the sibling concerned and not pe->primary_pe->snap->lock unless
69 * they are the same.
70 */
71 atomic_t sibling_count;
57 72
58 /* Pointer back to snapshot context */ 73 /* Pointer back to snapshot context */
59 struct dm_snapshot *snap; 74 struct dm_snapshot *snap;
@@ -377,6 +392,8 @@ static void read_snapshot_metadata(struct dm_snapshot *s)
377 down_write(&s->lock); 392 down_write(&s->lock);
378 s->valid = 0; 393 s->valid = 0;
379 up_write(&s->lock); 394 up_write(&s->lock);
395
396 dm_table_event(s->table);
380 } 397 }
381} 398}
382 399
@@ -542,8 +559,12 @@ static void snapshot_dtr(struct dm_target *ti)
542{ 559{
543 struct dm_snapshot *s = (struct dm_snapshot *) ti->private; 560 struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
544 561
562 /* Prevent further origin writes from using this snapshot. */
563 /* After this returns there can be no new kcopyd jobs. */
545 unregister_snapshot(s); 564 unregister_snapshot(s);
546 565
566 kcopyd_client_destroy(s->kcopyd_client);
567
547 exit_exception_table(&s->pending, pending_cache); 568 exit_exception_table(&s->pending, pending_cache);
548 exit_exception_table(&s->complete, exception_cache); 569 exit_exception_table(&s->complete, exception_cache);
549 570
@@ -552,7 +573,7 @@ static void snapshot_dtr(struct dm_target *ti)
552 573
553 dm_put_device(ti, s->origin); 574 dm_put_device(ti, s->origin);
554 dm_put_device(ti, s->cow); 575 dm_put_device(ti, s->cow);
555 kcopyd_client_destroy(s->kcopyd_client); 576
556 kfree(s); 577 kfree(s);
557} 578}
558 579
@@ -586,78 +607,117 @@ static void error_bios(struct bio *bio)
586 } 607 }
587} 608}
588 609
610static inline void error_snapshot_bios(struct pending_exception *pe)
611{
612 error_bios(bio_list_get(&pe->snapshot_bios));
613}
614
589static struct bio *__flush_bios(struct pending_exception *pe) 615static struct bio *__flush_bios(struct pending_exception *pe)
590{ 616{
591 struct pending_exception *sibling; 617 /*
618 * If this pe is involved in a write to the origin and
619 * it is the last sibling to complete then release
620 * the bios for the original write to the origin.
621 */
622
623 if (pe->primary_pe &&
624 atomic_dec_and_test(&pe->primary_pe->sibling_count))
625 return bio_list_get(&pe->primary_pe->origin_bios);
626
627 return NULL;
628}
629
630static void __invalidate_snapshot(struct dm_snapshot *s,
631 struct pending_exception *pe, int err)
632{
633 if (!s->valid)
634 return;
592 635
593 if (list_empty(&pe->siblings)) 636 if (err == -EIO)
594 return bio_list_get(&pe->origin_bios); 637 DMERR("Invalidating snapshot: Error reading/writing.");
638 else if (err == -ENOMEM)
639 DMERR("Invalidating snapshot: Unable to allocate exception.");
595 640
596 sibling = list_entry(pe->siblings.next, 641 if (pe)
597 struct pending_exception, siblings); 642 remove_exception(&pe->e);
598 643
599 list_del(&pe->siblings); 644 if (s->store.drop_snapshot)
645 s->store.drop_snapshot(&s->store);
600 646
601 /* This is fine as long as kcopyd is single-threaded. If kcopyd 647 s->valid = 0;
602 * becomes multi-threaded, we'll need some locking here.
603 */
604 bio_list_merge(&sibling->origin_bios, &pe->origin_bios);
605 648
606 return NULL; 649 dm_table_event(s->table);
607} 650}
608 651
609static void pending_complete(struct pending_exception *pe, int success) 652static void pending_complete(struct pending_exception *pe, int success)
610{ 653{
611 struct exception *e; 654 struct exception *e;
655 struct pending_exception *primary_pe;
612 struct dm_snapshot *s = pe->snap; 656 struct dm_snapshot *s = pe->snap;
613 struct bio *flush = NULL; 657 struct bio *flush = NULL;
614 658
615 if (success) { 659 if (!success) {
616 e = alloc_exception(); 660 /* Read/write error - snapshot is unusable */
617 if (!e) {
618 DMWARN("Unable to allocate exception.");
619 down_write(&s->lock);
620 s->store.drop_snapshot(&s->store);
621 s->valid = 0;
622 flush = __flush_bios(pe);
623 up_write(&s->lock);
624
625 error_bios(bio_list_get(&pe->snapshot_bios));
626 goto out;
627 }
628 *e = pe->e;
629
630 /*
631 * Add a proper exception, and remove the
632 * in-flight exception from the list.
633 */
634 down_write(&s->lock); 661 down_write(&s->lock);
635 insert_exception(&s->complete, e); 662 __invalidate_snapshot(s, pe, -EIO);
636 remove_exception(&pe->e);
637 flush = __flush_bios(pe); 663 flush = __flush_bios(pe);
638
639 /* Submit any pending write bios */
640 up_write(&s->lock); 664 up_write(&s->lock);
641 665
642 flush_bios(bio_list_get(&pe->snapshot_bios)); 666 error_snapshot_bios(pe);
643 } else { 667 goto out;
644 /* Read/write error - snapshot is unusable */ 668 }
669
670 e = alloc_exception();
671 if (!e) {
645 down_write(&s->lock); 672 down_write(&s->lock);
646 if (s->valid) 673 __invalidate_snapshot(s, pe, -ENOMEM);
647 DMERR("Error reading/writing snapshot");
648 s->store.drop_snapshot(&s->store);
649 s->valid = 0;
650 remove_exception(&pe->e);
651 flush = __flush_bios(pe); 674 flush = __flush_bios(pe);
652 up_write(&s->lock); 675 up_write(&s->lock);
653 676
654 error_bios(bio_list_get(&pe->snapshot_bios)); 677 error_snapshot_bios(pe);
678 goto out;
679 }
680 *e = pe->e;
655 681
656 dm_table_event(s->table); 682 /*
683 * Add a proper exception, and remove the
684 * in-flight exception from the list.
685 */
686 down_write(&s->lock);
687 if (!s->valid) {
688 flush = __flush_bios(pe);
689 up_write(&s->lock);
690
691 free_exception(e);
692
693 error_snapshot_bios(pe);
694 goto out;
657 } 695 }
658 696
697 insert_exception(&s->complete, e);
698 remove_exception(&pe->e);
699 flush = __flush_bios(pe);
700
701 up_write(&s->lock);
702
703 /* Submit any pending write bios */
704 flush_bios(bio_list_get(&pe->snapshot_bios));
705
659 out: 706 out:
660 free_pending_exception(pe); 707 primary_pe = pe->primary_pe;
708
709 /*
710 * Free the pe if it's not linked to an origin write or if
711 * it's not itself a primary pe.
712 */
713 if (!primary_pe || primary_pe != pe)
714 free_pending_exception(pe);
715
716 /*
717 * Free the primary pe if nothing references it.
718 */
719 if (primary_pe && !atomic_read(&primary_pe->sibling_count))
720 free_pending_exception(primary_pe);
661 721
662 if (flush) 722 if (flush)
663 flush_bios(flush); 723 flush_bios(flush);
@@ -734,38 +794,45 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
734 if (e) { 794 if (e) {
735 /* cast the exception to a pending exception */ 795 /* cast the exception to a pending exception */
736 pe = container_of(e, struct pending_exception, e); 796 pe = container_of(e, struct pending_exception, e);
797 goto out;
798 }
737 799
738 } else { 800 /*
739 /* 801 * Create a new pending exception, we don't want
740 * Create a new pending exception, we don't want 802 * to hold the lock while we do this.
741 * to hold the lock while we do this. 803 */
742 */ 804 up_write(&s->lock);
743 up_write(&s->lock); 805 pe = alloc_pending_exception();
744 pe = alloc_pending_exception(); 806 down_write(&s->lock);
745 down_write(&s->lock);
746 807
747 e = lookup_exception(&s->pending, chunk); 808 if (!s->valid) {
748 if (e) { 809 free_pending_exception(pe);
749 free_pending_exception(pe); 810 return NULL;
750 pe = container_of(e, struct pending_exception, e); 811 }
751 } else {
752 pe->e.old_chunk = chunk;
753 bio_list_init(&pe->origin_bios);
754 bio_list_init(&pe->snapshot_bios);
755 INIT_LIST_HEAD(&pe->siblings);
756 pe->snap = s;
757 pe->started = 0;
758
759 if (s->store.prepare_exception(&s->store, &pe->e)) {
760 free_pending_exception(pe);
761 s->valid = 0;
762 return NULL;
763 }
764 812
765 insert_exception(&s->pending, &pe->e); 813 e = lookup_exception(&s->pending, chunk);
766 } 814 if (e) {
815 free_pending_exception(pe);
816 pe = container_of(e, struct pending_exception, e);
817 goto out;
818 }
819
820 pe->e.old_chunk = chunk;
821 bio_list_init(&pe->origin_bios);
822 bio_list_init(&pe->snapshot_bios);
823 pe->primary_pe = NULL;
824 atomic_set(&pe->sibling_count, 1);
825 pe->snap = s;
826 pe->started = 0;
827
828 if (s->store.prepare_exception(&s->store, &pe->e)) {
829 free_pending_exception(pe);
830 return NULL;
767 } 831 }
768 832
833 insert_exception(&s->pending, &pe->e);
834
835 out:
769 return pe; 836 return pe;
770} 837}
771 838
@@ -782,13 +849,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
782{ 849{
783 struct exception *e; 850 struct exception *e;
784 struct dm_snapshot *s = (struct dm_snapshot *) ti->private; 851 struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
852 int copy_needed = 0;
785 int r = 1; 853 int r = 1;
786 chunk_t chunk; 854 chunk_t chunk;
787 struct pending_exception *pe; 855 struct pending_exception *pe = NULL;
788 856
789 chunk = sector_to_chunk(s, bio->bi_sector); 857 chunk = sector_to_chunk(s, bio->bi_sector);
790 858
791 /* Full snapshots are not usable */ 859 /* Full snapshots are not usable */
860 /* To get here the table must be live so s->active is always set. */
792 if (!s->valid) 861 if (!s->valid)
793 return -EIO; 862 return -EIO;
794 863
@@ -806,36 +875,41 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
806 * to copy an exception */ 875 * to copy an exception */
807 down_write(&s->lock); 876 down_write(&s->lock);
808 877
878 if (!s->valid) {
879 r = -EIO;
880 goto out_unlock;
881 }
882
809 /* If the block is already remapped - use that, else remap it */ 883 /* If the block is already remapped - use that, else remap it */
810 e = lookup_exception(&s->complete, chunk); 884 e = lookup_exception(&s->complete, chunk);
811 if (e) { 885 if (e) {
812 remap_exception(s, e, bio); 886 remap_exception(s, e, bio);
813 up_write(&s->lock); 887 goto out_unlock;
814 888 }
815 } else { 889
816 pe = __find_pending_exception(s, bio); 890 pe = __find_pending_exception(s, bio);
817 891 if (!pe) {
818 if (!pe) { 892 __invalidate_snapshot(s, pe, -ENOMEM);
819 if (s->store.drop_snapshot) 893 r = -EIO;
820 s->store.drop_snapshot(&s->store); 894 goto out_unlock;
821 s->valid = 0; 895 }
822 r = -EIO; 896
823 up_write(&s->lock); 897 remap_exception(s, &pe->e, bio);
824 } else { 898 bio_list_add(&pe->snapshot_bios, bio);
825 remap_exception(s, &pe->e, bio); 899
826 bio_list_add(&pe->snapshot_bios, bio); 900 if (!pe->started) {
827 901 /* this is protected by snap->lock */
828 if (!pe->started) { 902 pe->started = 1;
829 /* this is protected by snap->lock */ 903 copy_needed = 1;
830 pe->started = 1;
831 up_write(&s->lock);
832 start_copy(pe);
833 } else
834 up_write(&s->lock);
835 r = 0;
836 }
837 } 904 }
838 905
906 r = 0;
907
908 out_unlock:
909 up_write(&s->lock);
910
911 if (copy_needed)
912 start_copy(pe);
839 } else { 913 } else {
840 /* 914 /*
841 * FIXME: this read path scares me because we 915 * FIXME: this read path scares me because we
@@ -847,6 +921,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
847 /* Do reads */ 921 /* Do reads */
848 down_read(&s->lock); 922 down_read(&s->lock);
849 923
924 if (!s->valid) {
925 up_read(&s->lock);
926 return -EIO;
927 }
928
850 /* See if it it has been remapped */ 929 /* See if it it has been remapped */
851 e = lookup_exception(&s->complete, chunk); 930 e = lookup_exception(&s->complete, chunk);
852 if (e) 931 if (e)
@@ -884,9 +963,9 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
884 snap->store.fraction_full(&snap->store, 963 snap->store.fraction_full(&snap->store,
885 &numerator, 964 &numerator,
886 &denominator); 965 &denominator);
887 snprintf(result, maxlen, 966 snprintf(result, maxlen, "%llu/%llu",
888 SECTOR_FORMAT "/" SECTOR_FORMAT, 967 (unsigned long long)numerator,
889 numerator, denominator); 968 (unsigned long long)denominator);
890 } 969 }
891 else 970 else
892 snprintf(result, maxlen, "Unknown"); 971 snprintf(result, maxlen, "Unknown");
@@ -899,9 +978,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
899 * to make private copies if the output is to 978 * to make private copies if the output is to
900 * make sense. 979 * make sense.
901 */ 980 */
902 snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT, 981 snprintf(result, maxlen, "%s %s %c %llu",
903 snap->origin->name, snap->cow->name, 982 snap->origin->name, snap->cow->name,
904 snap->type, snap->chunk_size); 983 snap->type,
984 (unsigned long long)snap->chunk_size);
905 break; 985 break;
906 } 986 }
907 987
@@ -911,40 +991,27 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
911/*----------------------------------------------------------------- 991/*-----------------------------------------------------------------
912 * Origin methods 992 * Origin methods
913 *---------------------------------------------------------------*/ 993 *---------------------------------------------------------------*/
914static void list_merge(struct list_head *l1, struct list_head *l2)
915{
916 struct list_head *l1_n, *l2_p;
917
918 l1_n = l1->next;
919 l2_p = l2->prev;
920
921 l1->next = l2;
922 l2->prev = l1;
923
924 l2_p->next = l1_n;
925 l1_n->prev = l2_p;
926}
927
928static int __origin_write(struct list_head *snapshots, struct bio *bio) 994static int __origin_write(struct list_head *snapshots, struct bio *bio)
929{ 995{
930 int r = 1, first = 1; 996 int r = 1, first = 0;
931 struct dm_snapshot *snap; 997 struct dm_snapshot *snap;
932 struct exception *e; 998 struct exception *e;
933 struct pending_exception *pe, *last = NULL; 999 struct pending_exception *pe, *next_pe, *primary_pe = NULL;
934 chunk_t chunk; 1000 chunk_t chunk;
1001 LIST_HEAD(pe_queue);
935 1002
936 /* Do all the snapshots on this origin */ 1003 /* Do all the snapshots on this origin */
937 list_for_each_entry (snap, snapshots, list) { 1004 list_for_each_entry (snap, snapshots, list) {
938 1005
1006 down_write(&snap->lock);
1007
939 /* Only deal with valid and active snapshots */ 1008 /* Only deal with valid and active snapshots */
940 if (!snap->valid || !snap->active) 1009 if (!snap->valid || !snap->active)
941 continue; 1010 goto next_snapshot;
942 1011
943 /* Nothing to do if writing beyond end of snapshot */ 1012 /* Nothing to do if writing beyond end of snapshot */
944 if (bio->bi_sector >= dm_table_get_size(snap->table)) 1013 if (bio->bi_sector >= dm_table_get_size(snap->table))
945 continue; 1014 goto next_snapshot;
946
947 down_write(&snap->lock);
948 1015
949 /* 1016 /*
950 * Remember, different snapshots can have 1017 * Remember, different snapshots can have
@@ -956,49 +1023,75 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
956 * Check exception table to see if block 1023 * Check exception table to see if block
957 * is already remapped in this snapshot 1024 * is already remapped in this snapshot
958 * and trigger an exception if not. 1025 * and trigger an exception if not.
1026 *
1027 * sibling_count is initialised to 1 so pending_complete()
1028 * won't destroy the primary_pe while we're inside this loop.
959 */ 1029 */
960 e = lookup_exception(&snap->complete, chunk); 1030 e = lookup_exception(&snap->complete, chunk);
961 if (!e) { 1031 if (e)
962 pe = __find_pending_exception(snap, bio); 1032 goto next_snapshot;
963 if (!pe) { 1033
964 snap->store.drop_snapshot(&snap->store); 1034 pe = __find_pending_exception(snap, bio);
965 snap->valid = 0; 1035 if (!pe) {
966 1036 __invalidate_snapshot(snap, pe, ENOMEM);
967 } else { 1037 goto next_snapshot;
968 if (last) 1038 }
969 list_merge(&pe->siblings, 1039
970 &last->siblings); 1040 if (!primary_pe) {
971 1041 /*
972 last = pe; 1042 * Either every pe here has same
973 r = 0; 1043 * primary_pe or none has one yet.
1044 */
1045 if (pe->primary_pe)
1046 primary_pe = pe->primary_pe;
1047 else {
1048 primary_pe = pe;
1049 first = 1;
974 } 1050 }
1051
1052 bio_list_add(&primary_pe->origin_bios, bio);
1053
1054 r = 0;
1055 }
1056
1057 if (!pe->primary_pe) {
1058 atomic_inc(&primary_pe->sibling_count);
1059 pe->primary_pe = primary_pe;
1060 }
1061
1062 if (!pe->started) {
1063 pe->started = 1;
1064 list_add_tail(&pe->list, &pe_queue);
975 } 1065 }
976 1066
1067 next_snapshot:
977 up_write(&snap->lock); 1068 up_write(&snap->lock);
978 } 1069 }
979 1070
1071 if (!primary_pe)
1072 goto out;
1073
980 /* 1074 /*
981 * Now that we have a complete pe list we can start the copying. 1075 * If this is the first time we're processing this chunk and
1076 * sibling_count is now 1 it means all the pending exceptions
1077 * got completed while we were in the loop above, so it falls to
1078 * us here to remove the primary_pe and submit any origin_bios.
982 */ 1079 */
983 if (last) { 1080
984 pe = last; 1081 if (first && atomic_dec_and_test(&primary_pe->sibling_count)) {
985 do { 1082 flush_bios(bio_list_get(&primary_pe->origin_bios));
986 down_write(&pe->snap->lock); 1083 free_pending_exception(primary_pe);
987 if (first) 1084 /* If we got here, pe_queue is necessarily empty. */
988 bio_list_add(&pe->origin_bios, bio); 1085 goto out;
989 if (!pe->started) {
990 pe->started = 1;
991 up_write(&pe->snap->lock);
992 start_copy(pe);
993 } else
994 up_write(&pe->snap->lock);
995 first = 0;
996 pe = list_entry(pe->siblings.next,
997 struct pending_exception, siblings);
998
999 } while (pe != last);
1000 } 1086 }
1001 1087
1088 /*
1089 * Now that we have a complete pe list we can start the copying.
1090 */
1091 list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
1092 start_copy(pe);
1093
1094 out:
1002 return r; 1095 return r;
1003} 1096}
1004 1097
@@ -1174,8 +1267,7 @@ static int __init dm_snapshot_init(void)
1174 goto bad4; 1267 goto bad4;
1175 } 1268 }
1176 1269
1177 pending_pool = mempool_create(128, mempool_alloc_slab, 1270 pending_pool = mempool_create_slab_pool(128, pending_cache);
1178 mempool_free_slab, pending_cache);
1179 if (!pending_pool) { 1271 if (!pending_pool) {
1180 DMERR("Couldn't create pending pool."); 1272 DMERR("Couldn't create pending pool.");
1181 r = -ENOMEM; 1273 r = -ENOMEM;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 697aacafb02a..08328a8f5a3c 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -49,9 +49,9 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
49static int get_stripe(struct dm_target *ti, struct stripe_c *sc, 49static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
50 unsigned int stripe, char **argv) 50 unsigned int stripe, char **argv)
51{ 51{
52 sector_t start; 52 unsigned long long start;
53 53
54 if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1) 54 if (sscanf(argv[1], "%llu", &start) != 1)
55 return -EINVAL; 55 return -EINVAL;
56 56
57 if (dm_get_device(ti, argv[0], start, sc->stripe_width, 57 if (dm_get_device(ti, argv[0], start, sc->stripe_width,
@@ -103,7 +103,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
103 return -EINVAL; 103 return -EINVAL;
104 } 104 }
105 105
106 if (((uint32_t)ti->len) & (chunk_size - 1)) { 106 if (ti->len & (chunk_size - 1)) {
107 ti->error = "dm-stripe: Target length not divisible by " 107 ti->error = "dm-stripe: Target length not divisible by "
108 "chunk size"; 108 "chunk size";
109 return -EINVAL; 109 return -EINVAL;
@@ -201,10 +201,11 @@ static int stripe_status(struct dm_target *ti,
201 break; 201 break;
202 202
203 case STATUSTYPE_TABLE: 203 case STATUSTYPE_TABLE:
204 DMEMIT("%d " SECTOR_FORMAT, sc->stripes, sc->chunk_mask + 1); 204 DMEMIT("%d %llu", sc->stripes,
205 (unsigned long long)sc->chunk_mask + 1);
205 for (i = 0; i < sc->stripes; i++) 206 for (i = 0; i < sc->stripes; i++)
206 DMEMIT(" %s " SECTOR_FORMAT, sc->stripe[i].dev->name, 207 DMEMIT(" %s %llu", sc->stripe[i].dev->name,
207 sc->stripe[i].physical_start); 208 (unsigned long long)sc->stripe[i].physical_start);
208 break; 209 break;
209 } 210 }
210 return 0; 211 return 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9b1e2f5ca630..8f56a54cf0ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -14,6 +14,7 @@
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/mutex.h>
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18 19
19#define MAX_DEPTH 16 20#define MAX_DEPTH 16
@@ -22,6 +23,7 @@
22#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) 23#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
23 24
24struct dm_table { 25struct dm_table {
26 struct mapped_device *md;
25 atomic_t holders; 27 atomic_t holders;
26 28
27 /* btree table */ 29 /* btree table */
@@ -97,6 +99,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
97 99
98 lhs->seg_boundary_mask = 100 lhs->seg_boundary_mask =
99 min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); 101 min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
102
103 lhs->no_cluster |= rhs->no_cluster;
100} 104}
101 105
102/* 106/*
@@ -204,7 +208,8 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
204 return 0; 208 return 0;
205} 209}
206 210
207int dm_table_create(struct dm_table **result, int mode, unsigned num_targets) 211int dm_table_create(struct dm_table **result, int mode,
212 unsigned num_targets, struct mapped_device *md)
208{ 213{
209 struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL); 214 struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
210 215
@@ -227,6 +232,7 @@ int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
227 } 232 }
228 233
229 t->mode = mode; 234 t->mode = mode;
235 t->md = md;
230 *result = t; 236 *result = t;
231 return 0; 237 return 0;
232} 238}
@@ -345,20 +351,19 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
345/* 351/*
346 * Open a device so we can use it as a map destination. 352 * Open a device so we can use it as a map destination.
347 */ 353 */
348static int open_dev(struct dm_dev *d, dev_t dev) 354static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
349{ 355{
350 static char *_claim_ptr = "I belong to device-mapper"; 356 static char *_claim_ptr = "I belong to device-mapper";
351 struct block_device *bdev; 357 struct block_device *bdev;
352 358
353 int r; 359 int r;
354 360
355 if (d->bdev) 361 BUG_ON(d->bdev);
356 BUG();
357 362
358 bdev = open_by_devnum(dev, d->mode); 363 bdev = open_by_devnum(dev, d->mode);
359 if (IS_ERR(bdev)) 364 if (IS_ERR(bdev))
360 return PTR_ERR(bdev); 365 return PTR_ERR(bdev);
361 r = bd_claim(bdev, _claim_ptr); 366 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
362 if (r) 367 if (r)
363 blkdev_put(bdev); 368 blkdev_put(bdev);
364 else 369 else
@@ -369,12 +374,12 @@ static int open_dev(struct dm_dev *d, dev_t dev)
369/* 374/*
370 * Close a device that we've been using. 375 * Close a device that we've been using.
371 */ 376 */
372static void close_dev(struct dm_dev *d) 377static void close_dev(struct dm_dev *d, struct mapped_device *md)
373{ 378{
374 if (!d->bdev) 379 if (!d->bdev)
375 return; 380 return;
376 381
377 bd_release(d->bdev); 382 bd_release_from_disk(d->bdev, dm_disk(md));
378 blkdev_put(d->bdev); 383 blkdev_put(d->bdev);
379 d->bdev = NULL; 384 d->bdev = NULL;
380} 385}
@@ -395,7 +400,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
395 * careful to leave things as they were if we fail to reopen the 400 * careful to leave things as they were if we fail to reopen the
396 * device. 401 * device.
397 */ 402 */
398static int upgrade_mode(struct dm_dev *dd, int new_mode) 403static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
399{ 404{
400 int r; 405 int r;
401 struct dm_dev dd_copy; 406 struct dm_dev dd_copy;
@@ -405,9 +410,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode)
405 410
406 dd->mode |= new_mode; 411 dd->mode |= new_mode;
407 dd->bdev = NULL; 412 dd->bdev = NULL;
408 r = open_dev(dd, dev); 413 r = open_dev(dd, dev, md);
409 if (!r) 414 if (!r)
410 close_dev(&dd_copy); 415 close_dev(&dd_copy, md);
411 else 416 else
412 *dd = dd_copy; 417 *dd = dd_copy;
413 418
@@ -427,8 +432,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
427 struct dm_dev *dd; 432 struct dm_dev *dd;
428 unsigned int major, minor; 433 unsigned int major, minor;
429 434
430 if (!t) 435 BUG_ON(!t);
431 BUG();
432 436
433 if (sscanf(path, "%u:%u", &major, &minor) == 2) { 437 if (sscanf(path, "%u:%u", &major, &minor) == 2) {
434 /* Extract the major/minor numbers */ 438 /* Extract the major/minor numbers */
@@ -450,7 +454,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
450 dd->mode = mode; 454 dd->mode = mode;
451 dd->bdev = NULL; 455 dd->bdev = NULL;
452 456
453 if ((r = open_dev(dd, dev))) { 457 if ((r = open_dev(dd, dev, t->md))) {
454 kfree(dd); 458 kfree(dd);
455 return r; 459 return r;
456 } 460 }
@@ -461,7 +465,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
461 list_add(&dd->list, &t->devices); 465 list_add(&dd->list, &t->devices);
462 466
463 } else if (dd->mode != (mode | dd->mode)) { 467 } else if (dd->mode != (mode | dd->mode)) {
464 r = upgrade_mode(dd, mode); 468 r = upgrade_mode(dd, mode, t->md);
465 if (r) 469 if (r)
466 return r; 470 return r;
467 } 471 }
@@ -525,6 +529,8 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
525 rs->seg_boundary_mask = 529 rs->seg_boundary_mask =
526 min_not_zero(rs->seg_boundary_mask, 530 min_not_zero(rs->seg_boundary_mask,
527 q->seg_boundary_mask); 531 q->seg_boundary_mask);
532
533 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
528 } 534 }
529 535
530 return r; 536 return r;
@@ -536,7 +542,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
536void dm_put_device(struct dm_target *ti, struct dm_dev *dd) 542void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
537{ 543{
538 if (atomic_dec_and_test(&dd->count)) { 544 if (atomic_dec_and_test(&dd->count)) {
539 close_dev(dd); 545 close_dev(dd, ti->table->md);
540 list_del(&dd->list); 546 list_del(&dd->list);
541 kfree(dd); 547 kfree(dd);
542 } 548 }
@@ -765,14 +771,14 @@ int dm_table_complete(struct dm_table *t)
765 return r; 771 return r;
766} 772}
767 773
768static DECLARE_MUTEX(_event_lock); 774static DEFINE_MUTEX(_event_lock);
769void dm_table_event_callback(struct dm_table *t, 775void dm_table_event_callback(struct dm_table *t,
770 void (*fn)(void *), void *context) 776 void (*fn)(void *), void *context)
771{ 777{
772 down(&_event_lock); 778 mutex_lock(&_event_lock);
773 t->event_fn = fn; 779 t->event_fn = fn;
774 t->event_context = context; 780 t->event_context = context;
775 up(&_event_lock); 781 mutex_unlock(&_event_lock);
776} 782}
777 783
778void dm_table_event(struct dm_table *t) 784void dm_table_event(struct dm_table *t)
@@ -783,10 +789,10 @@ void dm_table_event(struct dm_table *t)
783 */ 789 */
784 BUG_ON(in_interrupt()); 790 BUG_ON(in_interrupt());
785 791
786 down(&_event_lock); 792 mutex_lock(&_event_lock);
787 if (t->event_fn) 793 if (t->event_fn)
788 t->event_fn(t->event_context); 794 t->event_fn(t->event_context);
789 up(&_event_lock); 795 mutex_unlock(&_event_lock);
790} 796}
791 797
792sector_t dm_table_get_size(struct dm_table *t) 798sector_t dm_table_get_size(struct dm_table *t)
@@ -834,6 +840,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
834 q->hardsect_size = t->limits.hardsect_size; 840 q->hardsect_size = t->limits.hardsect_size;
835 q->max_segment_size = t->limits.max_segment_size; 841 q->max_segment_size = t->limits.max_segment_size;
836 q->seg_boundary_mask = t->limits.seg_boundary_mask; 842 q->seg_boundary_mask = t->limits.seg_boundary_mask;
843 if (t->limits.no_cluster)
844 q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
845 else
846 q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
847
837} 848}
838 849
839unsigned int dm_table_get_num_targets(struct dm_table *t) 850unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -945,12 +956,20 @@ int dm_table_flush_all(struct dm_table *t)
945 return ret; 956 return ret;
946} 957}
947 958
959struct mapped_device *dm_table_get_md(struct dm_table *t)
960{
961 dm_get(t->md);
962
963 return t->md;
964}
965
948EXPORT_SYMBOL(dm_vcalloc); 966EXPORT_SYMBOL(dm_vcalloc);
949EXPORT_SYMBOL(dm_get_device); 967EXPORT_SYMBOL(dm_get_device);
950EXPORT_SYMBOL(dm_put_device); 968EXPORT_SYMBOL(dm_put_device);
951EXPORT_SYMBOL(dm_table_event); 969EXPORT_SYMBOL(dm_table_event);
952EXPORT_SYMBOL(dm_table_get_size); 970EXPORT_SYMBOL(dm_table_get_size);
953EXPORT_SYMBOL(dm_table_get_mode); 971EXPORT_SYMBOL(dm_table_get_mode);
972EXPORT_SYMBOL(dm_table_get_md);
954EXPORT_SYMBOL(dm_table_put); 973EXPORT_SYMBOL(dm_table_put);
955EXPORT_SYMBOL(dm_table_get); 974EXPORT_SYMBOL(dm_table_get);
956EXPORT_SYMBOL(dm_table_unplug_all); 975EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c82373f7ff3..4d710b7a133b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/mutex.h>
13#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
14#include <linux/blkpg.h> 15#include <linux/blkpg.h>
15#include <linux/bio.h> 16#include <linux/bio.h>
@@ -17,6 +18,7 @@
17#include <linux/mempool.h> 18#include <linux/mempool.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
19#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/hdreg.h>
20#include <linux/blktrace_api.h> 22#include <linux/blktrace_api.h>
21 23
22static const char *_name = DM_NAME; 24static const char *_name = DM_NAME;
@@ -69,6 +71,7 @@ struct mapped_device {
69 71
70 request_queue_t *queue; 72 request_queue_t *queue;
71 struct gendisk *disk; 73 struct gendisk *disk;
74 char name[16];
72 75
73 void *interface_ptr; 76 void *interface_ptr;
74 77
@@ -101,6 +104,9 @@ struct mapped_device {
101 */ 104 */
102 struct super_block *frozen_sb; 105 struct super_block *frozen_sb;
103 struct block_device *suspended_bdev; 106 struct block_device *suspended_bdev;
107
108 /* forced geometry settings */
109 struct hd_geometry geometry;
104}; 110};
105 111
106#define MIN_IOS 256 112#define MIN_IOS 256
@@ -226,6 +232,13 @@ static int dm_blk_close(struct inode *inode, struct file *file)
226 return 0; 232 return 0;
227} 233}
228 234
235static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
236{
237 struct mapped_device *md = bdev->bd_disk->private_data;
238
239 return dm_get_geometry(md, geo);
240}
241
229static inline struct dm_io *alloc_io(struct mapped_device *md) 242static inline struct dm_io *alloc_io(struct mapped_device *md)
230{ 243{
231 return mempool_alloc(md->io_pool, GFP_NOIO); 244 return mempool_alloc(md->io_pool, GFP_NOIO);
@@ -312,6 +325,33 @@ struct dm_table *dm_get_table(struct mapped_device *md)
312 return t; 325 return t;
313} 326}
314 327
328/*
329 * Get the geometry associated with a dm device
330 */
331int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
332{
333 *geo = md->geometry;
334
335 return 0;
336}
337
338/*
339 * Set the geometry of a device.
340 */
341int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
342{
343 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
344
345 if (geo->start > sz) {
346 DMWARN("Start sector is beyond the geometry limits.");
347 return -EINVAL;
348 }
349
350 md->geometry = *geo;
351
352 return 0;
353}
354
315/*----------------------------------------------------------------- 355/*-----------------------------------------------------------------
316 * CRUD START: 356 * CRUD START:
317 * A more elegant soln is in the works that uses the queue 357 * A more elegant soln is in the works that uses the queue
@@ -704,14 +744,14 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
704/*----------------------------------------------------------------- 744/*-----------------------------------------------------------------
705 * An IDR is used to keep track of allocated minor numbers. 745 * An IDR is used to keep track of allocated minor numbers.
706 *---------------------------------------------------------------*/ 746 *---------------------------------------------------------------*/
707static DECLARE_MUTEX(_minor_lock); 747static DEFINE_MUTEX(_minor_lock);
708static DEFINE_IDR(_minor_idr); 748static DEFINE_IDR(_minor_idr);
709 749
710static void free_minor(unsigned int minor) 750static void free_minor(unsigned int minor)
711{ 751{
712 down(&_minor_lock); 752 mutex_lock(&_minor_lock);
713 idr_remove(&_minor_idr, minor); 753 idr_remove(&_minor_idr, minor);
714 up(&_minor_lock); 754 mutex_unlock(&_minor_lock);
715} 755}
716 756
717/* 757/*
@@ -724,7 +764,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
724 if (minor >= (1 << MINORBITS)) 764 if (minor >= (1 << MINORBITS))
725 return -EINVAL; 765 return -EINVAL;
726 766
727 down(&_minor_lock); 767 mutex_lock(&_minor_lock);
728 768
729 if (idr_find(&_minor_idr, minor)) { 769 if (idr_find(&_minor_idr, minor)) {
730 r = -EBUSY; 770 r = -EBUSY;
@@ -749,7 +789,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
749 } 789 }
750 790
751out: 791out:
752 up(&_minor_lock); 792 mutex_unlock(&_minor_lock);
753 return r; 793 return r;
754} 794}
755 795
@@ -758,7 +798,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
758 int r; 798 int r;
759 unsigned int m; 799 unsigned int m;
760 800
761 down(&_minor_lock); 801 mutex_lock(&_minor_lock);
762 802
763 r = idr_pre_get(&_minor_idr, GFP_KERNEL); 803 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
764 if (!r) { 804 if (!r) {
@@ -780,7 +820,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
780 *minor = m; 820 *minor = m;
781 821
782out: 822out:
783 up(&_minor_lock); 823 mutex_unlock(&_minor_lock);
784 return r; 824 return r;
785} 825}
786 826
@@ -823,13 +863,11 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
823 md->queue->unplug_fn = dm_unplug_all; 863 md->queue->unplug_fn = dm_unplug_all;
824 md->queue->issue_flush_fn = dm_flush_all; 864 md->queue->issue_flush_fn = dm_flush_all;
825 865
826 md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 866 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
827 mempool_free_slab, _io_cache);
828 if (!md->io_pool) 867 if (!md->io_pool)
829 goto bad2; 868 goto bad2;
830 869
831 md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 870 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
832 mempool_free_slab, _tio_cache);
833 if (!md->tio_pool) 871 if (!md->tio_pool)
834 goto bad3; 872 goto bad3;
835 873
@@ -844,6 +882,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
844 md->disk->private_data = md; 882 md->disk->private_data = md;
845 sprintf(md->disk->disk_name, "dm-%d", minor); 883 sprintf(md->disk->disk_name, "dm-%d", minor);
846 add_disk(md->disk); 884 add_disk(md->disk);
885 format_dev_t(md->name, MKDEV(_major, minor));
847 886
848 atomic_set(&md->pending, 0); 887 atomic_set(&md->pending, 0);
849 init_waitqueue_head(&md->wait); 888 init_waitqueue_head(&md->wait);
@@ -906,6 +945,13 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
906 sector_t size; 945 sector_t size;
907 946
908 size = dm_table_get_size(t); 947 size = dm_table_get_size(t);
948
949 /*
950 * Wipe any geometry if the size of the table changed.
951 */
952 if (size != get_capacity(md->disk))
953 memset(&md->geometry, 0, sizeof(md->geometry));
954
909 __set_size(md, size); 955 __set_size(md, size);
910 if (size == 0) 956 if (size == 0)
911 return 0; 957 return 0;
@@ -969,13 +1015,13 @@ static struct mapped_device *dm_find_md(dev_t dev)
969 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 1015 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
970 return NULL; 1016 return NULL;
971 1017
972 down(&_minor_lock); 1018 mutex_lock(&_minor_lock);
973 1019
974 md = idr_find(&_minor_idr, minor); 1020 md = idr_find(&_minor_idr, minor);
975 if (!md || (dm_disk(md)->first_minor != minor)) 1021 if (!md || (dm_disk(md)->first_minor != minor))
976 md = NULL; 1022 md = NULL;
977 1023
978 up(&_minor_lock); 1024 mutex_unlock(&_minor_lock);
979 1025
980 return md; 1026 return md;
981} 1027}
@@ -990,15 +1036,9 @@ struct mapped_device *dm_get_md(dev_t dev)
990 return md; 1036 return md;
991} 1037}
992 1038
993void *dm_get_mdptr(dev_t dev) 1039void *dm_get_mdptr(struct mapped_device *md)
994{ 1040{
995 struct mapped_device *md; 1041 return md->interface_ptr;
996 void *mdptr = NULL;
997
998 md = dm_find_md(dev);
999 if (md)
1000 mdptr = md->interface_ptr;
1001 return mdptr;
1002} 1042}
1003 1043
1004void dm_set_mdptr(struct mapped_device *md, void *ptr) 1044void dm_set_mdptr(struct mapped_device *md, void *ptr)
@@ -1013,18 +1053,18 @@ void dm_get(struct mapped_device *md)
1013 1053
1014void dm_put(struct mapped_device *md) 1054void dm_put(struct mapped_device *md)
1015{ 1055{
1016 struct dm_table *map = dm_get_table(md); 1056 struct dm_table *map;
1017 1057
1018 if (atomic_dec_and_test(&md->holders)) { 1058 if (atomic_dec_and_test(&md->holders)) {
1059 map = dm_get_table(md);
1019 if (!dm_suspended(md)) { 1060 if (!dm_suspended(md)) {
1020 dm_table_presuspend_targets(map); 1061 dm_table_presuspend_targets(map);
1021 dm_table_postsuspend_targets(map); 1062 dm_table_postsuspend_targets(map);
1022 } 1063 }
1023 __unbind(md); 1064 __unbind(md);
1065 dm_table_put(map);
1024 free_dev(md); 1066 free_dev(md);
1025 } 1067 }
1026
1027 dm_table_put(map);
1028} 1068}
1029 1069
1030/* 1070/*
@@ -1109,6 +1149,7 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
1109{ 1149{
1110 struct dm_table *map = NULL; 1150 struct dm_table *map = NULL;
1111 DECLARE_WAITQUEUE(wait, current); 1151 DECLARE_WAITQUEUE(wait, current);
1152 struct bio *def;
1112 int r = -EINVAL; 1153 int r = -EINVAL;
1113 1154
1114 down(&md->suspend_lock); 1155 down(&md->suspend_lock);
@@ -1168,9 +1209,11 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
1168 /* were we interrupted ? */ 1209 /* were we interrupted ? */
1169 r = -EINTR; 1210 r = -EINTR;
1170 if (atomic_read(&md->pending)) { 1211 if (atomic_read(&md->pending)) {
1212 clear_bit(DMF_BLOCK_IO, &md->flags);
1213 def = bio_list_get(&md->deferred);
1214 __flush_deferred_io(md, def);
1171 up_write(&md->io_lock); 1215 up_write(&md->io_lock);
1172 unlock_fs(md); 1216 unlock_fs(md);
1173 clear_bit(DMF_BLOCK_IO, &md->flags);
1174 goto out; 1217 goto out;
1175 } 1218 }
1176 up_write(&md->io_lock); 1219 up_write(&md->io_lock);
@@ -1264,6 +1307,7 @@ int dm_suspended(struct mapped_device *md)
1264static struct block_device_operations dm_blk_dops = { 1307static struct block_device_operations dm_blk_dops = {
1265 .open = dm_blk_open, 1308 .open = dm_blk_open,
1266 .release = dm_blk_close, 1309 .release = dm_blk_close,
1310 .getgeo = dm_blk_getgeo,
1267 .owner = THIS_MODULE 1311 .owner = THIS_MODULE
1268}; 1312};
1269 1313
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4eaf075da217..fd90bc8f9e45 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -14,6 +14,7 @@
14#include <linux/device-mapper.h> 14#include <linux/device-mapper.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/hdreg.h>
17 18
18#define DM_NAME "device-mapper" 19#define DM_NAME "device-mapper"
19#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) 20#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
@@ -23,16 +24,6 @@
23#define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 24#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
24 0 : scnprintf(result + sz, maxlen - sz, x)) 25 0 : scnprintf(result + sz, maxlen - sz, x))
25 26
26/*
27 * FIXME: I think this should be with the definition of sector_t
28 * in types.h.
29 */
30#ifdef CONFIG_LBD
31#define SECTOR_FORMAT "%llu"
32#else
33#define SECTOR_FORMAT "%lu"
34#endif
35
36#define SECTOR_SHIFT 9 27#define SECTOR_SHIFT 9
37 28
38/* 29/*
@@ -57,7 +48,7 @@ struct mapped_device;
57int dm_create(struct mapped_device **md); 48int dm_create(struct mapped_device **md);
58int dm_create_with_minor(unsigned int minor, struct mapped_device **md); 49int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
59void dm_set_mdptr(struct mapped_device *md, void *ptr); 50void dm_set_mdptr(struct mapped_device *md, void *ptr);
60void *dm_get_mdptr(dev_t dev); 51void *dm_get_mdptr(struct mapped_device *md);
61struct mapped_device *dm_get_md(dev_t dev); 52struct mapped_device *dm_get_md(dev_t dev);
62 53
63/* 54/*
@@ -95,11 +86,18 @@ int dm_wait_event(struct mapped_device *md, int event_nr);
95struct gendisk *dm_disk(struct mapped_device *md); 86struct gendisk *dm_disk(struct mapped_device *md);
96int dm_suspended(struct mapped_device *md); 87int dm_suspended(struct mapped_device *md);
97 88
89/*
90 * Geometry functions.
91 */
92int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
93int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
94
98/*----------------------------------------------------------------- 95/*-----------------------------------------------------------------
99 * Functions for manipulating a table. Tables are also reference 96 * Functions for manipulating a table. Tables are also reference
100 * counted. 97 * counted.
101 *---------------------------------------------------------------*/ 98 *---------------------------------------------------------------*/
102int dm_table_create(struct dm_table **result, int mode, unsigned num_targets); 99int dm_table_create(struct dm_table **result, int mode,
100 unsigned num_targets, struct mapped_device *md);
103 101
104void dm_table_get(struct dm_table *t); 102void dm_table_get(struct dm_table *t);
105void dm_table_put(struct dm_table *t); 103void dm_table_put(struct dm_table *t);
@@ -117,6 +115,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
117unsigned int dm_table_get_num_targets(struct dm_table *t); 115unsigned int dm_table_get_num_targets(struct dm_table *t);
118struct list_head *dm_table_get_devices(struct dm_table *t); 116struct list_head *dm_table_get_devices(struct dm_table *t);
119int dm_table_get_mode(struct dm_table *t); 117int dm_table_get_mode(struct dm_table *t);
118struct mapped_device *dm_table_get_md(struct dm_table *t);
120void dm_table_presuspend_targets(struct dm_table *t); 119void dm_table_presuspend_targets(struct dm_table *t);
121void dm_table_postsuspend_targets(struct dm_table *t); 120void dm_table_postsuspend_targets(struct dm_table *t);
122void dm_table_resume_targets(struct dm_table *t); 121void dm_table_resume_targets(struct dm_table *t);
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 8b3515f394a6..72480a48d88b 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -22,6 +22,7 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/mutex.h>
25 26
26#include "kcopyd.h" 27#include "kcopyd.h"
27 28
@@ -44,6 +45,9 @@ struct kcopyd_client {
44 struct page_list *pages; 45 struct page_list *pages;
45 unsigned int nr_pages; 46 unsigned int nr_pages;
46 unsigned int nr_free_pages; 47 unsigned int nr_free_pages;
48
49 wait_queue_head_t destroyq;
50 atomic_t nr_jobs;
47}; 51};
48 52
49static struct page_list *alloc_pl(void) 53static struct page_list *alloc_pl(void)
@@ -227,8 +231,7 @@ static int jobs_init(void)
227 if (!_job_cache) 231 if (!_job_cache)
228 return -ENOMEM; 232 return -ENOMEM;
229 233
230 _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, 234 _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
231 mempool_free_slab, _job_cache);
232 if (!_job_pool) { 235 if (!_job_pool) {
233 kmem_cache_destroy(_job_cache); 236 kmem_cache_destroy(_job_cache);
234 return -ENOMEM; 237 return -ENOMEM;
@@ -293,10 +296,15 @@ static int run_complete_job(struct kcopyd_job *job)
293 int read_err = job->read_err; 296 int read_err = job->read_err;
294 unsigned int write_err = job->write_err; 297 unsigned int write_err = job->write_err;
295 kcopyd_notify_fn fn = job->fn; 298 kcopyd_notify_fn fn = job->fn;
299 struct kcopyd_client *kc = job->kc;
296 300
297 kcopyd_put_pages(job->kc, job->pages); 301 kcopyd_put_pages(kc, job->pages);
298 mempool_free(job, _job_pool); 302 mempool_free(job, _job_pool);
299 fn(read_err, write_err, context); 303 fn(read_err, write_err, context);
304
305 if (atomic_dec_and_test(&kc->nr_jobs))
306 wake_up(&kc->destroyq);
307
300 return 0; 308 return 0;
301} 309}
302 310
@@ -431,6 +439,7 @@ static void do_work(void *ignored)
431 */ 439 */
432static void dispatch_job(struct kcopyd_job *job) 440static void dispatch_job(struct kcopyd_job *job)
433{ 441{
442 atomic_inc(&job->kc->nr_jobs);
434 push(&_pages_jobs, job); 443 push(&_pages_jobs, job);
435 wake(); 444 wake();
436} 445}
@@ -573,68 +582,68 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
573/*----------------------------------------------------------------- 582/*-----------------------------------------------------------------
574 * Unit setup 583 * Unit setup
575 *---------------------------------------------------------------*/ 584 *---------------------------------------------------------------*/
576static DECLARE_MUTEX(_client_lock); 585static DEFINE_MUTEX(_client_lock);
577static LIST_HEAD(_clients); 586static LIST_HEAD(_clients);
578 587
579static void client_add(struct kcopyd_client *kc) 588static void client_add(struct kcopyd_client *kc)
580{ 589{
581 down(&_client_lock); 590 mutex_lock(&_client_lock);
582 list_add(&kc->list, &_clients); 591 list_add(&kc->list, &_clients);
583 up(&_client_lock); 592 mutex_unlock(&_client_lock);
584} 593}
585 594
586static void client_del(struct kcopyd_client *kc) 595static void client_del(struct kcopyd_client *kc)
587{ 596{
588 down(&_client_lock); 597 mutex_lock(&_client_lock);
589 list_del(&kc->list); 598 list_del(&kc->list);
590 up(&_client_lock); 599 mutex_unlock(&_client_lock);
591} 600}
592 601
593static DECLARE_MUTEX(kcopyd_init_lock); 602static DEFINE_MUTEX(kcopyd_init_lock);
594static int kcopyd_clients = 0; 603static int kcopyd_clients = 0;
595 604
596static int kcopyd_init(void) 605static int kcopyd_init(void)
597{ 606{
598 int r; 607 int r;
599 608
600 down(&kcopyd_init_lock); 609 mutex_lock(&kcopyd_init_lock);
601 610
602 if (kcopyd_clients) { 611 if (kcopyd_clients) {
603 /* Already initialized. */ 612 /* Already initialized. */
604 kcopyd_clients++; 613 kcopyd_clients++;
605 up(&kcopyd_init_lock); 614 mutex_unlock(&kcopyd_init_lock);
606 return 0; 615 return 0;
607 } 616 }
608 617
609 r = jobs_init(); 618 r = jobs_init();
610 if (r) { 619 if (r) {
611 up(&kcopyd_init_lock); 620 mutex_unlock(&kcopyd_init_lock);
612 return r; 621 return r;
613 } 622 }
614 623
615 _kcopyd_wq = create_singlethread_workqueue("kcopyd"); 624 _kcopyd_wq = create_singlethread_workqueue("kcopyd");
616 if (!_kcopyd_wq) { 625 if (!_kcopyd_wq) {
617 jobs_exit(); 626 jobs_exit();
618 up(&kcopyd_init_lock); 627 mutex_unlock(&kcopyd_init_lock);
619 return -ENOMEM; 628 return -ENOMEM;
620 } 629 }
621 630
622 kcopyd_clients++; 631 kcopyd_clients++;
623 INIT_WORK(&_kcopyd_work, do_work, NULL); 632 INIT_WORK(&_kcopyd_work, do_work, NULL);
624 up(&kcopyd_init_lock); 633 mutex_unlock(&kcopyd_init_lock);
625 return 0; 634 return 0;
626} 635}
627 636
628static void kcopyd_exit(void) 637static void kcopyd_exit(void)
629{ 638{
630 down(&kcopyd_init_lock); 639 mutex_lock(&kcopyd_init_lock);
631 kcopyd_clients--; 640 kcopyd_clients--;
632 if (!kcopyd_clients) { 641 if (!kcopyd_clients) {
633 jobs_exit(); 642 jobs_exit();
634 destroy_workqueue(_kcopyd_wq); 643 destroy_workqueue(_kcopyd_wq);
635 _kcopyd_wq = NULL; 644 _kcopyd_wq = NULL;
636 } 645 }
637 up(&kcopyd_init_lock); 646 mutex_unlock(&kcopyd_init_lock);
638} 647}
639 648
640int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) 649int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
@@ -670,6 +679,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
670 return r; 679 return r;
671 } 680 }
672 681
682 init_waitqueue_head(&kc->destroyq);
683 atomic_set(&kc->nr_jobs, 0);
684
673 client_add(kc); 685 client_add(kc);
674 *result = kc; 686 *result = kc;
675 return 0; 687 return 0;
@@ -677,6 +689,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
677 689
678void kcopyd_client_destroy(struct kcopyd_client *kc) 690void kcopyd_client_destroy(struct kcopyd_client *kc)
679{ 691{
692 /* Wait for completion of all jobs submitted by this client. */
693 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
694
680 dm_io_put(kc->nr_pages); 695 dm_io_put(kc->nr_pages);
681 client_free_pages(kc); 696 client_free_pages(kc);
682 client_del(kc); 697 client_del(kc);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5ed2228745cb..039e071c1007 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -43,6 +43,7 @@
43#include <linux/buffer_head.h> /* for invalidate_bdev */ 43#include <linux/buffer_head.h> /* for invalidate_bdev */
44#include <linux/suspend.h> 44#include <linux/suspend.h>
45#include <linux/poll.h> 45#include <linux/poll.h>
46#include <linux/mutex.h>
46 47
47#include <linux/init.h> 48#include <linux/init.h>
48 49
@@ -158,11 +159,12 @@ static int start_readonly;
158 */ 159 */
159static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 160static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
160static atomic_t md_event_count; 161static atomic_t md_event_count;
161static void md_new_event(mddev_t *mddev) 162void md_new_event(mddev_t *mddev)
162{ 163{
163 atomic_inc(&md_event_count); 164 atomic_inc(&md_event_count);
164 wake_up(&md_event_waiters); 165 wake_up(&md_event_waiters);
165} 166}
167EXPORT_SYMBOL_GPL(md_new_event);
166 168
167/* 169/*
168 * Enables to iterate over all existing md arrays 170 * Enables to iterate over all existing md arrays
@@ -253,7 +255,7 @@ static mddev_t * mddev_find(dev_t unit)
253 else 255 else
254 new->md_minor = MINOR(unit) >> MdpMinorShift; 256 new->md_minor = MINOR(unit) >> MdpMinorShift;
255 257
256 init_MUTEX(&new->reconfig_sem); 258 mutex_init(&new->reconfig_mutex);
257 INIT_LIST_HEAD(&new->disks); 259 INIT_LIST_HEAD(&new->disks);
258 INIT_LIST_HEAD(&new->all_mddevs); 260 INIT_LIST_HEAD(&new->all_mddevs);
259 init_timer(&new->safemode_timer); 261 init_timer(&new->safemode_timer);
@@ -266,6 +268,7 @@ static mddev_t * mddev_find(dev_t unit)
266 kfree(new); 268 kfree(new);
267 return NULL; 269 return NULL;
268 } 270 }
271 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
269 272
270 blk_queue_make_request(new->queue, md_fail_request); 273 blk_queue_make_request(new->queue, md_fail_request);
271 274
@@ -274,22 +277,22 @@ static mddev_t * mddev_find(dev_t unit)
274 277
275static inline int mddev_lock(mddev_t * mddev) 278static inline int mddev_lock(mddev_t * mddev)
276{ 279{
277 return down_interruptible(&mddev->reconfig_sem); 280 return mutex_lock_interruptible(&mddev->reconfig_mutex);
278} 281}
279 282
280static inline void mddev_lock_uninterruptible(mddev_t * mddev) 283static inline void mddev_lock_uninterruptible(mddev_t * mddev)
281{ 284{
282 down(&mddev->reconfig_sem); 285 mutex_lock(&mddev->reconfig_mutex);
283} 286}
284 287
285static inline int mddev_trylock(mddev_t * mddev) 288static inline int mddev_trylock(mddev_t * mddev)
286{ 289{
287 return down_trylock(&mddev->reconfig_sem); 290 return mutex_trylock(&mddev->reconfig_mutex);
288} 291}
289 292
290static inline void mddev_unlock(mddev_t * mddev) 293static inline void mddev_unlock(mddev_t * mddev)
291{ 294{
292 up(&mddev->reconfig_sem); 295 mutex_unlock(&mddev->reconfig_mutex);
293 296
294 md_wakeup_thread(mddev->thread); 297 md_wakeup_thread(mddev->thread);
295} 298}
@@ -660,7 +663,8 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
660 } 663 }
661 664
662 if (sb->major_version != 0 || 665 if (sb->major_version != 0 ||
663 sb->minor_version != 90) { 666 sb->minor_version < 90 ||
667 sb->minor_version > 91) {
664 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 668 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
665 sb->major_version, sb->minor_version, 669 sb->major_version, sb->minor_version,
666 b); 670 b);
@@ -745,6 +749,20 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
745 mddev->bitmap_offset = 0; 749 mddev->bitmap_offset = 0;
746 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 750 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
747 751
752 if (mddev->minor_version >= 91) {
753 mddev->reshape_position = sb->reshape_position;
754 mddev->delta_disks = sb->delta_disks;
755 mddev->new_level = sb->new_level;
756 mddev->new_layout = sb->new_layout;
757 mddev->new_chunk = sb->new_chunk;
758 } else {
759 mddev->reshape_position = MaxSector;
760 mddev->delta_disks = 0;
761 mddev->new_level = mddev->level;
762 mddev->new_layout = mddev->layout;
763 mddev->new_chunk = mddev->chunk_size;
764 }
765
748 if (sb->state & (1<<MD_SB_CLEAN)) 766 if (sb->state & (1<<MD_SB_CLEAN))
749 mddev->recovery_cp = MaxSector; 767 mddev->recovery_cp = MaxSector;
750 else { 768 else {
@@ -764,7 +782,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
764 782
765 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 783 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
766 mddev->bitmap_file == NULL) { 784 mddev->bitmap_file == NULL) {
767 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 785 if (mddev->level != 1 && mddev->level != 4
786 && mddev->level != 5 && mddev->level != 6
768 && mddev->level != 10) { 787 && mddev->level != 10) {
769 /* FIXME use a better test */ 788 /* FIXME use a better test */
770 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 789 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
@@ -838,7 +857,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
838 857
839 sb->md_magic = MD_SB_MAGIC; 858 sb->md_magic = MD_SB_MAGIC;
840 sb->major_version = mddev->major_version; 859 sb->major_version = mddev->major_version;
841 sb->minor_version = mddev->minor_version;
842 sb->patch_version = mddev->patch_version; 860 sb->patch_version = mddev->patch_version;
843 sb->gvalid_words = 0; /* ignored */ 861 sb->gvalid_words = 0; /* ignored */
844 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 862 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
@@ -857,6 +875,17 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
857 sb->events_hi = (mddev->events>>32); 875 sb->events_hi = (mddev->events>>32);
858 sb->events_lo = (u32)mddev->events; 876 sb->events_lo = (u32)mddev->events;
859 877
878 if (mddev->reshape_position == MaxSector)
879 sb->minor_version = 90;
880 else {
881 sb->minor_version = 91;
882 sb->reshape_position = mddev->reshape_position;
883 sb->new_level = mddev->new_level;
884 sb->delta_disks = mddev->delta_disks;
885 sb->new_layout = mddev->new_layout;
886 sb->new_chunk = mddev->new_chunk;
887 }
888 mddev->minor_version = sb->minor_version;
860 if (mddev->in_sync) 889 if (mddev->in_sync)
861 { 890 {
862 sb->recovery_cp = mddev->recovery_cp; 891 sb->recovery_cp = mddev->recovery_cp;
@@ -893,10 +922,9 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
893 d->raid_disk = rdev2->raid_disk; 922 d->raid_disk = rdev2->raid_disk;
894 else 923 else
895 d->raid_disk = rdev2->desc_nr; /* compatibility */ 924 d->raid_disk = rdev2->desc_nr; /* compatibility */
896 if (test_bit(Faulty, &rdev2->flags)) { 925 if (test_bit(Faulty, &rdev2->flags))
897 d->state = (1<<MD_DISK_FAULTY); 926 d->state = (1<<MD_DISK_FAULTY);
898 failed++; 927 else if (test_bit(In_sync, &rdev2->flags)) {
899 } else if (test_bit(In_sync, &rdev2->flags)) {
900 d->state = (1<<MD_DISK_ACTIVE); 928 d->state = (1<<MD_DISK_ACTIVE);
901 d->state |= (1<<MD_DISK_SYNC); 929 d->state |= (1<<MD_DISK_SYNC);
902 active++; 930 active++;
@@ -1102,6 +1130,20 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1102 } 1130 }
1103 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1131 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1104 } 1132 }
1133 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1134 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1135 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1136 mddev->new_level = le32_to_cpu(sb->new_level);
1137 mddev->new_layout = le32_to_cpu(sb->new_layout);
1138 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1139 } else {
1140 mddev->reshape_position = MaxSector;
1141 mddev->delta_disks = 0;
1142 mddev->new_level = mddev->level;
1143 mddev->new_layout = mddev->layout;
1144 mddev->new_chunk = mddev->chunk_size;
1145 }
1146
1105 } else if (mddev->pers == NULL) { 1147 } else if (mddev->pers == NULL) {
1106 /* Insist of good event counter while assembling */ 1148 /* Insist of good event counter while assembling */
1107 __u64 ev1 = le64_to_cpu(sb->events); 1149 __u64 ev1 = le64_to_cpu(sb->events);
@@ -1173,6 +1215,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1173 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1215 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1174 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1216 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1175 } 1217 }
1218 if (mddev->reshape_position != MaxSector) {
1219 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1220 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1221 sb->new_layout = cpu_to_le32(mddev->new_layout);
1222 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1223 sb->new_level = cpu_to_le32(mddev->new_level);
1224 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1225 }
1176 1226
1177 max_dev = 0; 1227 max_dev = 0;
1178 ITERATE_RDEV(mddev,rdev2,tmp) 1228 ITERATE_RDEV(mddev,rdev2,tmp)
@@ -1301,6 +1351,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1301 else 1351 else
1302 ko = &rdev->bdev->bd_disk->kobj; 1352 ko = &rdev->bdev->bd_disk->kobj;
1303 sysfs_create_link(&rdev->kobj, ko, "block"); 1353 sysfs_create_link(&rdev->kobj, ko, "block");
1354 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1304 return 0; 1355 return 0;
1305} 1356}
1306 1357
@@ -1311,6 +1362,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1311 MD_BUG(); 1362 MD_BUG();
1312 return; 1363 return;
1313 } 1364 }
1365 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1314 list_del_init(&rdev->same_set); 1366 list_del_init(&rdev->same_set);
1315 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1367 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1316 rdev->mddev = NULL; 1368 rdev->mddev = NULL;
@@ -1493,7 +1545,7 @@ static void sync_sbs(mddev_t * mddev)
1493 } 1545 }
1494} 1546}
1495 1547
1496static void md_update_sb(mddev_t * mddev) 1548void md_update_sb(mddev_t * mddev)
1497{ 1549{
1498 int err; 1550 int err;
1499 struct list_head *tmp; 1551 struct list_head *tmp;
@@ -1570,6 +1622,7 @@ repeat:
1570 wake_up(&mddev->sb_wait); 1622 wake_up(&mddev->sb_wait);
1571 1623
1572} 1624}
1625EXPORT_SYMBOL_GPL(md_update_sb);
1573 1626
1574/* words written to sysfs files may, or my not, be \n terminated. 1627/* words written to sysfs files may, or my not, be \n terminated.
1575 * We want to accept with case. For this we use cmd_match. 1628 * We want to accept with case. For this we use cmd_match.
@@ -2162,7 +2215,9 @@ action_show(mddev_t *mddev, char *page)
2162 char *type = "idle"; 2215 char *type = "idle";
2163 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2216 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2164 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { 2217 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2165 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2218 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2219 type = "reshape";
2220 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2166 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2221 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2167 type = "resync"; 2222 type = "resync";
2168 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2223 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
@@ -2193,7 +2248,14 @@ action_store(mddev_t *mddev, const char *page, size_t len)
2193 return -EBUSY; 2248 return -EBUSY;
2194 else if (cmd_match(page, "resync") || cmd_match(page, "recover")) 2249 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2195 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2250 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2196 else { 2251 else if (cmd_match(page, "reshape")) {
2252 int err;
2253 if (mddev->pers->start_reshape == NULL)
2254 return -EINVAL;
2255 err = mddev->pers->start_reshape(mddev);
2256 if (err)
2257 return err;
2258 } else {
2197 if (cmd_match(page, "check")) 2259 if (cmd_match(page, "check"))
2198 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2260 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2199 else if (cmd_match(page, "repair")) 2261 else if (cmd_match(page, "repair"))
@@ -2304,6 +2366,63 @@ sync_completed_show(mddev_t *mddev, char *page)
2304static struct md_sysfs_entry 2366static struct md_sysfs_entry
2305md_sync_completed = __ATTR_RO(sync_completed); 2367md_sync_completed = __ATTR_RO(sync_completed);
2306 2368
2369static ssize_t
2370suspend_lo_show(mddev_t *mddev, char *page)
2371{
2372 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2373}
2374
2375static ssize_t
2376suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2377{
2378 char *e;
2379 unsigned long long new = simple_strtoull(buf, &e, 10);
2380
2381 if (mddev->pers->quiesce == NULL)
2382 return -EINVAL;
2383 if (buf == e || (*e && *e != '\n'))
2384 return -EINVAL;
2385 if (new >= mddev->suspend_hi ||
2386 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2387 mddev->suspend_lo = new;
2388 mddev->pers->quiesce(mddev, 2);
2389 return len;
2390 } else
2391 return -EINVAL;
2392}
2393static struct md_sysfs_entry md_suspend_lo =
2394__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2395
2396
2397static ssize_t
2398suspend_hi_show(mddev_t *mddev, char *page)
2399{
2400 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2401}
2402
2403static ssize_t
2404suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2405{
2406 char *e;
2407 unsigned long long new = simple_strtoull(buf, &e, 10);
2408
2409 if (mddev->pers->quiesce == NULL)
2410 return -EINVAL;
2411 if (buf == e || (*e && *e != '\n'))
2412 return -EINVAL;
2413 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2414 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2415 mddev->suspend_hi = new;
2416 mddev->pers->quiesce(mddev, 1);
2417 mddev->pers->quiesce(mddev, 0);
2418 return len;
2419 } else
2420 return -EINVAL;
2421}
2422static struct md_sysfs_entry md_suspend_hi =
2423__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2424
2425
2307static struct attribute *md_default_attrs[] = { 2426static struct attribute *md_default_attrs[] = {
2308 &md_level.attr, 2427 &md_level.attr,
2309 &md_raid_disks.attr, 2428 &md_raid_disks.attr,
@@ -2321,6 +2440,8 @@ static struct attribute *md_redundancy_attrs[] = {
2321 &md_sync_max.attr, 2440 &md_sync_max.attr,
2322 &md_sync_speed.attr, 2441 &md_sync_speed.attr,
2323 &md_sync_completed.attr, 2442 &md_sync_completed.attr,
2443 &md_suspend_lo.attr,
2444 &md_suspend_hi.attr,
2324 NULL, 2445 NULL,
2325}; 2446};
2326static struct attribute_group md_redundancy_group = { 2447static struct attribute_group md_redundancy_group = {
@@ -2380,7 +2501,7 @@ int mdp_major = 0;
2380 2501
2381static struct kobject *md_probe(dev_t dev, int *part, void *data) 2502static struct kobject *md_probe(dev_t dev, int *part, void *data)
2382{ 2503{
2383 static DECLARE_MUTEX(disks_sem); 2504 static DEFINE_MUTEX(disks_mutex);
2384 mddev_t *mddev = mddev_find(dev); 2505 mddev_t *mddev = mddev_find(dev);
2385 struct gendisk *disk; 2506 struct gendisk *disk;
2386 int partitioned = (MAJOR(dev) != MD_MAJOR); 2507 int partitioned = (MAJOR(dev) != MD_MAJOR);
@@ -2390,15 +2511,15 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
2390 if (!mddev) 2511 if (!mddev)
2391 return NULL; 2512 return NULL;
2392 2513
2393 down(&disks_sem); 2514 mutex_lock(&disks_mutex);
2394 if (mddev->gendisk) { 2515 if (mddev->gendisk) {
2395 up(&disks_sem); 2516 mutex_unlock(&disks_mutex);
2396 mddev_put(mddev); 2517 mddev_put(mddev);
2397 return NULL; 2518 return NULL;
2398 } 2519 }
2399 disk = alloc_disk(1 << shift); 2520 disk = alloc_disk(1 << shift);
2400 if (!disk) { 2521 if (!disk) {
2401 up(&disks_sem); 2522 mutex_unlock(&disks_mutex);
2402 mddev_put(mddev); 2523 mddev_put(mddev);
2403 return NULL; 2524 return NULL;
2404 } 2525 }
@@ -2416,7 +2537,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
2416 disk->queue = mddev->queue; 2537 disk->queue = mddev->queue;
2417 add_disk(disk); 2538 add_disk(disk);
2418 mddev->gendisk = disk; 2539 mddev->gendisk = disk;
2419 up(&disks_sem); 2540 mutex_unlock(&disks_mutex);
2420 mddev->kobj.parent = &disk->kobj; 2541 mddev->kobj.parent = &disk->kobj;
2421 mddev->kobj.k_name = NULL; 2542 mddev->kobj.k_name = NULL;
2422 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2543 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
@@ -2539,6 +2660,14 @@ static int do_md_run(mddev_t * mddev)
2539 mddev->level = pers->level; 2660 mddev->level = pers->level;
2540 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2661 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2541 2662
2663 if (mddev->reshape_position != MaxSector &&
2664 pers->start_reshape == NULL) {
2665 /* This personality cannot handle reshaping... */
2666 mddev->pers = NULL;
2667 module_put(pers->owner);
2668 return -EINVAL;
2669 }
2670
2542 mddev->recovery = 0; 2671 mddev->recovery = 0;
2543 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2672 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
2544 mddev->barriers_work = 1; 2673 mddev->barriers_work = 1;
@@ -2772,7 +2901,6 @@ static void autorun_array(mddev_t *mddev)
2772 */ 2901 */
2773static void autorun_devices(int part) 2902static void autorun_devices(int part)
2774{ 2903{
2775 struct list_head candidates;
2776 struct list_head *tmp; 2904 struct list_head *tmp;
2777 mdk_rdev_t *rdev0, *rdev; 2905 mdk_rdev_t *rdev0, *rdev;
2778 mddev_t *mddev; 2906 mddev_t *mddev;
@@ -2781,6 +2909,7 @@ static void autorun_devices(int part)
2781 printk(KERN_INFO "md: autorun ...\n"); 2909 printk(KERN_INFO "md: autorun ...\n");
2782 while (!list_empty(&pending_raid_disks)) { 2910 while (!list_empty(&pending_raid_disks)) {
2783 dev_t dev; 2911 dev_t dev;
2912 LIST_HEAD(candidates);
2784 rdev0 = list_entry(pending_raid_disks.next, 2913 rdev0 = list_entry(pending_raid_disks.next,
2785 mdk_rdev_t, same_set); 2914 mdk_rdev_t, same_set);
2786 2915
@@ -3427,11 +3556,18 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3427 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3556 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3428 mddev->bitmap_offset = 0; 3557 mddev->bitmap_offset = 0;
3429 3558
3559 mddev->reshape_position = MaxSector;
3560
3430 /* 3561 /*
3431 * Generate a 128 bit UUID 3562 * Generate a 128 bit UUID
3432 */ 3563 */
3433 get_random_bytes(mddev->uuid, 16); 3564 get_random_bytes(mddev->uuid, 16);
3434 3565
3566 mddev->new_level = mddev->level;
3567 mddev->new_chunk = mddev->chunk_size;
3568 mddev->new_layout = mddev->layout;
3569 mddev->delta_disks = 0;
3570
3435 return 0; 3571 return 0;
3436} 3572}
3437 3573
@@ -3440,6 +3576,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
3440 mdk_rdev_t * rdev; 3576 mdk_rdev_t * rdev;
3441 int rv; 3577 int rv;
3442 struct list_head *tmp; 3578 struct list_head *tmp;
3579 int fit = (size == 0);
3443 3580
3444 if (mddev->pers->resize == NULL) 3581 if (mddev->pers->resize == NULL)
3445 return -EINVAL; 3582 return -EINVAL;
@@ -3457,7 +3594,6 @@ static int update_size(mddev_t *mddev, unsigned long size)
3457 return -EBUSY; 3594 return -EBUSY;
3458 ITERATE_RDEV(mddev,rdev,tmp) { 3595 ITERATE_RDEV(mddev,rdev,tmp) {
3459 sector_t avail; 3596 sector_t avail;
3460 int fit = (size == 0);
3461 if (rdev->sb_offset > rdev->data_offset) 3597 if (rdev->sb_offset > rdev->data_offset)
3462 avail = (rdev->sb_offset*2) - rdev->data_offset; 3598 avail = (rdev->sb_offset*2) - rdev->data_offset;
3463 else 3599 else
@@ -3487,14 +3623,16 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
3487{ 3623{
3488 int rv; 3624 int rv;
3489 /* change the number of raid disks */ 3625 /* change the number of raid disks */
3490 if (mddev->pers->reshape == NULL) 3626 if (mddev->pers->check_reshape == NULL)
3491 return -EINVAL; 3627 return -EINVAL;
3492 if (raid_disks <= 0 || 3628 if (raid_disks <= 0 ||
3493 raid_disks >= mddev->max_disks) 3629 raid_disks >= mddev->max_disks)
3494 return -EINVAL; 3630 return -EINVAL;
3495 if (mddev->sync_thread) 3631 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
3496 return -EBUSY; 3632 return -EBUSY;
3497 rv = mddev->pers->reshape(mddev, raid_disks); 3633 mddev->delta_disks = raid_disks - mddev->raid_disks;
3634
3635 rv = mddev->pers->check_reshape(mddev);
3498 return rv; 3636 return rv;
3499} 3637}
3500 3638
@@ -4041,7 +4179,10 @@ static void status_unused(struct seq_file *seq)
4041 4179
4042static void status_resync(struct seq_file *seq, mddev_t * mddev) 4180static void status_resync(struct seq_file *seq, mddev_t * mddev)
4043{ 4181{
4044 unsigned long max_blocks, resync, res, dt, db, rt; 4182 sector_t max_blocks, resync, res;
4183 unsigned long dt, db, rt;
4184 int scale;
4185 unsigned int per_milli;
4045 4186
4046 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4187 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4047 4188
@@ -4057,9 +4198,22 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
4057 MD_BUG(); 4198 MD_BUG();
4058 return; 4199 return;
4059 } 4200 }
4060 res = (resync/1024)*1000/(max_blocks/1024 + 1); 4201 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4202 * in a sector_t, and (max_blocks>>scale) will fit in a
4203 * u32, as those are the requirements for sector_div.
4204 * Thus 'scale' must be at least 10
4205 */
4206 scale = 10;
4207 if (sizeof(sector_t) > sizeof(unsigned long)) {
4208 while ( max_blocks/2 > (1ULL<<(scale+32)))
4209 scale++;
4210 }
4211 res = (resync>>scale)*1000;
4212 sector_div(res, (u32)((max_blocks>>scale)+1));
4213
4214 per_milli = res;
4061 { 4215 {
4062 int i, x = res/50, y = 20-x; 4216 int i, x = per_milli/50, y = 20-x;
4063 seq_printf(seq, "["); 4217 seq_printf(seq, "[");
4064 for (i = 0; i < x; i++) 4218 for (i = 0; i < x; i++)
4065 seq_printf(seq, "="); 4219 seq_printf(seq, "=");
@@ -4068,10 +4222,14 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
4068 seq_printf(seq, "."); 4222 seq_printf(seq, ".");
4069 seq_printf(seq, "] "); 4223 seq_printf(seq, "] ");
4070 } 4224 }
4071 seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)", 4225 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4226 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4227 "reshape" :
4072 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 4228 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4073 "resync" : "recovery"), 4229 "resync" : "recovery")),
4074 res/10, res % 10, resync, max_blocks); 4230 per_milli/10, per_milli % 10,
4231 (unsigned long long) resync,
4232 (unsigned long long) max_blocks);
4075 4233
4076 /* 4234 /*
4077 * We do not want to overflow, so the order of operands and 4235 * We do not want to overflow, so the order of operands and
@@ -4085,7 +4243,7 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
4085 dt = ((jiffies - mddev->resync_mark) / HZ); 4243 dt = ((jiffies - mddev->resync_mark) / HZ);
4086 if (!dt) dt++; 4244 if (!dt) dt++;
4087 db = resync - (mddev->resync_mark_cnt/2); 4245 db = resync - (mddev->resync_mark_cnt/2);
4088 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100; 4246 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100;
4089 4247
4090 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 4248 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4091 4249
@@ -4442,7 +4600,7 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
4442 4600
4443#define SYNC_MARKS 10 4601#define SYNC_MARKS 10
4444#define SYNC_MARK_STEP (3*HZ) 4602#define SYNC_MARK_STEP (3*HZ)
4445static void md_do_sync(mddev_t *mddev) 4603void md_do_sync(mddev_t *mddev)
4446{ 4604{
4447 mddev_t *mddev2; 4605 mddev_t *mddev2;
4448 unsigned int currspeed = 0, 4606 unsigned int currspeed = 0,
@@ -4522,7 +4680,9 @@ static void md_do_sync(mddev_t *mddev)
4522 */ 4680 */
4523 max_sectors = mddev->resync_max_sectors; 4681 max_sectors = mddev->resync_max_sectors;
4524 mddev->resync_mismatches = 0; 4682 mddev->resync_mismatches = 0;
4525 } else 4683 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4684 max_sectors = mddev->size << 1;
4685 else
4526 /* recovery follows the physical size of devices */ 4686 /* recovery follows the physical size of devices */
4527 max_sectors = mddev->size << 1; 4687 max_sectors = mddev->size << 1;
4528 4688
@@ -4658,6 +4818,8 @@ static void md_do_sync(mddev_t *mddev)
4658 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 4818 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
4659 4819
4660 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 4820 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4821 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
4822 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
4661 mddev->curr_resync > 2 && 4823 mddev->curr_resync > 2 &&
4662 mddev->curr_resync >= mddev->recovery_cp) { 4824 mddev->curr_resync >= mddev->recovery_cp) {
4663 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4825 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -4675,6 +4837,7 @@ static void md_do_sync(mddev_t *mddev)
4675 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 4837 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4676 md_wakeup_thread(mddev->thread); 4838 md_wakeup_thread(mddev->thread);
4677} 4839}
4840EXPORT_SYMBOL_GPL(md_do_sync);
4678 4841
4679 4842
4680/* 4843/*
@@ -4730,7 +4893,7 @@ void md_check_recovery(mddev_t *mddev)
4730 )) 4893 ))
4731 return; 4894 return;
4732 4895
4733 if (mddev_trylock(mddev)==0) { 4896 if (mddev_trylock(mddev)) {
4734 int spares =0; 4897 int spares =0;
4735 4898
4736 spin_lock_irq(&mddev->write_lock); 4899 spin_lock_irq(&mddev->write_lock);
@@ -4866,7 +5029,7 @@ static int md_notify_reboot(struct notifier_block *this,
4866 printk(KERN_INFO "md: stopping all md devices.\n"); 5029 printk(KERN_INFO "md: stopping all md devices.\n");
4867 5030
4868 ITERATE_MDDEV(mddev,tmp) 5031 ITERATE_MDDEV(mddev,tmp)
4869 if (mddev_trylock(mddev)==0) 5032 if (mddev_trylock(mddev))
4870 do_md_stop (mddev, 1); 5033 do_md_stop (mddev, 1);
4871 /* 5034 /*
4872 * certain more exotic SCSI devices are known to be 5035 * certain more exotic SCSI devices are known to be
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 96f7af4ae400..1cc9de44ce86 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,18 +35,6 @@
35#define NR_RESERVED_BUFS 32 35#define NR_RESERVED_BUFS 32
36 36
37 37
38static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
39{
40 struct multipath_bh *mpb;
41 mpb = kzalloc(sizeof(*mpb), gfp_flags);
42 return mpb;
43}
44
45static void mp_pool_free(void *mpb, void *data)
46{
47 kfree(mpb);
48}
49
50static int multipath_map (multipath_conf_t *conf) 38static int multipath_map (multipath_conf_t *conf)
51{ 39{
52 int i, disks = conf->raid_disks; 40 int i, disks = conf->raid_disks;
@@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev)
494 } 482 }
495 mddev->degraded = conf->raid_disks = conf->working_disks; 483 mddev->degraded = conf->raid_disks = conf->working_disks;
496 484
497 conf->pool = mempool_create(NR_RESERVED_BUFS, 485 conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
498 mp_pool_alloc, mp_pool_free, 486 sizeof(struct multipath_bh));
499 NULL);
500 if (conf->pool == NULL) { 487 if (conf->pool == NULL) {
501 printk(KERN_ERR 488 printk(KERN_ERR
502 "multipath: couldn't allocate memory for %s\n", 489 "multipath: couldn't allocate memory for %s\n",
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d88329e3c7a..3cb0872a845d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1402,6 +1402,9 @@ static void raid1d(mddev_t *mddev)
1402 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1402 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1403 clear_bit(R1BIO_Barrier, &r1_bio->state); 1403 clear_bit(R1BIO_Barrier, &r1_bio->state);
1404 for (i=0; i < conf->raid_disks; i++) 1404 for (i=0; i < conf->raid_disks; i++)
1405 if (r1_bio->bios[i])
1406 atomic_inc(&r1_bio->remaining);
1407 for (i=0; i < conf->raid_disks; i++)
1405 if (r1_bio->bios[i]) { 1408 if (r1_bio->bios[i]) {
1406 struct bio_vec *bvec; 1409 struct bio_vec *bvec;
1407 int j; 1410 int j;
@@ -1789,6 +1792,11 @@ static int run(mddev_t *mddev)
1789 mdname(mddev), mddev->level); 1792 mdname(mddev), mddev->level);
1790 goto out; 1793 goto out;
1791 } 1794 }
1795 if (mddev->reshape_position != MaxSector) {
1796 printk("raid1: %s: reshape_position set but not supported\n",
1797 mdname(mddev));
1798 goto out;
1799 }
1792 /* 1800 /*
1793 * copy the already verified devices into our private RAID1 1801 * copy the already verified devices into our private RAID1
1794 * bookkeeping area. [whatever we allocate in run(), 1802 * bookkeeping area. [whatever we allocate in run(),
@@ -1971,7 +1979,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
1971 return 0; 1979 return 0;
1972} 1980}
1973 1981
1974static int raid1_reshape(mddev_t *mddev, int raid_disks) 1982static int raid1_reshape(mddev_t *mddev)
1975{ 1983{
1976 /* We need to: 1984 /* We need to:
1977 * 1/ resize the r1bio_pool 1985 * 1/ resize the r1bio_pool
@@ -1988,10 +1996,22 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
1988 struct pool_info *newpoolinfo; 1996 struct pool_info *newpoolinfo;
1989 mirror_info_t *newmirrors; 1997 mirror_info_t *newmirrors;
1990 conf_t *conf = mddev_to_conf(mddev); 1998 conf_t *conf = mddev_to_conf(mddev);
1991 int cnt; 1999 int cnt, raid_disks;
1992 2000
1993 int d, d2; 2001 int d, d2;
1994 2002
2003 /* Cannot change chunk_size, layout, or level */
2004 if (mddev->chunk_size != mddev->new_chunk ||
2005 mddev->layout != mddev->new_layout ||
2006 mddev->level != mddev->new_level) {
2007 mddev->new_chunk = mddev->chunk_size;
2008 mddev->new_layout = mddev->layout;
2009 mddev->new_level = mddev->level;
2010 return -EINVAL;
2011 }
2012
2013 raid_disks = mddev->raid_disks + mddev->delta_disks;
2014
1995 if (raid_disks < conf->raid_disks) { 2015 if (raid_disks < conf->raid_disks) {
1996 cnt=0; 2016 cnt=0;
1997 for (d= 0; d < conf->raid_disks; d++) 2017 for (d= 0; d < conf->raid_disks; d++)
@@ -2038,6 +2058,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
2038 2058
2039 mddev->degraded += (raid_disks - conf->raid_disks); 2059 mddev->degraded += (raid_disks - conf->raid_disks);
2040 conf->raid_disks = mddev->raid_disks = raid_disks; 2060 conf->raid_disks = mddev->raid_disks = raid_disks;
2061 mddev->delta_disks = 0;
2041 2062
2042 conf->last_used = 0; /* just make sure it is in-range */ 2063 conf->last_used = 0; /* just make sure it is in-range */
2043 lower_barrier(conf); 2064 lower_barrier(conf);
@@ -2079,7 +2100,7 @@ static struct mdk_personality raid1_personality =
2079 .spare_active = raid1_spare_active, 2100 .spare_active = raid1_spare_active,
2080 .sync_request = sync_request, 2101 .sync_request = sync_request,
2081 .resize = raid1_resize, 2102 .resize = raid1_resize,
2082 .reshape = raid1_reshape, 2103 .check_reshape = raid1_reshape,
2083 .quiesce = raid1_quiesce, 2104 .quiesce = raid1_quiesce,
2084}; 2105};
2085 2106
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2dba305daf3c..dae740adaf65 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -22,6 +22,7 @@
22#include <linux/raid/raid5.h> 22#include <linux/raid/raid5.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25#include <linux/kthread.h>
25#include <asm/atomic.h> 26#include <asm/atomic.h>
26 27
27#include <linux/raid/bitmap.h> 28#include <linux/raid/bitmap.h>
@@ -93,11 +94,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
93 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 94 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
94 md_wakeup_thread(conf->mddev->thread); 95 md_wakeup_thread(conf->mddev->thread);
95 } 96 }
96 list_add_tail(&sh->lru, &conf->inactive_list);
97 atomic_dec(&conf->active_stripes); 97 atomic_dec(&conf->active_stripes);
98 if (!conf->inactive_blocked || 98 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
99 atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4)) 99 list_add_tail(&sh->lru, &conf->inactive_list);
100 wake_up(&conf->wait_for_stripe); 100 wake_up(&conf->wait_for_stripe);
101 }
101 } 102 }
102 } 103 }
103} 104}
@@ -178,10 +179,10 @@ static int grow_buffers(struct stripe_head *sh, int num)
178 179
179static void raid5_build_block (struct stripe_head *sh, int i); 180static void raid5_build_block (struct stripe_head *sh, int i);
180 181
181static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 182static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
182{ 183{
183 raid5_conf_t *conf = sh->raid_conf; 184 raid5_conf_t *conf = sh->raid_conf;
184 int disks = conf->raid_disks, i; 185 int i;
185 186
186 if (atomic_read(&sh->count) != 0) 187 if (atomic_read(&sh->count) != 0)
187 BUG(); 188 BUG();
@@ -198,7 +199,9 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
198 sh->pd_idx = pd_idx; 199 sh->pd_idx = pd_idx;
199 sh->state = 0; 200 sh->state = 0;
200 201
201 for (i=disks; i--; ) { 202 sh->disks = disks;
203
204 for (i = sh->disks; i--; ) {
202 struct r5dev *dev = &sh->dev[i]; 205 struct r5dev *dev = &sh->dev[i];
203 206
204 if (dev->toread || dev->towrite || dev->written || 207 if (dev->toread || dev->towrite || dev->written ||
@@ -215,7 +218,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
215 insert_hash(conf, sh); 218 insert_hash(conf, sh);
216} 219}
217 220
218static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) 221static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
219{ 222{
220 struct stripe_head *sh; 223 struct stripe_head *sh;
221 struct hlist_node *hn; 224 struct hlist_node *hn;
@@ -223,7 +226,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
223 CHECK_DEVLOCK(); 226 CHECK_DEVLOCK();
224 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 227 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
225 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 228 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
226 if (sh->sector == sector) 229 if (sh->sector == sector && sh->disks == disks)
227 return sh; 230 return sh;
228 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 231 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
229 return NULL; 232 return NULL;
@@ -232,8 +235,8 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
232static void unplug_slaves(mddev_t *mddev); 235static void unplug_slaves(mddev_t *mddev);
233static void raid5_unplug_device(request_queue_t *q); 236static void raid5_unplug_device(request_queue_t *q);
234 237
235static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, 238static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
236 int pd_idx, int noblock) 239 int pd_idx, int noblock)
237{ 240{
238 struct stripe_head *sh; 241 struct stripe_head *sh;
239 242
@@ -245,7 +248,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
245 wait_event_lock_irq(conf->wait_for_stripe, 248 wait_event_lock_irq(conf->wait_for_stripe,
246 conf->quiesce == 0, 249 conf->quiesce == 0,
247 conf->device_lock, /* nothing */); 250 conf->device_lock, /* nothing */);
248 sh = __find_stripe(conf, sector); 251 sh = __find_stripe(conf, sector, disks);
249 if (!sh) { 252 if (!sh) {
250 if (!conf->inactive_blocked) 253 if (!conf->inactive_blocked)
251 sh = get_free_stripe(conf); 254 sh = get_free_stripe(conf);
@@ -259,11 +262,11 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
259 < (conf->max_nr_stripes *3/4) 262 < (conf->max_nr_stripes *3/4)
260 || !conf->inactive_blocked), 263 || !conf->inactive_blocked),
261 conf->device_lock, 264 conf->device_lock,
262 unplug_slaves(conf->mddev); 265 unplug_slaves(conf->mddev)
263 ); 266 );
264 conf->inactive_blocked = 0; 267 conf->inactive_blocked = 0;
265 } else 268 } else
266 init_stripe(sh, sector, pd_idx); 269 init_stripe(sh, sector, pd_idx, disks);
267 } else { 270 } else {
268 if (atomic_read(&sh->count)) { 271 if (atomic_read(&sh->count)) {
269 if (!list_empty(&sh->lru)) 272 if (!list_empty(&sh->lru))
@@ -271,9 +274,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
271 } else { 274 } else {
272 if (!test_bit(STRIPE_HANDLE, &sh->state)) 275 if (!test_bit(STRIPE_HANDLE, &sh->state))
273 atomic_inc(&conf->active_stripes); 276 atomic_inc(&conf->active_stripes);
274 if (list_empty(&sh->lru)) 277 if (!list_empty(&sh->lru))
275 BUG(); 278 list_del_init(&sh->lru);
276 list_del_init(&sh->lru);
277 } 279 }
278 } 280 }
279 } while (sh == NULL); 281 } while (sh == NULL);
@@ -300,6 +302,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
300 kmem_cache_free(conf->slab_cache, sh); 302 kmem_cache_free(conf->slab_cache, sh);
301 return 0; 303 return 0;
302 } 304 }
305 sh->disks = conf->raid_disks;
303 /* we just created an active stripe so... */ 306 /* we just created an active stripe so... */
304 atomic_set(&sh->count, 1); 307 atomic_set(&sh->count, 1);
305 atomic_inc(&conf->active_stripes); 308 atomic_inc(&conf->active_stripes);
@@ -313,14 +316,16 @@ static int grow_stripes(raid5_conf_t *conf, int num)
313 kmem_cache_t *sc; 316 kmem_cache_t *sc;
314 int devs = conf->raid_disks; 317 int devs = conf->raid_disks;
315 318
316 sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev)); 319 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
317 320 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
318 sc = kmem_cache_create(conf->cache_name, 321 conf->active_name = 0;
322 sc = kmem_cache_create(conf->cache_name[conf->active_name],
319 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 323 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
320 0, 0, NULL, NULL); 324 0, 0, NULL, NULL);
321 if (!sc) 325 if (!sc)
322 return 1; 326 return 1;
323 conf->slab_cache = sc; 327 conf->slab_cache = sc;
328 conf->pool_size = devs;
324 while (num--) { 329 while (num--) {
325 if (!grow_one_stripe(conf)) 330 if (!grow_one_stripe(conf))
326 return 1; 331 return 1;
@@ -328,6 +333,129 @@ static int grow_stripes(raid5_conf_t *conf, int num)
328 return 0; 333 return 0;
329} 334}
330 335
336#ifdef CONFIG_MD_RAID5_RESHAPE
337static int resize_stripes(raid5_conf_t *conf, int newsize)
338{
339 /* Make all the stripes able to hold 'newsize' devices.
340 * New slots in each stripe get 'page' set to a new page.
341 *
342 * This happens in stages:
343 * 1/ create a new kmem_cache and allocate the required number of
344 * stripe_heads.
345 * 2/ gather all the old stripe_heads and tranfer the pages across
346 * to the new stripe_heads. This will have the side effect of
347 * freezing the array as once all stripe_heads have been collected,
348 * no IO will be possible. Old stripe heads are freed once their
349 * pages have been transferred over, and the old kmem_cache is
350 * freed when all stripes are done.
351 * 3/ reallocate conf->disks to be suitable bigger. If this fails,
352 * we simple return a failre status - no need to clean anything up.
353 * 4/ allocate new pages for the new slots in the new stripe_heads.
354 * If this fails, we don't bother trying the shrink the
355 * stripe_heads down again, we just leave them as they are.
356 * As each stripe_head is processed the new one is released into
357 * active service.
358 *
359 * Once step2 is started, we cannot afford to wait for a write,
360 * so we use GFP_NOIO allocations.
361 */
362 struct stripe_head *osh, *nsh;
363 LIST_HEAD(newstripes);
364 struct disk_info *ndisks;
365 int err = 0;
366 kmem_cache_t *sc;
367 int i;
368
369 if (newsize <= conf->pool_size)
370 return 0; /* never bother to shrink */
371
372 /* Step 1 */
373 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
374 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
375 0, 0, NULL, NULL);
376 if (!sc)
377 return -ENOMEM;
378
379 for (i = conf->max_nr_stripes; i; i--) {
380 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
381 if (!nsh)
382 break;
383
384 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
385
386 nsh->raid_conf = conf;
387 spin_lock_init(&nsh->lock);
388
389 list_add(&nsh->lru, &newstripes);
390 }
391 if (i) {
392 /* didn't get enough, give up */
393 while (!list_empty(&newstripes)) {
394 nsh = list_entry(newstripes.next, struct stripe_head, lru);
395 list_del(&nsh->lru);
396 kmem_cache_free(sc, nsh);
397 }
398 kmem_cache_destroy(sc);
399 return -ENOMEM;
400 }
401 /* Step 2 - Must use GFP_NOIO now.
402 * OK, we have enough stripes, start collecting inactive
403 * stripes and copying them over
404 */
405 list_for_each_entry(nsh, &newstripes, lru) {
406 spin_lock_irq(&conf->device_lock);
407 wait_event_lock_irq(conf->wait_for_stripe,
408 !list_empty(&conf->inactive_list),
409 conf->device_lock,
410 unplug_slaves(conf->mddev)
411 );
412 osh = get_free_stripe(conf);
413 spin_unlock_irq(&conf->device_lock);
414 atomic_set(&nsh->count, 1);
415 for(i=0; i<conf->pool_size; i++)
416 nsh->dev[i].page = osh->dev[i].page;
417 for( ; i<newsize; i++)
418 nsh->dev[i].page = NULL;
419 kmem_cache_free(conf->slab_cache, osh);
420 }
421 kmem_cache_destroy(conf->slab_cache);
422
423 /* Step 3.
424 * At this point, we are holding all the stripes so the array
425 * is completely stalled, so now is a good time to resize
426 * conf->disks.
427 */
428 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
429 if (ndisks) {
430 for (i=0; i<conf->raid_disks; i++)
431 ndisks[i] = conf->disks[i];
432 kfree(conf->disks);
433 conf->disks = ndisks;
434 } else
435 err = -ENOMEM;
436
437 /* Step 4, return new stripes to service */
438 while(!list_empty(&newstripes)) {
439 nsh = list_entry(newstripes.next, struct stripe_head, lru);
440 list_del_init(&nsh->lru);
441 for (i=conf->raid_disks; i < newsize; i++)
442 if (nsh->dev[i].page == NULL) {
443 struct page *p = alloc_page(GFP_NOIO);
444 nsh->dev[i].page = p;
445 if (!p)
446 err = -ENOMEM;
447 }
448 release_stripe(nsh);
449 }
450 /* critical section pass, GFP_NOIO no longer needed */
451
452 conf->slab_cache = sc;
453 conf->active_name = 1-conf->active_name;
454 conf->pool_size = newsize;
455 return err;
456}
457#endif
458
331static int drop_one_stripe(raid5_conf_t *conf) 459static int drop_one_stripe(raid5_conf_t *conf)
332{ 460{
333 struct stripe_head *sh; 461 struct stripe_head *sh;
@@ -339,7 +467,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
339 return 0; 467 return 0;
340 if (atomic_read(&sh->count)) 468 if (atomic_read(&sh->count))
341 BUG(); 469 BUG();
342 shrink_buffers(sh, conf->raid_disks); 470 shrink_buffers(sh, conf->pool_size);
343 kmem_cache_free(conf->slab_cache, sh); 471 kmem_cache_free(conf->slab_cache, sh);
344 atomic_dec(&conf->active_stripes); 472 atomic_dec(&conf->active_stripes);
345 return 1; 473 return 1;
@@ -360,7 +488,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
360{ 488{
361 struct stripe_head *sh = bi->bi_private; 489 struct stripe_head *sh = bi->bi_private;
362 raid5_conf_t *conf = sh->raid_conf; 490 raid5_conf_t *conf = sh->raid_conf;
363 int disks = conf->raid_disks, i; 491 int disks = sh->disks, i;
364 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 492 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
365 493
366 if (bi->bi_size) 494 if (bi->bi_size)
@@ -458,7 +586,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
458{ 586{
459 struct stripe_head *sh = bi->bi_private; 587 struct stripe_head *sh = bi->bi_private;
460 raid5_conf_t *conf = sh->raid_conf; 588 raid5_conf_t *conf = sh->raid_conf;
461 int disks = conf->raid_disks, i; 589 int disks = sh->disks, i;
462 unsigned long flags; 590 unsigned long flags;
463 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 591 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
464 592
@@ -612,7 +740,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
612static sector_t compute_blocknr(struct stripe_head *sh, int i) 740static sector_t compute_blocknr(struct stripe_head *sh, int i)
613{ 741{
614 raid5_conf_t *conf = sh->raid_conf; 742 raid5_conf_t *conf = sh->raid_conf;
615 int raid_disks = conf->raid_disks, data_disks = raid_disks - 1; 743 int raid_disks = sh->disks, data_disks = raid_disks - 1;
616 sector_t new_sector = sh->sector, check; 744 sector_t new_sector = sh->sector, check;
617 int sectors_per_chunk = conf->chunk_size >> 9; 745 int sectors_per_chunk = conf->chunk_size >> 9;
618 sector_t stripe; 746 sector_t stripe;
@@ -713,8 +841,7 @@ static void copy_data(int frombio, struct bio *bio,
713 841
714static void compute_block(struct stripe_head *sh, int dd_idx) 842static void compute_block(struct stripe_head *sh, int dd_idx)
715{ 843{
716 raid5_conf_t *conf = sh->raid_conf; 844 int i, count, disks = sh->disks;
717 int i, count, disks = conf->raid_disks;
718 void *ptr[MAX_XOR_BLOCKS], *p; 845 void *ptr[MAX_XOR_BLOCKS], *p;
719 846
720 PRINTK("compute_block, stripe %llu, idx %d\n", 847 PRINTK("compute_block, stripe %llu, idx %d\n",
@@ -744,7 +871,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
744static void compute_parity(struct stripe_head *sh, int method) 871static void compute_parity(struct stripe_head *sh, int method)
745{ 872{
746 raid5_conf_t *conf = sh->raid_conf; 873 raid5_conf_t *conf = sh->raid_conf;
747 int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count; 874 int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
748 void *ptr[MAX_XOR_BLOCKS]; 875 void *ptr[MAX_XOR_BLOCKS];
749 struct bio *chosen; 876 struct bio *chosen;
750 877
@@ -910,6 +1037,20 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
910 return 0; 1037 return 0;
911} 1038}
912 1039
1040static void end_reshape(raid5_conf_t *conf);
1041
1042static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1043{
1044 int sectors_per_chunk = conf->chunk_size >> 9;
1045 sector_t x = stripe;
1046 int pd_idx, dd_idx;
1047 int chunk_offset = sector_div(x, sectors_per_chunk);
1048 stripe = x;
1049 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
1050 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
1051 return pd_idx;
1052}
1053
913 1054
914/* 1055/*
915 * handle_stripe - do things to a stripe. 1056 * handle_stripe - do things to a stripe.
@@ -932,11 +1073,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
932static void handle_stripe(struct stripe_head *sh) 1073static void handle_stripe(struct stripe_head *sh)
933{ 1074{
934 raid5_conf_t *conf = sh->raid_conf; 1075 raid5_conf_t *conf = sh->raid_conf;
935 int disks = conf->raid_disks; 1076 int disks = sh->disks;
936 struct bio *return_bi= NULL; 1077 struct bio *return_bi= NULL;
937 struct bio *bi; 1078 struct bio *bi;
938 int i; 1079 int i;
939 int syncing; 1080 int syncing, expanding, expanded;
940 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 1081 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
941 int non_overwrite = 0; 1082 int non_overwrite = 0;
942 int failed_num=0; 1083 int failed_num=0;
@@ -951,6 +1092,8 @@ static void handle_stripe(struct stripe_head *sh)
951 clear_bit(STRIPE_DELAYED, &sh->state); 1092 clear_bit(STRIPE_DELAYED, &sh->state);
952 1093
953 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1094 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1095 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1096 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
954 /* Now to look around and see what can be done */ 1097 /* Now to look around and see what can be done */
955 1098
956 rcu_read_lock(); 1099 rcu_read_lock();
@@ -1143,13 +1286,14 @@ static void handle_stripe(struct stripe_head *sh)
1143 * parity, or to satisfy requests 1286 * parity, or to satisfy requests
1144 * or to load a block that is being partially written. 1287 * or to load a block that is being partially written.
1145 */ 1288 */
1146 if (to_read || non_overwrite || (syncing && (uptodate < disks))) { 1289 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
1147 for (i=disks; i--;) { 1290 for (i=disks; i--;) {
1148 dev = &sh->dev[i]; 1291 dev = &sh->dev[i];
1149 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1292 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1150 (dev->toread || 1293 (dev->toread ||
1151 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1294 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1152 syncing || 1295 syncing ||
1296 expanding ||
1153 (failed && (sh->dev[failed_num].toread || 1297 (failed && (sh->dev[failed_num].toread ||
1154 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) 1298 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1155 ) 1299 )
@@ -1339,13 +1483,77 @@ static void handle_stripe(struct stripe_head *sh)
1339 set_bit(R5_Wantwrite, &dev->flags); 1483 set_bit(R5_Wantwrite, &dev->flags);
1340 set_bit(R5_ReWrite, &dev->flags); 1484 set_bit(R5_ReWrite, &dev->flags);
1341 set_bit(R5_LOCKED, &dev->flags); 1485 set_bit(R5_LOCKED, &dev->flags);
1486 locked++;
1342 } else { 1487 } else {
1343 /* let's read it back */ 1488 /* let's read it back */
1344 set_bit(R5_Wantread, &dev->flags); 1489 set_bit(R5_Wantread, &dev->flags);
1345 set_bit(R5_LOCKED, &dev->flags); 1490 set_bit(R5_LOCKED, &dev->flags);
1491 locked++;
1346 } 1492 }
1347 } 1493 }
1348 1494
1495 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
1496 /* Need to write out all blocks after computing parity */
1497 sh->disks = conf->raid_disks;
1498 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
1499 compute_parity(sh, RECONSTRUCT_WRITE);
1500 for (i= conf->raid_disks; i--;) {
1501 set_bit(R5_LOCKED, &sh->dev[i].flags);
1502 locked++;
1503 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1504 }
1505 clear_bit(STRIPE_EXPANDING, &sh->state);
1506 } else if (expanded) {
1507 clear_bit(STRIPE_EXPAND_READY, &sh->state);
1508 atomic_dec(&conf->reshape_stripes);
1509 wake_up(&conf->wait_for_overlap);
1510 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1511 }
1512
1513 if (expanding && locked == 0) {
1514 /* We have read all the blocks in this stripe and now we need to
1515 * copy some of them into a target stripe for expand.
1516 */
1517 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1518 for (i=0; i< sh->disks; i++)
1519 if (i != sh->pd_idx) {
1520 int dd_idx, pd_idx, j;
1521 struct stripe_head *sh2;
1522
1523 sector_t bn = compute_blocknr(sh, i);
1524 sector_t s = raid5_compute_sector(bn, conf->raid_disks,
1525 conf->raid_disks-1,
1526 &dd_idx, &pd_idx, conf);
1527 sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
1528 if (sh2 == NULL)
1529 /* so far only the early blocks of this stripe
1530 * have been requested. When later blocks
1531 * get requested, we will try again
1532 */
1533 continue;
1534 if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
1535 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
1536 /* must have already done this block */
1537 release_stripe(sh2);
1538 continue;
1539 }
1540 memcpy(page_address(sh2->dev[dd_idx].page),
1541 page_address(sh->dev[i].page),
1542 STRIPE_SIZE);
1543 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
1544 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
1545 for (j=0; j<conf->raid_disks; j++)
1546 if (j != sh2->pd_idx &&
1547 !test_bit(R5_Expanded, &sh2->dev[j].flags))
1548 break;
1549 if (j == conf->raid_disks) {
1550 set_bit(STRIPE_EXPAND_READY, &sh2->state);
1551 set_bit(STRIPE_HANDLE, &sh2->state);
1552 }
1553 release_stripe(sh2);
1554 }
1555 }
1556
1349 spin_unlock(&sh->lock); 1557 spin_unlock(&sh->lock);
1350 1558
1351 while ((bi=return_bi)) { 1559 while ((bi=return_bi)) {
@@ -1384,7 +1592,7 @@ static void handle_stripe(struct stripe_head *sh)
1384 rcu_read_unlock(); 1592 rcu_read_unlock();
1385 1593
1386 if (rdev) { 1594 if (rdev) {
1387 if (syncing) 1595 if (syncing || expanding || expanded)
1388 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1596 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1389 1597
1390 bi->bi_bdev = rdev->bdev; 1598 bi->bi_bdev = rdev->bdev;
@@ -1526,17 +1734,16 @@ static inline void raid5_plug_device(raid5_conf_t *conf)
1526 spin_unlock_irq(&conf->device_lock); 1734 spin_unlock_irq(&conf->device_lock);
1527} 1735}
1528 1736
1529static int make_request (request_queue_t *q, struct bio * bi) 1737static int make_request(request_queue_t *q, struct bio * bi)
1530{ 1738{
1531 mddev_t *mddev = q->queuedata; 1739 mddev_t *mddev = q->queuedata;
1532 raid5_conf_t *conf = mddev_to_conf(mddev); 1740 raid5_conf_t *conf = mddev_to_conf(mddev);
1533 const unsigned int raid_disks = conf->raid_disks;
1534 const unsigned int data_disks = raid_disks - 1;
1535 unsigned int dd_idx, pd_idx; 1741 unsigned int dd_idx, pd_idx;
1536 sector_t new_sector; 1742 sector_t new_sector;
1537 sector_t logical_sector, last_sector; 1743 sector_t logical_sector, last_sector;
1538 struct stripe_head *sh; 1744 struct stripe_head *sh;
1539 const int rw = bio_data_dir(bi); 1745 const int rw = bio_data_dir(bi);
1746 int remaining;
1540 1747
1541 if (unlikely(bio_barrier(bi))) { 1748 if (unlikely(bio_barrier(bi))) {
1542 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 1749 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1555,20 +1762,77 @@ static int make_request (request_queue_t *q, struct bio * bi)
1555 1762
1556 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 1763 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1557 DEFINE_WAIT(w); 1764 DEFINE_WAIT(w);
1558 1765 int disks;
1559 new_sector = raid5_compute_sector(logical_sector,
1560 raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1561 1766
1767 retry:
1768 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1769 if (likely(conf->expand_progress == MaxSector))
1770 disks = conf->raid_disks;
1771 else {
1772 /* spinlock is needed as expand_progress may be
1773 * 64bit on a 32bit platform, and so it might be
1774 * possible to see a half-updated value
1775 * Ofcourse expand_progress could change after
1776 * the lock is dropped, so once we get a reference
1777 * to the stripe that we think it is, we will have
1778 * to check again.
1779 */
1780 spin_lock_irq(&conf->device_lock);
1781 disks = conf->raid_disks;
1782 if (logical_sector >= conf->expand_progress)
1783 disks = conf->previous_raid_disks;
1784 else {
1785 if (logical_sector >= conf->expand_lo) {
1786 spin_unlock_irq(&conf->device_lock);
1787 schedule();
1788 goto retry;
1789 }
1790 }
1791 spin_unlock_irq(&conf->device_lock);
1792 }
1793 new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
1794 &dd_idx, &pd_idx, conf);
1562 PRINTK("raid5: make_request, sector %llu logical %llu\n", 1795 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1563 (unsigned long long)new_sector, 1796 (unsigned long long)new_sector,
1564 (unsigned long long)logical_sector); 1797 (unsigned long long)logical_sector);
1565 1798
1566 retry: 1799 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
1567 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1568 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1569 if (sh) { 1800 if (sh) {
1570 if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 1801 if (unlikely(conf->expand_progress != MaxSector)) {
1571 /* Add failed due to overlap. Flush everything 1802 /* expansion might have moved on while waiting for a
1803 * stripe, so we must do the range check again.
1804 * Expansion could still move past after this
1805 * test, but as we are holding a reference to
1806 * 'sh', we know that if that happens,
1807 * STRIPE_EXPANDING will get set and the expansion
1808 * won't proceed until we finish with the stripe.
1809 */
1810 int must_retry = 0;
1811 spin_lock_irq(&conf->device_lock);
1812 if (logical_sector < conf->expand_progress &&
1813 disks == conf->previous_raid_disks)
1814 /* mismatch, need to try again */
1815 must_retry = 1;
1816 spin_unlock_irq(&conf->device_lock);
1817 if (must_retry) {
1818 release_stripe(sh);
1819 goto retry;
1820 }
1821 }
1822 /* FIXME what if we get a false positive because these
1823 * are being updated.
1824 */
1825 if (logical_sector >= mddev->suspend_lo &&
1826 logical_sector < mddev->suspend_hi) {
1827 release_stripe(sh);
1828 schedule();
1829 goto retry;
1830 }
1831
1832 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
1833 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1834 /* Stripe is busy expanding or
1835 * add failed due to overlap. Flush everything
1572 * and wait a while 1836 * and wait a while
1573 */ 1837 */
1574 raid5_unplug_device(mddev->queue); 1838 raid5_unplug_device(mddev->queue);
@@ -1580,7 +1844,6 @@ static int make_request (request_queue_t *q, struct bio * bi)
1580 raid5_plug_device(conf); 1844 raid5_plug_device(conf);
1581 handle_stripe(sh); 1845 handle_stripe(sh);
1582 release_stripe(sh); 1846 release_stripe(sh);
1583
1584 } else { 1847 } else {
1585 /* cannot get stripe for read-ahead, just give-up */ 1848 /* cannot get stripe for read-ahead, just give-up */
1586 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1849 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -1590,7 +1853,9 @@ static int make_request (request_queue_t *q, struct bio * bi)
1590 1853
1591 } 1854 }
1592 spin_lock_irq(&conf->device_lock); 1855 spin_lock_irq(&conf->device_lock);
1593 if (--bi->bi_phys_segments == 0) { 1856 remaining = --bi->bi_phys_segments;
1857 spin_unlock_irq(&conf->device_lock);
1858 if (remaining == 0) {
1594 int bytes = bi->bi_size; 1859 int bytes = bi->bi_size;
1595 1860
1596 if ( bio_data_dir(bi) == WRITE ) 1861 if ( bio_data_dir(bi) == WRITE )
@@ -1598,7 +1863,6 @@ static int make_request (request_queue_t *q, struct bio * bi)
1598 bi->bi_size = 0; 1863 bi->bi_size = 0;
1599 bi->bi_end_io(bi, bytes, 0); 1864 bi->bi_end_io(bi, bytes, 0);
1600 } 1865 }
1601 spin_unlock_irq(&conf->device_lock);
1602 return 0; 1866 return 0;
1603} 1867}
1604 1868
@@ -1607,12 +1871,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1607{ 1871{
1608 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1872 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1609 struct stripe_head *sh; 1873 struct stripe_head *sh;
1610 int sectors_per_chunk = conf->chunk_size >> 9; 1874 int pd_idx;
1611 sector_t x; 1875 sector_t first_sector, last_sector;
1612 unsigned long stripe;
1613 int chunk_offset;
1614 int dd_idx, pd_idx;
1615 sector_t first_sector;
1616 int raid_disks = conf->raid_disks; 1876 int raid_disks = conf->raid_disks;
1617 int data_disks = raid_disks-1; 1877 int data_disks = raid_disks-1;
1618 sector_t max_sector = mddev->size << 1; 1878 sector_t max_sector = mddev->size << 1;
@@ -1621,6 +1881,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1621 if (sector_nr >= max_sector) { 1881 if (sector_nr >= max_sector) {
1622 /* just being told to finish up .. nothing much to do */ 1882 /* just being told to finish up .. nothing much to do */
1623 unplug_slaves(mddev); 1883 unplug_slaves(mddev);
1884 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1885 end_reshape(conf);
1886 return 0;
1887 }
1624 1888
1625 if (mddev->curr_resync < max_sector) /* aborted */ 1889 if (mddev->curr_resync < max_sector) /* aborted */
1626 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 1890 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
@@ -1631,6 +1895,123 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1631 1895
1632 return 0; 1896 return 0;
1633 } 1897 }
1898
1899 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1900 /* reshaping is quite different to recovery/resync so it is
1901 * handled quite separately ... here.
1902 *
1903 * On each call to sync_request, we gather one chunk worth of
1904 * destination stripes and flag them as expanding.
1905 * Then we find all the source stripes and request reads.
1906 * As the reads complete, handle_stripe will copy the data
1907 * into the destination stripe and release that stripe.
1908 */
1909 int i;
1910 int dd_idx;
1911 sector_t writepos, safepos, gap;
1912
1913 if (sector_nr == 0 &&
1914 conf->expand_progress != 0) {
1915 /* restarting in the middle, skip the initial sectors */
1916 sector_nr = conf->expand_progress;
1917 sector_div(sector_nr, conf->raid_disks-1);
1918 *skipped = 1;
1919 return sector_nr;
1920 }
1921
1922 /* we update the metadata when there is more than 3Meg
1923 * in the block range (that is rather arbitrary, should
1924 * probably be time based) or when the data about to be
1925 * copied would over-write the source of the data at
1926 * the front of the range.
1927 * i.e. one new_stripe forward from expand_progress new_maps
1928 * to after where expand_lo old_maps to
1929 */
1930 writepos = conf->expand_progress +
1931 conf->chunk_size/512*(conf->raid_disks-1);
1932 sector_div(writepos, conf->raid_disks-1);
1933 safepos = conf->expand_lo;
1934 sector_div(safepos, conf->previous_raid_disks-1);
1935 gap = conf->expand_progress - conf->expand_lo;
1936
1937 if (writepos >= safepos ||
1938 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
1939 /* Cannot proceed until we've updated the superblock... */
1940 wait_event(conf->wait_for_overlap,
1941 atomic_read(&conf->reshape_stripes)==0);
1942 mddev->reshape_position = conf->expand_progress;
1943 mddev->sb_dirty = 1;
1944 md_wakeup_thread(mddev->thread);
1945 wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
1946 kthread_should_stop());
1947 spin_lock_irq(&conf->device_lock);
1948 conf->expand_lo = mddev->reshape_position;
1949 spin_unlock_irq(&conf->device_lock);
1950 wake_up(&conf->wait_for_overlap);
1951 }
1952
1953 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
1954 int j;
1955 int skipped = 0;
1956 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
1957 sh = get_active_stripe(conf, sector_nr+i,
1958 conf->raid_disks, pd_idx, 0);
1959 set_bit(STRIPE_EXPANDING, &sh->state);
1960 atomic_inc(&conf->reshape_stripes);
1961 /* If any of this stripe is beyond the end of the old
1962 * array, then we need to zero those blocks
1963 */
1964 for (j=sh->disks; j--;) {
1965 sector_t s;
1966 if (j == sh->pd_idx)
1967 continue;
1968 s = compute_blocknr(sh, j);
1969 if (s < (mddev->array_size<<1)) {
1970 skipped = 1;
1971 continue;
1972 }
1973 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
1974 set_bit(R5_Expanded, &sh->dev[j].flags);
1975 set_bit(R5_UPTODATE, &sh->dev[j].flags);
1976 }
1977 if (!skipped) {
1978 set_bit(STRIPE_EXPAND_READY, &sh->state);
1979 set_bit(STRIPE_HANDLE, &sh->state);
1980 }
1981 release_stripe(sh);
1982 }
1983 spin_lock_irq(&conf->device_lock);
1984 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
1985 spin_unlock_irq(&conf->device_lock);
1986 /* Ok, those stripe are ready. We can start scheduling
1987 * reads on the source stripes.
1988 * The source stripes are determined by mapping the first and last
1989 * block on the destination stripes.
1990 */
1991 raid_disks = conf->previous_raid_disks;
1992 data_disks = raid_disks - 1;
1993 first_sector =
1994 raid5_compute_sector(sector_nr*(conf->raid_disks-1),
1995 raid_disks, data_disks,
1996 &dd_idx, &pd_idx, conf);
1997 last_sector =
1998 raid5_compute_sector((sector_nr+conf->chunk_size/512)
1999 *(conf->raid_disks-1) -1,
2000 raid_disks, data_disks,
2001 &dd_idx, &pd_idx, conf);
2002 if (last_sector >= (mddev->size<<1))
2003 last_sector = (mddev->size<<1)-1;
2004 while (first_sector <= last_sector) {
2005 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
2006 sh = get_active_stripe(conf, first_sector,
2007 conf->previous_raid_disks, pd_idx, 0);
2008 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2009 set_bit(STRIPE_HANDLE, &sh->state);
2010 release_stripe(sh);
2011 first_sector += STRIPE_SECTORS;
2012 }
2013 return conf->chunk_size>>9;
2014 }
1634 /* if there is 1 or more failed drives and we are trying 2015 /* if there is 1 or more failed drives and we are trying
1635 * to resync, then assert that we are finished, because there is 2016 * to resync, then assert that we are finished, because there is
1636 * nothing we can do. 2017 * nothing we can do.
@@ -1649,16 +2030,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1649 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 2030 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1650 } 2031 }
1651 2032
1652 x = sector_nr; 2033 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
1653 chunk_offset = sector_div(x, sectors_per_chunk); 2034 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
1654 stripe = x;
1655 BUG_ON(x != stripe);
1656
1657 first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
1658 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1659 sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1660 if (sh == NULL) { 2035 if (sh == NULL) {
1661 sh = get_active_stripe(conf, sector_nr, pd_idx, 0); 2036 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
1662 /* make sure we don't swamp the stripe cache if someone else 2037 /* make sure we don't swamp the stripe cache if someone else
1663 * is trying to get access 2038 * is trying to get access
1664 */ 2039 */
@@ -1822,11 +2197,64 @@ static int run(mddev_t *mddev)
1822 return -EIO; 2197 return -EIO;
1823 } 2198 }
1824 2199
1825 mddev->private = kzalloc(sizeof (raid5_conf_t) 2200 if (mddev->reshape_position != MaxSector) {
1826 + mddev->raid_disks * sizeof(struct disk_info), 2201 /* Check that we can continue the reshape.
1827 GFP_KERNEL); 2202 * Currently only disks can change, it must
2203 * increase, and we must be past the point where
2204 * a stripe over-writes itself
2205 */
2206 sector_t here_new, here_old;
2207 int old_disks;
2208
2209 if (mddev->new_level != mddev->level ||
2210 mddev->new_layout != mddev->layout ||
2211 mddev->new_chunk != mddev->chunk_size) {
2212 printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
2213 mdname(mddev));
2214 return -EINVAL;
2215 }
2216 if (mddev->delta_disks <= 0) {
2217 printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
2218 mdname(mddev));
2219 return -EINVAL;
2220 }
2221 old_disks = mddev->raid_disks - mddev->delta_disks;
2222 /* reshape_position must be on a new-stripe boundary, and one
2223 * further up in new geometry must map after here in old geometry.
2224 */
2225 here_new = mddev->reshape_position;
2226 if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
2227 printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
2228 return -EINVAL;
2229 }
2230 /* here_new is the stripe we will write to */
2231 here_old = mddev->reshape_position;
2232 sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
2233 /* here_old is the first stripe that we might need to read from */
2234 if (here_new >= here_old) {
2235 /* Reading from the same stripe as writing to - bad */
2236 printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
2237 return -EINVAL;
2238 }
2239 printk(KERN_INFO "raid5: reshape will continue\n");
2240 /* OK, we should be able to continue; */
2241 }
2242
2243
2244 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
1828 if ((conf = mddev->private) == NULL) 2245 if ((conf = mddev->private) == NULL)
1829 goto abort; 2246 goto abort;
2247 if (mddev->reshape_position == MaxSector) {
2248 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
2249 } else {
2250 conf->raid_disks = mddev->raid_disks;
2251 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
2252 }
2253
2254 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
2255 GFP_KERNEL);
2256 if (!conf->disks)
2257 goto abort;
1830 2258
1831 conf->mddev = mddev; 2259 conf->mddev = mddev;
1832 2260
@@ -1847,7 +2275,7 @@ static int run(mddev_t *mddev)
1847 2275
1848 ITERATE_RDEV(mddev,rdev,tmp) { 2276 ITERATE_RDEV(mddev,rdev,tmp) {
1849 raid_disk = rdev->raid_disk; 2277 raid_disk = rdev->raid_disk;
1850 if (raid_disk >= mddev->raid_disks 2278 if (raid_disk >= conf->raid_disks
1851 || raid_disk < 0) 2279 || raid_disk < 0)
1852 continue; 2280 continue;
1853 disk = conf->disks + raid_disk; 2281 disk = conf->disks + raid_disk;
@@ -1863,7 +2291,6 @@ static int run(mddev_t *mddev)
1863 } 2291 }
1864 } 2292 }
1865 2293
1866 conf->raid_disks = mddev->raid_disks;
1867 /* 2294 /*
1868 * 0 for a fully functional array, 1 for a degraded array. 2295 * 0 for a fully functional array, 1 for a degraded array.
1869 */ 2296 */
@@ -1873,6 +2300,7 @@ static int run(mddev_t *mddev)
1873 conf->level = mddev->level; 2300 conf->level = mddev->level;
1874 conf->algorithm = mddev->layout; 2301 conf->algorithm = mddev->layout;
1875 conf->max_nr_stripes = NR_STRIPES; 2302 conf->max_nr_stripes = NR_STRIPES;
2303 conf->expand_progress = mddev->reshape_position;
1876 2304
1877 /* device size must be a multiple of chunk size */ 2305 /* device size must be a multiple of chunk size */
1878 mddev->size &= ~(mddev->chunk_size/1024 -1); 2306 mddev->size &= ~(mddev->chunk_size/1024 -1);
@@ -1945,6 +2373,21 @@ static int run(mddev_t *mddev)
1945 2373
1946 print_raid5_conf(conf); 2374 print_raid5_conf(conf);
1947 2375
2376 if (conf->expand_progress != MaxSector) {
2377 printk("...ok start reshape thread\n");
2378 conf->expand_lo = conf->expand_progress;
2379 atomic_set(&conf->reshape_stripes, 0);
2380 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2381 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2382 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
2383 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
2384 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
2385 "%s_reshape");
2386 /* FIXME if md_register_thread fails?? */
2387 md_wakeup_thread(mddev->sync_thread);
2388
2389 }
2390
1948 /* read-ahead size must cover two whole stripes, which is 2391 /* read-ahead size must cover two whole stripes, which is
1949 * 2 * (n-1) * chunksize where 'n' is the number of raid devices 2392 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
1950 */ 2393 */
@@ -1960,12 +2403,13 @@ static int run(mddev_t *mddev)
1960 2403
1961 mddev->queue->unplug_fn = raid5_unplug_device; 2404 mddev->queue->unplug_fn = raid5_unplug_device;
1962 mddev->queue->issue_flush_fn = raid5_issue_flush; 2405 mddev->queue->issue_flush_fn = raid5_issue_flush;
2406 mddev->array_size = mddev->size * (conf->previous_raid_disks - 1);
1963 2407
1964 mddev->array_size = mddev->size * (mddev->raid_disks - 1);
1965 return 0; 2408 return 0;
1966abort: 2409abort:
1967 if (conf) { 2410 if (conf) {
1968 print_raid5_conf(conf); 2411 print_raid5_conf(conf);
2412 kfree(conf->disks);
1969 kfree(conf->stripe_hashtbl); 2413 kfree(conf->stripe_hashtbl);
1970 kfree(conf); 2414 kfree(conf);
1971 } 2415 }
@@ -1986,6 +2430,7 @@ static int stop(mddev_t *mddev)
1986 kfree(conf->stripe_hashtbl); 2430 kfree(conf->stripe_hashtbl);
1987 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2431 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1988 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 2432 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
2433 kfree(conf->disks);
1989 kfree(conf); 2434 kfree(conf);
1990 mddev->private = NULL; 2435 mddev->private = NULL;
1991 return 0; 2436 return 0;
@@ -2001,7 +2446,7 @@ static void print_sh (struct stripe_head *sh)
2001 printk("sh %llu, count %d.\n", 2446 printk("sh %llu, count %d.\n",
2002 (unsigned long long)sh->sector, atomic_read(&sh->count)); 2447 (unsigned long long)sh->sector, atomic_read(&sh->count));
2003 printk("sh %llu, ", (unsigned long long)sh->sector); 2448 printk("sh %llu, ", (unsigned long long)sh->sector);
2004 for (i = 0; i < sh->raid_conf->raid_disks; i++) { 2449 for (i = 0; i < sh->disks; i++) {
2005 printk("(cache%d: %p %ld) ", 2450 printk("(cache%d: %p %ld) ",
2006 i, sh->dev[i].page, sh->dev[i].flags); 2451 i, sh->dev[i].page, sh->dev[i].flags);
2007 } 2452 }
@@ -2132,7 +2577,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2132 /* 2577 /*
2133 * find the disk ... 2578 * find the disk ...
2134 */ 2579 */
2135 for (disk=0; disk < mddev->raid_disks; disk++) 2580 for (disk=0; disk < conf->raid_disks; disk++)
2136 if ((p=conf->disks + disk)->rdev == NULL) { 2581 if ((p=conf->disks + disk)->rdev == NULL) {
2137 clear_bit(In_sync, &rdev->flags); 2582 clear_bit(In_sync, &rdev->flags);
2138 rdev->raid_disk = disk; 2583 rdev->raid_disk = disk;
@@ -2168,11 +2613,146 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
2168 return 0; 2613 return 0;
2169} 2614}
2170 2615
2616#ifdef CONFIG_MD_RAID5_RESHAPE
2617static int raid5_check_reshape(mddev_t *mddev)
2618{
2619 raid5_conf_t *conf = mddev_to_conf(mddev);
2620 int err;
2621
2622 if (mddev->delta_disks < 0 ||
2623 mddev->new_level != mddev->level)
2624 return -EINVAL; /* Cannot shrink array or change level yet */
2625 if (mddev->delta_disks == 0)
2626 return 0; /* nothing to do */
2627
2628 /* Can only proceed if there are plenty of stripe_heads.
2629 * We need a minimum of one full stripe,, and for sensible progress
2630 * it is best to have about 4 times that.
2631 * If we require 4 times, then the default 256 4K stripe_heads will
2632 * allow for chunk sizes up to 256K, which is probably OK.
2633 * If the chunk size is greater, user-space should request more
2634 * stripe_heads first.
2635 */
2636 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
2637 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
2638 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
2639 (mddev->chunk_size / STRIPE_SIZE)*4);
2640 return -ENOSPC;
2641 }
2642
2643 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
2644 if (err)
2645 return err;
2646
2647 /* looks like we might be able to manage this */
2648 return 0;
2649}
2650
2651static int raid5_start_reshape(mddev_t *mddev)
2652{
2653 raid5_conf_t *conf = mddev_to_conf(mddev);
2654 mdk_rdev_t *rdev;
2655 struct list_head *rtmp;
2656 int spares = 0;
2657 int added_devices = 0;
2658
2659 if (mddev->degraded ||
2660 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2661 return -EBUSY;
2662
2663 ITERATE_RDEV(mddev, rdev, rtmp)
2664 if (rdev->raid_disk < 0 &&
2665 !test_bit(Faulty, &rdev->flags))
2666 spares++;
2667
2668 if (spares < mddev->delta_disks-1)
2669 /* Not enough devices even to make a degraded array
2670 * of that size
2671 */
2672 return -EINVAL;
2673
2674 atomic_set(&conf->reshape_stripes, 0);
2675 spin_lock_irq(&conf->device_lock);
2676 conf->previous_raid_disks = conf->raid_disks;
2677 conf->raid_disks += mddev->delta_disks;
2678 conf->expand_progress = 0;
2679 conf->expand_lo = 0;
2680 spin_unlock_irq(&conf->device_lock);
2681
2682 /* Add some new drives, as many as will fit.
2683 * We know there are enough to make the newly sized array work.
2684 */
2685 ITERATE_RDEV(mddev, rdev, rtmp)
2686 if (rdev->raid_disk < 0 &&
2687 !test_bit(Faulty, &rdev->flags)) {
2688 if (raid5_add_disk(mddev, rdev)) {
2689 char nm[20];
2690 set_bit(In_sync, &rdev->flags);
2691 conf->working_disks++;
2692 added_devices++;
2693 sprintf(nm, "rd%d", rdev->raid_disk);
2694 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2695 } else
2696 break;
2697 }
2698
2699 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
2700 mddev->raid_disks = conf->raid_disks;
2701 mddev->reshape_position = 0;
2702 mddev->sb_dirty = 1;
2703
2704 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2705 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2706 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
2707 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
2708 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
2709 "%s_reshape");
2710 if (!mddev->sync_thread) {
2711 mddev->recovery = 0;
2712 spin_lock_irq(&conf->device_lock);
2713 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
2714 conf->expand_progress = MaxSector;
2715 spin_unlock_irq(&conf->device_lock);
2716 return -EAGAIN;
2717 }
2718 md_wakeup_thread(mddev->sync_thread);
2719 md_new_event(mddev);
2720 return 0;
2721}
2722#endif
2723
2724static void end_reshape(raid5_conf_t *conf)
2725{
2726 struct block_device *bdev;
2727
2728 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
2729 conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
2730 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
2731 conf->mddev->changed = 1;
2732
2733 bdev = bdget_disk(conf->mddev->gendisk, 0);
2734 if (bdev) {
2735 mutex_lock(&bdev->bd_inode->i_mutex);
2736 i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
2737 mutex_unlock(&bdev->bd_inode->i_mutex);
2738 bdput(bdev);
2739 }
2740 spin_lock_irq(&conf->device_lock);
2741 conf->expand_progress = MaxSector;
2742 spin_unlock_irq(&conf->device_lock);
2743 conf->mddev->reshape_position = MaxSector;
2744 }
2745}
2746
2171static void raid5_quiesce(mddev_t *mddev, int state) 2747static void raid5_quiesce(mddev_t *mddev, int state)
2172{ 2748{
2173 raid5_conf_t *conf = mddev_to_conf(mddev); 2749 raid5_conf_t *conf = mddev_to_conf(mddev);
2174 2750
2175 switch(state) { 2751 switch(state) {
2752 case 2: /* resume for a suspend */
2753 wake_up(&conf->wait_for_overlap);
2754 break;
2755
2176 case 1: /* stop all writes */ 2756 case 1: /* stop all writes */
2177 spin_lock_irq(&conf->device_lock); 2757 spin_lock_irq(&conf->device_lock);
2178 conf->quiesce = 1; 2758 conf->quiesce = 1;
@@ -2186,6 +2766,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
2186 spin_lock_irq(&conf->device_lock); 2766 spin_lock_irq(&conf->device_lock);
2187 conf->quiesce = 0; 2767 conf->quiesce = 0;
2188 wake_up(&conf->wait_for_stripe); 2768 wake_up(&conf->wait_for_stripe);
2769 wake_up(&conf->wait_for_overlap);
2189 spin_unlock_irq(&conf->device_lock); 2770 spin_unlock_irq(&conf->device_lock);
2190 break; 2771 break;
2191 } 2772 }
@@ -2206,6 +2787,10 @@ static struct mdk_personality raid5_personality =
2206 .spare_active = raid5_spare_active, 2787 .spare_active = raid5_spare_active,
2207 .sync_request = sync_request, 2788 .sync_request = sync_request,
2208 .resize = raid5_resize, 2789 .resize = raid5_resize,
2790#ifdef CONFIG_MD_RAID5_RESHAPE
2791 .check_reshape = raid5_check_reshape,
2792 .start_reshape = raid5_start_reshape,
2793#endif
2209 .quiesce = raid5_quiesce, 2794 .quiesce = raid5_quiesce,
2210}; 2795};
2211 2796
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index cd477ebf2ee4..6df4930fddec 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -331,9 +331,9 @@ static int grow_stripes(raid6_conf_t *conf, int num)
331 kmem_cache_t *sc; 331 kmem_cache_t *sc;
332 int devs = conf->raid_disks; 332 int devs = conf->raid_disks;
333 333
334 sprintf(conf->cache_name, "raid6/%s", mdname(conf->mddev)); 334 sprintf(conf->cache_name[0], "raid6/%s", mdname(conf->mddev));
335 335
336 sc = kmem_cache_create(conf->cache_name, 336 sc = kmem_cache_create(conf->cache_name[0],
337 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 337 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
338 0, 0, NULL, NULL); 338 0, 0, NULL, NULL);
339 if (!sc) 339 if (!sc)
@@ -2006,11 +2006,14 @@ static int run(mddev_t *mddev)
2006 return -EIO; 2006 return -EIO;
2007 } 2007 }
2008 2008
2009 mddev->private = kzalloc(sizeof (raid6_conf_t) 2009 mddev->private = kzalloc(sizeof (raid6_conf_t), GFP_KERNEL);
2010 + mddev->raid_disks * sizeof(struct disk_info),
2011 GFP_KERNEL);
2012 if ((conf = mddev->private) == NULL) 2010 if ((conf = mddev->private) == NULL)
2013 goto abort; 2011 goto abort;
2012 conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
2013 GFP_KERNEL);
2014 if (!conf->disks)
2015 goto abort;
2016
2014 conf->mddev = mddev; 2017 conf->mddev = mddev;
2015 2018
2016 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 2019 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -2158,6 +2161,7 @@ abort:
2158 print_raid6_conf(conf); 2161 print_raid6_conf(conf);
2159 safe_put_page(conf->spare_page); 2162 safe_put_page(conf->spare_page);
2160 kfree(conf->stripe_hashtbl); 2163 kfree(conf->stripe_hashtbl);
2164 kfree(conf->disks);
2161 kfree(conf); 2165 kfree(conf);
2162 } 2166 }
2163 mddev->private = NULL; 2167 mddev->private = NULL;
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 54f8b95717b0..96fe0ecae250 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -86,7 +86,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
86 86
87 if (dvbdev && dvbdev->fops) { 87 if (dvbdev && dvbdev->fops) {
88 int err = 0; 88 int err = 0;
89 struct file_operations *old_fops; 89 const struct file_operations *old_fops;
90 90
91 file->private_data = dvbdev; 91 file->private_data = dvbdev;
92 old_fops = file->f_op; 92 old_fops = file->f_op;
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 75e3d41382f2..5f87dd5f1d0b 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -97,7 +97,7 @@ static int video_open(struct inode *inode, struct file *file)
97 unsigned int minor = iminor(inode); 97 unsigned int minor = iminor(inode);
98 int err = 0; 98 int err = 0;
99 struct video_device *vfl; 99 struct video_device *vfl;
100 struct file_operations *old_fops; 100 const struct file_operations *old_fops;
101 101
102 if(minor>=VIDEO_NUM_DEVICES) 102 if(minor>=VIDEO_NUM_DEVICES)
103 return -ENODEV; 103 return -ENODEV;
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b09fb6307153..7d4c5497785b 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void)
1179 goto exit; 1179 goto exit;
1180 } 1180 }
1181 1181
1182 i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, 1182 i2o_blk_req_pool.pool =
1183 mempool_alloc_slab, 1183 mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
1184 mempool_free_slab, 1184 i2o_blk_req_pool.slab);
1185 i2o_blk_req_pool.slab);
1186 if (!i2o_blk_req_pool.pool) { 1185 if (!i2o_blk_req_pool.pool) {
1187 osm_err("can't init request mempool\n"); 1186 osm_err("can't init request mempool\n");
1188 rc = -ENOMEM; 1187 rc = -ENOMEM;
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 2a0c42b8cda5..3d2e76eea93e 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -56,7 +56,7 @@
56typedef struct _i2o_proc_entry_t { 56typedef struct _i2o_proc_entry_t {
57 char *name; /* entry name */ 57 char *name; /* entry name */
58 mode_t mode; /* mode */ 58 mode_t mode; /* mode */
59 struct file_operations *fops; /* open function */ 59 const struct file_operations *fops; /* open function */
60} i2o_proc_entry; 60} i2o_proc_entry;
61 61
62/* global I2O /proc/i2o entry */ 62/* global I2O /proc/i2o entry */
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index f295401fac21..7fd7a43e38de 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -52,12 +52,13 @@ static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };
52 52
53void ibmasm_register_panic_notifier(void) 53void ibmasm_register_panic_notifier(void)
54{ 54{
55 notifier_chain_register(&panic_notifier_list, &panic_notifier); 55 atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
56} 56}
57 57
58void ibmasm_unregister_panic_notifier(void) 58void ibmasm_unregister_panic_notifier(void)
59{ 59{
60 notifier_chain_unregister(&panic_notifier_list, &panic_notifier); 60 atomic_notifier_chain_unregister(&panic_notifier_list,
61 &panic_notifier);
61} 62}
62 63
63 64
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 5c550fcac2c4..26a230b6ff80 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -101,7 +101,7 @@ static struct super_operations ibmasmfs_s_ops = {
101 .drop_inode = generic_delete_inode, 101 .drop_inode = generic_delete_inode,
102}; 102};
103 103
104static struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; 104static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
105 105
106static struct file_system_type ibmasmfs_type = { 106static struct file_system_type ibmasmfs_type = {
107 .owner = THIS_MODULE, 107 .owner = THIS_MODULE,
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 205bb7083335..0f6bb2e625d8 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -25,9 +25,8 @@ config MTD_JEDECPROBE
25 compatible with the Common Flash Interface, but will use the common 25 compatible with the Common Flash Interface, but will use the common
26 CFI-targetted flash drivers for any chips which are identified which 26 CFI-targetted flash drivers for any chips which are identified which
27 are in fact compatible in all but the probe method. This actually 27 are in fact compatible in all but the probe method. This actually
28 covers most AMD/Fujitsu-compatible chips, and will shortly cover also 28 covers most AMD/Fujitsu-compatible chips and also non-CFI
29 non-CFI Intel chips (that code is in MTD CVS and should shortly be sent 29 Intel chips.
30 for inclusion in Linus' tree)
31 30
32config MTD_GEN_PROBE 31config MTD_GEN_PROBE
33 tristate 32 tristate
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index b51c757817d8..efb221692641 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -218,8 +218,8 @@ static void dnp_set_vpp(struct map_info *not_used, int on)
218 { 218 {
219 if(--vpp_counter == 0) 219 if(--vpp_counter == 0)
220 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4); 220 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
221 else if(vpp_counter < 0) 221 else
222 BUG(); 222 BUG_ON(vpp_counter < 0);
223 } 223 }
224 spin_unlock_irq(&dnpc_spin); 224 spin_unlock_irq(&dnpc_spin);
225} 225}
@@ -240,8 +240,8 @@ static void adnp_set_vpp(struct map_info *not_used, int on)
240 { 240 {
241 if(--vpp_counter == 0) 241 if(--vpp_counter == 0)
242 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8); 242 setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
243 else if(vpp_counter < 0) 243 else
244 BUG(); 244 BUG_ON(vpp_counter < 0);
245 } 245 }
246 spin_unlock_irq(&dnpc_spin); 246 spin_unlock_irq(&dnpc_spin);
247} 247}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 7f3ff500b68e..840dd66ce2dc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -450,8 +450,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
450 450
451 kfree(tr->blkcore_priv); 451 kfree(tr->blkcore_priv);
452 452
453 if (!list_empty(&tr->devs)) 453 BUG_ON(!list_empty(&tr->devs));
454 BUG();
455 return 0; 454 return 0;
456} 455}
457 456
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index b1bf8c411de7..9af840364a74 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -477,8 +477,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
477 } 477 }
478 478
479 /* must never happen since size limit has been verified above */ 479 /* must never happen since size limit has been verified above */
480 if (i >= concat->num_subdev) 480 BUG_ON(i >= concat->num_subdev);
481 BUG();
482 481
483 /* now do the erase: */ 482 /* now do the erase: */
484 err = 0; 483 err = 0;
@@ -500,8 +499,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
500 if ((err = concat_dev_erase(subdev, erase))) { 499 if ((err = concat_dev_erase(subdev, erase))) {
501 /* sanity check: should never happen since 500 /* sanity check: should never happen since
502 * block alignment has been checked above */ 501 * block alignment has been checked above */
503 if (err == -EINVAL) 502 BUG_ON(err == -EINVAL);
504 BUG();
505 if (erase->fail_addr != 0xffffffff) 503 if (erase->fail_addr != 0xffffffff)
506 instr->fail_addr = erase->fail_addr + offset; 504 instr->fail_addr = erase->fail_addr + offset;
507 break; 505 break;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d339308539fa..70f63891b19c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -196,8 +196,6 @@
196 196
197 197
198#define DRV_NAME "3c59x" 198#define DRV_NAME "3c59x"
199#define DRV_VERSION "LK1.1.19"
200#define DRV_RELDATE "10 Nov 2002"
201 199
202 200
203 201
@@ -275,10 +273,8 @@ static char version[] __devinitdata =
275DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; 273DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
276 274
277MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 275MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
278MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver " 276MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
279 DRV_VERSION " " DRV_RELDATE);
280MODULE_LICENSE("GPL"); 277MODULE_LICENSE("GPL");
281MODULE_VERSION(DRV_VERSION);
282 278
283 279
284/* Operational parameter that usually are not changed. */ 280/* Operational parameter that usually are not changed. */
@@ -904,7 +900,6 @@ static void acpi_set_WOL(struct net_device *dev);
904static struct ethtool_ops vortex_ethtool_ops; 900static struct ethtool_ops vortex_ethtool_ops;
905static void set_8021q_mode(struct net_device *dev, int enable); 901static void set_8021q_mode(struct net_device *dev, int enable);
906 902
907
908/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ 903/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
909/* Option count limit only -- unlimited interfaces are supported. */ 904/* Option count limit only -- unlimited interfaces are supported. */
910#define MAX_UNITS 8 905#define MAX_UNITS 8
@@ -919,8 +914,6 @@ static int global_full_duplex = -1;
919static int global_enable_wol = -1; 914static int global_enable_wol = -1;
920static int global_use_mmio = -1; 915static int global_use_mmio = -1;
921 916
922/* #define dev_alloc_skb dev_alloc_skb_debug */
923
924/* Variables to work-around the Compaq PCI BIOS32 problem. */ 917/* Variables to work-around the Compaq PCI BIOS32 problem. */
925static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; 918static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
926static struct net_device *compaq_net_device; 919static struct net_device *compaq_net_device;
@@ -976,7 +969,7 @@ static void poll_vortex(struct net_device *dev)
976 969
977#ifdef CONFIG_PM 970#ifdef CONFIG_PM
978 971
979static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) 972static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
980{ 973{
981 struct net_device *dev = pci_get_drvdata(pdev); 974 struct net_device *dev = pci_get_drvdata(pdev);
982 975
@@ -994,7 +987,7 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
994 return 0; 987 return 0;
995} 988}
996 989
997static int vortex_resume (struct pci_dev *pdev) 990static int vortex_resume(struct pci_dev *pdev)
998{ 991{
999 struct net_device *dev = pci_get_drvdata(pdev); 992 struct net_device *dev = pci_get_drvdata(pdev);
1000 struct vortex_private *vp = netdev_priv(dev); 993 struct vortex_private *vp = netdev_priv(dev);
@@ -1027,8 +1020,8 @@ static struct eisa_device_id vortex_eisa_ids[] = {
1027 { "" } 1020 { "" }
1028}; 1021};
1029 1022
1030static int vortex_eisa_probe (struct device *device); 1023static int vortex_eisa_probe(struct device *device);
1031static int vortex_eisa_remove (struct device *device); 1024static int vortex_eisa_remove(struct device *device);
1032 1025
1033static struct eisa_driver vortex_eisa_driver = { 1026static struct eisa_driver vortex_eisa_driver = {
1034 .id_table = vortex_eisa_ids, 1027 .id_table = vortex_eisa_ids,
@@ -1039,12 +1032,12 @@ static struct eisa_driver vortex_eisa_driver = {
1039 } 1032 }
1040}; 1033};
1041 1034
1042static int vortex_eisa_probe (struct device *device) 1035static int vortex_eisa_probe(struct device *device)
1043{ 1036{
1044 void __iomem *ioaddr; 1037 void __iomem *ioaddr;
1045 struct eisa_device *edev; 1038 struct eisa_device *edev;
1046 1039
1047 edev = to_eisa_device (device); 1040 edev = to_eisa_device(device);
1048 1041
1049 if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) 1042 if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
1050 return -EBUSY; 1043 return -EBUSY;
@@ -1053,7 +1046,7 @@ static int vortex_eisa_probe (struct device *device)
1053 1046
1054 if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, 1047 if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
1055 edev->id.driver_data, vortex_cards_found)) { 1048 edev->id.driver_data, vortex_cards_found)) {
1056 release_region (edev->base_addr, VORTEX_TOTAL_SIZE); 1049 release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
1057 return -ENODEV; 1050 return -ENODEV;
1058 } 1051 }
1059 1052
@@ -1062,15 +1055,15 @@ static int vortex_eisa_probe (struct device *device)
1062 return 0; 1055 return 0;
1063} 1056}
1064 1057
1065static int vortex_eisa_remove (struct device *device) 1058static int vortex_eisa_remove(struct device *device)
1066{ 1059{
1067 struct eisa_device *edev; 1060 struct eisa_device *edev;
1068 struct net_device *dev; 1061 struct net_device *dev;
1069 struct vortex_private *vp; 1062 struct vortex_private *vp;
1070 void __iomem *ioaddr; 1063 void __iomem *ioaddr;
1071 1064
1072 edev = to_eisa_device (device); 1065 edev = to_eisa_device(device);
1073 dev = eisa_get_drvdata (edev); 1066 dev = eisa_get_drvdata(edev);
1074 1067
1075 if (!dev) { 1068 if (!dev) {
1076 printk("vortex_eisa_remove called for Compaq device!\n"); 1069 printk("vortex_eisa_remove called for Compaq device!\n");
@@ -1080,17 +1073,17 @@ static int vortex_eisa_remove (struct device *device)
1080 vp = netdev_priv(dev); 1073 vp = netdev_priv(dev);
1081 ioaddr = vp->ioaddr; 1074 ioaddr = vp->ioaddr;
1082 1075
1083 unregister_netdev (dev); 1076 unregister_netdev(dev);
1084 iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD); 1077 iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
1085 release_region (dev->base_addr, VORTEX_TOTAL_SIZE); 1078 release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
1086 1079
1087 free_netdev (dev); 1080 free_netdev(dev);
1088 return 0; 1081 return 0;
1089} 1082}
1090#endif 1083#endif
1091 1084
1092/* returns count found (>= 0), or negative on error */ 1085/* returns count found (>= 0), or negative on error */
1093static int __init vortex_eisa_init (void) 1086static int __init vortex_eisa_init(void)
1094{ 1087{
1095 int eisa_found = 0; 1088 int eisa_found = 0;
1096 int orig_cards_found = vortex_cards_found; 1089 int orig_cards_found = vortex_cards_found;
@@ -1121,7 +1114,7 @@ static int __init vortex_eisa_init (void)
1121} 1114}
1122 1115
1123/* returns count (>= 0), or negative on error */ 1116/* returns count (>= 0), or negative on error */
1124static int __devinit vortex_init_one (struct pci_dev *pdev, 1117static int __devinit vortex_init_one(struct pci_dev *pdev,
1125 const struct pci_device_id *ent) 1118 const struct pci_device_id *ent)
1126{ 1119{
1127 int rc, unit, pci_bar; 1120 int rc, unit, pci_bar;
@@ -1129,7 +1122,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
1129 void __iomem *ioaddr; 1122 void __iomem *ioaddr;
1130 1123
1131 /* wake up and enable device */ 1124 /* wake up and enable device */
1132 rc = pci_enable_device (pdev); 1125 rc = pci_enable_device(pdev);
1133 if (rc < 0) 1126 if (rc < 0)
1134 goto out; 1127 goto out;
1135 1128
@@ -1151,7 +1144,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
1151 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, 1144 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
1152 ent->driver_data, unit); 1145 ent->driver_data, unit);
1153 if (rc < 0) { 1146 if (rc < 0) {
1154 pci_disable_device (pdev); 1147 pci_disable_device(pdev);
1155 goto out; 1148 goto out;
1156 } 1149 }
1157 1150
@@ -1236,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1236 if (print_info) 1229 if (print_info)
1237 printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); 1230 printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
1238 1231
1239 printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n", 1232 printk(KERN_INFO "%s: 3Com %s %s at %p.\n",
1240 print_name, 1233 print_name,
1241 pdev ? "PCI" : "EISA", 1234 pdev ? "PCI" : "EISA",
1242 vci->name, 1235 vci->name,
@@ -1266,7 +1259,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1266 1259
1267 /* enable bus-mastering if necessary */ 1260 /* enable bus-mastering if necessary */
1268 if (vci->flags & PCI_USES_MASTER) 1261 if (vci->flags & PCI_USES_MASTER)
1269 pci_set_master (pdev); 1262 pci_set_master(pdev);
1270 1263
1271 if (vci->drv_flags & IS_VORTEX) { 1264 if (vci->drv_flags & IS_VORTEX) {
1272 u8 pci_latency; 1265 u8 pci_latency;
@@ -1310,7 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1310 if (pdev) 1303 if (pdev)
1311 pci_set_drvdata(pdev, dev); 1304 pci_set_drvdata(pdev, dev);
1312 if (edev) 1305 if (edev)
1313 eisa_set_drvdata (edev, dev); 1306 eisa_set_drvdata(edev, dev);
1314 1307
1315 vp->media_override = 7; 1308 vp->media_override = 7;
1316 if (option >= 0) { 1309 if (option >= 0) {
@@ -1335,7 +1328,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1335 vp->enable_wol = 1; 1328 vp->enable_wol = 1;
1336 } 1329 }
1337 1330
1338 vp->force_fd = vp->full_duplex; 1331 vp->mii.force_media = vp->full_duplex;
1339 vp->options = option; 1332 vp->options = option;
1340 /* Read the station address from the EEPROM. */ 1333 /* Read the station address from the EEPROM. */
1341 EL3WINDOW(0); 1334 EL3WINDOW(0);
@@ -1625,6 +1618,46 @@ issue_and_wait(struct net_device *dev, int cmd)
1625} 1618}
1626 1619
1627static void 1620static void
1621vortex_set_duplex(struct net_device *dev)
1622{
1623 struct vortex_private *vp = netdev_priv(dev);
1624 void __iomem *ioaddr = vp->ioaddr;
1625
1626 printk(KERN_INFO "%s: setting %s-duplex.\n",
1627 dev->name, (vp->full_duplex) ? "full" : "half");
1628
1629 EL3WINDOW(3);
1630 /* Set the full-duplex bit. */
1631 iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1632 (vp->large_frames ? 0x40 : 0) |
1633 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1634 0x100 : 0),
1635 ioaddr + Wn3_MAC_Ctrl);
1636
1637 issue_and_wait(dev, TxReset);
1638 /*
1639 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1640 */
1641 issue_and_wait(dev, RxReset|0x04);
1642}
1643
1644static void vortex_check_media(struct net_device *dev, unsigned int init)
1645{
1646 struct vortex_private *vp = netdev_priv(dev);
1647 unsigned int ok_to_print = 0;
1648
1649 if (vortex_debug > 3)
1650 ok_to_print = 1;
1651
1652 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1653 vp->full_duplex = vp->mii.full_duplex;
1654 vortex_set_duplex(dev);
1655 } else if (init) {
1656 vortex_set_duplex(dev);
1657 }
1658}
1659
1660static void
1628vortex_up(struct net_device *dev) 1661vortex_up(struct net_device *dev)
1629{ 1662{
1630 struct vortex_private *vp = netdev_priv(dev); 1663 struct vortex_private *vp = netdev_priv(dev);
@@ -1684,53 +1717,20 @@ vortex_up(struct net_device *dev)
1684 printk(KERN_DEBUG "%s: Initial media type %s.\n", 1717 printk(KERN_DEBUG "%s: Initial media type %s.\n",
1685 dev->name, media_tbl[dev->if_port].name); 1718 dev->name, media_tbl[dev->if_port].name);
1686 1719
1687 vp->full_duplex = vp->force_fd; 1720 vp->full_duplex = vp->mii.force_media;
1688 config = BFINS(config, dev->if_port, 20, 4); 1721 config = BFINS(config, dev->if_port, 20, 4);
1689 if (vortex_debug > 6) 1722 if (vortex_debug > 6)
1690 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); 1723 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
1691 iowrite32(config, ioaddr + Wn3_Config); 1724 iowrite32(config, ioaddr + Wn3_Config);
1692 1725
1726 netif_carrier_off(dev);
1693 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { 1727 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1694 int mii_reg1, mii_reg5;
1695 EL3WINDOW(4); 1728 EL3WINDOW(4);
1696 /* Read BMSR (reg1) only to clear old status. */ 1729 vortex_check_media(dev, 1);
1697 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1698 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1699 if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
1700 netif_carrier_off(dev); /* No MII device or no link partner report */
1701 } else {
1702 mii_reg5 &= vp->advertising;
1703 if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
1704 || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
1705 vp->full_duplex = 1;
1706 netif_carrier_on(dev);
1707 }
1708 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1709 if (vortex_debug > 1)
1710 printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
1711 " info1 %04x, setting %s-duplex.\n",
1712 dev->name, vp->phys[0],
1713 mii_reg1, mii_reg5,
1714 vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
1715 EL3WINDOW(3);
1716 }
1717
1718 /* Set the full-duplex bit. */
1719 iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1720 (vp->large_frames ? 0x40 : 0) |
1721 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1722 ioaddr + Wn3_MAC_Ctrl);
1723
1724 if (vortex_debug > 1) {
1725 printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
1726 dev->name, config);
1727 } 1730 }
1731 else
1732 vortex_set_duplex(dev);
1728 1733
1729 issue_and_wait(dev, TxReset);
1730 /*
1731 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1732 */
1733 issue_and_wait(dev, RxReset|0x04);
1734 1734
1735 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); 1735 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1736 1736
@@ -1805,7 +1805,6 @@ vortex_up(struct net_device *dev)
1805 set_8021q_mode(dev, 1); 1805 set_8021q_mode(dev, 1);
1806 iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ 1806 iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
1807 1807
1808// issue_and_wait(dev, SetTxStart|0x07ff);
1809 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ 1808 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
1810 iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ 1809 iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
1811 /* Allow status bits to be seen. */ 1810 /* Allow status bits to be seen. */
@@ -1892,7 +1891,7 @@ vortex_timer(unsigned long data)
1892 void __iomem *ioaddr = vp->ioaddr; 1891 void __iomem *ioaddr = vp->ioaddr;
1893 int next_tick = 60*HZ; 1892 int next_tick = 60*HZ;
1894 int ok = 0; 1893 int ok = 0;
1895 int media_status, mii_status, old_window; 1894 int media_status, old_window;
1896 1895
1897 if (vortex_debug > 2) { 1896 if (vortex_debug > 2) {
1898 printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", 1897 printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
@@ -1900,8 +1899,6 @@ vortex_timer(unsigned long data)
1900 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); 1899 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1901 } 1900 }
1902 1901
1903 if (vp->medialock)
1904 goto leave_media_alone;
1905 disable_irq(dev->irq); 1902 disable_irq(dev->irq);
1906 old_window = ioread16(ioaddr + EL3_CMD) >> 13; 1903 old_window = ioread16(ioaddr + EL3_CMD) >> 13;
1907 EL3WINDOW(4); 1904 EL3WINDOW(4);
@@ -1924,44 +1921,9 @@ vortex_timer(unsigned long data)
1924 break; 1921 break;
1925 case XCVR_MII: case XCVR_NWAY: 1922 case XCVR_MII: case XCVR_NWAY:
1926 { 1923 {
1927 spin_lock_bh(&vp->lock);
1928 mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
1929 if (!(mii_status & BMSR_LSTATUS)) {
1930 /* Re-read to get actual link status */
1931 mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
1932 }
1933 ok = 1; 1924 ok = 1;
1934 if (vortex_debug > 2) 1925 spin_lock_bh(&vp->lock);
1935 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", 1926 vortex_check_media(dev, 0);
1936 dev->name, mii_status);
1937 if (mii_status & BMSR_LSTATUS) {
1938 int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1939 if (! vp->force_fd && mii_reg5 != 0xffff) {
1940 int duplex;
1941
1942 mii_reg5 &= vp->advertising;
1943 duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
1944 if (vp->full_duplex != duplex) {
1945 vp->full_duplex = duplex;
1946 printk(KERN_INFO "%s: Setting %s-duplex based on MII "
1947 "#%d link partner capability of %4.4x.\n",
1948 dev->name, vp->full_duplex ? "full" : "half",
1949 vp->phys[0], mii_reg5);
1950 /* Set the full-duplex bit. */
1951 EL3WINDOW(3);
1952 iowrite16( (vp->full_duplex ? 0x20 : 0) |
1953 (vp->large_frames ? 0x40 : 0) |
1954 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1955 ioaddr + Wn3_MAC_Ctrl);
1956 if (vortex_debug > 1)
1957 printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
1958 /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
1959 }
1960 }
1961 netif_carrier_on(dev);
1962 } else {
1963 netif_carrier_off(dev);
1964 }
1965 spin_unlock_bh(&vp->lock); 1927 spin_unlock_bh(&vp->lock);
1966 } 1928 }
1967 break; 1929 break;
@@ -1971,7 +1933,14 @@ vortex_timer(unsigned long data)
1971 dev->name, media_tbl[dev->if_port].name, media_status); 1933 dev->name, media_tbl[dev->if_port].name, media_status);
1972 ok = 1; 1934 ok = 1;
1973 } 1935 }
1974 if ( ! ok) { 1936
1937 if (!netif_carrier_ok(dev))
1938 next_tick = 5*HZ;
1939
1940 if (vp->medialock)
1941 goto leave_media_alone;
1942
1943 if (!ok) {
1975 unsigned int config; 1944 unsigned int config;
1976 1945
1977 do { 1946 do {
@@ -2004,14 +1973,14 @@ vortex_timer(unsigned long data)
2004 printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); 1973 printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
2005 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ 1974 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
2006 } 1975 }
2007 EL3WINDOW(old_window);
2008 enable_irq(dev->irq);
2009 1976
2010leave_media_alone: 1977leave_media_alone:
2011 if (vortex_debug > 2) 1978 if (vortex_debug > 2)
2012 printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", 1979 printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
2013 dev->name, media_tbl[dev->if_port].name); 1980 dev->name, media_tbl[dev->if_port].name);
2014 1981
1982 EL3WINDOW(old_window);
1983 enable_irq(dev->irq);
2015 mod_timer(&vp->timer, RUN_AT(next_tick)); 1984 mod_timer(&vp->timer, RUN_AT(next_tick));
2016 if (vp->deferred) 1985 if (vp->deferred)
2017 iowrite16(FakeIntr, ioaddr + EL3_CMD); 1986 iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -2206,7 +2175,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2206 if (vp->bus_master) { 2175 if (vp->bus_master) {
2207 /* Set the bus-master controller to transfer the packet. */ 2176 /* Set the bus-master controller to transfer the packet. */
2208 int len = (skb->len + 3) & ~3; 2177 int len = (skb->len + 3) & ~3;
2209 iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), 2178 iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
2210 ioaddr + Wn7_MasterAddr); 2179 ioaddr + Wn7_MasterAddr);
2211 iowrite16(len, ioaddr + Wn7_MasterLen); 2180 iowrite16(len, ioaddr + Wn7_MasterLen);
2212 vp->tx_skb = skb; 2181 vp->tx_skb = skb;
@@ -2983,20 +2952,6 @@ static int vortex_nway_reset(struct net_device *dev)
2983 return rc; 2952 return rc;
2984} 2953}
2985 2954
2986static u32 vortex_get_link(struct net_device *dev)
2987{
2988 struct vortex_private *vp = netdev_priv(dev);
2989 void __iomem *ioaddr = vp->ioaddr;
2990 unsigned long flags;
2991 int rc;
2992
2993 spin_lock_irqsave(&vp->lock, flags);
2994 EL3WINDOW(4);
2995 rc = mii_link_ok(&vp->mii);
2996 spin_unlock_irqrestore(&vp->lock, flags);
2997 return rc;
2998}
2999
3000static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2955static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3001{ 2956{
3002 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
@@ -3077,7 +3032,6 @@ static void vortex_get_drvinfo(struct net_device *dev,
3077 struct vortex_private *vp = netdev_priv(dev); 3032 struct vortex_private *vp = netdev_priv(dev);
3078 3033
3079 strcpy(info->driver, DRV_NAME); 3034 strcpy(info->driver, DRV_NAME);
3080 strcpy(info->version, DRV_VERSION);
3081 if (VORTEX_PCI(vp)) { 3035 if (VORTEX_PCI(vp)) {
3082 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); 3036 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
3083 } else { 3037 } else {
@@ -3098,9 +3052,9 @@ static struct ethtool_ops vortex_ethtool_ops = {
3098 .get_stats_count = vortex_get_stats_count, 3052 .get_stats_count = vortex_get_stats_count,
3099 .get_settings = vortex_get_settings, 3053 .get_settings = vortex_get_settings,
3100 .set_settings = vortex_set_settings, 3054 .set_settings = vortex_set_settings,
3101 .get_link = vortex_get_link, 3055 .get_link = ethtool_op_get_link,
3102 .nway_reset = vortex_nway_reset, 3056 .nway_reset = vortex_nway_reset,
3103 .get_perm_addr = ethtool_op_get_perm_addr, 3057 .get_perm_addr = ethtool_op_get_perm_addr,
3104}; 3058};
3105 3059
3106#ifdef CONFIG_PCI 3060#ifdef CONFIG_PCI
@@ -3301,7 +3255,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
3301 } 3255 }
3302 return; 3256 return;
3303} 3257}
3304 3258
3305/* ACPI: Advanced Configuration and Power Interface. */ 3259/* ACPI: Advanced Configuration and Power Interface. */
3306/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ 3260/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
3307static void acpi_set_WOL(struct net_device *dev) 3261static void acpi_set_WOL(struct net_device *dev)
@@ -3325,7 +3279,7 @@ static void acpi_set_WOL(struct net_device *dev)
3325} 3279}
3326 3280
3327 3281
3328static void __devexit vortex_remove_one (struct pci_dev *pdev) 3282static void __devexit vortex_remove_one(struct pci_dev *pdev)
3329{ 3283{
3330 struct net_device *dev = pci_get_drvdata(pdev); 3284 struct net_device *dev = pci_get_drvdata(pdev);
3331 struct vortex_private *vp; 3285 struct vortex_private *vp;
@@ -3381,7 +3335,7 @@ static int vortex_have_pci;
3381static int vortex_have_eisa; 3335static int vortex_have_eisa;
3382 3336
3383 3337
3384static int __init vortex_init (void) 3338static int __init vortex_init(void)
3385{ 3339{
3386 int pci_rc, eisa_rc; 3340 int pci_rc, eisa_rc;
3387 3341
@@ -3397,14 +3351,14 @@ static int __init vortex_init (void)
3397} 3351}
3398 3352
3399 3353
3400static void __exit vortex_eisa_cleanup (void) 3354static void __exit vortex_eisa_cleanup(void)
3401{ 3355{
3402 struct vortex_private *vp; 3356 struct vortex_private *vp;
3403 void __iomem *ioaddr; 3357 void __iomem *ioaddr;
3404 3358
3405#ifdef CONFIG_EISA 3359#ifdef CONFIG_EISA
3406 /* Take care of the EISA devices */ 3360 /* Take care of the EISA devices */
3407 eisa_driver_unregister (&vortex_eisa_driver); 3361 eisa_driver_unregister(&vortex_eisa_driver);
3408#endif 3362#endif
3409 3363
3410 if (compaq_net_device) { 3364 if (compaq_net_device) {
@@ -3412,33 +3366,24 @@ static void __exit vortex_eisa_cleanup (void)
3412 ioaddr = ioport_map(compaq_net_device->base_addr, 3366 ioaddr = ioport_map(compaq_net_device->base_addr,
3413 VORTEX_TOTAL_SIZE); 3367 VORTEX_TOTAL_SIZE);
3414 3368
3415 unregister_netdev (compaq_net_device); 3369 unregister_netdev(compaq_net_device);
3416 iowrite16 (TotalReset, ioaddr + EL3_CMD); 3370 iowrite16(TotalReset, ioaddr + EL3_CMD);
3417 release_region(compaq_net_device->base_addr, 3371 release_region(compaq_net_device->base_addr,
3418 VORTEX_TOTAL_SIZE); 3372 VORTEX_TOTAL_SIZE);
3419 3373
3420 free_netdev (compaq_net_device); 3374 free_netdev(compaq_net_device);
3421 } 3375 }
3422} 3376}
3423 3377
3424 3378
3425static void __exit vortex_cleanup (void) 3379static void __exit vortex_cleanup(void)
3426{ 3380{
3427 if (vortex_have_pci) 3381 if (vortex_have_pci)
3428 pci_unregister_driver (&vortex_driver); 3382 pci_unregister_driver(&vortex_driver);
3429 if (vortex_have_eisa) 3383 if (vortex_have_eisa)
3430 vortex_eisa_cleanup (); 3384 vortex_eisa_cleanup();
3431} 3385}
3432 3386
3433 3387
3434module_init(vortex_init); 3388module_init(vortex_init);
3435module_exit(vortex_cleanup); 3389module_exit(vortex_cleanup);
3436
3437
3438/*
3439 * Local variables:
3440 * c-indent-level: 4
3441 * c-basic-offset: 4
3442 * tab-width: 4
3443 * End:
3444 */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2d0ac169a86c..f13a539dc169 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3159,7 +3159,7 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
3159 * bond_netdev_event: handle netdev notifier chain events. 3159 * bond_netdev_event: handle netdev notifier chain events.
3160 * 3160 *
3161 * This function receives events for the netdev chain. The caller (an 3161 * This function receives events for the netdev chain. The caller (an
3162 * ioctl handler calling notifier_call_chain) holds the necessary 3162 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3163 * locks for us to safely manipulate the slave devices (RTNL lock, 3163 * locks for us to safely manipulate the slave devices (RTNL lock,
3164 * dev_probe_lock). 3164 * dev_probe_lock).
3165 */ 3165 */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e7fc28b07e5a..7627a75f4f7c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -134,6 +134,7 @@
134#include <linux/random.h> 134#include <linux/random.h>
135#include <linux/init.h> 135#include <linux/init.h>
136#include <linux/if_vlan.h> 136#include <linux/if_vlan.h>
137#include <linux/dma-mapping.h>
137 138
138#include <asm/irq.h> 139#include <asm/irq.h>
139#include <asm/io.h> 140#include <asm/io.h>
@@ -2932,7 +2933,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2932 if (id->driver_data & DEV_HAS_HIGH_DMA) { 2933 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2933 /* packet format 3: supports 40-bit addressing */ 2934 /* packet format 3: supports 40-bit addressing */
2934 np->desc_ver = DESC_VER_3; 2935 np->desc_ver = DESC_VER_3;
2935 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 2936 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
2936 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2937 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2937 pci_name(pci_dev)); 2938 pci_name(pci_dev));
2938 } else { 2939 } else {
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 9b8295ee06ef..ae71ed57c12d 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -44,6 +44,7 @@
44#include <linux/ip.h> 44#include <linux/ip.h>
45#include <linux/tcp.h> 45#include <linux/tcp.h>
46#include <linux/udp.h> 46#include <linux/udp.h>
47#include <linux/dma-mapping.h>
47 48
48#ifdef CONFIG_SERIAL_8250 49#ifdef CONFIG_SERIAL_8250
49#include <linux/serial_core.h> 50#include <linux/serial_core.h>
@@ -1195,17 +1196,17 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1195 int err, pci_using_dac; 1196 int err, pci_using_dac;
1196 1197
1197 /* Configure DMA attributes. */ 1198 /* Configure DMA attributes. */
1198 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); 1199 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1199 if (!err) { 1200 if (!err) {
1200 pci_using_dac = 1; 1201 pci_using_dac = 1;
1201 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); 1202 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1202 if (err < 0) { 1203 if (err < 0) {
1203 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " 1204 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
1204 "for consistent allocations\n", pci_name(pdev)); 1205 "for consistent allocations\n", pci_name(pdev));
1205 goto out; 1206 goto out;
1206 } 1207 }
1207 } else { 1208 } else {
1208 err = pci_set_dma_mask(pdev, 0xffffffffULL); 1209 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1209 if (err) { 1210 if (err) {
1210 printk(KERN_ERR "%s: No usable DMA configuration, " 1211 printk(KERN_ERR "%s: No usable DMA configuration, "
1211 "aborting.\n", pci_name(pdev)); 1212 "aborting.\n", pci_name(pdev));
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 83141a3ff546..cc7ff8f00e42 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -207,7 +207,7 @@ static int __init nsc_ircc_init(void)
207 /* Register with PnP subsystem to detect disable ports */ 207 /* Register with PnP subsystem to detect disable ports */
208 ret = pnp_register_driver(&nsc_ircc_pnp_driver); 208 ret = pnp_register_driver(&nsc_ircc_pnp_driver);
209 209
210 if (ret >= 0) 210 if (!ret)
211 pnp_registered = 1; 211 pnp_registered = 1;
212 212
213 ret = -ENODEV; 213 ret = -ENODEV;
@@ -812,7 +812,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
812 int cfg_base = info->cfg_base; 812 int cfg_base = info->cfg_base;
813 int enabled; 813 int enabled;
814 814
815 /* User is shure about his config... accept it. */ 815 /* User is sure about his config... accept it. */
816 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " 816 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
817 "io=0x%04x, irq=%d, dma=%d\n", 817 "io=0x%04x, irq=%d, dma=%d\n",
818 __FUNCTION__, info->fir_base, info->irq, info->dma); 818 __FUNCTION__, info->fir_base, info->irq, info->dma);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 0c13795dca38..b79d6e8d3045 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -172,7 +172,7 @@ static struct net_device_stats *get_stats(struct net_device *dev)
172 172
173 memset(stats, 0, sizeof(struct net_device_stats)); 173 memset(stats, 0, sizeof(struct net_device_stats));
174 174
175 for_each_cpu(i) { 175 for_each_possible_cpu(i) {
176 struct net_device_stats *lb_stats; 176 struct net_device_stats *lb_stats;
177 177
178 lb_stats = &per_cpu(loopback_stats, i); 178 lb_stats = &per_cpu(loopback_stats, i);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 0fede50abd3e..8e9b1a537dee 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1828,10 +1828,10 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
1828 int using_dac = 0; 1828 int using_dac = 0;
1829 1829
1830 /* See if we can set the dma mask early on; failure is fatal. */ 1830 /* See if we can set the dma mask early on; failure is fatal. */
1831 if (sizeof(dma_addr_t) == 8 && 1831 if (sizeof(dma_addr_t) == 8 &&
1832 !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) { 1832 !pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) {
1833 using_dac = 1; 1833 using_dac = 1;
1834 } else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1834 } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
1835 using_dac = 0; 1835 using_dac = 0;
1836 } else { 1836 } else {
1837 printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n"); 1837 printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n");
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 253440a98022..b82191d2bee1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1693,7 +1693,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs
1693 * 1693 *
1694 * Process receive interrupt events, 1694 * Process receive interrupt events,
1695 * put buffer to higher layer and refill buffer pool 1695 * put buffer to higher layer and refill buffer pool
1696 * Note: This fucntion is called by interrupt handler, 1696 * Note: This function is called by interrupt handler,
1697 * don't do "too much" work here 1697 * don't do "too much" work here
1698 */ 1698 */
1699 1699
@@ -1840,7 +1840,7 @@ static int sis900_rx(struct net_device *net_dev)
1840 * 1840 *
1841 * Check for error condition and free socket buffer etc 1841 * Check for error condition and free socket buffer etc
1842 * schedule for more transmission as needed 1842 * schedule for more transmission as needed
1843 * Note: This fucntion is called by interrupt handler, 1843 * Note: This function is called by interrupt handler,
1844 * don't do "too much" work here 1844 * don't do "too much" work here
1845 */ 1845 */
1846 1846
@@ -2283,7 +2283,7 @@ static void set_rx_mode(struct net_device *net_dev)
2283 int i, table_entries; 2283 int i, table_entries;
2284 u32 rx_mode; 2284 u32 rx_mode;
2285 2285
2286 /* 635 Hash Table entires = 256(2^16) */ 2286 /* 635 Hash Table entries = 256(2^16) */
2287 if((sis_priv->chipset_rev >= SIS635A_900_REV) || 2287 if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
2288 (sis_priv->chipset_rev == SIS900B_900_REV)) 2288 (sis_priv->chipset_rev == SIS900B_900_REV))
2289 table_entries = 16; 2289 table_entries = 16;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index ee48bfd67349..d1a86a080a65 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -513,7 +513,7 @@ struct mii_phy {
513 u_char *rst; /* Start of reset sequence in SROM */ 513 u_char *rst; /* Start of reset sequence in SROM */
514 u_int mc; /* Media Capabilities */ 514 u_int mc; /* Media Capabilities */
515 u_int ana; /* NWay Advertisement */ 515 u_int ana; /* NWay Advertisement */
516 u_int fdx; /* Full DupleX capabilites for each media */ 516 u_int fdx; /* Full DupleX capabilities for each media */
517 u_int ttm; /* Transmit Threshold Mode for each media */ 517 u_int ttm; /* Transmit Threshold Mode for each media */
518 u_int mci; /* 21142 MII Connector Interrupt info */ 518 u_int mci; /* 21142 MII Connector Interrupt info */
519}; 519};
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index 55f4a9a631bc..ab985023fcca 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -199,7 +199,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
199 /* negotiation ended successfully */ 199 /* negotiation ended successfully */
200 200
201 /* get the link partners reply and mask out all but 201 /* get the link partners reply and mask out all but
202 * bits 24-21 which show the partners capabilites 202 * bits 24-21 which show the partners capabilities
203 * and match those to what we advertised 203 * and match those to what we advertised
204 * 204 *
205 * then begin to interpret the results of the negotiation. 205 * then begin to interpret the results of the negotiation.
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index cde35dd87906..c1ce87a5f8d3 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -208,7 +208,7 @@ static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
208}; 208};
209 209
210/* Notes on the new subsystem numbering scheme: 210/* Notes on the new subsystem numbering scheme:
211 * bits 0-1 indicate crypto capabilites: (0) variable, (1) DES, or (2) 3DES 211 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
212 * bit 4 indicates if this card has secured firmware (we don't support it) 212 * bit 4 indicates if this card has secured firmware (we don't support it)
213 * bit 8 indicates if this is a (0) copper or (1) fiber card 213 * bit 8 indicates if this is a (0) copper or (1) fiber card
214 * bits 12-16 indicate card type: (0) client and (1) server 214 * bits 12-16 indicate card type: (0) client and (1) server
@@ -788,7 +788,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
788 /* we have two rings to choose from, but we only use txLo for now 788 /* we have two rings to choose from, but we only use txLo for now
789 * If we start using the Hi ring as well, we'll need to update 789 * If we start using the Hi ring as well, we'll need to update
790 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(), 790 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
791 * and TXHI_ENTIRES to match, as well as update the TSO code below 791 * and TXHI_ENTRIES to match, as well as update the TSO code below
792 * to get the right DMA address 792 * to get the right DMA address
793 */ 793 */
794 txRing = &tp->txLoRing; 794 txRing = &tp->txLoRing;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 1ff5de076d21..4505540e3c59 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -105,6 +105,7 @@
105#include <linux/delay.h> 105#include <linux/delay.h>
106#include <net/syncppp.h> 106#include <net/syncppp.h>
107#include <linux/hdlc.h> 107#include <linux/hdlc.h>
108#include <linux/mutex.h>
108 109
109/* Version */ 110/* Version */
110static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; 111static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
@@ -112,7 +113,7 @@ static int debug;
112static int quartz; 113static int quartz;
113 114
114#ifdef CONFIG_DSCC4_PCI_RST 115#ifdef CONFIG_DSCC4_PCI_RST
115static DECLARE_MUTEX(dscc4_sem); 116static DEFINE_MUTEX(dscc4_mutex);
116static u32 dscc4_pci_config_store[16]; 117static u32 dscc4_pci_config_store[16];
117#endif 118#endif
118 119
@@ -1018,7 +1019,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1018{ 1019{
1019 int i; 1020 int i;
1020 1021
1021 down(&dscc4_sem); 1022 mutex_lock(&dscc4_mutex);
1022 for (i = 0; i < 16; i++) 1023 for (i = 0; i < 16; i++)
1023 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); 1024 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
1024 1025
@@ -1039,7 +1040,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1039 1040
1040 for (i = 0; i < 16; i++) 1041 for (i = 0; i < 16; i++)
1041 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); 1042 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
1042 up(&dscc4_sem); 1043 mutex_unlock(&dscc4_mutex);
1043} 1044}
1044#else 1045#else
1045#define dscc4_pci_reset(pdev,ioaddr) do {} while (0) 1046#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 9d3b51c3ef54..29a756dd979b 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -577,8 +577,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
577 We set both dma_mask and consistent_dma_mask to 28 bits 577 We set both dma_mask and consistent_dma_mask to 28 bits
578 and pray pci_alloc_consistent() will use this info. It should 578 and pray pci_alloc_consistent() will use this info. It should
579 work on most platforms */ 579 work on most platforms */
580 if (pci_set_consistent_dma_mask(pdev, 0x0FFFFFFF) || 580 if (pci_set_consistent_dma_mask(pdev, DMA_28BIT_MASK) ||
581 pci_set_dma_mask(pdev, 0x0FFFFFFF)) { 581 pci_set_dma_mask(pdev, DMA_28BIT_MASK)) {
582 printk(KERN_ERR "wanXL: No usable DMA configuration\n"); 582 printk(KERN_ERR "wanXL: No usable DMA configuration\n");
583 return -EIO; 583 return -EIO;
584 } 584 }
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 6fd0bf736830..8dfdfbd5966c 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -3858,7 +3858,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
3858 unsigned long flags; 3858 unsigned long flags;
3859 3859
3860 /* Note : you may have realised that, as this is a SET operation, 3860 /* Note : you may have realised that, as this is a SET operation,
3861 * this is priviledged and therefore a normal user can't 3861 * this is privileged and therefore a normal user can't
3862 * perform scanning. 3862 * perform scanning.
3863 * This is not an error, while the device perform scanning, 3863 * This is not an error, while the device perform scanning,
3864 * traffic doesn't flow, so it's a perfect DoS... 3864 * traffic doesn't flow, so it's a perfect DoS...
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index e5bb9f5ae429..989599ad33ef 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -747,7 +747,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
747 747
748 if (essid->length) { 748 if (essid->length) {
749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ 749 dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
750 /* if it is to big, trunk it */ 750 /* if it is too big, trunk it */
751 dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length); 751 dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length);
752 } else { 752 } else {
753 dwrq->flags = 0; 753 dwrq->flags = 0;
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index b41d666fea3c..bfa0cc319a09 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -22,6 +22,7 @@
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/init.h> /* For __init, __exit */ 24#include <linux/init.h> /* For __init, __exit */
25#include <linux/dma-mapping.h>
25 26
26#include "prismcompat.h" 27#include "prismcompat.h"
27#include "islpci_dev.h" 28#include "islpci_dev.h"
@@ -124,7 +125,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
124 } 125 }
125 126
126 /* enable PCI DMA */ 127 /* enable PCI DMA */
127 if (pci_set_dma_mask(pdev, 0xffffffff)) { 128 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
128 printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); 129 printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
129 goto do_pci_disable_device; 130 goto do_pci_disable_device;
130 } 131 }
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 330d3869b41e..fc4bc9b94c74 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -217,11 +217,10 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
217 cpu_buf->tracing = 0; 217 cpu_buf->tracing = 0;
218} 218}
219 219
220void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 220void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
221 unsigned long event, int is_kernel)
221{ 222{
222 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; 223 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
223 unsigned long pc = profile_pc(regs);
224 int is_kernel = !user_mode(regs);
225 224
226 if (!backtrace_depth) { 225 if (!backtrace_depth) {
227 log_sample(cpu_buf, pc, is_kernel, event); 226 log_sample(cpu_buf, pc, is_kernel, event);
@@ -238,6 +237,14 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
238 oprofile_end_trace(cpu_buf); 237 oprofile_end_trace(cpu_buf);
239} 238}
240 239
240void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
241{
242 int is_kernel = !user_mode(regs);
243 unsigned long pc = profile_pc(regs);
244
245 oprofile_add_ext_sample(pc, regs, event, is_kernel);
246}
247
241void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 248void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
242{ 249{
243 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; 250 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index e94b1e4a2a84..f0acb661c253 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -22,7 +22,7 @@ void oprofile_reset_stats(void)
22 struct oprofile_cpu_buffer * cpu_buf; 22 struct oprofile_cpu_buffer * cpu_buf;
23 int i; 23 int i;
24 24
25 for_each_cpu(i) { 25 for_each_possible_cpu(i) {
26 cpu_buf = &cpu_buffer[i]; 26 cpu_buf = &cpu_buffer[i];
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
28 cpu_buf->sample_lost_overflow = 0; 28 cpu_buf->sample_lost_overflow = 0;
@@ -46,7 +46,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
46 if (!dir) 46 if (!dir)
47 return; 47 return;
48 48
49 for_each_cpu(i) { 49 for_each_possible_cpu(i) {
50 cpu_buf = &cpu_buffer[i]; 50 cpu_buf = &cpu_buffer[i];
51 snprintf(buf, 10, "cpu%d", i); 51 snprintf(buf, 10, "cpu%d", i);
52 cpudir = oprofilefs_mkdir(sb, dir, buf); 52 cpudir = oprofilefs_mkdir(sb, dir, buf);
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d6bae699749a..b62da9b0cbf0 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -130,7 +130,7 @@ static struct file_operations ulong_ro_fops = {
130 130
131 131
132static struct dentry * __oprofilefs_create_file(struct super_block * sb, 132static struct dentry * __oprofilefs_create_file(struct super_block * sb,
133 struct dentry * root, char const * name, struct file_operations * fops, 133 struct dentry * root, char const * name, const struct file_operations * fops,
134 int perm) 134 int perm)
135{ 135{
136 struct dentry * dentry; 136 struct dentry * dentry;
@@ -203,7 +203,7 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
203 203
204 204
205int oprofilefs_create_file(struct super_block * sb, struct dentry * root, 205int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
206 char const * name, struct file_operations * fops) 206 char const * name, const struct file_operations * fops)
207{ 207{
208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) 208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
209 return -EFAULT; 209 return -EFAULT;
@@ -212,7 +212,7 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
212 212
213 213
214int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, 214int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
215 char const * name, struct file_operations * fops, int perm) 215 char const * name, const struct file_operations * fops, int perm)
216{ 216{
217 if (!__oprofilefs_create_file(sb, root, name, fops, perm)) 217 if (!__oprofilefs_create_file(sb, root, name, fops, perm))
218 return -EFAULT; 218 return -EFAULT;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 3627a2d7f79f..298f2ddb2c17 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -499,11 +499,16 @@ static int led_halt(struct notifier_block *, unsigned long, void *);
499static struct notifier_block led_notifier = { 499static struct notifier_block led_notifier = {
500 .notifier_call = led_halt, 500 .notifier_call = led_halt,
501}; 501};
502static int notifier_disabled = 0;
502 503
503static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) 504static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
504{ 505{
505 char *txt; 506 char *txt;
506 507
508 if (notifier_disabled)
509 return NOTIFY_OK;
510
511 notifier_disabled = 1;
507 switch (event) { 512 switch (event) {
508 case SYS_RESTART: txt = "SYSTEM RESTART"; 513 case SYS_RESTART: txt = "SYSTEM RESTART";
509 break; 514 break;
@@ -527,7 +532,6 @@ static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
527 if (led_func_ptr) 532 if (led_func_ptr)
528 led_func_ptr(0xff); /* turn all LEDs ON */ 533 led_func_ptr(0xff); /* turn all LEDs ON */
529 534
530 unregister_reboot_notifier(&led_notifier);
531 return NOTIFY_OK; 535 return NOTIFY_OK;
532} 536}
533 537
@@ -758,6 +762,12 @@ not_found:
758 return 1; 762 return 1;
759} 763}
760 764
765static void __exit led_exit(void)
766{
767 unregister_reboot_notifier(&led_notifier);
768 return;
769}
770
761#ifdef CONFIG_PROC_FS 771#ifdef CONFIG_PROC_FS
762module_init(led_create_procfs) 772module_init(led_create_procfs)
763#endif 773#endif
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index 54b2b7f20b96..0bcab83b4080 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -251,7 +251,8 @@ static int __init power_init(void)
251 } 251 }
252 252
253 /* Register a call for panic conditions. */ 253 /* Register a call for panic conditions. */
254 notifier_chain_register(&panic_notifier_list, &parisc_panic_block); 254 atomic_notifier_chain_register(&panic_notifier_list,
255 &parisc_panic_block);
255 256
256 tasklet_enable(&power_tasklet); 257 tasklet_enable(&power_tasklet);
257 258
@@ -264,7 +265,8 @@ static void __exit power_exit(void)
264 return; 265 return;
265 266
266 tasklet_disable(&power_tasklet); 267 tasklet_disable(&power_tasklet);
267 notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block); 268 atomic_notifier_chain_unregister(&panic_notifier_list,
269 &parisc_panic_block);
268 power_tasklet.func = NULL; 270 power_tasklet.func = NULL;
269 pdc_soft_power_button(0); 271 pdc_soft_power_button(0);
270} 272}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 9302b8fd7461..d5890027f8af 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3126,9 +3126,9 @@ parport_pc_find_isa_ports (int autoirq, int autodma)
3126 * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY 3126 * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
3127 * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO 3127 * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
3128 */ 3128 */
3129static int __init parport_pc_find_ports (int autoirq, int autodma) 3129static void __init parport_pc_find_ports (int autoirq, int autodma)
3130{ 3130{
3131 int count = 0, r; 3131 int count = 0, err;
3132 3132
3133#ifdef CONFIG_PARPORT_PC_SUPERIO 3133#ifdef CONFIG_PARPORT_PC_SUPERIO
3134 detect_and_report_winbond (); 3134 detect_and_report_winbond ();
@@ -3140,23 +3140,17 @@ static int __init parport_pc_find_ports (int autoirq, int autodma)
3140 3140
3141 /* PnP ports, skip detection if SuperIO already found them */ 3141 /* PnP ports, skip detection if SuperIO already found them */
3142 if (!count) { 3142 if (!count) {
3143 r = pnp_register_driver (&parport_pc_pnp_driver); 3143 err = pnp_register_driver (&parport_pc_pnp_driver);
3144 if (r >= 0) { 3144 if (!err)
3145 pnp_registered_parport = 1; 3145 pnp_registered_parport = 1;
3146 count += r;
3147 }
3148 } 3146 }
3149 3147
3150 /* ISA ports and whatever (see asm/parport.h). */ 3148 /* ISA ports and whatever (see asm/parport.h). */
3151 count += parport_pc_find_nonpci_ports (autoirq, autodma); 3149 parport_pc_find_nonpci_ports (autoirq, autodma);
3152
3153 r = pci_register_driver (&parport_pc_pci_driver);
3154 if (r)
3155 return r;
3156 pci_registered_parport = 1;
3157 count += 1;
3158 3150
3159 return count; 3151 err = pci_register_driver (&parport_pc_pci_driver);
3152 if (!err)
3153 pci_registered_parport = 1;
3160} 3154}
3161 3155
3162/* 3156/*
@@ -3381,8 +3375,6 @@ __setup("parport_init_mode=",parport_init_mode_setup);
3381 3375
3382static int __init parport_pc_init(void) 3376static int __init parport_pc_init(void)
3383{ 3377{
3384 int count = 0;
3385
3386 if (parse_parport_params()) 3378 if (parse_parport_params())
3387 return -EINVAL; 3379 return -EINVAL;
3388 3380
@@ -3395,12 +3387,11 @@ static int __init parport_pc_init(void)
3395 break; 3387 break;
3396 if ((io_hi[i]) == PARPORT_IOHI_AUTO) 3388 if ((io_hi[i]) == PARPORT_IOHI_AUTO)
3397 io_hi[i] = 0x400 + io[i]; 3389 io_hi[i] = 0x400 + io[i];
3398 if (parport_pc_probe_port(io[i], io_hi[i], 3390 parport_pc_probe_port(io[i], io_hi[i],
3399 irqval[i], dmaval[i], NULL)) 3391 irqval[i], dmaval[i], NULL);
3400 count++;
3401 } 3392 }
3402 } else 3393 } else
3403 count += parport_pc_find_ports (irqval[0], dmaval[0]); 3394 parport_pc_find_ports (irqval[0], dmaval[0]);
3404 3395
3405 return 0; 3396 return 0;
3406} 3397}
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index ea62bed6bc83..bbbfd79adbaf 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -32,6 +32,7 @@
32#include <linux/kmod.h> 32#include <linux/kmod.h>
33 33
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/mutex.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
36 37
37#undef PARPORT_PARANOID 38#undef PARPORT_PARANOID
@@ -50,7 +51,7 @@ static DEFINE_SPINLOCK(full_list_lock);
50 51
51static LIST_HEAD(drivers); 52static LIST_HEAD(drivers);
52 53
53static DECLARE_MUTEX(registration_lock); 54static DEFINE_MUTEX(registration_lock);
54 55
55/* What you can do to a port that's gone away.. */ 56/* What you can do to a port that's gone away.. */
56static void dead_write_lines (struct parport *p, unsigned char b){} 57static void dead_write_lines (struct parport *p, unsigned char b){}
@@ -158,11 +159,11 @@ int parport_register_driver (struct parport_driver *drv)
158 if (list_empty(&portlist)) 159 if (list_empty(&portlist))
159 get_lowlevel_driver (); 160 get_lowlevel_driver ();
160 161
161 down(&registration_lock); 162 mutex_lock(&registration_lock);
162 list_for_each_entry(port, &portlist, list) 163 list_for_each_entry(port, &portlist, list)
163 drv->attach(port); 164 drv->attach(port);
164 list_add(&drv->list, &drivers); 165 list_add(&drv->list, &drivers);
165 up(&registration_lock); 166 mutex_unlock(&registration_lock);
166 167
167 return 0; 168 return 0;
168} 169}
@@ -188,11 +189,11 @@ void parport_unregister_driver (struct parport_driver *drv)
188{ 189{
189 struct parport *port; 190 struct parport *port;
190 191
191 down(&registration_lock); 192 mutex_lock(&registration_lock);
192 list_del_init(&drv->list); 193 list_del_init(&drv->list);
193 list_for_each_entry(port, &portlist, list) 194 list_for_each_entry(port, &portlist, list)
194 drv->detach(port); 195 drv->detach(port);
195 up(&registration_lock); 196 mutex_unlock(&registration_lock);
196} 197}
197 198
198static void free_port (struct parport *port) 199static void free_port (struct parport *port)
@@ -366,7 +367,7 @@ void parport_announce_port (struct parport *port)
366#endif 367#endif
367 368
368 parport_proc_register(port); 369 parport_proc_register(port);
369 down(&registration_lock); 370 mutex_lock(&registration_lock);
370 spin_lock_irq(&parportlist_lock); 371 spin_lock_irq(&parportlist_lock);
371 list_add_tail(&port->list, &portlist); 372 list_add_tail(&port->list, &portlist);
372 for (i = 1; i < 3; i++) { 373 for (i = 1; i < 3; i++) {
@@ -383,7 +384,7 @@ void parport_announce_port (struct parport *port)
383 if (slave) 384 if (slave)
384 attach_driver_chain(slave); 385 attach_driver_chain(slave);
385 } 386 }
386 up(&registration_lock); 387 mutex_unlock(&registration_lock);
387} 388}
388 389
389/** 390/**
@@ -409,7 +410,7 @@ void parport_remove_port(struct parport *port)
409{ 410{
410 int i; 411 int i;
411 412
412 down(&registration_lock); 413 mutex_lock(&registration_lock);
413 414
414 /* Spread the word. */ 415 /* Spread the word. */
415 detach_driver_chain (port); 416 detach_driver_chain (port);
@@ -436,7 +437,7 @@ void parport_remove_port(struct parport *port)
436 } 437 }
437 spin_unlock(&parportlist_lock); 438 spin_unlock(&parportlist_lock);
438 439
439 up(&registration_lock); 440 mutex_unlock(&registration_lock);
440 441
441 parport_proc_unregister(port); 442 parport_proc_unregister(port);
442 443
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 3eefe2cec72d..46825fee3ae4 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -19,7 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20 20
21#include <asm/pci-bridge.h> 21#include <asm/pci-bridge.h>
22#include <asm/semaphore.h> 22#include <linux/mutex.h>
23#include <asm/rtas.h> 23#include <asm/rtas.h>
24#include <asm/vio.h> 24#include <asm/vio.h>
25 25
@@ -27,7 +27,7 @@
27#include "rpaphp.h" 27#include "rpaphp.h"
28#include "rpadlpar.h" 28#include "rpadlpar.h"
29 29
30static DECLARE_MUTEX(rpadlpar_sem); 30static DEFINE_MUTEX(rpadlpar_mutex);
31 31
32#define DLPAR_MODULE_NAME "rpadlpar_io" 32#define DLPAR_MODULE_NAME "rpadlpar_io"
33 33
@@ -300,7 +300,7 @@ int dlpar_add_slot(char *drc_name)
300 int node_type; 300 int node_type;
301 int rc = -EIO; 301 int rc = -EIO;
302 302
303 if (down_interruptible(&rpadlpar_sem)) 303 if (mutex_lock_interruptible(&rpadlpar_mutex))
304 return -ERESTARTSYS; 304 return -ERESTARTSYS;
305 305
306 /* Find newly added node */ 306 /* Find newly added node */
@@ -324,7 +324,7 @@ int dlpar_add_slot(char *drc_name)
324 324
325 printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); 325 printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
326exit: 326exit:
327 up(&rpadlpar_sem); 327 mutex_unlock(&rpadlpar_mutex);
328 return rc; 328 return rc;
329} 329}
330 330
@@ -417,7 +417,7 @@ int dlpar_remove_slot(char *drc_name)
417 int node_type; 417 int node_type;
418 int rc = 0; 418 int rc = 0;
419 419
420 if (down_interruptible(&rpadlpar_sem)) 420 if (mutex_lock_interruptible(&rpadlpar_mutex))
421 return -ERESTARTSYS; 421 return -ERESTARTSYS;
422 422
423 dn = find_dlpar_node(drc_name, &node_type); 423 dn = find_dlpar_node(drc_name, &node_type);
@@ -439,7 +439,7 @@ int dlpar_remove_slot(char *drc_name)
439 } 439 }
440 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); 440 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
441exit: 441exit:
442 up(&rpadlpar_sem); 442 mutex_unlock(&rpadlpar_mutex);
443 return rc; 443 return rc;
444} 444}
445 445
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index c402da8e78ae..8cb9abde736b 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -15,6 +15,7 @@
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/mutex.h>
18 19
19#include <asm/sn/addrs.h> 20#include <asm/sn/addrs.h>
20#include <asm/sn/l1.h> 21#include <asm/sn/l1.h>
@@ -81,7 +82,7 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
81 .get_power_status = get_power_status, 82 .get_power_status = get_power_status,
82}; 83};
83 84
84static DECLARE_MUTEX(sn_hotplug_sem); 85static DEFINE_MUTEX(sn_hotplug_mutex);
85 86
86static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, 87static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
87 char *buf) 88 char *buf)
@@ -346,7 +347,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
346 int rc; 347 int rc;
347 348
348 /* Serialize the Linux PCI infrastructure */ 349 /* Serialize the Linux PCI infrastructure */
349 down(&sn_hotplug_sem); 350 mutex_lock(&sn_hotplug_mutex);
350 351
351 /* 352 /*
352 * Power-on and initialize the slot in the SN 353 * Power-on and initialize the slot in the SN
@@ -354,7 +355,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
354 */ 355 */
355 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); 356 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num);
356 if (rc) { 357 if (rc) {
357 up(&sn_hotplug_sem); 358 mutex_unlock(&sn_hotplug_mutex);
358 return rc; 359 return rc;
359 } 360 }
360 361
@@ -362,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
362 PCI_DEVFN(slot->device_num + 1, 0)); 363 PCI_DEVFN(slot->device_num + 1, 0));
363 if (!num_funcs) { 364 if (!num_funcs) {
364 dev_dbg(slot->pci_bus->self, "no device in slot\n"); 365 dev_dbg(slot->pci_bus->self, "no device in slot\n");
365 up(&sn_hotplug_sem); 366 mutex_unlock(&sn_hotplug_mutex);
366 return -ENODEV; 367 return -ENODEV;
367 } 368 }
368 369
@@ -402,7 +403,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
402 if (new_ppb) 403 if (new_ppb)
403 pci_bus_add_devices(new_bus); 404 pci_bus_add_devices(new_bus);
404 405
405 up(&sn_hotplug_sem); 406 mutex_unlock(&sn_hotplug_mutex);
406 407
407 if (rc == 0) 408 if (rc == 0)
408 dev_dbg(slot->pci_bus->self, 409 dev_dbg(slot->pci_bus->self,
@@ -422,7 +423,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
422 int rc; 423 int rc;
423 424
424 /* Acquire update access to the bus */ 425 /* Acquire update access to the bus */
425 down(&sn_hotplug_sem); 426 mutex_lock(&sn_hotplug_mutex);
426 427
427 /* is it okay to bring this slot down? */ 428 /* is it okay to bring this slot down? */
428 rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, 429 rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
@@ -450,7 +451,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
450 PCI_REQ_SLOT_DISABLE); 451 PCI_REQ_SLOT_DISABLE);
451 leaving: 452 leaving:
452 /* Release the bus lock */ 453 /* Release the bus lock */
453 up(&sn_hotplug_sem); 454 mutex_unlock(&sn_hotplug_mutex);
454 455
455 return rc; 456 return rc;
456} 457}
@@ -462,9 +463,9 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
462 struct pcibus_info *pcibus_info; 463 struct pcibus_info *pcibus_info;
463 464
464 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); 465 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
465 down(&sn_hotplug_sem); 466 mutex_lock(&sn_hotplug_mutex);
466 *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); 467 *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
467 up(&sn_hotplug_sem); 468 mutex_unlock(&sn_hotplug_mutex);
468 return 0; 469 return 0;
469} 470}
470 471
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index b68eef251614..bb19c64073c6 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -47,7 +47,7 @@ static void card_remove(struct pnp_dev * dev)
47{ 47{
48 dev->card_link = NULL; 48 dev->card_link = NULL;
49} 49}
50 50
51static void card_remove_first(struct pnp_dev * dev) 51static void card_remove_first(struct pnp_dev * dev)
52{ 52{
53 struct pnp_card_driver * drv = to_pnp_card_driver(dev->driver); 53 struct pnp_card_driver * drv = to_pnp_card_driver(dev->driver);
@@ -361,7 +361,7 @@ static int card_resume(struct pnp_dev *dev)
361 361
362int pnp_register_card_driver(struct pnp_card_driver * drv) 362int pnp_register_card_driver(struct pnp_card_driver * drv)
363{ 363{
364 int count; 364 int error;
365 struct list_head *pos, *temp; 365 struct list_head *pos, *temp;
366 366
367 drv->link.name = drv->name; 367 drv->link.name = drv->name;
@@ -372,21 +372,19 @@ int pnp_register_card_driver(struct pnp_card_driver * drv)
372 drv->link.suspend = drv->suspend ? card_suspend : NULL; 372 drv->link.suspend = drv->suspend ? card_suspend : NULL;
373 drv->link.resume = drv->resume ? card_resume : NULL; 373 drv->link.resume = drv->resume ? card_resume : NULL;
374 374
375 count = pnp_register_driver(&drv->link); 375 error = pnp_register_driver(&drv->link);
376 if (count < 0) 376 if (error < 0)
377 return count; 377 return error;
378 378
379 spin_lock(&pnp_lock); 379 spin_lock(&pnp_lock);
380 list_add_tail(&drv->global_list, &pnp_card_drivers); 380 list_add_tail(&drv->global_list, &pnp_card_drivers);
381 spin_unlock(&pnp_lock); 381 spin_unlock(&pnp_lock);
382 382
383 count = 0;
384
385 list_for_each_safe(pos,temp,&pnp_cards){ 383 list_for_each_safe(pos,temp,&pnp_cards){
386 struct pnp_card *card = list_entry(pos, struct pnp_card, global_list); 384 struct pnp_card *card = list_entry(pos, struct pnp_card, global_list);
387 count += card_probe(card,drv); 385 card_probe(card,drv);
388 } 386 }
389 return count; 387 return 0;
390} 388}
391 389
392/** 390/**
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 7cafacdd12b0..e54c15383193 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -201,31 +201,14 @@ struct bus_type pnp_bus_type = {
201 .resume = pnp_bus_resume, 201 .resume = pnp_bus_resume,
202}; 202};
203 203
204
205static int count_devices(struct device * dev, void * c)
206{
207 int * count = c;
208 (*count)++;
209 return 0;
210}
211
212int pnp_register_driver(struct pnp_driver *drv) 204int pnp_register_driver(struct pnp_driver *drv)
213{ 205{
214 int count;
215
216 pnp_dbg("the driver '%s' has been registered", drv->name); 206 pnp_dbg("the driver '%s' has been registered", drv->name);
217 207
218 drv->driver.name = drv->name; 208 drv->driver.name = drv->name;
219 drv->driver.bus = &pnp_bus_type; 209 drv->driver.bus = &pnp_bus_type;
220 210
221 count = driver_register(&drv->driver); 211 return driver_register(&drv->driver);
222
223 /* get the number of initial matches */
224 if (count >= 0){
225 count = 0;
226 driver_for_each_device(&drv->driver, NULL, &count, count_devices);
227 }
228 return count;
229} 212}
230 213
231void pnp_unregister_driver(struct pnp_driver *drv) 214void pnp_unregister_driver(struct pnp_driver *drv)
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index b1b4b683cbdd..ac7c2bb6c69e 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -42,6 +42,7 @@
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/isapnp.h> 44#include <linux/isapnp.h>
45#include <linux/mutex.h>
45#include <asm/io.h> 46#include <asm/io.h>
46 47
47#if 0 48#if 0
@@ -92,7 +93,7 @@ MODULE_LICENSE("GPL");
92#define _LTAG_FIXEDMEM32RANGE 0x86 93#define _LTAG_FIXEDMEM32RANGE 0x86
93 94
94static unsigned char isapnp_checksum_value; 95static unsigned char isapnp_checksum_value;
95static DECLARE_MUTEX(isapnp_cfg_mutex); 96static DEFINE_MUTEX(isapnp_cfg_mutex);
96static int isapnp_detected; 97static int isapnp_detected;
97static int isapnp_csn_count; 98static int isapnp_csn_count;
98 99
@@ -903,7 +904,7 @@ int isapnp_cfg_begin(int csn, int logdev)
903{ 904{
904 if (csn < 1 || csn > isapnp_csn_count || logdev > 10) 905 if (csn < 1 || csn > isapnp_csn_count || logdev > 10)
905 return -EINVAL; 906 return -EINVAL;
906 down(&isapnp_cfg_mutex); 907 mutex_lock(&isapnp_cfg_mutex);
907 isapnp_wait(); 908 isapnp_wait();
908 isapnp_key(); 909 isapnp_key();
909 isapnp_wake(csn); 910 isapnp_wake(csn);
@@ -929,7 +930,7 @@ int isapnp_cfg_begin(int csn, int logdev)
929int isapnp_cfg_end(void) 930int isapnp_cfg_end(void)
930{ 931{
931 isapnp_wait(); 932 isapnp_wait();
932 up(&isapnp_cfg_mutex); 933 mutex_unlock(&isapnp_cfg_mutex);
933 return 0; 934 return 0;
934} 935}
935 936
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
new file mode 100644
index 000000000000..929dd8090578
--- /dev/null
+++ b/drivers/rtc/Kconfig
@@ -0,0 +1,165 @@
1#
2# RTC class/drivers configuration
3#
4
5menu "Real Time Clock"
6
7config RTC_LIB
8 tristate
9
10config RTC_CLASS
11 tristate "RTC class"
12 depends on EXPERIMENTAL
13 default n
14 select RTC_LIB
15 help
16 Generic RTC class support. If you say yes here, you will
17 be allowed to plug one or more RTCs to your system. You will
18 probably want to enable one or more of the interfaces below.
19
20 This driver can also be built as a module. If so, the module
21 will be called rtc-class.
22
23config RTC_HCTOSYS
24 bool "Set system time from RTC on startup"
25 depends on RTC_CLASS = y
26 default y
27 help
28 If you say yes here, the system time will be set using
29 the value read from the specified RTC device. This is useful
30 in order to avoid unnecessary fsck runs.
31
32config RTC_HCTOSYS_DEVICE
33 string "The RTC to read the time from"
34 depends on RTC_HCTOSYS = y
35 default "rtc0"
36 help
37 The RTC device that will be used as the source for
38 the system time, usually rtc0.
39
40comment "RTC interfaces"
41 depends on RTC_CLASS
42
43config RTC_INTF_SYSFS
44 tristate "sysfs"
45 depends on RTC_CLASS && SYSFS
46 default RTC_CLASS
47 help
48 Say yes here if you want to use your RTC using the sysfs
49 interface, /sys/class/rtc/rtcX .
50
51 This driver can also be built as a module. If so, the module
52 will be called rtc-sysfs.
53
54config RTC_INTF_PROC
55 tristate "proc"
56 depends on RTC_CLASS && PROC_FS
57 default RTC_CLASS
58 help
59 Say yes here if you want to use your RTC using the proc
60 interface, /proc/driver/rtc .
61
62 This driver can also be built as a module. If so, the module
63 will be called rtc-proc.
64
65config RTC_INTF_DEV
66 tristate "dev"
67 depends on RTC_CLASS
68 default RTC_CLASS
69 help
70 Say yes here if you want to use your RTC using the dev
71 interface, /dev/rtc .
72
73 This driver can also be built as a module. If so, the module
74 will be called rtc-dev.
75
76comment "RTC drivers"
77 depends on RTC_CLASS
78
79config RTC_DRV_X1205
80 tristate "Xicor/Intersil X1205"
81 depends on RTC_CLASS && I2C
82 help
83 If you say yes here you get support for the
84 Xicor/Intersil X1205 RTC chip.
85
86 This driver can also be built as a module. If so, the module
87 will be called rtc-x1205.
88
89config RTC_DRV_DS1672
90 tristate "Dallas/Maxim DS1672"
91 depends on RTC_CLASS && I2C
92 help
93 If you say yes here you get support for the
94 Dallas/Maxim DS1672 timekeeping chip.
95
96 This driver can also be built as a module. If so, the module
97 will be called rtc-ds1672.
98
99config RTC_DRV_PCF8563
100 tristate "Philips PCF8563/Epson RTC8564"
101 depends on RTC_CLASS && I2C
102 help
103 If you say yes here you get support for the
104 Philips PCF8563 RTC chip. The Epson RTC8564
105 should work as well.
106
107 This driver can also be built as a module. If so, the module
108 will be called rtc-pcf8563.
109
110config RTC_DRV_RS5C372
111 tristate "Ricoh RS5C372A/B"
112 depends on RTC_CLASS && I2C
113 help
114 If you say yes here you get support for the
115 Ricoh RS5C372A and RS5C372B RTC chips.
116
117 This driver can also be built as a module. If so, the module
118 will be called rtc-rs5c372.
119
120config RTC_DRV_M48T86
121 tristate "ST M48T86/Dallas DS12887"
122 depends on RTC_CLASS
123 help
124 If you say Y here you will get support for the
125 ST M48T86 and Dallas DS12887 RTC chips.
126
127 This driver can also be built as a module. If so, the module
128 will be called rtc-m48t86.
129
130config RTC_DRV_EP93XX
131 tristate "Cirrus Logic EP93XX"
132 depends on RTC_CLASS && ARCH_EP93XX
133 help
134 If you say yes here you get support for the
135 RTC embedded in the Cirrus Logic EP93XX processors.
136
137 This driver can also be built as a module. If so, the module
138 will be called rtc-ep93xx.
139
140config RTC_DRV_SA1100
141 tristate "SA11x0/PXA2xx"
142 depends on RTC_CLASS && (ARCH_SA1100 || ARCH_PXA)
143 help
144 If you say Y here you will get access to the real time clock
145 built into your SA11x0 or PXA2xx CPU.
146
147 To compile this driver as a module, choose M here: the
148 module will be called rtc-sa1100.
149
150config RTC_DRV_TEST
151 tristate "Test driver/device"
152 depends on RTC_CLASS
153 help
154 If you say yes here you get support for the
155 RTC test driver. It's a software RTC which can be
156 used to test the RTC subsystem APIs. It gets
157 the time from the system clock.
158 You want this driver only if you are doing development
159 on the RTC subsystem. Please read the source code
160 for further details.
161
162 This driver can also be built as a module. If so, the module
163 will be called rtc-test.
164
165endmenu
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
new file mode 100644
index 000000000000..8d4c7fe88d58
--- /dev/null
+++ b/drivers/rtc/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for RTC class/drivers.
3#
4
5obj-$(CONFIG_RTC_LIB) += rtc-lib.o
6obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
7obj-$(CONFIG_RTC_CLASS) += rtc-core.o
8rtc-core-y := class.o interface.o
9
10obj-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
11obj-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
12obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
13
14obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
15obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
16obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
17obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
18obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
19obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
20obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
21obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
new file mode 100644
index 000000000000..8533936d50d8
--- /dev/null
+++ b/drivers/rtc/class.c
@@ -0,0 +1,145 @@
1/*
2 * RTC subsystem, base class
3 *
4 * Copyright (C) 2005 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * class skeleton from drivers/hwmon/hwmon.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/module.h>
15#include <linux/rtc.h>
16#include <linux/kdev_t.h>
17#include <linux/idr.h>
18
19static DEFINE_IDR(rtc_idr);
20static DEFINE_MUTEX(idr_lock);
21struct class *rtc_class;
22
23static void rtc_device_release(struct class_device *class_dev)
24{
25 struct rtc_device *rtc = to_rtc_device(class_dev);
26 mutex_lock(&idr_lock);
27 idr_remove(&rtc_idr, rtc->id);
28 mutex_unlock(&idr_lock);
29 kfree(rtc);
30}
31
32/**
33 * rtc_device_register - register w/ RTC class
34 * @dev: the device to register
35 *
36 * rtc_device_unregister() must be called when the class device is no
37 * longer needed.
38 *
39 * Returns the pointer to the new struct class device.
40 */
41struct rtc_device *rtc_device_register(const char *name, struct device *dev,
42 struct rtc_class_ops *ops,
43 struct module *owner)
44{
45 struct rtc_device *rtc;
46 int id, err;
47
48 if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) {
49 err = -ENOMEM;
50 goto exit;
51 }
52
53
54 mutex_lock(&idr_lock);
55 err = idr_get_new(&rtc_idr, NULL, &id);
56 mutex_unlock(&idr_lock);
57
58 if (err < 0)
59 goto exit;
60
61 id = id & MAX_ID_MASK;
62
63 rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
64 if (rtc == NULL) {
65 err = -ENOMEM;
66 goto exit_idr;
67 }
68
69 rtc->id = id;
70 rtc->ops = ops;
71 rtc->owner = owner;
72 rtc->class_dev.dev = dev;
73 rtc->class_dev.class = rtc_class;
74 rtc->class_dev.release = rtc_device_release;
75
76 mutex_init(&rtc->ops_lock);
77 spin_lock_init(&rtc->irq_lock);
78 spin_lock_init(&rtc->irq_task_lock);
79
80 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
81 snprintf(rtc->class_dev.class_id, BUS_ID_SIZE, "rtc%d", id);
82
83 err = class_device_register(&rtc->class_dev);
84 if (err)
85 goto exit_kfree;
86
87 dev_info(dev, "rtc core: registered %s as %s\n",
88 rtc->name, rtc->class_dev.class_id);
89
90 return rtc;
91
92exit_kfree:
93 kfree(rtc);
94
95exit_idr:
96 idr_remove(&rtc_idr, id);
97
98exit:
99 return ERR_PTR(err);
100}
101EXPORT_SYMBOL_GPL(rtc_device_register);
102
103
104/**
105 * rtc_device_unregister - removes the previously registered RTC class device
106 *
107 * @rtc: the RTC class device to destroy
108 */
109void rtc_device_unregister(struct rtc_device *rtc)
110{
111 mutex_lock(&rtc->ops_lock);
112 rtc->ops = NULL;
113 mutex_unlock(&rtc->ops_lock);
114 class_device_unregister(&rtc->class_dev);
115}
116EXPORT_SYMBOL_GPL(rtc_device_unregister);
117
118int rtc_interface_register(struct class_interface *intf)
119{
120 intf->class = rtc_class;
121 return class_interface_register(intf);
122}
123EXPORT_SYMBOL_GPL(rtc_interface_register);
124
125static int __init rtc_init(void)
126{
127 rtc_class = class_create(THIS_MODULE, "rtc");
128 if (IS_ERR(rtc_class)) {
129 printk(KERN_ERR "%s: couldn't create class\n", __FILE__);
130 return PTR_ERR(rtc_class);
131 }
132 return 0;
133}
134
135static void __exit rtc_exit(void)
136{
137 class_destroy(rtc_class);
138}
139
140module_init(rtc_init);
141module_exit(rtc_exit);
142
143MODULE_AUTHOR("Alessandro Zummo <a.zummo@towerteh.it>");
144MODULE_DESCRIPTION("RTC class support");
145MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
new file mode 100644
index 000000000000..d02fe9a0001f
--- /dev/null
+++ b/drivers/rtc/hctosys.c
@@ -0,0 +1,69 @@
1/*
2 * RTC subsystem, initialize system time on startup
3 *
4 * Copyright (C) 2005 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#include <linux/rtc.h>
13
14/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
15 * whether it stores the most close value or the value with partial
16 * seconds truncated. However, it is important that we use it to store
17 * the truncated value. This is because otherwise it is necessary,
18 * in an rtc sync function, to read both xtime.tv_sec and
19 * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
20 * of >32bits is not possible. So storing the most close value would
21 * slow down the sync API. So here we have the truncated value and
22 * the best guess is to add 0.5s.
23 */
24
25static int __init rtc_hctosys(void)
26{
27 int err;
28 struct rtc_time tm;
29 struct class_device *class_dev = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
30
31 if (class_dev == NULL) {
32 printk("%s: unable to open rtc device (%s)\n",
33 __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
34 return -ENODEV;
35 }
36
37 err = rtc_read_time(class_dev, &tm);
38 if (err == 0) {
39 err = rtc_valid_tm(&tm);
40 if (err == 0) {
41 struct timespec tv;
42
43 tv.tv_nsec = NSEC_PER_SEC >> 1;
44
45 rtc_tm_to_time(&tm, &tv.tv_sec);
46
47 do_settimeofday(&tv);
48
49 dev_info(class_dev->dev,
50 "setting the system clock to "
51 "%d-%02d-%02d %02d:%02d:%02d (%u)\n",
52 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
53 tm.tm_hour, tm.tm_min, tm.tm_sec,
54 (unsigned int) tv.tv_sec);
55 }
56 else
57 dev_err(class_dev->dev,
58 "hctosys: invalid date/time\n");
59 }
60 else
61 dev_err(class_dev->dev,
62 "hctosys: unable to read the hardware clock\n");
63
64 rtc_class_close(class_dev);
65
66 return 0;
67}
68
69late_initcall(rtc_hctosys);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
new file mode 100644
index 000000000000..56e490709b87
--- /dev/null
+++ b/drivers/rtc/interface.c
@@ -0,0 +1,277 @@
1/*
2 * RTC subsystem, interface functions
3 *
4 * Copyright (C) 2005 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * based on arch/arm/common/rtctime.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/rtc.h>
15
16int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm)
17{
18 int err;
19 struct rtc_device *rtc = to_rtc_device(class_dev);
20
21 err = mutex_lock_interruptible(&rtc->ops_lock);
22 if (err)
23 return -EBUSY;
24
25 if (!rtc->ops)
26 err = -ENODEV;
27 else if (!rtc->ops->read_time)
28 err = -EINVAL;
29 else {
30 memset(tm, 0, sizeof(struct rtc_time));
31 err = rtc->ops->read_time(class_dev->dev, tm);
32 }
33
34 mutex_unlock(&rtc->ops_lock);
35 return err;
36}
37EXPORT_SYMBOL_GPL(rtc_read_time);
38
39int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm)
40{
41 int err;
42 struct rtc_device *rtc = to_rtc_device(class_dev);
43
44 err = rtc_valid_tm(tm);
45 if (err != 0)
46 return err;
47
48 err = mutex_lock_interruptible(&rtc->ops_lock);
49 if (err)
50 return -EBUSY;
51
52 if (!rtc->ops)
53 err = -ENODEV;
54 else if (!rtc->ops->set_time)
55 err = -EINVAL;
56 else
57 err = rtc->ops->set_time(class_dev->dev, tm);
58
59 mutex_unlock(&rtc->ops_lock);
60 return err;
61}
62EXPORT_SYMBOL_GPL(rtc_set_time);
63
64int rtc_set_mmss(struct class_device *class_dev, unsigned long secs)
65{
66 int err;
67 struct rtc_device *rtc = to_rtc_device(class_dev);
68
69 err = mutex_lock_interruptible(&rtc->ops_lock);
70 if (err)
71 return -EBUSY;
72
73 if (!rtc->ops)
74 err = -ENODEV;
75 else if (rtc->ops->set_mmss)
76 err = rtc->ops->set_mmss(class_dev->dev, secs);
77 else if (rtc->ops->read_time && rtc->ops->set_time) {
78 struct rtc_time new, old;
79
80 err = rtc->ops->read_time(class_dev->dev, &old);
81 if (err == 0) {
82 rtc_time_to_tm(secs, &new);
83
84 /*
85 * avoid writing when we're going to change the day of
86 * the month. We will retry in the next minute. This
87 * basically means that if the RTC must not drift
88 * by more than 1 minute in 11 minutes.
89 */
90 if (!((old.tm_hour == 23 && old.tm_min == 59) ||
91 (new.tm_hour == 23 && new.tm_min == 59)))
92 err = rtc->ops->set_time(class_dev->dev, &new);
93 }
94 }
95 else
96 err = -EINVAL;
97
98 mutex_unlock(&rtc->ops_lock);
99
100 return err;
101}
102EXPORT_SYMBOL_GPL(rtc_set_mmss);
103
104int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
105{
106 int err;
107 struct rtc_device *rtc = to_rtc_device(class_dev);
108
109 err = mutex_lock_interruptible(&rtc->ops_lock);
110 if (err)
111 return -EBUSY;
112
113 if (rtc->ops == NULL)
114 err = -ENODEV;
115 else if (!rtc->ops->read_alarm)
116 err = -EINVAL;
117 else {
118 memset(alarm, 0, sizeof(struct rtc_wkalrm));
119 err = rtc->ops->read_alarm(class_dev->dev, alarm);
120 }
121
122 mutex_unlock(&rtc->ops_lock);
123 return err;
124}
125EXPORT_SYMBOL_GPL(rtc_read_alarm);
126
127int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm)
128{
129 int err;
130 struct rtc_device *rtc = to_rtc_device(class_dev);
131
132 err = mutex_lock_interruptible(&rtc->ops_lock);
133 if (err)
134 return -EBUSY;
135
136 if (!rtc->ops)
137 err = -ENODEV;
138 else if (!rtc->ops->set_alarm)
139 err = -EINVAL;
140 else
141 err = rtc->ops->set_alarm(class_dev->dev, alarm);
142
143 mutex_unlock(&rtc->ops_lock);
144 return err;
145}
146EXPORT_SYMBOL_GPL(rtc_set_alarm);
147
148void rtc_update_irq(struct class_device *class_dev,
149 unsigned long num, unsigned long events)
150{
151 struct rtc_device *rtc = to_rtc_device(class_dev);
152
153 spin_lock(&rtc->irq_lock);
154 rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
155 spin_unlock(&rtc->irq_lock);
156
157 spin_lock(&rtc->irq_task_lock);
158 if (rtc->irq_task)
159 rtc->irq_task->func(rtc->irq_task->private_data);
160 spin_unlock(&rtc->irq_task_lock);
161
162 wake_up_interruptible(&rtc->irq_queue);
163 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
164}
165EXPORT_SYMBOL_GPL(rtc_update_irq);
166
167struct class_device *rtc_class_open(char *name)
168{
169 struct class_device *class_dev = NULL,
170 *class_dev_tmp;
171
172 down(&rtc_class->sem);
173 list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
174 if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
175 class_dev = class_dev_tmp;
176 break;
177 }
178 }
179
180 if (class_dev) {
181 if (!try_module_get(to_rtc_device(class_dev)->owner))
182 class_dev = NULL;
183 }
184 up(&rtc_class->sem);
185
186 return class_dev;
187}
188EXPORT_SYMBOL_GPL(rtc_class_open);
189
190void rtc_class_close(struct class_device *class_dev)
191{
192 module_put(to_rtc_device(class_dev)->owner);
193}
194EXPORT_SYMBOL_GPL(rtc_class_close);
195
196int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task)
197{
198 int retval = -EBUSY;
199 struct rtc_device *rtc = to_rtc_device(class_dev);
200
201 if (task == NULL || task->func == NULL)
202 return -EINVAL;
203
204 spin_lock(&rtc->irq_task_lock);
205 if (rtc->irq_task == NULL) {
206 rtc->irq_task = task;
207 retval = 0;
208 }
209 spin_unlock(&rtc->irq_task_lock);
210
211 return retval;
212}
213EXPORT_SYMBOL_GPL(rtc_irq_register);
214
215void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task)
216{
217 struct rtc_device *rtc = to_rtc_device(class_dev);
218
219 spin_lock(&rtc->irq_task_lock);
220 if (rtc->irq_task == task)
221 rtc->irq_task = NULL;
222 spin_unlock(&rtc->irq_task_lock);
223}
224EXPORT_SYMBOL_GPL(rtc_irq_unregister);
225
226int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int enabled)
227{
228 int err = 0;
229 unsigned long flags;
230 struct rtc_device *rtc = to_rtc_device(class_dev);
231
232 spin_lock_irqsave(&rtc->irq_task_lock, flags);
233 if (rtc->irq_task != task)
234 err = -ENXIO;
235 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
236
237 if (err == 0)
238 err = rtc->ops->irq_set_state(class_dev->dev, enabled);
239
240 return err;
241}
242EXPORT_SYMBOL_GPL(rtc_irq_set_state);
243
244int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq)
245{
246 int err = 0, tmp = 0;
247 unsigned long flags;
248 struct rtc_device *rtc = to_rtc_device(class_dev);
249
250 /* allowed range is 2-8192 */
251 if (freq < 2 || freq > 8192)
252 return -EINVAL;
253/*
254 FIXME: this does not belong here, will move where appropriate
255 at a later stage. It cannot hurt right now, trust me :)
256 if ((freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
257 return -EACCES;
258*/
259 /* check if freq is a power of 2 */
260 while (freq > (1 << tmp))
261 tmp++;
262
263 if (freq != (1 << tmp))
264 return -EINVAL;
265
266 spin_lock_irqsave(&rtc->irq_task_lock, flags);
267 if (rtc->irq_task != task)
268 err = -ENXIO;
269 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
270
271 if (err == 0) {
272 err = rtc->ops->irq_set_freq(class_dev->dev, freq);
273 if (err == 0)
274 rtc->irq_freq = freq;
275 }
276 return err;
277}
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
new file mode 100644
index 000000000000..b1e3e6179e56
--- /dev/null
+++ b/drivers/rtc/rtc-dev.c
@@ -0,0 +1,382 @@
1/*
2 * RTC subsystem, dev interface
3 *
4 * Copyright (C) 2005 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * based on arch/arm/common/rtctime.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/module.h>
15#include <linux/rtc.h>
16
17static struct class *rtc_dev_class;
18static dev_t rtc_devt;
19
20#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
21
22static int rtc_dev_open(struct inode *inode, struct file *file)
23{
24 int err;
25 struct rtc_device *rtc = container_of(inode->i_cdev,
26 struct rtc_device, char_dev);
27 struct rtc_class_ops *ops = rtc->ops;
28
29 /* We keep the lock as long as the device is in use
30 * and return immediately if busy
31 */
32 if (!(mutex_trylock(&rtc->char_lock)))
33 return -EBUSY;
34
35 file->private_data = &rtc->class_dev;
36
37 err = ops->open ? ops->open(rtc->class_dev.dev) : 0;
38 if (err == 0) {
39 spin_lock_irq(&rtc->irq_lock);
40 rtc->irq_data = 0;
41 spin_unlock_irq(&rtc->irq_lock);
42
43 return 0;
44 }
45
46 /* something has gone wrong, release the lock */
47 mutex_unlock(&rtc->char_lock);
48 return err;
49}
50
51
/* Blocking read of accumulated RTC interrupt data.
 *
 * Returns exactly sizeof(unsigned long) bytes holding whatever the
 * interrupt path stored in rtc->irq_data (presumably the legacy
 * /dev/rtc event-flags/count encoding -- confirm against the IRQ
 * update path); the value is cleared once consumed.
 */
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);

	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	/* reads are all-or-nothing: one unsigned long per read() */
	if (count < sizeof(unsigned long))
		return -EINVAL;

	/* classic sleep/wake loop: register on the wait queue before
	 * testing irq_data so a wake-up between the test and schedule()
	 * cannot be lost */
	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		/* atomically consume whatever the IRQ path accumulated */
		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* let the driver post-process the value if it wants to */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->class_dev.dev, data);

		ret = put_user(data, (unsigned long __user *)buf);
		if (ret == 0)
			ret = sizeof(unsigned long);
	}
	return ret;
}
101
102static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
103{
104 struct rtc_device *rtc = to_rtc_device(file->private_data);
105 unsigned long data;
106
107 poll_wait(file, &rtc->irq_queue, wait);
108
109 data = rtc->irq_data;
110
111 return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
112}
113
/* Character-device ioctl handler.
 *
 * The driver's own ->ioctl hook gets first crack at every command;
 * only when it is absent, or returns -EINVAL to signal "command not
 * implemented", do we emulate the command here via the generic rtc
 * class helpers.  Any other driver return value (including other
 * errors) is passed straight back to userspace.
 */
static int rtc_dev_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct class_device *class_dev = file->private_data;
	struct rtc_device *rtc = to_rtc_device(class_dev);
	struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	/* avoid conflicting IRQ users: while a kernel task owns the
	 * IRQ, userspace may not retune or toggle it */
	if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
		spin_lock(&rtc->irq_task_lock);
		if (rtc->irq_task)
			err = -EBUSY;
		spin_unlock(&rtc->irq_task_lock);

		if (err < 0)
			return err;
	}

	/* try the driver's ioctl interface */
	if (ops->ioctl) {
		err = ops->ioctl(class_dev->dev, cmd, arg);
		if (err != -EINVAL)
			return err;
	}

	/* if the driver does not provide the ioctl interface
	 * or if that particular ioctl was not implemented
	 * (-EINVAL), we will try to emulate here.
	 */

	switch (cmd) {
	case RTC_ALM_READ:
		err = rtc_read_alarm(class_dev, &alarm);
		if (err < 0)
			return err;

		/* legacy ABI: only the time part, no enabled/pending */
		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			return -EFAULT;
		break;

	case RTC_ALM_SET:
		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		/* RTC_ALM_SET carries hour/min/sec only; mark the date
		 * fields as "unspecified" for the driver */
		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_mday = -1;
		alarm.time.tm_mon = -1;
		alarm.time.tm_year = -1;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;
		err = rtc_set_alarm(class_dev, &alarm);
		break;

	case RTC_RD_TIME:
		err = rtc_read_time(class_dev, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			return -EFAULT;
		break;

	case RTC_SET_TIME:
		/* setting the clock is a privileged operation */
		if (!capable(CAP_SYS_TIME))
			return -EACCES;

		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		err = rtc_set_time(class_dev, &tm);
		break;
#if 0
	case RTC_EPOCH_SET:
#ifndef rtc_epoch
		/*
		 * There were no RTC clocks before 1900.
		 */
		if (arg < 1900) {
			err = -EINVAL;
			break;
		}
		if (!capable(CAP_SYS_TIME)) {
			err = -EACCES;
			break;
		}
		rtc_epoch = arg;
		err = 0;
#endif
		break;

	case RTC_EPOCH_READ:
		err = put_user(rtc_epoch, (unsigned long __user *)uarg);
		break;
#endif
	case RTC_WKALM_SET:
		/* full wakeup-alarm struct, including enabled/pending */
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		err = rtc_set_alarm(class_dev, &alarm);
		break;

	case RTC_WKALM_RD:
		err = rtc_read_alarm(class_dev, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			return -EFAULT;
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
237
/* Close /dev/rtcN: give the driver a chance to clean up, then drop
 * the exclusivity lock taken in rtc_dev_open().
 */
static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);

	if (rtc->ops->release)
		rtc->ops->release(rtc->class_dev.dev);

	mutex_unlock(&rtc->char_lock);
	return 0;
}
248
/* Wire SIGIO (fasync) delivery for this file onto the RTC's async
 * notification queue.
 */
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);
	return fasync_helper(fd, file, on, &rtc->async_queue);
}
254
/* file operations for /dev/rtcN; seeking makes no sense on an RTC */
static struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.ioctl		= rtc_dev_ioctl,
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};
265
266/* insertion/removal hooks */
267
/* Class-interface "add" hook: runs whenever a new RTC class device is
 * registered.  Creates the matching character device (major from
 * rtc_devt, minor = rtc->id) plus its class device node, and
 * initialises the per-device synchronisation primitives used by the
 * file operations above.
 */
static int rtc_dev_add_device(struct class_device *class_dev,
				struct class_interface *class_intf)
{
	int err = 0;
	struct rtc_device *rtc = to_rtc_device(class_dev);

	/* minors come from a fixed-size region reserved at init time */
	if (rtc->id >= RTC_DEV_MAX) {
		dev_err(class_dev->dev, "too many RTCs\n");
		return -EINVAL;
	}

	mutex_init(&rtc->char_lock);
	spin_lock_init(&rtc->irq_lock);
	init_waitqueue_head(&rtc->irq_queue);

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;

	if (cdev_add(&rtc->char_dev, MKDEV(MAJOR(rtc_devt), rtc->id), 1)) {
		/* NOTE(review): calling cdev_del() after a *failed*
		 * cdev_add() looks suspect -- a failed add should leave
		 * nothing to delete; confirm against the cdev API. */
		cdev_del(&rtc->char_dev);
		dev_err(class_dev->dev,
			"failed to add char device %d:%d\n",
			MAJOR(rtc_devt), rtc->id);
		return -ENODEV;
	}

	rtc->rtc_dev = class_device_create(rtc_dev_class, NULL,
					MKDEV(MAJOR(rtc_devt), rtc->id),
					class_dev->dev, "rtc%d", rtc->id);
	if (IS_ERR(rtc->rtc_dev)) {
		dev_err(class_dev->dev, "cannot create rtc_dev device\n");
		err = PTR_ERR(rtc->rtc_dev);
		goto err_cdev_del;
	}

	dev_info(class_dev->dev, "rtc intf: dev (%d:%d)\n",
		MAJOR(rtc->rtc_dev->devt),
		MINOR(rtc->rtc_dev->devt));

	return 0;

err_cdev_del:

	cdev_del(&rtc->char_dev);
	return err;
}
314
315static void rtc_dev_remove_device(struct class_device *class_dev,
316 struct class_interface *class_intf)
317{
318 struct rtc_device *rtc = to_rtc_device(class_dev);
319
320 if (rtc->rtc_dev) {
321 dev_dbg(class_dev->dev, "removing char %d:%d\n",
322 MAJOR(rtc->rtc_dev->devt),
323 MINOR(rtc->rtc_dev->devt));
324
325 class_device_unregister(rtc->rtc_dev);
326 cdev_del(&rtc->char_dev);
327 }
328}
329
330/* interface registration */
331
/* hooks invoked for every rtc class device that comes and goes */
static struct class_interface rtc_dev_interface = {
	.add = &rtc_dev_add_device,
	.remove = &rtc_dev_remove_device,
};
336
/* Module init: create the "rtc-dev" class, reserve RTC_DEV_MAX char
 * device minors, and hook into the rtc class so every registered RTC
 * gains a /dev node.  Failures unwind through the goto chain in
 * reverse order of construction.
 */
static int __init rtc_dev_init(void)
{
	int err;

	rtc_dev_class = class_create(THIS_MODULE, "rtc-dev");
	if (IS_ERR(rtc_dev_class))
		return PTR_ERR(rtc_dev_class);

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
		goto err_destroy_class;
	}

	err = rtc_interface_register(&rtc_dev_interface);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to register the interface\n",
			__FILE__);
		goto err_unregister_chrdev;
	}

	return 0;

err_unregister_chrdev:
	unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);

err_destroy_class:
	class_destroy(rtc_dev_class);

	return err;
}
369
/* Module exit: undo rtc_dev_init() in reverse order --
 * interface, class, then the char device region.
 */
static void __exit rtc_dev_exit(void)
{
	class_interface_unregister(&rtc_dev_interface);
	class_destroy(rtc_dev_class);
	unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}

module_init(rtc_dev_init);
module_exit(rtc_dev_exit);

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("RTC class dev interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
new file mode 100644
index 000000000000..358695a416f3
--- /dev/null
+++ b/drivers/rtc/rtc-ds1672.c
@@ -0,0 +1,233 @@
1/*
2 * An rtc/i2c driver for the Dallas DS1672
3 * Copyright 2005 Alessandro Zummo
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/i2c.h>
12#include <linux/rtc.h>
13
14#define DRV_VERSION "0.2"
15
16/* Addresses to scan: none. This chip cannot be detected. */
17static unsigned short normal_i2c[] = { I2C_CLIENT_END };
18
19/* Insmod parameters */
20I2C_CLIENT_INSMOD;
21
22/* Registers */
23
24#define DS1672_REG_CNT_BASE 0
25#define DS1672_REG_CONTROL 4
26#define DS1672_REG_TRICKLE 5
27
28
29/* Prototypes */
30static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind);
31
32/*
33 * In the routines that deal directly with the ds1672 hardware, we use
34 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch
35 * Epoch is initialized as 2000. Time is set to UTC.
36 */
37static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
38{
39 unsigned long time;
40 unsigned char addr = DS1672_REG_CNT_BASE;
41 unsigned char buf[4];
42
43 struct i2c_msg msgs[] = {
44 { client->addr, 0, 1, &addr }, /* setup read ptr */
45 { client->addr, I2C_M_RD, 4, buf }, /* read date */
46 };
47
48 /* read date registers */
49 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
50 dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
51 return -EIO;
52 }
53
54 dev_dbg(&client->dev,
55 "%s: raw read data - counters=%02x,%02x,%02x,%02x\n"
56 __FUNCTION__,
57 buf[0], buf[1], buf[2], buf[3]);
58
59 time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
60
61 rtc_time_to_tm(time, tm);
62
63 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
64 "mday=%d, mon=%d, year=%d, wday=%d\n",
65 __FUNCTION__,
66 tm->tm_sec, tm->tm_min, tm->tm_hour,
67 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
68
69 return 0;
70}
71
72static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
73{
74 int xfer;
75 unsigned char buf[5];
76
77 buf[0] = DS1672_REG_CNT_BASE;
78 buf[1] = secs & 0x000000FF;
79 buf[2] = (secs & 0x0000FF00) >> 8;
80 buf[3] = (secs & 0x00FF0000) >> 16;
81 buf[4] = (secs & 0xFF000000) >> 24;
82
83 xfer = i2c_master_send(client, buf, 5);
84 if (xfer != 5) {
85 dev_err(&client->dev, "%s: send: %d\n", __FUNCTION__, xfer);
86 return -EIO;
87 }
88
89 return 0;
90}
91
92static int ds1672_set_datetime(struct i2c_client *client, struct rtc_time *tm)
93{
94 unsigned long secs;
95
96 dev_dbg(&client->dev,
97 "%s: secs=%d, mins=%d, hours=%d, ",
98 "mday=%d, mon=%d, year=%d, wday=%d\n",
99 __FUNCTION__,
100 tm->tm_sec, tm->tm_min, tm->tm_hour,
101 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
102
103 rtc_tm_to_time(tm, &secs);
104
105 return ds1672_set_mmss(client, secs);
106}
107
/* rtc_class_ops adapters: translate the generic struct device back to
 * the underlying i2c client and delegate to the chip routines above.
 */
static int ds1672_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return ds1672_get_datetime(to_i2c_client(dev), tm);
}

static int ds1672_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return ds1672_set_datetime(to_i2c_client(dev), tm);
}

static int ds1672_rtc_set_mmss(struct device *dev, unsigned long secs)
{
	return ds1672_set_mmss(to_i2c_client(dev), secs);
}
122
/* only time get/set are provided; the DS1672 code implements no
 * alarm or interrupt hooks */
static struct rtc_class_ops ds1672_rtc_ops = {
	.read_time = ds1672_rtc_read_time,
	.set_time = ds1672_rtc_set_time,
	.set_mmss = ds1672_rtc_set_mmss,
};
128
/* i2c core callback: probe @adapter for the chip.  normal_i2c is
 * empty (the chip cannot be detected), so only forced addresses in
 * addr_data will ever match.
 */
static int ds1672_attach(struct i2c_adapter *adapter)
{
	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
	return i2c_probe(adapter, &addr_data, ds1672_probe);
}
134
135static int ds1672_detach(struct i2c_client *client)
136{
137 int err;
138 struct rtc_device *rtc = i2c_get_clientdata(client);
139
140 dev_dbg(&client->dev, "%s\n", __FUNCTION__);
141
142 if (rtc)
143 rtc_device_unregister(rtc);
144
145 if ((err = i2c_detach_client(client)))
146 return err;
147
148 kfree(client);
149
150 return 0;
151}
152
/* legacy-style i2c driver binding (attach/detach model) */
static struct i2c_driver ds1672_driver = {
	.driver		= {
		.name	= "ds1672",
	},
	.id		= I2C_DRIVERID_DS1672,
	.attach_adapter = &ds1672_attach,
	.detach_client	= &ds1672_detach,
};
161
/* i2c_probe() callback: allocate an i2c client for the chip at
 * @address on @adapter, attach it to the i2c layer and register the
 * corresponding RTC class device.  Failures unwind through the goto
 * chain in reverse order of construction.
 */
static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind)
{
	int err = 0;
	struct i2c_client *client;
	struct rtc_device *rtc;

	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);

	/* this driver needs plain byte-level master transfers */
	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
		err = -ENODEV;
		goto exit;
	}

	if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit;
	}

	/* I2C client */
	client->addr = address;
	client->driver = &ds1672_driver;
	client->adapter	= adapter;

	strlcpy(client->name, ds1672_driver.driver.name, I2C_NAME_SIZE);

	/* Inform the i2c layer */
	if ((err = i2c_attach_client(client)))
		goto exit_kfree;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = rtc_device_register(ds1672_driver.driver.name, &client->dev,
				&ds1672_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc)) {
		err = PTR_ERR(rtc);
		dev_err(&client->dev,
			"unable to register the class device\n");
		goto exit_detach;
	}

	/* the detach path retrieves the rtc device from clientdata */
	i2c_set_clientdata(client, rtc);

	return 0;

exit_detach:
	i2c_detach_client(client);

exit_kfree:
	kfree(client);

exit:
	return err;
}
216
/* module plumbing: all the work happens in the probe/detach hooks */
static int __init ds1672_init(void)
{
	return i2c_add_driver(&ds1672_driver);
}

static void __exit ds1672_exit(void)
{
	i2c_del_driver(&ds1672_driver);
}

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Dallas/Maxim DS1672 timekeeper driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(ds1672_init);
module_exit(ds1672_exit);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
new file mode 100644
index 000000000000..0dd80ea686a9
--- /dev/null
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -0,0 +1,162 @@
1/*
2 * A driver for the RTC embedded in the Cirrus Logic EP93XX processors
3 * Copyright (c) 2006 Tower Technologies
4 *
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/rtc.h>
14#include <linux/platform_device.h>
15#include <asm/hardware.h>
16
17#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x))
18#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000)
19#define EP93XX_RTC_LOAD EP93XX_RTC_REG(0x000C)
20#define EP93XX_RTC_SWCOMP EP93XX_RTC_REG(0x0108)
21
22#define DRV_VERSION "0.2"
23
24static int ep93xx_get_swcomp(struct device *dev, unsigned short *preload,
25 unsigned short *delete)
26{
27 unsigned short comp = __raw_readl(EP93XX_RTC_SWCOMP);
28
29 if (preload)
30 *preload = comp & 0xffff;
31
32 if (delete)
33 *delete = (comp >> 16) & 0x1f;
34
35 return 0;
36}
37
/* Read the free-running seconds counter and convert it to
 * broken-down time.  Cannot fail.
 */
static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long time = __raw_readl(EP93XX_RTC_DATA);

	rtc_time_to_tm(time, tm);
	return 0;
}
45
/* Program the counter from seconds-since-epoch.
 * NOTE(review): secs + 1 is written to the load register --
 * presumably compensating for the load taking effect one tick
 * later; confirm against the EP93xx RTC documentation.
 */
static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
{
	__raw_writel(secs + 1, EP93XX_RTC_LOAD);
	return 0;
}
51
/* Set the clock from broken-down time: convert to seconds and hand
 * off to ep93xx_rtc_set_mmss().  Conversion errors are returned
 * unchanged.
 */
static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long secs;
	int err = rtc_tm_to_time(tm, &secs);

	return err ? err : ep93xx_rtc_set_mmss(dev, secs);
}
63
/* /proc entry: report 24h mode and the software-compensation
 * preload/delete values.  Output bytes are identical to the original
 * three separate seq_printf() calls.
 */
static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq)
{
	unsigned short preload, delete;

	ep93xx_get_swcomp(dev, &preload, &delete);

	seq_printf(seq, "24hr\t\t: yes\npreload\t\t: %d\ndelete\t\t: %d\n",
			preload, delete);

	return 0;
}
76
/* time get/set plus a /proc hook; no alarm or interrupt support
 * implemented here */
static struct rtc_class_ops ep93xx_rtc_ops = {
	.read_time	= ep93xx_rtc_read_time,
	.set_time	= ep93xx_rtc_set_time,
	.set_mmss	= ep93xx_rtc_set_mmss,
	.proc		= ep93xx_rtc_proc,
};
83
/* sysfs: read-only view of the software-compensation preload field */
static ssize_t ep93xx_sysfs_show_comp_preload(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned short preload;

	ep93xx_get_swcomp(dev, &preload, NULL);

	return sprintf(buf, "%d\n", preload);
}
static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_sysfs_show_comp_preload, NULL);

/* sysfs: read-only view of the software-compensation delete field */
static ssize_t ep93xx_sysfs_show_comp_delete(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned short delete;

	ep93xx_get_swcomp(dev, NULL, &delete);

	return sprintf(buf, "%d\n", delete);
}
static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_sysfs_show_comp_delete, NULL);
105
106
/* Platform probe: register the RTC class device and create the two
 * compensation sysfs attributes.
 * NOTE(review): the device_create_file() return values are ignored,
 * so the attributes may silently be missing if creation fails.
 */
static int __devinit ep93xx_rtc_probe(struct platform_device *dev)
{
	struct rtc_device *rtc = rtc_device_register("ep93xx",
				&dev->dev, &ep93xx_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc)) {
		dev_err(&dev->dev, "unable to register\n");
		return PTR_ERR(rtc);
	}

	platform_set_drvdata(dev, rtc);

	device_create_file(&dev->dev, &dev_attr_comp_preload);
	device_create_file(&dev->dev, &dev_attr_comp_delete);

	return 0;
}
124
/* Platform remove: unregister the RTC class device and clear the
 * driver data pointer.
 */
static int __devexit ep93xx_rtc_remove(struct platform_device *dev)
{
	struct rtc_device *rtc = platform_get_drvdata(dev);

	/* drvdata may be unset if probe never completed */
	if (rtc)
		rtc_device_unregister(rtc);

	platform_set_drvdata(dev, NULL);

	return 0;
}
136
/* binds to platform devices named "ep93xx-rtc" */
static struct platform_driver ep93xx_rtc_platform_driver = {
	.driver		= {
		.name	= "ep93xx-rtc",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_rtc_probe,
	.remove		= __devexit_p(ep93xx_rtc_remove),
};
145
/* module plumbing: all the work happens in the probe/remove hooks */
static int __init ep93xx_rtc_init(void)
{
	return platform_driver_register(&ep93xx_rtc_platform_driver);
}

static void __exit ep93xx_rtc_exit(void)
{
	platform_driver_unregister(&ep93xx_rtc_platform_driver);
}

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(ep93xx_rtc_init);
module_exit(ep93xx_rtc_exit);
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
new file mode 100644
index 000000000000..cfedc1d28ee1
--- /dev/null
+++ b/drivers/rtc/rtc-lib.c
@@ -0,0 +1,101 @@
1/*
2 * rtc and date/time utility functions
3 *
4 * Copyright (C) 2005-06 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * based on arch/arm/common/rtctime.c and other bits
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/module.h>
15#include <linux/rtc.h>
16
/* days per month in a non-leap year; February is corrected by
 * rtc_month_days() for leap years */
static const unsigned char rtc_days_in_month[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400)
/* FIX: macro argument fully parenthesized so an expression argument
 * such as LEAP_YEAR(y + 1) expands with the intended precedence */
#define LEAP_YEAR(year) ((!((year) % 4) && ((year) % 100)) || !((year) % 400))
23
/* Number of days in @month (0-11) of @year, where @year is the full
 * calendar year (e.g. 2006); adds one day for February of a leap
 * year.
 */
int rtc_month_days(unsigned int month, unsigned int year)
{
	return rtc_days_in_month[month] + (LEAP_YEAR(year) && month == 1);
}
EXPORT_SYMBOL(rtc_month_days);
29
/*
 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
 */
void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
{
	register int days, month, year;

	/* split into whole days and the remaining seconds of the day */
	days = time / 86400;
	time -= days * 86400;

	/* day of the week, 1970-01-01 was a Thursday */
	tm->tm_wday = (days + 4) % 7;

	/* first guess the year assuming 365-day years, then correct by
	 * the leap days actually contained in the interval; the guess
	 * can overshoot by at most one year, handled below */
	year = 1970 + days / 365;
	days -= (year - 1970) * 365
		+ LEAPS_THRU_END_OF(year - 1)
		- LEAPS_THRU_END_OF(1970 - 1);
	if (days < 0) {
		year -= 1;
		days += 365 + LEAP_YEAR(year);
	}
	tm->tm_year = year - 1900;
	/* NOTE(review): tm_yday is set 1-based here, unlike the 0-based
	 * struct tm convention -- confirm this is intended */
	tm->tm_yday = days + 1;

	/* walk the months, subtracting each month's length in turn */
	for (month = 0; month < 11; month++) {
		int newdays;

		newdays = days - rtc_month_days(month, year);
		if (newdays < 0)
			break;
		days = newdays;
	}
	tm->tm_mon = month;
	tm->tm_mday = days + 1;

	/* remaining seconds of the day -> hours, minutes, seconds */
	tm->tm_hour = time / 3600;
	time -= tm->tm_hour * 3600;
	tm->tm_min = time / 60;
	tm->tm_sec = time - tm->tm_min * 60;
}
EXPORT_SYMBOL(rtc_time_to_tm);
71
72/*
73 * Does the rtc_time represent a valid date/time?
74 */
75int rtc_valid_tm(struct rtc_time *tm)
76{
77 if (tm->tm_year < 70
78 || tm->tm_mon >= 12
79 || tm->tm_mday < 1
80 || tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year + 1900)
81 || tm->tm_hour >= 24
82 || tm->tm_min >= 60
83 || tm->tm_sec >= 60)
84 return -EINVAL;
85
86 return 0;
87}
88EXPORT_SYMBOL(rtc_valid_tm);
89
/*
 * Convert Gregorian date to seconds since 01-01-1970 00:00:00.
 */
int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
{
	/* mktime() takes a 1-based month and the full year; it cannot
	 * fail, hence the unconditional 0 return */
	*time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
			tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}
EXPORT_SYMBOL(rtc_tm_to_time);
100
101MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
new file mode 100644
index 000000000000..db445c872b1b
--- /dev/null
+++ b/drivers/rtc/rtc-m48t86.c
@@ -0,0 +1,209 @@
1/*
2 * ST M48T86 / Dallas DS12887 RTC driver
3 * Copyright (c) 2006 Tower Technologies
4 *
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This drivers only supports the clock running in BCD and 24H mode.
12 * If it will be ever adapted to binary and 12H mode, care must be taken
13 * to not introduce bugs.
14 */
15
16#include <linux/module.h>
17#include <linux/rtc.h>
18#include <linux/platform_device.h>
19#include <linux/m48t86.h>
20#include <linux/bcd.h>
21
22#define M48T86_REG_SEC 0x00
23#define M48T86_REG_SECALRM 0x01
24#define M48T86_REG_MIN 0x02
25#define M48T86_REG_MINALRM 0x03
26#define M48T86_REG_HOUR 0x04
27#define M48T86_REG_HOURALRM 0x05
28#define M48T86_REG_DOW 0x06 /* 1 = sunday */
29#define M48T86_REG_DOM 0x07
30#define M48T86_REG_MONTH 0x08 /* 1 - 12 */
31#define M48T86_REG_YEAR 0x09 /* 0 - 99 */
32#define M48T86_REG_A 0x0A
33#define M48T86_REG_B 0x0B
34#define M48T86_REG_C 0x0C
35#define M48T86_REG_D 0x0D
36
37#define M48T86_REG_B_H24 (1 << 1)
38#define M48T86_REG_B_DM (1 << 2)
39#define M48T86_REG_B_SET (1 << 7)
40#define M48T86_REG_D_VRT (1 << 7)
41
42#define DRV_VERSION "0.1"
43
44
/* Read the current time.  Handles both binary and BCD data modes
 * (selected by the DM bit in register B); a 12h clock is corrected
 * to 24h afterwards.
 */
static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char reg;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t86_ops *ops = pdev->dev.platform_data;

	reg = ops->readb(M48T86_REG_B);

	if (reg & M48T86_REG_B_DM) {
		/* data (binary) mode */
		tm->tm_sec	= ops->readb(M48T86_REG_SEC);
		tm->tm_min	= ops->readb(M48T86_REG_MIN);
		tm->tm_hour	= ops->readb(M48T86_REG_HOUR) & 0x3F;
		tm->tm_mday	= ops->readb(M48T86_REG_DOM);
		/* tm_mon is 0-11 */
		tm->tm_mon	= ops->readb(M48T86_REG_MONTH) - 1;
		/* two-digit year taken as 20xx (years since 1900 + 100) */
		tm->tm_year	= ops->readb(M48T86_REG_YEAR) + 100;
		tm->tm_wday	= ops->readb(M48T86_REG_DOW);
	} else {
		/* bcd mode */
		tm->tm_sec	= BCD2BIN(ops->readb(M48T86_REG_SEC));
		tm->tm_min	= BCD2BIN(ops->readb(M48T86_REG_MIN));
		tm->tm_hour	= BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F);
		tm->tm_mday	= BCD2BIN(ops->readb(M48T86_REG_DOM));
		/* tm_mon is 0-11 */
		tm->tm_mon	= BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1;
		tm->tm_year	= BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100;
		tm->tm_wday	= BCD2BIN(ops->readb(M48T86_REG_DOW));
	}

	/* correct the hour if the clock is in 12h mode; bit 7 of the
	 * hour register is the PM flag.
	 * NOTE(review): no special-casing of 12 AM / 12 PM here --
	 * confirm that a plain "+12" matches the chip's encoding. */
	if (!(reg & M48T86_REG_B_H24))
		if (ops->readb(M48T86_REG_HOUR) & 0x80)
			tm->tm_hour += 12;

	return 0;
}
82
/* Set the time.  The SET bit in register B freezes chip updates
 * while the time registers are rewritten; 24h mode is forced at the
 * same time.  Registers are written in whichever data mode (binary
 * or BCD) the chip is currently using.
 */
static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char reg;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t86_ops *ops = pdev->dev.platform_data;

	reg = ops->readb(M48T86_REG_B);

	/* update flag and 24h mode */
	reg |= M48T86_REG_B_SET | M48T86_REG_B_H24;
	ops->writeb(reg, M48T86_REG_B);

	if (reg & M48T86_REG_B_DM) {
		/* data (binary) mode */
		ops->writeb(tm->tm_sec, M48T86_REG_SEC);
		ops->writeb(tm->tm_min, M48T86_REG_MIN);
		ops->writeb(tm->tm_hour, M48T86_REG_HOUR);
		ops->writeb(tm->tm_mday, M48T86_REG_DOM);
		ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH);
		ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR);
		ops->writeb(tm->tm_wday, M48T86_REG_DOW);
	} else {
		/* bcd mode */
		ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
		ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
		ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
		ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
		ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
		ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
		ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
	}

	/* update ended */
	reg &= ~M48T86_REG_B_SET;
	ops->writeb(reg, M48T86_REG_B);

	return 0;
}
121
122static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
123{
124 unsigned char reg;
125 struct platform_device *pdev = to_platform_device(dev);
126 struct m48t86_ops *ops = pdev->dev.platform_data;
127
128 reg = ops->readb(M48T86_REG_B);
129
130 seq_printf(seq, "24hr\t\t: %s\n",
131 (reg & M48T86_REG_B_H24) ? "yes" : "no");
132
133 seq_printf(seq, "mode\t\t: %s\n",
134 (reg & M48T86_REG_B_DM) ? "binary" : "bcd");
135
136 reg = ops->readb(M48T86_REG_D);
137
138 seq_printf(seq, "battery\t\t: %s\n",
139 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
140
141 return 0;
142}
143
/* time get/set plus a /proc hook; no alarm support implemented */
static struct rtc_class_ops m48t86_rtc_ops = {
	.read_time	= m48t86_rtc_read_time,
	.set_time	= m48t86_rtc_set_time,
	.proc		= m48t86_rtc_proc,
};
149
/* Platform probe: register the RTC class device and log whether the
 * backup battery is still good (VRT bit in register D).
 */
static int __devinit m48t86_rtc_probe(struct platform_device *dev)
{
	unsigned char reg;
	struct m48t86_ops *ops = dev->dev.platform_data;
	struct rtc_device *rtc = rtc_device_register("m48t86",
				&dev->dev, &m48t86_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc)) {
		dev_err(&dev->dev, "unable to register\n");
		return PTR_ERR(rtc);
	}

	platform_set_drvdata(dev, rtc);

	/* read battery status */
	reg = ops->readb(M48T86_REG_D);
	dev_info(&dev->dev, "battery %s\n",
		(reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");

	return 0;
}
171
/* Platform remove: unregister the RTC class device and clear the
 * driver data pointer.
 */
static int __devexit m48t86_rtc_remove(struct platform_device *dev)
{
	struct rtc_device *rtc = platform_get_drvdata(dev);

	/* drvdata may be unset if probe never completed */
	if (rtc)
		rtc_device_unregister(rtc);

	platform_set_drvdata(dev, NULL);

	return 0;
}
183
/* binds to platform devices named "rtc-m48t86" */
static struct platform_driver m48t86_rtc_platform_driver = {
	.driver		= {
		.name	= "rtc-m48t86",
		.owner	= THIS_MODULE,
	},
	.probe		= m48t86_rtc_probe,
	.remove		= __devexit_p(m48t86_rtc_remove),
};
192
/* module plumbing: all the work happens in the probe/remove hooks */
static int __init m48t86_rtc_init(void)
{
	return platform_driver_register(&m48t86_rtc_platform_driver);
}

static void __exit m48t86_rtc_exit(void)
{
	platform_driver_unregister(&m48t86_rtc_platform_driver);
}

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(m48t86_rtc_init);
module_exit(m48t86_rtc_exit);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
new file mode 100644
index 000000000000..d857d45bdbe8
--- /dev/null
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -0,0 +1,353 @@
1/*
2 * An I2C driver for the Philips PCF8563 RTC
3 * Copyright 2005-06 Tower Technologies
4 *
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 * Maintainers: http://www.nslu2-linux.org/
7 *
8 * based on the other drivers in this same directory.
9 *
10 * http://www.semiconductors.philips.com/acrobat/datasheets/PCF8563-04.pdf
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/i2c.h>
18#include <linux/bcd.h>
19#include <linux/rtc.h>
20
#define DRV_VERSION "0.4.2"

/* Addresses to scan: none
 * This chip cannot be reliably autodetected. An empty eeprom
 * located at 0x51 will pass the validation routine due to
 * the way the registers are implemented.
 */
static unsigned short normal_i2c[] = { I2C_CLIENT_END };

/* Module parameters */
I2C_CLIENT_INSMOD;

/* PCF8563 register map (see the Philips datasheet). */
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01

#define PCF8563_REG_SC 0x02 /* datetime */
#define PCF8563_REG_MN 0x03
#define PCF8563_REG_HR 0x04
#define PCF8563_REG_DM 0x05
#define PCF8563_REG_DW 0x06
#define PCF8563_REG_MO 0x07
#define PCF8563_REG_YR 0x08

#define PCF8563_REG_AMN 0x09 /* alarm */
#define PCF8563_REG_AHR 0x0A
#define PCF8563_REG_ADM 0x0B
#define PCF8563_REG_ADW 0x0C

#define PCF8563_REG_CLKO 0x0D /* clock out */
#define PCF8563_REG_TMRC 0x0E /* timer control */
#define PCF8563_REG_TMR 0x0F /* timer */

#define PCF8563_SC_LV 0x80 /* low voltage */
#define PCF8563_MO_C 0x80 /* century */

/* Forward declarations: needed by pcf8563_attach/pcf8563_driver below. */
static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind);
static int pcf8563_detach(struct i2c_client *client);
58
59/*
60 * In the routines that deal directly with the pcf8563 hardware, we use
61 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
62 */
/*
 * In the routines that deal directly with the pcf8563 hardware, we use
 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
 */
/*
 * Read status + date registers (0x00-0x0C) in one combined transfer
 * and convert the BCD fields into *tm. Returns 0 or -EIO.
 */
static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
	/* buf[0] seeds the register pointer; the read then refills buf. */
	unsigned char buf[13] = { PCF8563_REG_ST1 };

	struct i2c_msg msgs[] = {
		{ client->addr, 0, 1, buf },	/* setup read ptr */
		{ client->addr, I2C_M_RD, 13, buf },	/* read status + date */
	};

	/* read registers */
	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
		return -EIO;
	}

	/* VL bit set means the oscillator stopped (e.g. battery drained). */
	if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
		dev_info(&client->dev,
			"low voltage detected, date/time is not reliable.\n");

	dev_dbg(&client->dev,
		"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
		"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
		__FUNCTION__,
		buf[0], buf[1], buf[2], buf[3],
		buf[4], buf[5], buf[6], buf[7],
		buf[8]);


	/* Masks strip the chip's status/unused bits before BCD decode. */
	tm->tm_sec = BCD2BIN(buf[PCF8563_REG_SC] & 0x7F);
	tm->tm_min = BCD2BIN(buf[PCF8563_REG_MN] & 0x7F);
	tm->tm_hour = BCD2BIN(buf[PCF8563_REG_HR] & 0x3F); /* rtc hr 0-23 */
	tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F);
	tm->tm_wday = buf[PCF8563_REG_DW] & 0x07;
	tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
	/* Century flag selects 19xx vs 20xx. */
	tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR])
		+ (buf[PCF8563_REG_MO] & PCF8563_MO_C ? 100 : 0);

	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__FUNCTION__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	/* the clock can give out invalid datetime, but we cannot return
	 * -EINVAL otherwise hwclock will refuse to set the time on bootup.
	 */
	if (rtc_valid_tm(tm) < 0)
		dev_err(&client->dev, "retrieved date/time is not valid.\n");

	return 0;
}
114
115static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
116{
117 int i, err;
118 unsigned char buf[9];
119
120 dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
121 "mday=%d, mon=%d, year=%d, wday=%d\n",
122 __FUNCTION__,
123 tm->tm_sec, tm->tm_min, tm->tm_hour,
124 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
125
126 /* hours, minutes and seconds */
127 buf[PCF8563_REG_SC] = BIN2BCD(tm->tm_sec);
128 buf[PCF8563_REG_MN] = BIN2BCD(tm->tm_min);
129 buf[PCF8563_REG_HR] = BIN2BCD(tm->tm_hour);
130
131 buf[PCF8563_REG_DM] = BIN2BCD(tm->tm_mday);
132
133 /* month, 1 - 12 */
134 buf[PCF8563_REG_MO] = BIN2BCD(tm->tm_mon + 1);
135
136 /* year and century */
137 buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100);
138 if (tm->tm_year / 100)
139 buf[PCF8563_REG_MO] |= PCF8563_MO_C;
140
141 buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
142
143 /* write register's data */
144 for (i = 0; i < 7; i++) {
145 unsigned char data[2] = { PCF8563_REG_SC + i,
146 buf[PCF8563_REG_SC + i] };
147
148 err = i2c_master_send(client, data, sizeof(data));
149 if (err != sizeof(data)) {
150 dev_err(&client->dev,
151 "%s: err=%d addr=%02x, data=%02x\n",
152 __FUNCTION__, err, data[0], data[1]);
153 return -EIO;
154 }
155 };
156
157 return 0;
158}
159
/* One plausibility check for pcf8563_validate_client(): the value at
 * 'reg', masked with 'mask' and BCD-decoded, must lie in [min, max]. */
struct pcf8563_limit
{
	unsigned char reg;
	unsigned char mask;
	unsigned char min;
	unsigned char max;
};
167
/*
 * Heuristic chip detection: read each BCD datetime register and check
 * the decoded value against its legal range. Returns 0 if all pass,
 * -EIO on a failed transfer, -ENODEV if any value is out of range
 * (probably not a PCF8563 at this address).
 */
static int pcf8563_validate_client(struct i2c_client *client)
{
	int i;

	static const struct pcf8563_limit pattern[] = {
		/* register, mask, min, max */
		{ PCF8563_REG_SC, 0x7F, 0, 59 },
		{ PCF8563_REG_MN, 0x7F, 0, 59 },
		{ PCF8563_REG_HR, 0x3F, 0, 23 },
		{ PCF8563_REG_DM, 0x3F, 0, 31 },
		{ PCF8563_REG_MO, 0x1F, 0, 12 },
	};

	/* check limits (only registers with bcd values) */
	for (i = 0; i < ARRAY_SIZE(pattern); i++) {
		int xfer;
		unsigned char value;
		/* buf doubles as write (reg addr) and read (value) buffer */
		unsigned char buf = pattern[i].reg;

		struct i2c_msg msgs[] = {
			{ client->addr, 0, 1, &buf },
			{ client->addr, I2C_M_RD, 1, &buf },
		};

		xfer = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

		if (xfer != ARRAY_SIZE(msgs)) {
			dev_err(&client->adapter->dev,
				"%s: could not read register 0x%02X\n",
				__FUNCTION__, pattern[i].reg);

			return -EIO;
		}

		value = BCD2BIN(buf & pattern[i].mask);

		if (value > pattern[i].max ||
			value < pattern[i].min) {
			dev_dbg(&client->adapter->dev,
				"%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, "
				"max=%d, value=%d, raw=0x%02X\n",
				__FUNCTION__, i, pattern[i].reg, pattern[i].mask,
				pattern[i].min, pattern[i].max,
				value, buf);

			return -ENODEV;
		}
	}

	return 0;
}
219
/* rtc_class_ops glue: map the class device back to its i2c client. */
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return pcf8563_get_datetime(to_i2c_client(dev), tm);
}

static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return pcf8563_set_datetime(to_i2c_client(dev), tm);
}

/* /proc hook: the chip is driven in 24-hour mode only. */
static int pcf8563_rtc_proc(struct device *dev, struct seq_file *seq)
{
	seq_printf(seq, "24hr\t\t: yes\n");
	return 0;
}
235
/* RTC class operations exposed to the RTC core. */
static struct rtc_class_ops pcf8563_rtc_ops = {
	.proc = pcf8563_rtc_proc,
	.read_time = pcf8563_rtc_read_time,
	.set_time = pcf8563_rtc_set_time,
};
241
/* Called for each new adapter; probes the (empty) address list. */
static int pcf8563_attach(struct i2c_adapter *adapter)
{
	return i2c_probe(adapter, &addr_data, pcf8563_probe);
}

/* Legacy-style i2c driver using attach/detach callbacks. */
static struct i2c_driver pcf8563_driver = {
	.driver = {
		.name = "pcf8563",
	},
	.id = I2C_DRIVERID_PCF8563,
	.attach_adapter = &pcf8563_attach,
	.detach_client = &pcf8563_detach,
};
255
/*
 * Per-address probe: allocate an i2c client, optionally validate the
 * chip (only when 'kind' says autodetect), attach it, and register the
 * RTC class device. Unwinds via gotos on failure.
 */
static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
{
	struct i2c_client *client;
	struct rtc_device *rtc;

	int err = 0;

	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);

	/* plain i2c transfers are required by get/set_datetime */
	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
		err = -ENODEV;
		goto exit;
	}

	if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit;
	}

	client->addr = address;
	client->driver = &pcf8563_driver;
	client->adapter = adapter;

	strlcpy(client->name, pcf8563_driver.driver.name, I2C_NAME_SIZE);

	/* Verify the chip is really an PCF8563 */
	if (kind < 0) {
		if (pcf8563_validate_client(client) < 0) {
			err = -ENODEV;
			goto exit_kfree;
		}
	}

	/* Inform the i2c layer */
	if ((err = i2c_attach_client(client)))
		goto exit_kfree;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = rtc_device_register(pcf8563_driver.driver.name, &client->dev,
				&pcf8563_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc)) {
		err = PTR_ERR(rtc);
		dev_err(&client->dev,
			"unable to register the class device\n");
		goto exit_detach;
	}

	/* detach() relies on this to find the rtc device again */
	i2c_set_clientdata(client, rtc);

	return 0;

exit_detach:
	i2c_detach_client(client);

exit_kfree:
	kfree(client);

exit:
	return err;
}
318
/*
 * Tear down in reverse of probe: unregister the RTC class device,
 * detach the client from the i2c core, then free it.
 */
static int pcf8563_detach(struct i2c_client *client)
{
	int err;
	struct rtc_device *rtc = i2c_get_clientdata(client);

	dev_dbg(&client->dev, "%s\n", __FUNCTION__);

	if (rtc)
		rtc_device_unregister(rtc);

	/* keep the client alive if the i2c core refuses the detach */
	if ((err = i2c_detach_client(client)))
		return err;

	kfree(client);

	return 0;
}
336
/* Module entry/exit: register the i2c driver with the core. */
static int __init pcf8563_init(void)
{
	return i2c_add_driver(&pcf8563_driver);
}

static void __exit pcf8563_exit(void)
{
	i2c_del_driver(&pcf8563_driver);
}

/* Module metadata. */
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Philips PCF8563/Epson RTC8564 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(pcf8563_init);
module_exit(pcf8563_exit);
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
new file mode 100644
index 000000000000..90b8a97a0919
--- /dev/null
+++ b/drivers/rtc/rtc-proc.c
@@ -0,0 +1,162 @@
1/*
2 * RTC subsystem, proc interface
3 *
4 * Copyright (C) 2005-06 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * based on arch/arm/common/rtctime.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/module.h>
15#include <linux/rtc.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18
/* The one RTC currently exported as /proc/driver/rtc (first come,
 * first served); NULL when no device owns the proc entry. */
static struct class_device *rtc_dev = NULL;
/* Serializes claiming/releasing the proc entry in add/remove. */
static DEFINE_MUTEX(rtc_lock);
21
/*
 * seq_file show routine for /proc/driver/rtc: prints current time,
 * alarm settings (fields outside their valid range print as '*'),
 * and then delegates to the driver's own ->proc hook, if any.
 */
static int rtc_proc_show(struct seq_file *seq, void *offset)
{
	int err;
	struct class_device *class_dev = seq->private;
	struct rtc_class_ops *ops = to_rtc_device(class_dev)->ops;
	struct rtc_wkalrm alrm;
	struct rtc_time tm;

	/* read errors are not fatal; the section is simply omitted */
	err = rtc_read_time(class_dev, &tm);
	if (err == 0) {
		seq_printf(seq,
			"rtc_time\t: %02d:%02d:%02d\n"
			"rtc_date\t: %04d-%02d-%02d\n",
			tm.tm_hour, tm.tm_min, tm.tm_sec,
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	}

	err = rtc_read_alarm(class_dev, &alrm);
	if (err == 0) {
		/* unsigned casts make negative "don't care" fields fail
		 * the range check and print as wildcards */
		seq_printf(seq, "alrm_time\t: ");
		if ((unsigned int)alrm.time.tm_hour <= 24)
			seq_printf(seq, "%02d:", alrm.time.tm_hour);
		else
			seq_printf(seq, "**:");
		if ((unsigned int)alrm.time.tm_min <= 59)
			seq_printf(seq, "%02d:", alrm.time.tm_min);
		else
			seq_printf(seq, "**:");
		if ((unsigned int)alrm.time.tm_sec <= 59)
			seq_printf(seq, "%02d\n", alrm.time.tm_sec);
		else
			seq_printf(seq, "**\n");

		seq_printf(seq, "alrm_date\t: ");
		if ((unsigned int)alrm.time.tm_year <= 200)
			seq_printf(seq, "%04d-", alrm.time.tm_year + 1900);
		else
			seq_printf(seq, "****-");
		if ((unsigned int)alrm.time.tm_mon <= 11)
			seq_printf(seq, "%02d-", alrm.time.tm_mon + 1);
		else
			seq_printf(seq, "**-");
		if ((unsigned int)alrm.time.tm_mday <= 31)
			seq_printf(seq, "%02d\n", alrm.time.tm_mday);
		else
			seq_printf(seq, "**\n");
		seq_printf(seq, "alrm_wakeup\t: %s\n",
			alrm.enabled ? "yes" : "no");
		seq_printf(seq, "alrm_pending\t: %s\n",
			alrm.pending ? "yes" : "no");
	}

	/* driver-specific extra lines */
	if (ops->proc)
		ops->proc(class_dev->dev, seq);

	return 0;
}
79
/*
 * open(): pin this module while the file is open, then hand the class
 * device (stashed in the proc entry's ->data) to the seq_file core.
 */
static int rtc_proc_open(struct inode *inode, struct file *file)
{
	struct class_device *class_dev = PDE(inode)->data;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	return single_open(file, rtc_proc_show, class_dev);
}
89
90static int rtc_proc_release(struct inode *inode, struct file *file)
91{
92 int res = single_release(inode, file);
93 module_put(THIS_MODULE);
94 return res;
95}
96
/* file_operations for /proc/driver/rtc (seq_file based, read-only). */
static struct file_operations rtc_proc_fops = {
	.open = rtc_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = rtc_proc_release,
};
103
/*
 * Class-interface add callback: the first RTC to register claims the
 * single /proc/driver/rtc entry; later devices are ignored. Returns 0
 * even if the proc entry could not be created (proc is best-effort).
 */
static int rtc_proc_add_device(struct class_device *class_dev,
					struct class_interface *class_intf)
{
	mutex_lock(&rtc_lock);
	if (rtc_dev == NULL) {
		struct proc_dir_entry *ent;

		rtc_dev = class_dev;

		ent = create_proc_entry("driver/rtc", 0, NULL);
		if (ent) {
			struct rtc_device *rtc = to_rtc_device(class_dev);

			ent->proc_fops = &rtc_proc_fops;
			ent->owner = rtc->owner;
			ent->data = class_dev;

			dev_info(class_dev->dev, "rtc intf: proc\n");
		}
		else
			/* creation failed: release the claim */
			rtc_dev = NULL;
	}
	mutex_unlock(&rtc_lock);

	return 0;
}
130
/* Class-interface remove callback: drop the proc entry only if this
 * device is the one that owns it. */
static void rtc_proc_remove_device(struct class_device *class_dev,
					struct class_interface *class_intf)
{
	mutex_lock(&rtc_lock);
	if (rtc_dev == class_dev) {
		remove_proc_entry("driver/rtc", NULL);
		rtc_dev = NULL;
	}
	mutex_unlock(&rtc_lock);
}
141
/* Hooks invoked by the RTC class as devices come and go. */
static struct class_interface rtc_proc_interface = {
	.add = &rtc_proc_add_device,
	.remove = &rtc_proc_remove_device,
};
146
/* Module entry: hook into the RTC class.
 * NOTE(review): init uses rtc_interface_register() while exit calls
 * class_interface_unregister() directly — presumably the former is a
 * thin wrapper over the latter; confirm in the RTC core. */
static int __init rtc_proc_init(void)
{
	return rtc_interface_register(&rtc_proc_interface);
}

static void __exit rtc_proc_exit(void)
{
	class_interface_unregister(&rtc_proc_interface);
}

module_init(rtc_proc_init);
module_exit(rtc_proc_exit);

MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("RTC class proc interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
new file mode 100644
index 000000000000..396c8681f66c
--- /dev/null
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -0,0 +1,294 @@
1/*
2 * An I2C driver for the Ricoh RS5C372 RTC
3 *
4 * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net>
5 * Copyright (C) 2006 Tower Technologies
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/i2c.h>
13#include <linux/rtc.h>
14#include <linux/bcd.h>
15
#define DRV_VERSION "0.2"

/* Addresses to scan: autodetection disabled (0x32 commented out) */
static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };

/* Insmod parameters */
I2C_CLIENT_INSMOD;

/* RS5C372 register map */
#define RS5C372_REG_SECS 0
#define RS5C372_REG_MINS 1
#define RS5C372_REG_HOURS 2
#define RS5C372_REG_WDAY 3
#define RS5C372_REG_DAY 4
#define RS5C372_REG_MONTH 5
#define RS5C372_REG_YEAR 6
#define RS5C372_REG_TRIM 7

/* trim register: bit 7 selects the crystal, bits 0-6 the trim value */
#define RS5C372_TRIM_XSL 0x80
#define RS5C372_TRIM_MASK 0x7F

#define RS5C372_REG_BASE 0

/* Forward declarations for the i2c_driver below. */
static int rs5c372_attach(struct i2c_adapter *adapter);
static int rs5c372_detach(struct i2c_client *client);
static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind);
41
/* Legacy-style i2c driver using attach/detach callbacks. */
static struct i2c_driver rs5c372_driver = {
	.driver = {
		.name = "rs5c372",
	},
	.attach_adapter = &rs5c372_attach,
	.detach_client = &rs5c372_detach,
};
49
/*
 * Read the seven datetime registers in one combined transfer and
 * BCD-decode them into *tm. Returns 0 or -EIO.
 */
static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
	unsigned char buf[7] = { RS5C372_REG_BASE };

	/* this implements the 1st reading method, according
	 * to the datasheet. buf[0] is initialized with
	 * address ptr and transmission format register.
	 */
	struct i2c_msg msgs[] = {
		{ client->addr, 0, 1, buf },
		{ client->addr, I2C_M_RD, 7, buf },
	};

	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
		return -EIO;
	}

	tm->tm_sec = BCD2BIN(buf[RS5C372_REG_SECS] & 0x7f);
	tm->tm_min = BCD2BIN(buf[RS5C372_REG_MINS] & 0x7f);
	tm->tm_hour = BCD2BIN(buf[RS5C372_REG_HOURS] & 0x3f);
	tm->tm_wday = BCD2BIN(buf[RS5C372_REG_WDAY] & 0x07);
	tm->tm_mday = BCD2BIN(buf[RS5C372_REG_DAY] & 0x3f);

	/* tm->tm_mon is zero-based */
	tm->tm_mon = BCD2BIN(buf[RS5C372_REG_MONTH] & 0x1f) - 1;

	/* year is 1900 + tm->tm_year; chip stores only 20xx years */
	tm->tm_year = BCD2BIN(buf[RS5C372_REG_YEAR]) + 100;

	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__FUNCTION__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	return 0;
}
88
89static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
90{
91 unsigned char buf[8] = { RS5C372_REG_BASE };
92
93 dev_dbg(&client->dev,
94 "%s: secs=%d, mins=%d, hours=%d ",
95 "mday=%d, mon=%d, year=%d, wday=%d\n",
96 __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour,
97 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
98
99 buf[1] = BIN2BCD(tm->tm_sec);
100 buf[2] = BIN2BCD(tm->tm_min);
101 buf[3] = BIN2BCD(tm->tm_hour);
102 buf[4] = BIN2BCD(tm->tm_wday);
103 buf[5] = BIN2BCD(tm->tm_mday);
104 buf[6] = BIN2BCD(tm->tm_mon + 1);
105 buf[7] = BIN2BCD(tm->tm_year - 100);
106
107 if ((i2c_master_send(client, buf, 8)) != 8) {
108 dev_err(&client->dev, "%s: write error\n", __FUNCTION__);
109 return -EIO;
110 }
111
112 return 0;
113}
114
115static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
116{
117 unsigned char buf = RS5C372_REG_TRIM;
118
119 struct i2c_msg msgs[] = {
120 { client->addr, 0, 1, &buf },
121 { client->addr, I2C_M_RD, 1, &buf },
122 };
123
124 if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
125 dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
126 return -EIO;
127 }
128
129 dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, trim);
130
131 if (osc)
132 *osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768;
133
134 if (trim)
135 *trim = buf & RS5C372_TRIM_MASK;
136
137 return 0;
138}
139
/* rtc_class_ops glue: map the class device back to its i2c client. */
static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return rs5c372_get_datetime(to_i2c_client(dev), tm);
}

static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return rs5c372_set_datetime(to_i2c_client(dev), tm);
}
149
/* /proc hook: report 24h mode plus crystal frequency and trim.
 * NOTE(review): the frequency line carries no "osc" label, unlike the
 * other lines — possibly an oversight; confirm desired output format. */
static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
{
	int err, osc, trim;

	seq_printf(seq, "24hr\t\t: yes\n");

	/* best-effort: skip the extra lines if the trim read fails */
	if ((err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim)) == 0) {
		seq_printf(seq, "%d.%03d KHz\n", osc / 1000, osc % 1000);
		seq_printf(seq, "trim\t: %d\n", trim);
	}

	return 0;
}
163
/* RTC class operations exposed to the RTC core. */
static struct rtc_class_ops rs5c372_rtc_ops = {
	.proc = rs5c372_rtc_proc,
	.read_time = rs5c372_rtc_read_time,
	.set_time = rs5c372_rtc_set_time,
};
169
/* sysfs 'trim' attribute: raw trim value in hex, or empty on error. */
static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int trim;

	if (rs5c372_get_trim(to_i2c_client(dev), NULL, &trim) == 0)
		return sprintf(buf, "0x%2x\n", trim);

	return 0;
}
static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL);
181
/* sysfs 'osc' attribute: crystal frequency in kHz, or empty on error. */
static ssize_t rs5c372_sysfs_show_osc(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int osc;

	if (rs5c372_get_trim(to_i2c_client(dev), &osc, NULL) == 0)
		return sprintf(buf, "%d.%03d KHz\n", osc / 1000, osc % 1000);

	return 0;
}
static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL);
193
/* Called for each new adapter; probes the configured address list. */
static int rs5c372_attach(struct i2c_adapter *adapter)
{
	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
	return i2c_probe(adapter, &addr_data, rs5c372_probe);
}
199
/*
 * Per-address probe: allocate an i2c client, attach it, register the
 * RTC class device and create the sysfs trim/osc attributes. Unwinds
 * via gotos on failure.
 */
static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
{
	int err = 0;
	struct i2c_client *client;
	struct rtc_device *rtc;

	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);

	/* plain i2c transfers are required by get/set_datetime */
	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
		err = -ENODEV;
		goto exit;
	}

	if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit;
	}

	/* I2C client */
	client->addr = address;
	client->driver = &rs5c372_driver;
	client->adapter = adapter;

	strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);

	/* Inform the i2c layer */
	if ((err = i2c_attach_client(client)))
		goto exit_kfree;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev,
				&rs5c372_rtc_ops, THIS_MODULE);

	if (IS_ERR(rtc)) {
		err = PTR_ERR(rtc);
		dev_err(&client->dev,
			"unable to register the class device\n");
		goto exit_detach;
	}

	/* detach() relies on this to find the rtc device again */
	i2c_set_clientdata(client, rtc);

	/* NOTE(review): return values ignored — attribute creation is
	 * treated as best-effort; confirm that is intended. */
	device_create_file(&client->dev, &dev_attr_trim);
	device_create_file(&client->dev, &dev_attr_osc);

	return 0;

exit_detach:
	i2c_detach_client(client);

exit_kfree:
	kfree(client);

exit:
	return err;
}
257
/*
 * Tear down in reverse of probe: unregister the RTC class device,
 * detach the client from the i2c core, then free it.
 */
static int rs5c372_detach(struct i2c_client *client)
{
	int err;
	struct rtc_device *rtc = i2c_get_clientdata(client);

	dev_dbg(&client->dev, "%s\n", __FUNCTION__);

	if (rtc)
		rtc_device_unregister(rtc);

	/* keep the client alive if the i2c core refuses the detach */
	if ((err = i2c_detach_client(client)))
		return err;

	kfree(client);

	return 0;
}
275
/* Module entry/exit: register the i2c driver with the core. */
static __init int rs5c372_init(void)
{
	return i2c_add_driver(&rs5c372_driver);
}

static __exit void rs5c372_exit(void)
{
	i2c_del_driver(&rs5c372_driver);
}

module_init(rs5c372_init);
module_exit(rs5c372_exit);

/* Module metadata. */
MODULE_AUTHOR(
	"Pavel Mironchik <pmironchik@optifacio.net>, "
	"Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Ricoh RS5C372 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
new file mode 100644
index 000000000000..83b2bb480a16
--- /dev/null
+++ b/drivers/rtc/rtc-sa1100.c
@@ -0,0 +1,388 @@
1/*
2 * Real Time Clock interface for StrongARM SA1x00 and XScale PXA2xx
3 *
4 * Copyright (c) 2000 Nils Faerber
5 *
6 * Based on rtc.c by Paul Gortmaker
7 *
8 * Original Driver by Nils Faerber <nils@kernelconcepts.de>
9 *
10 * Modifications from:
11 * CIH <cih@coventive.com>
12 * Nicolas Pitre <nico@cam.org>
13 * Andrew Christian <andrew.christian@hp.com>
14 *
15 * Converted to the RTC subsystem and Driver Model
16 * by Richard Purdie <rpurdie@rpsys.net>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24#include <linux/platform_device.h>
25#include <linux/module.h>
26#include <linux/rtc.h>
27#include <linux/init.h>
28#include <linux/fs.h>
29#include <linux/interrupt.h>
30#include <linux/string.h>
31#include <linux/pm.h>
32
33#include <asm/bitops.h>
34#include <asm/hardware.h>
35#include <asm/irq.h>
36#include <asm/rtc.h>
37
38#ifdef CONFIG_ARCH_PXA
39#include <asm/arch/pxa-regs.h>
40#endif
41
#define TIMER_FREQ CLOCK_TICK_RATE
#define RTC_DEF_DIVIDER 32768 - 1
#define RTC_DEF_TRIM 0

/* periodic interrupt frequency (Hz), changed via RTC_IRQP_SET */
static unsigned long rtc_freq = 1024;
/* last alarm programmed by userspace (cached for re-arming) */
static struct rtc_time rtc_alarm;
/* NOTE(review): SPIN_LOCK_UNLOCKED-style init was later deprecated in
 * favor of DEFINE_SPINLOCK(); fine for this kernel vintage. */
static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED;
49
/*
 * Program the next occurrence of *alrm into the alarm register RTAR.
 * The loop re-reads RCNR afterwards: if the counter ticked while we
 * were computing, the alarm may already be in the past, so redo it.
 */
static int rtc_update_alarm(struct rtc_time *alrm)
{
	struct rtc_time alarm_tm, now_tm;
	unsigned long now, time;
	int ret;

	do {
		now = RCNR;
		rtc_time_to_tm(now, &now_tm);
		rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
		ret = rtc_tm_to_time(&alarm_tm, &time);
		if (ret != 0)
			break;

		/* clear any pending alarm status before arming */
		RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
		RTAR = time;
	} while (now != RCNR);

	return ret;
}
70
/*
 * Shared handler for the 1Hz-tick and alarm IRQs: acknowledge the
 * hardware, translate RTSR bits into RTC_AF/RTC_UF events, and re-arm
 * a periodic alarm if one is configured.
 */
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct platform_device *pdev = to_platform_device(dev_id);
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	unsigned int rtsr;
	unsigned long events = 0;

	spin_lock(&sa1100_rtc_lock);

	rtsr = RTSR;
	/* clear interrupt sources */
	RTSR = 0;
	RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);

	/* clear alarm interrupt if it has occurred */
	if (rtsr & RTSR_AL)
		rtsr &= ~RTSR_ALE;
	RTSR = rtsr & (RTSR_ALE | RTSR_HZE);

	/* update irq data & counter */
	if (rtsr & RTSR_AL)
		events |= RTC_AF | RTC_IRQF;
	if (rtsr & RTSR_HZ)
		events |= RTC_UF | RTC_IRQF;

	rtc_update_irq(&rtc->class_dev, 1, events);

	/* a "periodic" alarm (wildcard fields) must be re-armed each time */
	if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
		rtc_update_alarm(&rtc_alarm);

	spin_unlock(&sa1100_rtc_lock);

	return IRQ_HANDLED;
}
106
/* periods reported per timer1 match; 1 on the first match after
 * RTC_PIE_ON, then a large wrap-around estimate (see below) */
static int rtc_timer1_count;

/* OS timer 1 match handler: emits RTC_PF periodic events. */
static irqreturn_t timer1_interrupt(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct platform_device *pdev = to_platform_device(dev_id);
	struct rtc_device *rtc = platform_get_drvdata(pdev);

	/*
	 * If we match for the first time, rtc_timer1_count will be 1.
	 * Otherwise, we wrapped around (very unlikely but
	 * still possible) so compute the amount of missed periods.
	 * The match reg is updated only when the data is actually retrieved
	 * to avoid unnecessary interrupts.
	 */
	OSSR = OSSR_M1;	/* clear match on timer1 */

	rtc_update_irq(&rtc->class_dev, rtc_timer1_count, RTC_PF | RTC_IRQF);

	if (rtc_timer1_count == 1)
		rtc_timer1_count = (rtc_freq * ((1<<30)/(TIMER_FREQ>>2)));

	return IRQ_HANDLED;
}
131
/*
 * Hook run when userspace reads the event counter: for periodic (PF)
 * events, fold in any periods missed since the last read (high bits
 * of 'data' carry the count) and program the next timer1 match.
 */
static int sa1100_rtc_read_callback(struct device *dev, int data)
{
	if (data & RTC_PF) {
		/* interpolate missed periods and set match for the next */
		unsigned long period = TIMER_FREQ/rtc_freq;
		unsigned long oscr = OSCR;
		unsigned long osmr1 = OSMR1;
		unsigned long missed = (oscr - osmr1)/period;
		data += missed << 8;
		OSSR = OSSR_M1;	/* clear match on timer 1 */
		OSMR1 = osmr1 + (missed + 1)*period;
		/* Ensure we didn't miss another match in the mean time.
		 * Here we compare (match - OSCR) 8 instead of 0 --
		 * see comment in pxa_timer_interrupt() for explanation.
		 */
		while( (signed long)((osmr1 = OSMR1) - OSCR) <= 8 ) {
			data += 0x100;
			OSSR = OSSR_M1;	/* clear match on timer 1 */
			OSMR1 = osmr1 + period;
		}
	}
	return data;
}
155
156static int sa1100_rtc_open(struct device *dev)
157{
158 int ret;
159
160 ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, SA_INTERRUPT,
161 "rtc 1Hz", dev);
162 if (ret) {
163 printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_RTC1Hz);
164 goto fail_ui;
165 }
166 ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, SA_INTERRUPT,
167 "rtc Alrm", dev);
168 if (ret) {
169 printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_RTCAlrm);
170 goto fail_ai;
171 }
172 ret = request_irq(IRQ_OST1, timer1_interrupt, SA_INTERRUPT,
173 "rtc timer", dev);
174 if (ret) {
175 printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_OST1);
176 goto fail_pi;
177 }
178 return 0;
179
180 fail_pi:
181 free_irq(IRQ_RTCAlrm, NULL);
182 fail_ai:
183 free_irq(IRQ_RTC1Hz, NULL);
184 fail_ui:
185 return ret;
186}
187
/* Last close: mask all RTC/timer interrupt sources, clear pending
 * timer1 match status, and release the IRQs taken in open(). */
static void sa1100_rtc_release(struct device *dev)
{
	spin_lock_irq(&sa1100_rtc_lock);
	RTSR = 0;
	OIER &= ~OIER_E1;
	OSSR = OSSR_M1;
	spin_unlock_irq(&sa1100_rtc_lock);

	free_irq(IRQ_OST1, dev);
	free_irq(IRQ_RTCAlrm, dev);
	free_irq(IRQ_RTC1Hz, dev);
}
200
201
/*
 * Driver-specific ioctls: enable/disable the alarm (AIE), update (UIE)
 * and periodic (PIE) interrupts, and get/set the periodic frequency.
 * Unhandled commands fall through to the RTC core via -EINVAL.
 */
static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
		unsigned long arg)
{
	switch(cmd) {
	case RTC_AIE_OFF:
		spin_lock_irq(&sa1100_rtc_lock);
		RTSR &= ~RTSR_ALE;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_AIE_ON:
		spin_lock_irq(&sa1100_rtc_lock);
		RTSR |= RTSR_ALE;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_UIE_OFF:
		spin_lock_irq(&sa1100_rtc_lock);
		RTSR &= ~RTSR_HZE;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_UIE_ON:
		spin_lock_irq(&sa1100_rtc_lock);
		RTSR |= RTSR_HZE;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_PIE_OFF:
		spin_lock_irq(&sa1100_rtc_lock);
		OIER &= ~OIER_E1;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_PIE_ON:
		/* >64 Hz periodic interrupts are a privileged request */
		if ((rtc_freq > 64) && !capable(CAP_SYS_RESOURCE))
			return -EACCES;
		spin_lock_irq(&sa1100_rtc_lock);
		/* arm OS timer 1 one period from now */
		OSMR1 = TIMER_FREQ/rtc_freq + OSCR;
		OIER |= OIER_E1;
		rtc_timer1_count = 1;
		spin_unlock_irq(&sa1100_rtc_lock);
		return 0;
	case RTC_IRQP_READ:
		return put_user(rtc_freq, (unsigned long *)arg);
	case RTC_IRQP_SET:
		if (arg < 1 || arg > TIMER_FREQ)
			return -EINVAL;
		if ((arg > 64) && (!capable(CAP_SYS_RESOURCE)))
			return -EACCES;
		rtc_freq = arg;
		return 0;
	}
	return -EINVAL;
}
252
/* Read the free-running counter RCNR and convert to broken-down time. */
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	rtc_time_to_tm(RCNR, tm);
	return 0;
}
258
259static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
260{
261 unsigned long time;
262 int ret;
263
264 ret = rtc_tm_to_time(tm, &time);
265 if (ret == 0)
266 RCNR = time;
267 return ret;
268}
269
/* Report the cached alarm time; 'pending' mirrors the RTSR_AL bit. */
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
	alrm->pending = RTSR & RTSR_AL ? 1 : 0;
	return 0;
}
276
/*
 * Program the hardware alarm; on success cache the requested time
 * (needed to re-arm periodic alarms) and configure wakeup capability
 * on the alarm IRQ per alrm->enabled.
 */
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	int ret;

	spin_lock_irq(&sa1100_rtc_lock);
	ret = rtc_update_alarm(&alrm->time);
	if (ret == 0) {
		memcpy(&rtc_alarm, &alrm->time, sizeof(struct rtc_time));

		if (alrm->enabled)
			enable_irq_wake(IRQ_RTCAlrm);
		else
			disable_irq_wake(IRQ_RTCAlrm);
	}
	spin_unlock_irq(&sa1100_rtc_lock);

	return ret;
}
295
/* /proc hook: dump trim register and interrupt-enable state. */
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
	seq_printf(seq, "trim/divider\t: 0x%08x\n", RTTR);
	seq_printf(seq, "alarm_IRQ\t: %s\n",
			(RTSR & RTSR_ALE) ? "yes" : "no" );
	seq_printf(seq, "update_IRQ\t: %s\n",
			(RTSR & RTSR_HZE) ? "yes" : "no");
	seq_printf(seq, "periodic_IRQ\t: %s\n",
			(OIER & OIER_E1) ? "yes" : "no");
	seq_printf(seq, "periodic_freq\t: %ld\n", rtc_freq);

	return 0;
}
309
/* Full rtc_class_ops set: this driver supports alarms, periodic
 * interrupts and driver-specific ioctls in addition to time get/set. */
static struct rtc_class_ops sa1100_rtc_ops = {
	.open = sa1100_rtc_open,
	.read_callback = sa1100_rtc_read_callback,
	.release = sa1100_rtc_release,
	.ioctl = sa1100_rtc_ioctl,
	.read_time = sa1100_rtc_read_time,
	.set_time = sa1100_rtc_set_time,
	.read_alarm = sa1100_rtc_read_alarm,
	.set_alarm = sa1100_rtc_set_alarm,
	.proc = sa1100_rtc_proc,
};
321
/*
 * Bind the platform device: sanity-fix an uninitialized clock divider
 * and register the RTC class device.
 */
static int sa1100_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	/*
	 * According to the manual we should be able to let RTTR be zero
	 * and then a default diviser for a 32.768KHz clock is used.
	 * Apparently this doesn't work, at least for my SA1110 rev 5.
	 * If the clock divider is uninitialized then reset it to the
	 * default value to get the 1Hz clock.
	 */
	if (RTTR == 0) {
		RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
		printk(KERN_WARNING "rtc: warning: initializing default clock divider/trim value\n");
		/* The current RTC value probably doesn't make sense either */
		RCNR = 0;
	}

	rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
				THIS_MODULE);

	if (IS_ERR(rtc)) {
		dev_err(&pdev->dev, "Unable to register the RTC device\n");
		return PTR_ERR(rtc);
	}

	platform_set_drvdata(pdev, rtc);

	dev_info(&pdev->dev, "SA11xx/PXA2xx RTC Registered\n");

	return 0;
}
354
355static int sa1100_rtc_remove(struct platform_device *pdev)
356{
357 struct rtc_device *rtc = platform_get_drvdata(pdev);
358
359 if (rtc)
360 rtc_device_unregister(rtc);
361
362 return 0;
363}
364
/* Platform glue: matches devices named "sa1100-rtc". */
static struct platform_driver sa1100_rtc_driver = {
	.probe = sa1100_rtc_probe,
	.remove = sa1100_rtc_remove,
	.driver = {
		.name = "sa1100-rtc",
	},
};
372
/* Module entry/exit: register the platform driver. */
static int __init sa1100_rtc_init(void)
{
	return platform_driver_register(&sa1100_rtc_driver);
}

static void __exit sa1100_rtc_exit(void)
{
	platform_driver_unregister(&sa1100_rtc_driver);
}

module_init(sa1100_rtc_init);
module_exit(sa1100_rtc_exit);

/* Module metadata. */
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
new file mode 100644
index 000000000000..7c1f3d2e53c4
--- /dev/null
+++ b/drivers/rtc/rtc-sysfs.c
@@ -0,0 +1,124 @@
1/*
2 * RTC subsystem, sysfs interface
3 *
4 * Copyright (C) 2005 Tower Technologies
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#include <linux/module.h>
13#include <linux/rtc.h>
14
15/* device attributes */
16
17static ssize_t rtc_sysfs_show_name(struct class_device *dev, char *buf)
18{
19 return sprintf(buf, "%s\n", to_rtc_device(dev)->name);
20}
21static CLASS_DEVICE_ATTR(name, S_IRUGO, rtc_sysfs_show_name, NULL);
22
23static ssize_t rtc_sysfs_show_date(struct class_device *dev, char *buf)
24{
25 ssize_t retval;
26 struct rtc_time tm;
27
28 retval = rtc_read_time(dev, &tm);
29 if (retval == 0) {
30 retval = sprintf(buf, "%04d-%02d-%02d\n",
31 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
32 }
33
34 return retval;
35}
36static CLASS_DEVICE_ATTR(date, S_IRUGO, rtc_sysfs_show_date, NULL);
37
38static ssize_t rtc_sysfs_show_time(struct class_device *dev, char *buf)
39{
40 ssize_t retval;
41 struct rtc_time tm;
42
43 retval = rtc_read_time(dev, &tm);
44 if (retval == 0) {
45 retval = sprintf(buf, "%02d:%02d:%02d\n",
46 tm.tm_hour, tm.tm_min, tm.tm_sec);
47 }
48
49 return retval;
50}
51static CLASS_DEVICE_ATTR(time, S_IRUGO, rtc_sysfs_show_time, NULL);
52
53static ssize_t rtc_sysfs_show_since_epoch(struct class_device *dev, char *buf)
54{
55 ssize_t retval;
56 struct rtc_time tm;
57
58 retval = rtc_read_time(dev, &tm);
59 if (retval == 0) {
60 unsigned long time;
61 rtc_tm_to_time(&tm, &time);
62 retval = sprintf(buf, "%lu\n", time);
63 }
64
65 return retval;
66}
67static CLASS_DEVICE_ATTR(since_epoch, S_IRUGO, rtc_sysfs_show_since_epoch, NULL);
68
69static struct attribute *rtc_attrs[] = {
70 &class_device_attr_name.attr,
71 &class_device_attr_date.attr,
72 &class_device_attr_time.attr,
73 &class_device_attr_since_epoch.attr,
74 NULL,
75};
76
77static struct attribute_group rtc_attr_group = {
78 .attrs = rtc_attrs,
79};
80
81static int __devinit rtc_sysfs_add_device(struct class_device *class_dev,
82 struct class_interface *class_intf)
83{
84 int err;
85
86 dev_info(class_dev->dev, "rtc intf: sysfs\n");
87
88 err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group);
89 if (err)
90 dev_err(class_dev->dev,
91 "failed to create sysfs attributes\n");
92
93 return err;
94}
95
96static void rtc_sysfs_remove_device(struct class_device *class_dev,
97 struct class_interface *class_intf)
98{
99 sysfs_remove_group(&class_dev->kobj, &rtc_attr_group);
100}
101
102/* interface registration */
103
104static struct class_interface rtc_sysfs_interface = {
105 .add = &rtc_sysfs_add_device,
106 .remove = &rtc_sysfs_remove_device,
107};
108
109static int __init rtc_sysfs_init(void)
110{
111 return rtc_interface_register(&rtc_sysfs_interface);
112}
113
114static void __exit rtc_sysfs_exit(void)
115{
116 class_interface_unregister(&rtc_sysfs_interface);
117}
118
119module_init(rtc_sysfs_init);
120module_exit(rtc_sysfs_exit);
121
122MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
123MODULE_DESCRIPTION("RTC class sysfs interface");
124MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
new file mode 100644
index 000000000000..43d107487820
--- /dev/null
+++ b/drivers/rtc/rtc-test.c
@@ -0,0 +1,204 @@
1/*
2 * An RTC test device/driver
3 * Copyright (C) 2005 Tower Technologies
4 * Author: Alessandro Zummo <a.zummo@towertech.it>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/err.h>
13#include <linux/rtc.h>
14#include <linux/platform_device.h>
15
16static struct platform_device *test0 = NULL, *test1 = NULL;
17
18static int test_rtc_read_alarm(struct device *dev,
19 struct rtc_wkalrm *alrm)
20{
21 return 0;
22}
23
24static int test_rtc_set_alarm(struct device *dev,
25 struct rtc_wkalrm *alrm)
26{
27 return 0;
28}
29
30static int test_rtc_read_time(struct device *dev,
31 struct rtc_time *tm)
32{
33 rtc_time_to_tm(get_seconds(), tm);
34 return 0;
35}
36
37static int test_rtc_set_time(struct device *dev,
38 struct rtc_time *tm)
39{
40 return 0;
41}
42
43static int test_rtc_set_mmss(struct device *dev, unsigned long secs)
44{
45 return 0;
46}
47
48static int test_rtc_proc(struct device *dev, struct seq_file *seq)
49{
50 struct platform_device *plat_dev = to_platform_device(dev);
51
52 seq_printf(seq, "24hr\t\t: yes\n");
53 seq_printf(seq, "test\t\t: yes\n");
54 seq_printf(seq, "id\t\t: %d\n", plat_dev->id);
55
56 return 0;
57}
58
59static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
60 unsigned long arg)
61{
62 /* We do support interrupts, they're generated
63 * using the sysfs interface.
64 */
65 switch (cmd) {
66 case RTC_PIE_ON:
67 case RTC_PIE_OFF:
68 case RTC_UIE_ON:
69 case RTC_UIE_OFF:
70 case RTC_AIE_ON:
71 case RTC_AIE_OFF:
72 return 0;
73
74 default:
75 return -EINVAL;
76 }
77}
78
79static struct rtc_class_ops test_rtc_ops = {
80 .proc = test_rtc_proc,
81 .read_time = test_rtc_read_time,
82 .set_time = test_rtc_set_time,
83 .read_alarm = test_rtc_read_alarm,
84 .set_alarm = test_rtc_set_alarm,
85 .set_mmss = test_rtc_set_mmss,
86 .ioctl = test_rtc_ioctl,
87};
88
89static ssize_t test_irq_show(struct device *dev,
90 struct device_attribute *attr, char *buf)
91{
92 return sprintf(buf, "%d\n", 42);
93}
94static ssize_t test_irq_store(struct device *dev,
95 struct device_attribute *attr,
96 const char *buf, size_t count)
97{
98 int retval;
99 struct platform_device *plat_dev = to_platform_device(dev);
100 struct rtc_device *rtc = platform_get_drvdata(plat_dev);
101
102 retval = count;
103 if (strncmp(buf, "tick", 4) == 0)
104 rtc_update_irq(&rtc->class_dev, 1, RTC_PF | RTC_IRQF);
105 else if (strncmp(buf, "alarm", 5) == 0)
106 rtc_update_irq(&rtc->class_dev, 1, RTC_AF | RTC_IRQF);
107 else if (strncmp(buf, "update", 6) == 0)
108 rtc_update_irq(&rtc->class_dev, 1, RTC_UF | RTC_IRQF);
109 else
110 retval = -EINVAL;
111
112 return retval;
113}
114static DEVICE_ATTR(irq, S_IRUGO | S_IWUSR, test_irq_show, test_irq_store);
115
116static int test_probe(struct platform_device *plat_dev)
117{
118 int err;
119 struct rtc_device *rtc = rtc_device_register("test", &plat_dev->dev,
120 &test_rtc_ops, THIS_MODULE);
121 if (IS_ERR(rtc)) {
122 err = PTR_ERR(rtc);
123 dev_err(&plat_dev->dev,
124 "unable to register the class device\n");
125 return err;
126 }
127 device_create_file(&plat_dev->dev, &dev_attr_irq);
128
129 platform_set_drvdata(plat_dev, rtc);
130
131 return 0;
132}
133
134static int __devexit test_remove(struct platform_device *plat_dev)
135{
136 struct rtc_device *rtc = platform_get_drvdata(plat_dev);
137
138 rtc_device_unregister(rtc);
139 device_remove_file(&plat_dev->dev, &dev_attr_irq);
140
141 return 0;
142}
143
144static struct platform_driver test_drv = {
145 .probe = test_probe,
146 .remove = __devexit_p(test_remove),
147 .driver = {
148 .name = "rtc-test",
149 .owner = THIS_MODULE,
150 },
151};
152
153static int __init test_init(void)
154{
155 int err;
156
157 if ((err = platform_driver_register(&test_drv)))
158 return err;
159
160 if ((test0 = platform_device_alloc("rtc-test", 0)) == NULL) {
161 err = -ENOMEM;
162 goto exit_driver_unregister;
163 }
164
165 if ((test1 = platform_device_alloc("rtc-test", 1)) == NULL) {
166 err = -ENOMEM;
167 goto exit_free_test0;
168 }
169
170 if ((err = platform_device_add(test0)))
171 goto exit_free_test1;
172
173 if ((err = platform_device_add(test1)))
174 goto exit_device_unregister;
175
176 return 0;
177
178exit_device_unregister:
179 platform_device_unregister(test0);
180
181exit_free_test1:
182 platform_device_put(test1);
183
184exit_free_test0:
185 platform_device_put(test0);
186
187exit_driver_unregister:
188 platform_driver_unregister(&test_drv);
189 return err;
190}
191
192static void __exit test_exit(void)
193{
194 platform_device_unregister(test0);
195 platform_device_unregister(test1);
196 platform_driver_unregister(&test_drv);
197}
198
199MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
200MODULE_DESCRIPTION("RTC test driver/device");
201MODULE_LICENSE("GPL");
202
203module_init(test_init);
204module_exit(test_exit);
diff --git a/drivers/i2c/chips/x1205.c b/drivers/rtc/rtc-x1205.c
index 245fffa92dbd..621d17afc0d9 100644
--- a/drivers/i2c/chips/x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -1,32 +1,25 @@
1/* 1/*
2 * x1205.c - An i2c driver for the Xicor X1205 RTC 2 * An i2c driver for the Xicor/Intersil X1205 RTC
3 * Copyright 2004 Karen Spearel 3 * Copyright 2004 Karen Spearel
4 * Copyright 2005 Alessandro Zummo 4 * Copyright 2005 Alessandro Zummo
5 * 5 *
6 * please send all reports to: 6 * please send all reports to:
7 * kas11 at tampabay dot rr dot com 7 * Karen Spearel <kas111 at gmail dot com>
8 * a dot zummo at towertech dot it 8 * Alessandro Zummo <a.zummo@towertech.it>
9 * 9 *
10 * based on the other drivers in this same directory. 10 * based on a lot of other RTC drivers.
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License version 2 as
14 * the Free Software Foundation; either version 2 of the License, or 14 * published by the Free Software Foundation.
15 * (at your option) any later version.
16 */ 15 */
17 16
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/i2c.h> 17#include <linux/i2c.h>
22#include <linux/string.h>
23#include <linux/bcd.h> 18#include <linux/bcd.h>
24#include <linux/rtc.h> 19#include <linux/rtc.h>
25#include <linux/list.h> 20#include <linux/delay.h>
26 21
27#include <linux/x1205.h> 22#define DRV_VERSION "1.0.6"
28
29#define DRV_VERSION "0.9.9"
30 23
31/* Addresses to scan: none. This chip is located at 24/* Addresses to scan: none. This chip is located at
32 * 0x6f and uses a two bytes register addressing. 25 * 0x6f and uses a two bytes register addressing.
@@ -40,8 +33,6 @@ static unsigned short normal_i2c[] = { I2C_CLIENT_END };
40 33
41/* Insmod parameters */ 34/* Insmod parameters */
42I2C_CLIENT_INSMOD; 35I2C_CLIENT_INSMOD;
43I2C_CLIENT_MODULE_PARM(hctosys,
44 "Set the system time from the hardware clock upon initialization");
45 36
46/* offsets into CCR area */ 37/* offsets into CCR area */
47 38
@@ -101,107 +92,35 @@ I2C_CLIENT_MODULE_PARM(hctosys,
101static int x1205_attach(struct i2c_adapter *adapter); 92static int x1205_attach(struct i2c_adapter *adapter);
102static int x1205_detach(struct i2c_client *client); 93static int x1205_detach(struct i2c_client *client);
103static int x1205_probe(struct i2c_adapter *adapter, int address, int kind); 94static int x1205_probe(struct i2c_adapter *adapter, int address, int kind);
104static int x1205_command(struct i2c_client *client, unsigned int cmd,
105 void *arg);
106 95
107static struct i2c_driver x1205_driver = { 96static struct i2c_driver x1205_driver = {
108 .driver = { 97 .driver = {
109 .name = "x1205", 98 .name = "x1205",
110 }, 99 },
100 .id = I2C_DRIVERID_X1205,
111 .attach_adapter = &x1205_attach, 101 .attach_adapter = &x1205_attach,
112 .detach_client = &x1205_detach, 102 .detach_client = &x1205_detach,
113}; 103};
114 104
115struct x1205_data {
116 struct i2c_client client;
117 struct list_head list;
118 unsigned int epoch;
119};
120
121static const unsigned char days_in_mo[] =
122 { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
123
124static LIST_HEAD(x1205_clients);
125
126/* Workaround until the I2C subsytem will allow to send
127 * commands to a specific client. This function will send the command
128 * to the first client.
129 */
130int x1205_do_command(unsigned int cmd, void *arg)
131{
132 struct list_head *walk;
133 struct list_head *tmp;
134 struct x1205_data *data;
135
136 list_for_each_safe(walk, tmp, &x1205_clients) {
137 data = list_entry(walk, struct x1205_data, list);
138 return x1205_command(&data->client, cmd, arg);
139 }
140
141 return -ENODEV;
142}
143
144#define is_leap(year) \
145 ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
146
147/* make sure the rtc_time values are in bounds */
148static int x1205_validate_tm(struct rtc_time *tm)
149{
150 int year = tm->tm_year + 1900;
151
152 if ((tm->tm_year < 70) || (tm->tm_year > 255))
153 return -EINVAL;
154
155 if ((tm->tm_mon > 11) || (tm->tm_mday == 0))
156 return -EINVAL;
157
158 if (tm->tm_mday > days_in_mo[tm->tm_mon]
159 + ((tm->tm_mon == 1) && is_leap(year)))
160 return -EINVAL;
161
162 if ((tm->tm_hour >= 24) || (tm->tm_min >= 60) || (tm->tm_sec >= 60))
163 return -EINVAL;
164
165 return 0;
166}
167
168/* 105/*
169 * In the routines that deal directly with the x1205 hardware, we use 106 * In the routines that deal directly with the x1205 hardware, we use
170 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch 107 * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch
171 * Epoch is initialized as 2000. Time is set to UTC. 108 * Epoch is initialized as 2000. Time is set to UTC.
172 */ 109 */
173static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm, 110static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
174 u8 reg_base) 111 unsigned char reg_base)
175{ 112{
176 unsigned char dt_addr[2] = { 0, reg_base }; 113 unsigned char dt_addr[2] = { 0, reg_base };
177 static unsigned char sr_addr[2] = { 0, X1205_REG_SR };
178 114
179 unsigned char buf[8], sr; 115 unsigned char buf[8];
180 116
181 struct i2c_msg msgs[] = { 117 struct i2c_msg msgs[] = {
182 { client->addr, 0, 2, sr_addr }, /* setup read ptr */
183 { client->addr, I2C_M_RD, 1, &sr }, /* read status */
184 { client->addr, 0, 2, dt_addr }, /* setup read ptr */ 118 { client->addr, 0, 2, dt_addr }, /* setup read ptr */
185 { client->addr, I2C_M_RD, 8, buf }, /* read date */ 119 { client->addr, I2C_M_RD, 8, buf }, /* read date */
186 }; 120 };
187 121
188 struct x1205_data *data = i2c_get_clientdata(client);
189
190 /* read status register */
191 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
192 dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
193 return -EIO;
194 }
195
196 /* check for battery failure */
197 if (sr & X1205_SR_RTCF) {
198 dev_warn(&client->dev,
199 "Clock had a power failure, you must set the date.\n");
200 return -EINVAL;
201 }
202
203 /* read date registers */ 122 /* read date registers */
204 if ((i2c_transfer(client->adapter, &msgs[2], 2)) != 2) { 123 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
205 dev_err(&client->dev, "%s: read error\n", __FUNCTION__); 124 dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
206 return -EIO; 125 return -EIO;
207 } 126 }
@@ -217,9 +136,9 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
217 tm->tm_min = BCD2BIN(buf[CCR_MIN]); 136 tm->tm_min = BCD2BIN(buf[CCR_MIN]);
218 tm->tm_hour = BCD2BIN(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */ 137 tm->tm_hour = BCD2BIN(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */
219 tm->tm_mday = BCD2BIN(buf[CCR_MDAY]); 138 tm->tm_mday = BCD2BIN(buf[CCR_MDAY]);
220 tm->tm_mon = BCD2BIN(buf[CCR_MONTH]); 139 tm->tm_mon = BCD2BIN(buf[CCR_MONTH]) - 1; /* mon is 0-11 */
221 data->epoch = BCD2BIN(buf[CCR_Y2K]) * 100; 140 tm->tm_year = BCD2BIN(buf[CCR_YEAR])
222 tm->tm_year = BCD2BIN(buf[CCR_YEAR]) + data->epoch - 1900; 141 + (BCD2BIN(buf[CCR_Y2K]) * 100) - 1900;
223 tm->tm_wday = buf[CCR_WDAY]; 142 tm->tm_wday = buf[CCR_WDAY];
224 143
225 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " 144 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
@@ -231,11 +150,28 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
231 return 0; 150 return 0;
232} 151}
233 152
153static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
154{
155 static unsigned char sr_addr[2] = { 0, X1205_REG_SR };
156
157 struct i2c_msg msgs[] = {
158 { client->addr, 0, 2, sr_addr }, /* setup read ptr */
159 { client->addr, I2C_M_RD, 1, sr }, /* read status */
160 };
161
162 /* read status register */
163 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
164 dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
165 return -EIO;
166 }
167
168 return 0;
169}
170
234static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, 171static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
235 int datetoo, u8 reg_base) 172 int datetoo, u8 reg_base)
236{ 173{
237 int i, err, xfer; 174 int i, xfer;
238
239 unsigned char buf[8]; 175 unsigned char buf[8];
240 176
241 static const unsigned char wel[3] = { 0, X1205_REG_SR, 177 static const unsigned char wel[3] = { 0, X1205_REG_SR,
@@ -246,17 +182,10 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
246 182
247 static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 }; 183 static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
248 184
249 struct x1205_data *data = i2c_get_clientdata(client); 185 dev_dbg(&client->dev,
250 186 "%s: secs=%d, mins=%d, hours=%d\n",
251 /* check if all values in the tm struct are correct */
252 if ((err = x1205_validate_tm(tm)) < 0)
253 return err;
254
255 dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
256 "mday=%d, mon=%d, year=%d, wday=%d\n",
257 __FUNCTION__, 187 __FUNCTION__,
258 tm->tm_sec, tm->tm_min, tm->tm_hour, 188 tm->tm_sec, tm->tm_min, tm->tm_hour);
259 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
260 189
261 buf[CCR_SEC] = BIN2BCD(tm->tm_sec); 190 buf[CCR_SEC] = BIN2BCD(tm->tm_sec);
262 buf[CCR_MIN] = BIN2BCD(tm->tm_min); 191 buf[CCR_MIN] = BIN2BCD(tm->tm_min);
@@ -266,26 +195,29 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
266 195
267 /* should we also set the date? */ 196 /* should we also set the date? */
268 if (datetoo) { 197 if (datetoo) {
198 dev_dbg(&client->dev,
199 "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
200 __FUNCTION__,
201 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
202
269 buf[CCR_MDAY] = BIN2BCD(tm->tm_mday); 203 buf[CCR_MDAY] = BIN2BCD(tm->tm_mday);
270 204
271 /* month, 0 - 11 */ 205 /* month, 1 - 12 */
272 buf[CCR_MONTH] = BIN2BCD(tm->tm_mon); 206 buf[CCR_MONTH] = BIN2BCD(tm->tm_mon + 1);
273 207
274 /* year, since 1900 */ 208 /* year, since the rtc epoch*/
275 buf[CCR_YEAR] = BIN2BCD(tm->tm_year + 1900 - data->epoch); 209 buf[CCR_YEAR] = BIN2BCD(tm->tm_year % 100);
276 buf[CCR_WDAY] = tm->tm_wday & 0x07; 210 buf[CCR_WDAY] = tm->tm_wday & 0x07;
277 buf[CCR_Y2K] = BIN2BCD(data->epoch / 100); 211 buf[CCR_Y2K] = BIN2BCD(tm->tm_year / 100);
278 } 212 }
279 213
280 /* this sequence is required to unlock the chip */ 214 /* this sequence is required to unlock the chip */
281 xfer = i2c_master_send(client, wel, 3); 215 if ((xfer = i2c_master_send(client, wel, 3)) != 3) {
282 if (xfer != 3) {
283 dev_err(&client->dev, "%s: wel - %d\n", __FUNCTION__, xfer); 216 dev_err(&client->dev, "%s: wel - %d\n", __FUNCTION__, xfer);
284 return -EIO; 217 return -EIO;
285 } 218 }
286 219
287 xfer = i2c_master_send(client, rwel, 3); 220 if ((xfer = i2c_master_send(client, rwel, 3)) != 3) {
288 if (xfer != 3) {
289 dev_err(&client->dev, "%s: rwel - %d\n", __FUNCTION__, xfer); 221 dev_err(&client->dev, "%s: rwel - %d\n", __FUNCTION__, xfer);
290 return -EIO; 222 return -EIO;
291 } 223 }
@@ -305,8 +237,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
305 }; 237 };
306 238
307 /* disable further writes */ 239 /* disable further writes */
308 xfer = i2c_master_send(client, diswe, 3); 240 if ((xfer = i2c_master_send(client, diswe, 3)) != 3) {
309 if (xfer != 3) {
310 dev_err(&client->dev, "%s: diswe - %d\n", __FUNCTION__, xfer); 241 dev_err(&client->dev, "%s: diswe - %d\n", __FUNCTION__, xfer);
311 return -EIO; 242 return -EIO;
312 } 243 }
@@ -314,6 +245,20 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
314 return 0; 245 return 0;
315} 246}
316 247
248static int x1205_fix_osc(struct i2c_client *client)
249{
250 int err;
251 struct rtc_time tm;
252
253 tm.tm_hour = tm.tm_min = tm.tm_sec = 0;
254
255 if ((err = x1205_set_datetime(client, &tm, 0, X1205_CCR_BASE)) < 0)
256 dev_err(&client->dev,
257 "unable to restart the oscillator\n");
258
259 return err;
260}
261
317static int x1205_get_dtrim(struct i2c_client *client, int *trim) 262static int x1205_get_dtrim(struct i2c_client *client, int *trim)
318{ 263{
319 unsigned char dtr; 264 unsigned char dtr;
@@ -380,60 +325,9 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
380 return 0; 325 return 0;
381} 326}
382 327
383static int x1205_hctosys(struct i2c_client *client)
384{
385 int err;
386
387 struct rtc_time tm;
388 struct timespec tv;
389
390 err = x1205_command(client, X1205_CMD_GETDATETIME, &tm);
391
392 if (err) {
393 dev_err(&client->dev,
394 "Unable to set the system clock\n");
395 return err;
396 }
397
398 /* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
399 * whether it stores the most close value or the value with partial
400 * seconds truncated. However, it is important that we use it to store
401 * the truncated value. This is because otherwise it is necessary,
402 * in an rtc sync function, to read both xtime.tv_sec and
403 * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
404 * of >32bits is not possible. So storing the most close value would
405 * slow down the sync API. So here we have the truncated value and
406 * the best guess is to add 0.5s.
407 */
408
409 tv.tv_nsec = NSEC_PER_SEC >> 1;
410
411 /* WARNING: this is not the C library 'mktime' call, it is a built in
412 * inline function from include/linux/time.h. It expects (requires)
413 * the month to be in the range 1-12
414 */
415
416 tv.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1,
417 tm.tm_mday, tm.tm_hour,
418 tm.tm_min, tm.tm_sec);
419
420 do_settimeofday(&tv);
421
422 dev_info(&client->dev,
423 "setting the system clock to %d-%d-%d %d:%d:%d\n",
424 tm.tm_year + 1900, tm.tm_mon + 1,
425 tm.tm_mday, tm.tm_hour, tm.tm_min,
426 tm.tm_sec);
427
428 return 0;
429}
430
431struct x1205_limit 328struct x1205_limit
432{ 329{
433 unsigned char reg; 330 unsigned char reg, mask, min, max;
434 unsigned char mask;
435 unsigned char min;
436 unsigned char max;
437}; 331};
438 332
439static int x1205_validate_client(struct i2c_client *client) 333static int x1205_validate_client(struct i2c_client *client)
@@ -477,11 +371,10 @@ static int x1205_validate_client(struct i2c_client *client)
477 { client->addr, I2C_M_RD, 1, &buf }, 371 { client->addr, I2C_M_RD, 1, &buf },
478 }; 372 };
479 373
480 xfer = i2c_transfer(client->adapter, msgs, 2); 374 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
481 if (xfer != 2) {
482 dev_err(&client->adapter->dev, 375 dev_err(&client->adapter->dev,
483 "%s: could not read register %x\n", 376 "%s: could not read register %x\n",
484 __FUNCTION__, addr[1]); 377 __FUNCTION__, probe_zero_pattern[i]);
485 378
486 return -EIO; 379 return -EIO;
487 } 380 }
@@ -489,7 +382,7 @@ static int x1205_validate_client(struct i2c_client *client)
489 if ((buf & probe_zero_pattern[i+1]) != 0) { 382 if ((buf & probe_zero_pattern[i+1]) != 0) {
490 dev_err(&client->adapter->dev, 383 dev_err(&client->adapter->dev,
491 "%s: register=%02x, zero pattern=%d, value=%x\n", 384 "%s: register=%02x, zero pattern=%d, value=%x\n",
492 __FUNCTION__, addr[1], i, buf); 385 __FUNCTION__, probe_zero_pattern[i], i, buf);
493 386
494 return -ENODEV; 387 return -ENODEV;
495 } 388 }
@@ -506,12 +399,10 @@ static int x1205_validate_client(struct i2c_client *client)
506 { client->addr, I2C_M_RD, 1, &reg }, 399 { client->addr, I2C_M_RD, 1, &reg },
507 }; 400 };
508 401
509 xfer = i2c_transfer(client->adapter, msgs, 2); 402 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
510
511 if (xfer != 2) {
512 dev_err(&client->adapter->dev, 403 dev_err(&client->adapter->dev,
513 "%s: could not read register %x\n", 404 "%s: could not read register %x\n",
514 __FUNCTION__, addr[1]); 405 __FUNCTION__, probe_limits_pattern[i].reg);
515 406
516 return -EIO; 407 return -EIO;
517 } 408 }
@@ -522,7 +413,8 @@ static int x1205_validate_client(struct i2c_client *client)
522 value < probe_limits_pattern[i].min) { 413 value < probe_limits_pattern[i].min) {
523 dev_dbg(&client->adapter->dev, 414 dev_dbg(&client->adapter->dev,
524 "%s: register=%x, lim pattern=%d, value=%d\n", 415 "%s: register=%x, lim pattern=%d, value=%d\n",
525 __FUNCTION__, addr[1], i, value); 416 __FUNCTION__, probe_limits_pattern[i].reg,
417 i, value);
526 418
527 return -ENODEV; 419 return -ENODEV;
528 } 420 }
@@ -531,37 +423,89 @@ static int x1205_validate_client(struct i2c_client *client)
531 return 0; 423 return 0;
532} 424}
533 425
534static int x1205_attach(struct i2c_adapter *adapter) 426static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
535{ 427{
536 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 428 return x1205_get_datetime(to_i2c_client(dev),
429 &alrm->time, X1205_ALM0_BASE);
430}
537 431
538 return i2c_probe(adapter, &addr_data, x1205_probe); 432static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
433{
434 return x1205_set_datetime(to_i2c_client(dev),
435 &alrm->time, 1, X1205_ALM0_BASE);
539} 436}
540 437
541int x1205_direct_attach(int adapter_id, 438static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
542 struct i2c_client_address_data *address_data)
543{ 439{
544 int err; 440 return x1205_get_datetime(to_i2c_client(dev),
545 struct i2c_adapter *adapter = i2c_get_adapter(adapter_id); 441 tm, X1205_CCR_BASE);
442}
546 443
547 if (adapter) { 444static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
548 err = i2c_probe(adapter, 445{
549 address_data, x1205_probe); 446 return x1205_set_datetime(to_i2c_client(dev),
447 tm, 1, X1205_CCR_BASE);
448}
550 449
551 i2c_put_adapter(adapter); 450static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
451{
452 int err, dtrim, atrim;
552 453
553 return err; 454 seq_printf(seq, "24hr\t\t: yes\n");
554 }
555 455
556 return -ENODEV; 456 if ((err = x1205_get_dtrim(to_i2c_client(dev), &dtrim)) == 0)
457 seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim);
458
459 if ((err = x1205_get_atrim(to_i2c_client(dev), &atrim)) == 0)
460 seq_printf(seq, "analog_trim\t: %d.%02d pF\n",
461 atrim / 1000, atrim % 1000);
462 return 0;
557} 463}
558 464
559static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) 465static struct rtc_class_ops x1205_rtc_ops = {
466 .proc = x1205_rtc_proc,
467 .read_time = x1205_rtc_read_time,
468 .set_time = x1205_rtc_set_time,
469 .read_alarm = x1205_rtc_read_alarm,
470 .set_alarm = x1205_rtc_set_alarm,
471};
472
473static ssize_t x1205_sysfs_show_atrim(struct device *dev,
474 struct device_attribute *attr, char *buf)
560{ 475{
561 struct i2c_client *client; 476 int atrim;
562 struct x1205_data *data; 477
478 if (x1205_get_atrim(to_i2c_client(dev), &atrim) == 0)
479 return sprintf(buf, "%d.%02d pF\n",
480 atrim / 1000, atrim % 1000);
481 return 0;
482}
483static DEVICE_ATTR(atrim, S_IRUGO, x1205_sysfs_show_atrim, NULL);
484
485static ssize_t x1205_sysfs_show_dtrim(struct device *dev,
486 struct device_attribute *attr, char *buf)
487{
488 int dtrim;
489
490 if (x1205_get_dtrim(to_i2c_client(dev), &dtrim) == 0)
491 return sprintf(buf, "%d ppm\n", dtrim);
492
493 return 0;
494}
495static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL);
563 496
497static int x1205_attach(struct i2c_adapter *adapter)
498{
499 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
500 return i2c_probe(adapter, &addr_data, x1205_probe);
501}
502
503static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
504{
564 int err = 0; 505 int err = 0;
506 unsigned char sr;
507 struct i2c_client *client;
508 struct rtc_device *rtc;
565 509
566 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 510 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
567 511
@@ -570,22 +514,17 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
570 goto exit; 514 goto exit;
571 } 515 }
572 516
573 if (!(data = kzalloc(sizeof(struct x1205_data), GFP_KERNEL))) { 517 if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
574 err = -ENOMEM; 518 err = -ENOMEM;
575 goto exit; 519 goto exit;
576 } 520 }
577 521
578 /* Initialize our structures */ 522 /* I2C client */
579 data->epoch = 2000;
580
581 client = &data->client;
582 client->addr = address; 523 client->addr = address;
583 client->driver = &x1205_driver; 524 client->driver = &x1205_driver;
584 client->adapter = adapter; 525 client->adapter = adapter;
585 526
586 strlcpy(client->name, "x1205", I2C_NAME_SIZE); 527 strlcpy(client->name, x1205_driver.driver.name, I2C_NAME_SIZE);
587
588 i2c_set_clientdata(client, data);
589 528
590 /* Verify the chip is really an X1205 */ 529 /* Verify the chip is really an X1205 */
591 if (kind < 0) { 530 if (kind < 0) {
@@ -599,18 +538,43 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
599 if ((err = i2c_attach_client(client))) 538 if ((err = i2c_attach_client(client)))
600 goto exit_kfree; 539 goto exit_kfree;
601 540
602 list_add(&data->list, &x1205_clients);
603
604 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); 541 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
605 542
606 /* If requested, set the system time */ 543 rtc = rtc_device_register(x1205_driver.driver.name, &client->dev,
607 if (hctosys) 544 &x1205_rtc_ops, THIS_MODULE);
608 x1205_hctosys(client); 545
546 if (IS_ERR(rtc)) {
547 err = PTR_ERR(rtc);
548 dev_err(&client->dev,
549 "unable to register the class device\n");
550 goto exit_detach;
551 }
552
553 i2c_set_clientdata(client, rtc);
554
555 /* Check for power failures and eventualy enable the osc */
556 if ((err = x1205_get_status(client, &sr)) == 0) {
557 if (sr & X1205_SR_RTCF) {
558 dev_err(&client->dev,
559 "power failure detected, "
560 "please set the clock\n");
561 udelay(50);
562 x1205_fix_osc(client);
563 }
564 }
565 else
566 dev_err(&client->dev, "couldn't read status\n");
567
568 device_create_file(&client->dev, &dev_attr_atrim);
569 device_create_file(&client->dev, &dev_attr_dtrim);
609 570
610 return 0; 571 return 0;
611 572
573exit_detach:
574 i2c_detach_client(client);
575
612exit_kfree: 576exit_kfree:
613 kfree(data); 577 kfree(client);
614 578
615exit: 579exit:
616 return err; 580 return err;
@@ -619,61 +583,21 @@ exit:
619static int x1205_detach(struct i2c_client *client) 583static int x1205_detach(struct i2c_client *client)
620{ 584{
621 int err; 585 int err;
622 struct x1205_data *data = i2c_get_clientdata(client); 586 struct rtc_device *rtc = i2c_get_clientdata(client);
623 587
624 dev_dbg(&client->dev, "%s\n", __FUNCTION__); 588 dev_dbg(&client->dev, "%s\n", __FUNCTION__);
625 589
590 if (rtc)
591 rtc_device_unregister(rtc);
592
626 if ((err = i2c_detach_client(client))) 593 if ((err = i2c_detach_client(client)))
627 return err; 594 return err;
628 595
629 list_del(&data->list); 596 kfree(client);
630
631 kfree(data);
632 597
633 return 0; 598 return 0;
634} 599}
635 600
636static int x1205_command(struct i2c_client *client, unsigned int cmd,
637 void *param)
638{
639 if (param == NULL)
640 return -EINVAL;
641
642 if (!capable(CAP_SYS_TIME))
643 return -EACCES;
644
645 dev_dbg(&client->dev, "%s: cmd=%d\n", __FUNCTION__, cmd);
646
647 switch (cmd) {
648 case X1205_CMD_GETDATETIME:
649 return x1205_get_datetime(client, param, X1205_CCR_BASE);
650
651 case X1205_CMD_SETTIME:
652 return x1205_set_datetime(client, param, 0,
653 X1205_CCR_BASE);
654
655 case X1205_CMD_SETDATETIME:
656 return x1205_set_datetime(client, param, 1,
657 X1205_CCR_BASE);
658
659 case X1205_CMD_GETALARM:
660 return x1205_get_datetime(client, param, X1205_ALM0_BASE);
661
662 case X1205_CMD_SETALARM:
663 return x1205_set_datetime(client, param, 1,
664 X1205_ALM0_BASE);
665
666 case X1205_CMD_GETDTRIM:
667 return x1205_get_dtrim(client, param);
668
669 case X1205_CMD_GETATRIM:
670 return x1205_get_atrim(client, param);
671
672 default:
673 return -EINVAL;
674 }
675}
676
677static int __init x1205_init(void) 601static int __init x1205_init(void)
678{ 602{
679 return i2c_add_driver(&x1205_driver); 603 return i2c_add_driver(&x1205_driver);
@@ -685,14 +609,11 @@ static void __exit x1205_exit(void)
685} 609}
686 610
687MODULE_AUTHOR( 611MODULE_AUTHOR(
688 "Karen Spearel <kas11@tampabay.rr.com>, " 612 "Karen Spearel <kas111 at gmail dot com>, "
689 "Alessandro Zummo <a.zummo@towertech.it>"); 613 "Alessandro Zummo <a.zummo@towertech.it>");
690MODULE_DESCRIPTION("Xicor X1205 RTC driver"); 614MODULE_DESCRIPTION("Xicor/Intersil X1205 RTC driver");
691MODULE_LICENSE("GPL"); 615MODULE_LICENSE("GPL");
692MODULE_VERSION(DRV_VERSION); 616MODULE_VERSION(DRV_VERSION);
693 617
694EXPORT_SYMBOL_GPL(x1205_do_command);
695EXPORT_SYMBOL_GPL(x1205_direct_attach);
696
697module_init(x1205_init); 618module_init(x1205_init);
698module_exit(x1205_exit); 619module_exit(x1205_exit);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2f720108a7e0..c1c6f1381150 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -437,8 +437,7 @@ dasd_forget_ranges(void)
437 spin_lock(&dasd_devmap_lock); 437 spin_lock(&dasd_devmap_lock);
438 for (i = 0; i < 256; i++) { 438 for (i = 0; i < 256; i++) {
439 list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) { 439 list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
440 if (devmap->device != NULL) 440 BUG_ON(devmap->device != NULL);
441 BUG();
442 list_del(&devmap->list); 441 list_del(&devmap->list);
443 kfree(devmap); 442 kfree(devmap);
444 } 443 }
@@ -547,8 +546,7 @@ dasd_delete_device(struct dasd_device *device)
547 546
548 /* First remove device pointer from devmap. */ 547 /* First remove device pointer from devmap. */
549 devmap = dasd_find_busid(device->cdev->dev.bus_id); 548 devmap = dasd_find_busid(device->cdev->dev.bus_id);
550 if (IS_ERR(devmap)) 549 BUG_ON(IS_ERR(devmap));
551 BUG();
552 spin_lock(&dasd_devmap_lock); 550 spin_lock(&dasd_devmap_lock);
553 if (devmap->device != device) { 551 if (devmap->device != device) {
554 spin_unlock(&dasd_devmap_lock); 552 spin_unlock(&dasd_devmap_lock);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index bd06607a5dcc..eecb2afad5c2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,6 +28,7 @@
28#include <linux/major.h> 28#include <linux/major.h>
29#include <linux/kdev_t.h> 29#include <linux/kdev_t.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/mutex.h>
31 32
32struct class *class3270; 33struct class *class3270;
33 34
@@ -59,7 +60,7 @@ struct raw3270 {
59#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ 60#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
60 61
61/* Semaphore to protect global data of raw3270 (devices, views, etc). */ 62/* Semaphore to protect global data of raw3270 (devices, views, etc). */
62static DECLARE_MUTEX(raw3270_sem); 63static DEFINE_MUTEX(raw3270_mutex);
63 64
64/* List of 3270 devices. */ 65/* List of 3270 devices. */
65static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); 66static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
@@ -815,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
815 * number for it. Note: there is no device with minor 0, 816 * number for it. Note: there is no device with minor 0,
816 * see special case for fs3270.c:fs3270_open(). 817 * see special case for fs3270.c:fs3270_open().
817 */ 818 */
818 down(&raw3270_sem); 819 mutex_lock(&raw3270_mutex);
819 /* Keep the list sorted. */ 820 /* Keep the list sorted. */
820 minor = RAW3270_FIRSTMINOR; 821 minor = RAW3270_FIRSTMINOR;
821 rp->minor = -1; 822 rp->minor = -1;
@@ -832,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
832 rp->minor = minor; 833 rp->minor = minor;
833 list_add_tail(&rp->list, &raw3270_devices); 834 list_add_tail(&rp->list, &raw3270_devices);
834 } 835 }
835 up(&raw3270_sem); 836 mutex_unlock(&raw3270_mutex);
836 /* No free minor number? Then give up. */ 837 /* No free minor number? Then give up. */
837 if (rp->minor == -1) 838 if (rp->minor == -1)
838 return -EUSERS; 839 return -EUSERS;
@@ -1003,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
1003 1004
1004 if (minor <= 0) 1005 if (minor <= 0)
1005 return -ENODEV; 1006 return -ENODEV;
1006 down(&raw3270_sem); 1007 mutex_lock(&raw3270_mutex);
1007 rc = -ENODEV; 1008 rc = -ENODEV;
1008 list_for_each_entry(rp, &raw3270_devices, list) { 1009 list_for_each_entry(rp, &raw3270_devices, list) {
1009 if (rp->minor != minor) 1010 if (rp->minor != minor)
@@ -1024,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
1024 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1025 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1025 break; 1026 break;
1026 } 1027 }
1027 up(&raw3270_sem); 1028 mutex_unlock(&raw3270_mutex);
1028 return rc; 1029 return rc;
1029} 1030}
1030 1031
@@ -1038,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
1038 struct raw3270_view *view, *tmp; 1039 struct raw3270_view *view, *tmp;
1039 unsigned long flags; 1040 unsigned long flags;
1040 1041
1041 down(&raw3270_sem); 1042 mutex_lock(&raw3270_mutex);
1042 view = ERR_PTR(-ENODEV); 1043 view = ERR_PTR(-ENODEV);
1043 list_for_each_entry(rp, &raw3270_devices, list) { 1044 list_for_each_entry(rp, &raw3270_devices, list) {
1044 if (rp->minor != minor) 1045 if (rp->minor != minor)
@@ -1057,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
1057 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1058 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1058 break; 1059 break;
1059 } 1060 }
1060 up(&raw3270_sem); 1061 mutex_unlock(&raw3270_mutex);
1061 return view; 1062 return view;
1062} 1063}
1063 1064
@@ -1104,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp)
1104 struct ccw_device *cdev; 1105 struct ccw_device *cdev;
1105 1106
1106 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1107 down(&raw3270_sem); 1108 mutex_lock(&raw3270_mutex);
1108 if (rp->clttydev) 1109 if (rp->clttydev)
1109 class_device_destroy(class3270, 1110 class_device_destroy(class3270,
1110 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
@@ -1112,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp)
1112 class_device_destroy(class3270, 1113 class_device_destroy(class3270,
1113 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1114 list_del_init(&rp->list); 1115 list_del_init(&rp->list);
1115 up(&raw3270_sem); 1116 mutex_unlock(&raw3270_mutex);
1116 1117
1117 /* Disconnect from ccw_device. */ 1118 /* Disconnect from ccw_device. */
1118 cdev = rp->cdev; 1119 cdev = rp->cdev;
@@ -1208,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int))
1208 if (!np) 1209 if (!np)
1209 return -ENOMEM; 1210 return -ENOMEM;
1210 np->notifier = notifier; 1211 np->notifier = notifier;
1211 down(&raw3270_sem); 1212 mutex_lock(&raw3270_mutex);
1212 list_add_tail(&np->list, &raw3270_notifier); 1213 list_add_tail(&np->list, &raw3270_notifier);
1213 list_for_each_entry(rp, &raw3270_devices, list) { 1214 list_for_each_entry(rp, &raw3270_devices, list) {
1214 get_device(&rp->cdev->dev); 1215 get_device(&rp->cdev->dev);
1215 notifier(rp->minor, 1); 1216 notifier(rp->minor, 1);
1216 } 1217 }
1217 up(&raw3270_sem); 1218 mutex_unlock(&raw3270_mutex);
1218 return 0; 1219 return 0;
1219} 1220}
1220 1221
@@ -1222,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
1222{ 1223{
1223 struct raw3270_notifier *np; 1224 struct raw3270_notifier *np;
1224 1225
1225 down(&raw3270_sem); 1226 mutex_lock(&raw3270_mutex);
1226 list_for_each_entry(np, &raw3270_notifier, list) 1227 list_for_each_entry(np, &raw3270_notifier, list)
1227 if (np->notifier == notifier) { 1228 if (np->notifier == notifier) {
1228 list_del(&np->list); 1229 list_del(&np->list);
1229 kfree(np); 1230 kfree(np);
1230 break; 1231 break;
1231 } 1232 }
1232 up(&raw3270_sem); 1233 mutex_unlock(&raw3270_mutex);
1233} 1234}
1234 1235
1235/* 1236/*
@@ -1256,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev)
1256 goto failure; 1257 goto failure;
1257 raw3270_create_attributes(rp); 1258 raw3270_create_attributes(rp);
1258 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1259 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1259 down(&raw3270_sem); 1260 mutex_lock(&raw3270_mutex);
1260 list_for_each_entry(np, &raw3270_notifier, list) 1261 list_for_each_entry(np, &raw3270_notifier, list)
1261 np->notifier(rp->minor, 1); 1262 np->notifier(rp->minor, 1);
1262 up(&raw3270_sem); 1263 mutex_unlock(&raw3270_mutex);
1263 return 0; 1264 return 0;
1264 1265
1265failure: 1266failure:
@@ -1307,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev)
1307 } 1308 }
1308 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1309 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1309 1310
1310 down(&raw3270_sem); 1311 mutex_lock(&raw3270_mutex);
1311 list_for_each_entry(np, &raw3270_notifier, list) 1312 list_for_each_entry(np, &raw3270_notifier, list)
1312 np->notifier(rp->minor, 0); 1313 np->notifier(rp->minor, 0);
1313 up(&raw3270_sem); 1314 mutex_unlock(&raw3270_mutex);
1314 1315
1315 /* Reset 3270 device. */ 1316 /* Reset 3270 device. */
1316 raw3270_reset_device(rp); 1317 raw3270_reset_device(rp);
@@ -1370,13 +1371,13 @@ raw3270_init(void)
1370 rc = ccw_driver_register(&raw3270_ccw_driver); 1371 rc = ccw_driver_register(&raw3270_ccw_driver);
1371 if (rc == 0) { 1372 if (rc == 0) {
1372 /* Create attributes for early (= console) device. */ 1373 /* Create attributes for early (= console) device. */
1373 down(&raw3270_sem); 1374 mutex_lock(&raw3270_mutex);
1374 class3270 = class_create(THIS_MODULE, "3270"); 1375 class3270 = class_create(THIS_MODULE, "3270");
1375 list_for_each_entry(rp, &raw3270_devices, list) { 1376 list_for_each_entry(rp, &raw3270_devices, list) {
1376 get_device(&rp->cdev->dev); 1377 get_device(&rp->cdev->dev);
1377 raw3270_create_attributes(rp); 1378 raw3270_create_attributes(rp);
1378 } 1379 }
1379 up(&raw3270_sem); 1380 mutex_unlock(&raw3270_mutex);
1380 } 1381 }
1381 return rc; 1382 return rc;
1382} 1383}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 95b92f317b6f..395cfc6a344f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
829 device_unregister(&unit->sysfs_device); 829 device_unregister(&unit->sysfs_device);
830} 830}
831 831
832static void *
833zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
834{
835 return kmalloc((size_t) size, gfp_mask);
836}
837
838static void
839zfcp_mempool_free(void *element, void *size)
840{
841 kfree(element);
842}
843
844/* 832/*
845 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI 833 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
846 * commands. 834 * commands.
@@ -853,51 +841,39 @@ static int
853zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 841zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
854{ 842{
855 adapter->pool.fsf_req_erp = 843 adapter->pool.fsf_req_erp =
856 mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR, 844 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
857 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 845 sizeof(struct zfcp_fsf_req_pool_element));
858 sizeof(struct zfcp_fsf_req_pool_element)); 846 if (!adapter->pool.fsf_req_erp)
859
860 if (NULL == adapter->pool.fsf_req_erp)
861 return -ENOMEM; 847 return -ENOMEM;
862 848
863 adapter->pool.fsf_req_scsi = 849 adapter->pool.fsf_req_scsi =
864 mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR, 850 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
865 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 851 sizeof(struct zfcp_fsf_req_pool_element));
866 sizeof(struct zfcp_fsf_req_pool_element)); 852 if (!adapter->pool.fsf_req_scsi)
867
868 if (NULL == adapter->pool.fsf_req_scsi)
869 return -ENOMEM; 853 return -ENOMEM;
870 854
871 adapter->pool.fsf_req_abort = 855 adapter->pool.fsf_req_abort =
872 mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR, 856 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
873 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 857 sizeof(struct zfcp_fsf_req_pool_element));
874 sizeof(struct zfcp_fsf_req_pool_element)); 858 if (!adapter->pool.fsf_req_abort)
875
876 if (NULL == adapter->pool.fsf_req_abort)
877 return -ENOMEM; 859 return -ENOMEM;
878 860
879 adapter->pool.fsf_req_status_read = 861 adapter->pool.fsf_req_status_read =
880 mempool_create(ZFCP_POOL_STATUS_READ_NR, 862 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
881 zfcp_mempool_alloc, zfcp_mempool_free, 863 sizeof(struct zfcp_fsf_req));
882 (void *) sizeof(struct zfcp_fsf_req)); 864 if (!adapter->pool.fsf_req_status_read)
883
884 if (NULL == adapter->pool.fsf_req_status_read)
885 return -ENOMEM; 865 return -ENOMEM;
886 866
887 adapter->pool.data_status_read = 867 adapter->pool.data_status_read =
888 mempool_create(ZFCP_POOL_STATUS_READ_NR, 868 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
889 zfcp_mempool_alloc, zfcp_mempool_free, 869 sizeof(struct fsf_status_read_buffer));
890 (void *) sizeof(struct fsf_status_read_buffer)); 870 if (!adapter->pool.data_status_read)
891
892 if (NULL == adapter->pool.data_status_read)
893 return -ENOMEM; 871 return -ENOMEM;
894 872
895 adapter->pool.data_gid_pn = 873 adapter->pool.data_gid_pn =
896 mempool_create(ZFCP_POOL_DATA_GID_PN_NR, 874 mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR,
897 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 875 sizeof(struct zfcp_gid_pn_data));
898 sizeof(struct zfcp_gid_pn_data)); 876 if (!adapter->pool.data_gid_pn)
899
900 if (NULL == adapter->pool.data_gid_pn)
901 return -ENOMEM; 877 return -ENOMEM;
902 878
903 return 0; 879 return 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 0ab26d01877b..0d2b447c50ed 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1026,7 +1026,7 @@ static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1026 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; 1026 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1027} /* End twa_free_request_id() */ 1027} /* End twa_free_request_id() */
1028 1028
1029/* This function will get parameter table entires from the firmware */ 1029/* This function will get parameter table entries from the firmware */
1030static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) 1030static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1031{ 1031{
1032 TW_Command_Full *full_command_packet; 1032 TW_Command_Full *full_command_packet;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 1c459343292b..bde3d5834ade 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -41,6 +41,8 @@
41#include <linux/stat.h> 41#include <linux/stat.h>
42#include <linux/pci.h> 42#include <linux/pci.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/jiffies.h>
45#include <linux/dma-mapping.h>
44#include <scsi/scsicam.h> 46#include <scsi/scsicam.h>
45 47
46#include <asm/dma.h> 48#include <asm/dma.h>
@@ -676,7 +678,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
676 if (pci_enable_device(PCI_Device)) 678 if (pci_enable_device(PCI_Device))
677 continue; 679 continue;
678 680
679 if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) 681 if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK ))
680 continue; 682 continue;
681 683
682 Bus = PCI_Device->bus->number; 684 Bus = PCI_Device->bus->number;
@@ -831,7 +833,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
831 if (pci_enable_device(PCI_Device)) 833 if (pci_enable_device(PCI_Device))
832 continue; 834 continue;
833 835
834 if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) 836 if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK))
835 continue; 837 continue;
836 838
837 Bus = PCI_Device->bus->number; 839 Bus = PCI_Device->bus->number;
@@ -885,7 +887,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
885 if (pci_enable_device(PCI_Device)) 887 if (pci_enable_device(PCI_Device))
886 continue; 888 continue;
887 889
888 if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) 890 if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK))
889 continue; 891 continue;
890 892
891 Bus = PCI_Device->bus->number; 893 Bus = PCI_Device->bus->number;
@@ -2896,7 +2898,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2896 */ 2898 */
2897 if (HostAdapter->ActiveCommands[TargetID] == 0) 2899 if (HostAdapter->ActiveCommands[TargetID] == 0)
2898 HostAdapter->LastSequencePoint[TargetID] = jiffies; 2900 HostAdapter->LastSequencePoint[TargetID] = jiffies;
2899 else if (jiffies - HostAdapter->LastSequencePoint[TargetID] > 4 * HZ) { 2901 else if (time_after(jiffies, HostAdapter->LastSequencePoint[TargetID] + 4 * HZ)) {
2900 HostAdapter->LastSequencePoint[TargetID] = jiffies; 2902 HostAdapter->LastSequencePoint[TargetID] = jiffies;
2901 QueueTag = BusLogic_OrderedQueueTag; 2903 QueueTag = BusLogic_OrderedQueueTag;
2902 } 2904 }
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 9f45ae1745da..3dce21c78737 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -89,6 +89,7 @@
89#include <linux/string.h> 89#include <linux/string.h>
90#include <linux/ioport.h> 90#include <linux/ioport.h>
91#include <linux/slab.h> 91#include <linux/slab.h>
92#include <linux/dma-mapping.h>
92 93
93#include <asm/io.h> 94#include <asm/io.h>
94#include <asm/irq.h> 95#include <asm/irq.h>
@@ -1052,7 +1053,7 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
1052 1053
1053 if (pci_enable_device(pdev)) 1054 if (pci_enable_device(pdev))
1054 goto out; 1055 goto out;
1055 if (pci_set_dma_mask(pdev, 0xffffffffULL)) { 1056 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1056 printk(KERN_WARNING "Unable to set 32bit DMA " 1057 printk(KERN_WARNING "Unable to set 32bit DMA "
1057 "on inia100 adapter, ignoring.\n"); 1058 "on inia100 adapter, ignoring.\n");
1058 goto out_disable_device; 1059 goto out_disable_device;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a16f8ded8f1d..8df4a0ea3761 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/completion.h> 33#include <linux/completion.h>
34#include <linux/blkdev.h> 34#include <linux/blkdev.h>
35#include <linux/dma-mapping.h>
35#include <asm/semaphore.h> 36#include <asm/semaphore.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37 38
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c2596335549d..720330778648 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -45,6 +45,7 @@
45#include <linux/pci.h> 45#include <linux/pci.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <linux/dma-mapping.h>
48#include <linux/syscalls.h> 49#include <linux/syscalls.h>
49#include <linux/delay.h> 50#include <linux/delay.h>
50#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
@@ -806,8 +807,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
806 * to driver communication memory to be allocated below 2gig 807 * to driver communication memory to be allocated below 2gig
807 */ 808 */
808 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) 809 if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
809 if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) || 810 if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
810 pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL)) 811 pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
811 goto out; 812 goto out;
812 813
813 pci_set_master(pdev); 814 pci_set_master(pdev);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 5227a779c05c..a198d86667e9 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -28,6 +28,7 @@
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/dma-mapping.h>
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/io.h> 33#include <asm/io.h>
33 34
@@ -2631,7 +2632,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2631 if (pci_enable_device(pdev)) 2632 if (pci_enable_device(pdev))
2632 return -EIO; 2633 return -EIO;
2633 2634
2634 if (!pci_set_dma_mask(pdev, 0xFFFFFFFFUL)) { 2635 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
2635 printk(KERN_INFO "atp870u: use 32bit DMA mask.\n"); 2636 printk(KERN_INFO "atp870u: use 32bit DMA mask.\n");
2636 } else { 2637 } else {
2637 printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); 2638 printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 6e6b293dcb28..b1b704a42efd 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/spinlock.h> 58#include <linux/spinlock.h>
59#include <linux/smp_lock.h> 59#include <linux/smp_lock.h>
60#include <linux/dma-mapping.h>
60 61
61#include <linux/timer.h> 62#include <linux/timer.h>
62#include <linux/string.h> 63#include <linux/string.h>
@@ -906,8 +907,8 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
906 } 907 }
907 908
908 pci_set_master(pDev); 909 pci_set_master(pDev);
909 if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) && 910 if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
910 pci_set_dma_mask(pDev, 0xffffffffULL)) 911 pci_set_dma_mask(pDev, DMA_32BIT_MASK))
911 return -EINVAL; 912 return -EINVAL;
912 913
913 base_addr0_phys = pci_resource_start(pDev,0); 914 base_addr0_phys = pci_resource_start(pDev,0);
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index b3f9de8f7595..059eeee4b554 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -490,6 +490,7 @@
490#include <linux/init.h> 490#include <linux/init.h>
491#include <linux/ctype.h> 491#include <linux/ctype.h>
492#include <linux/spinlock.h> 492#include <linux/spinlock.h>
493#include <linux/dma-mapping.h>
493#include <asm/byteorder.h> 494#include <asm/byteorder.h>
494#include <asm/dma.h> 495#include <asm/dma.h>
495#include <asm/io.h> 496#include <asm/io.h>
@@ -1426,7 +1427,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
1426 1427
1427 if (ha->pdev) { 1428 if (ha->pdev) {
1428 pci_set_master(ha->pdev); 1429 pci_set_master(ha->pdev);
1429 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) 1430 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK))
1430 printk("%s: warning, pci_set_dma_mask failed.\n", 1431 printk("%s: warning, pci_set_dma_mask failed.\n",
1431 ha->board_name); 1432 ha->board_name);
1432 } 1433 }
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 62e3cda859af..d5740bbdef3e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -388,6 +388,7 @@
388#include <linux/proc_fs.h> 388#include <linux/proc_fs.h>
389#include <linux/time.h> 389#include <linux/time.h>
390#include <linux/timer.h> 390#include <linux/timer.h>
391#include <linux/dma-mapping.h>
391#ifdef GDTH_RTC 392#ifdef GDTH_RTC
392#include <linux/mc146818rtc.h> 393#include <linux/mc146818rtc.h>
393#endif 394#endif
@@ -671,7 +672,7 @@ static struct file_operations gdth_fops = {
671static struct notifier_block gdth_notifier = { 672static struct notifier_block gdth_notifier = {
672 gdth_halt, NULL, 0 673 gdth_halt, NULL, 0
673}; 674};
674 675static int notifier_disabled = 0;
675 676
676static void gdth_delay(int milliseconds) 677static void gdth_delay(int milliseconds)
677{ 678{
@@ -4527,15 +4528,15 @@ static int __init gdth_detect(struct scsi_host_template *shtp)
4527 if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat &GDT_64BIT)|| 4528 if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat &GDT_64BIT)||
4528 /* 64-bit DMA only supported from FW >= x.43 */ 4529 /* 64-bit DMA only supported from FW >= x.43 */
4529 (!ha->dma64_support)) { 4530 (!ha->dma64_support)) {
4530 if (pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffff)) { 4531 if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
4531 printk(KERN_WARNING "GDT-PCI %d: Unable to set 32-bit DMA\n", hanum); 4532 printk(KERN_WARNING "GDT-PCI %d: Unable to set 32-bit DMA\n", hanum);
4532 err = TRUE; 4533 err = TRUE;
4533 } 4534 }
4534 } else { 4535 } else {
4535 shp->max_cmd_len = 16; 4536 shp->max_cmd_len = 16;
4536 if (!pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffffffffffffULL)) { 4537 if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) {
4537 printk("GDT-PCI %d: 64-bit DMA enabled\n", hanum); 4538 printk("GDT-PCI %d: 64-bit DMA enabled\n", hanum);
4538 } else if (pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffff)) { 4539 } else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
4539 printk(KERN_WARNING "GDT-PCI %d: Unable to set 64/32-bit DMA\n", hanum); 4540 printk(KERN_WARNING "GDT-PCI %d: Unable to set 64/32-bit DMA\n", hanum);
4540 err = TRUE; 4541 err = TRUE;
4541 } 4542 }
@@ -4595,13 +4596,13 @@ static int __init gdth_detect(struct scsi_host_template *shtp)
4595 add_timer(&gdth_timer); 4596 add_timer(&gdth_timer);
4596#endif 4597#endif
4597 major = register_chrdev(0,"gdth",&gdth_fops); 4598 major = register_chrdev(0,"gdth",&gdth_fops);
4599 notifier_disabled = 0;
4598 register_reboot_notifier(&gdth_notifier); 4600 register_reboot_notifier(&gdth_notifier);
4599 } 4601 }
4600 gdth_polling = FALSE; 4602 gdth_polling = FALSE;
4601 return gdth_ctr_vcount; 4603 return gdth_ctr_vcount;
4602} 4604}
4603 4605
4604
4605static int gdth_release(struct Scsi_Host *shp) 4606static int gdth_release(struct Scsi_Host *shp)
4606{ 4607{
4607 int hanum; 4608 int hanum;
@@ -5632,10 +5633,14 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
5632 char cmnd[MAX_COMMAND_SIZE]; 5633 char cmnd[MAX_COMMAND_SIZE];
5633#endif 5634#endif
5634 5635
5636 if (notifier_disabled)
5637 return NOTIFY_OK;
5638
5635 TRACE2(("gdth_halt() event %d\n",(int)event)); 5639 TRACE2(("gdth_halt() event %d\n",(int)event));
5636 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 5640 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
5637 return NOTIFY_DONE; 5641 return NOTIFY_DONE;
5638 5642
5643 notifier_disabled = 1;
5639 printk("GDT-HA: Flushing all host drives .. "); 5644 printk("GDT-HA: Flushing all host drives .. ");
5640 for (hanum = 0; hanum < gdth_ctr_count; ++hanum) { 5645 for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
5641 gdth_flush(hanum); 5646 gdth_flush(hanum);
@@ -5679,7 +5684,6 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
5679#ifdef GDTH_STATISTICS 5684#ifdef GDTH_STATISTICS
5680 del_timer(&gdth_timer); 5685 del_timer(&gdth_timer);
5681#endif 5686#endif
5682 unregister_reboot_notifier(&gdth_notifier);
5683 return NOTIFY_OK; 5687 return NOTIFY_OK;
5684} 5688}
5685 5689
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index ea6f3c0e05d9..0cc7f65b584f 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -127,6 +127,7 @@
127#include <linux/sched.h> 127#include <linux/sched.h>
128#include <linux/slab.h> 128#include <linux/slab.h>
129#include <linux/jiffies.h> 129#include <linux/jiffies.h>
130#include <linux/dma-mapping.h>
130#include <asm/io.h> 131#include <asm/io.h>
131 132
132#include <scsi/scsi.h> 133#include <scsi/scsi.h>
@@ -2780,7 +2781,7 @@ static int tul_NewReturnNumberOfAdapters(void)
2780 if (((dRegValue & 0xFF00) >> 8) == 0xFF) 2781 if (((dRegValue & 0xFF00) >> 8) == 0xFF)
2781 dRegValue = 0; 2782 dRegValue = 0;
2782 wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8)); 2783 wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8));
2783 if (pci_set_dma_mask(pDev, 0xffffffff)) { 2784 if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) {
2784 printk(KERN_WARNING 2785 printk(KERN_WARNING
2785 "i91u: Could not set 32 bit DMA mask\n"); 2786 "i91u: Could not set 32 bit DMA mask\n");
2786 continue; 2787 continue;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 481708d527ae..a4c0b04cfdbd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -179,6 +179,7 @@
179 179
180#include <linux/blkdev.h> 180#include <linux/blkdev.h>
181#include <linux/types.h> 181#include <linux/types.h>
182#include <linux/dma-mapping.h>
182 183
183#include <scsi/sg.h> 184#include <scsi/sg.h>
184 185
@@ -7284,10 +7285,10 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
7284 * are guaranteed to be < 4G. 7285 * are guaranteed to be < 4G.
7285 */ 7286 */
7286 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && 7287 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
7287 !pci_set_dma_mask(ha->pcidev, 0xffffffffffffffffULL)) { 7288 !pci_set_dma_mask(ha->pcidev, DMA_64BIT_MASK)) {
7288 (ha)->flags |= IPS_HA_ENH_SG; 7289 (ha)->flags |= IPS_HA_ENH_SG;
7289 } else { 7290 } else {
7290 if (pci_set_dma_mask(ha->pcidev, 0xffffffffULL) != 0) { 7291 if (pci_set_dma_mask(ha->pcidev, DMA_32BIT_MASK) != 0) {
7291 printk(KERN_WARNING "Unable to set DMA Mask\n"); 7292 printk(KERN_WARNING "Unable to set DMA Mask\n");
7292 return ips_abort_init(ha, index); 7293 return ips_abort_init(ha, index);
7293 } 7294 }
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7b82ff090d42..2068b66822b7 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -3200,8 +3200,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
3200 * Data-Out PDU's within R2T-sequence can be quite big; 3200 * Data-Out PDU's within R2T-sequence can be quite big;
3201 * using mempool 3201 * using mempool
3202 */ 3202 */
3203 ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX, 3203 ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX,
3204 mempool_alloc_slab, mempool_free_slab, taskcache); 3204 taskcache);
3205 if (ctask->datapool == NULL) { 3205 if (ctask->datapool == NULL) {
3206 kfifo_free(ctask->r2tqueue); 3206 kfifo_free(ctask->r2tqueue);
3207 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); 3207 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 352df47bcaca..07017658ac56 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,18 +38,6 @@
38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41static void *
42lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
43{
44 return kmalloc((unsigned long)data, gfp_flags);
45}
46
47static void
48lpfc_pool_kfree(void *obj, void *data)
49{
50 kfree(obj);
51}
52
53int 41int
54lpfc_mem_alloc(struct lpfc_hba * phba) 42lpfc_mem_alloc(struct lpfc_hba * phba)
55{ 43{
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
79 pool->current_count++; 67 pool->current_count++;
80 } 68 }
81 69
82 phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, 70 phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
83 lpfc_pool_kmalloc, lpfc_pool_kfree, 71 sizeof(LPFC_MBOXQ_t));
84 (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
85 if (!phba->mbox_mem_pool) 72 if (!phba->mbox_mem_pool)
86 goto fail_free_mbuf_pool; 73 goto fail_free_mbuf_pool;
87 74
88 phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, 75 phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
89 lpfc_pool_kmalloc, lpfc_pool_kfree, 76 sizeof(struct lpfc_nodelist));
90 (void *)(unsigned long)sizeof(struct lpfc_nodelist));
91 if (!phba->nlp_mem_pool) 77 if (!phba->nlp_mem_pool)
92 goto fail_free_mbox_pool; 78 goto fail_free_mbox_pool;
93 79
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 7144674bc8e6..80b68a2481b3 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -45,6 +45,7 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46#include <linux/pci.h> 46#include <linux/pci.h>
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/dma-mapping.h>
48#include <scsi/scsicam.h> 49#include <scsi/scsicam.h>
49 50
50#include "scsi.h" 51#include "scsi.h"
@@ -2094,7 +2095,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
2094 2095
2095 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); 2096 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
2096 2097
2097 if( pci_set_dma_mask(*pdev, 0xffffffff) != 0 ) { 2098 if( pci_set_dma_mask(*pdev, DMA_32BIT_MASK) != 0 ) {
2098 kfree(*pdev); 2099 kfree(*pdev);
2099 return -1; 2100 return -1;
2100 } 2101 }
@@ -4859,10 +4860,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4859 4860
4860 /* Set the Mode of addressing to 64 bit if we can */ 4861 /* Set the Mode of addressing to 64 bit if we can */
4861 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4862 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4862 pci_set_dma_mask(pdev, 0xffffffffffffffffULL); 4863 pci_set_dma_mask(pdev, DMA_64BIT_MASK);
4863 adapter->has_64bit_addr = 1; 4864 adapter->has_64bit_addr = 1;
4864 } else { 4865 } else {
4865 pci_set_dma_mask(pdev, 0xffffffff); 4866 pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4866 adapter->has_64bit_addr = 0; 4867 adapter->has_64bit_addr = 0;
4867 } 4868 }
4868 4869
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index a279ebb61447..30ee0ef4b459 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -38,6 +38,7 @@
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/ctype.h> 40#include <linux/ctype.h>
41#include <linux/dma-mapping.h>
41 42
42#include <asm/dma.h> 43#include <asm/dma.h>
43#include <asm/system.h> 44#include <asm/system.h>
@@ -2776,7 +2777,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2776 /* 2777 /*
2777 * setup DMA 2778 * setup DMA
2778 */ 2779 */
2779 if (pci_set_dma_mask(PCIDEV, 0xffffffffUL) != 0) { 2780 if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) {
2780 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); 2781 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
2781 goto scsi_unregister; 2782 goto scsi_unregister;
2782 } 2783 }
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 66ea47a9c53c..e3bd4bc339f4 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -49,6 +49,7 @@ static const char * osst_version = "0.99.4";
49#include <linux/blkdev.h> 49#include <linux/blkdev.h>
50#include <linux/moduleparam.h> 50#include <linux/moduleparam.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/jiffies.h>
52#include <asm/uaccess.h> 53#include <asm/uaccess.h>
53#include <asm/dma.h> 54#include <asm/dma.h>
54#include <asm/system.h> 55#include <asm/system.h>
@@ -856,7 +857,7 @@ static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt
856 ) && result >= 0) 857 ) && result >= 0)
857 { 858 {
858#if DEBUG 859#if DEBUG
859 if (debugging || jiffies - startwait >= 2*HZ/OSST_POLL_PER_SEC) 860 if (debugging || time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC))
860 printk (OSST_DEB_MSG 861 printk (OSST_DEB_MSG
861 "%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n", 862 "%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n",
862 name, curr, curr+minlast, STp->first_frame_position, 863 name, curr, curr+minlast, STp->first_frame_position,
@@ -867,7 +868,7 @@ static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt
867 return 0; 868 return 0;
868 } 869 }
869#if DEBUG 870#if DEBUG
870 if (jiffies - startwait >= 2*HZ/OSST_POLL_PER_SEC && notyetprinted) 871 if (time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC) && notyetprinted)
871 { 872 {
872 printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n", 873 printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n",
873 name, curr, curr+minlast, STp->first_frame_position, 874 name, curr, curr+minlast, STp->first_frame_position,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 05347eed9dd5..fee843fab1c7 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -18,6 +18,7 @@
18#include <linux/parport.h> 18#include <linux/parport.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/jiffies.h>
21#include <asm/io.h> 22#include <asm/io.h>
22 23
23#include <scsi/scsi.h> 24#include <scsi/scsi.h>
@@ -726,7 +727,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
726 retv--; 727 retv--;
727 728
728 if (retv) { 729 if (retv) {
729 if ((jiffies - dev->jstart) > (1 * HZ)) { 730 if (time_after(jiffies, dev->jstart + (1 * HZ))) {
730 printk 731 printk
731 ("ppa: Parallel port cable is unplugged!!\n"); 732 ("ppa: Parallel port cable is unplugged!!\n");
732 ppa_fail(dev, DID_BUS_BUSY); 733 ppa_fail(dev, DID_BUS_BUSY);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index e0230249fa0f..5a48e55f9418 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -350,6 +350,7 @@
350#include <linux/pci_ids.h> 350#include <linux/pci_ids.h>
351#include <linux/interrupt.h> 351#include <linux/interrupt.h>
352#include <linux/init.h> 352#include <linux/init.h>
353#include <linux/dma-mapping.h>
353 354
354#include <asm/io.h> 355#include <asm/io.h>
355#include <asm/irq.h> 356#include <asm/irq.h>
@@ -4321,7 +4322,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4321 4322
4322#ifdef QLA_64BIT_PTR 4323#ifdef QLA_64BIT_PTR
4323 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { 4324 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
4324 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4325 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
4325 printk(KERN_WARNING "scsi(%li): Unable to set a " 4326 printk(KERN_WARNING "scsi(%li): Unable to set a "
4326 "suitable DMA mask - aborting\n", ha->host_no); 4327 "suitable DMA mask - aborting\n", ha->host_no);
4327 error = -ENODEV; 4328 error = -ENODEV;
@@ -4331,7 +4332,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4331 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", 4332 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4332 ha->host_no); 4333 ha->host_no);
4333#else 4334#else
4334 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4335 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
4335 printk(KERN_WARNING "scsi(%li): Unable to set a " 4336 printk(KERN_WARNING "scsi(%li): Unable to set a "
4336 "suitable DMA mask - aborting\n", ha->host_no); 4337 "suitable DMA mask - aborting\n", ha->host_no);
4337 error = -ENODEV; 4338 error = -ENODEV;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 029bbf461bb2..017729c59a49 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2154,8 +2154,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
2154 int rval; 2154 int rval;
2155 2155
2156 rval = QLA_SUCCESS; 2156 rval = QLA_SUCCESS;
2157 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 2157 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2158 mempool_free_slab, srb_cachep);
2159 if (ha->srb_mempool == NULL) { 2158 if (ha->srb_mempool == NULL) {
2160 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); 2159 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
2161 rval = QLA_FUNCTION_FAILED; 2160 rval = QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qlogicfc.c b/drivers/scsi/qlogicfc.c
index 94ef3f08d378..52b224a5d6fd 100644
--- a/drivers/scsi/qlogicfc.c
+++ b/drivers/scsi/qlogicfc.c
@@ -61,6 +61,8 @@
61#include <linux/unistd.h> 61#include <linux/unistd.h>
62#include <linux/spinlock.h> 62#include <linux/spinlock.h>
63#include <linux/interrupt.h> 63#include <linux/interrupt.h>
64#include <linux/dma-mapping.h>
65#include <linux/jiffies.h>
64#include <asm/io.h> 66#include <asm/io.h>
65#include <asm/irq.h> 67#include <asm/irq.h>
66#include "scsi.h" 68#include "scsi.h"
@@ -737,8 +739,8 @@ static int isp2x00_detect(struct scsi_host_template * tmpt)
737 continue; 739 continue;
738 740
739 /* Try to configure DMA attributes. */ 741 /* Try to configure DMA attributes. */
740 if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) && 742 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
741 pci_set_dma_mask(pdev, 0xffffffffULL)) 743 pci_set_dma_mask(pdev, DMA_32BIT_MASK))
742 continue; 744 continue;
743 745
744 host = scsi_register(tmpt, sizeof(struct isp2x00_hostdata)); 746 host = scsi_register(tmpt, sizeof(struct isp2x00_hostdata));
@@ -1325,7 +1327,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
1325 cmd->control_flags = cpu_to_le16(CFLAG_READ); 1327 cmd->control_flags = cpu_to_le16(CFLAG_READ);
1326 1328
1327 if (Cmnd->device->tagged_supported) { 1329 if (Cmnd->device->tagged_supported) {
1328 if ((jiffies - hostdata->tag_ages[Cmnd->device->id]) > (2 * ISP_TIMEOUT)) { 1330 if (time_after(jiffies, hostdata->tag_ages[Cmnd->device->id] + (2 * ISP_TIMEOUT))) {
1329 cmd->control_flags |= cpu_to_le16(CFLAG_ORDERED_TAG); 1331 cmd->control_flags |= cpu_to_le16(CFLAG_ORDERED_TAG);
1330 hostdata->tag_ages[Cmnd->device->id] = jiffies; 1332 hostdata->tag_ages[Cmnd->device->id] = jiffies;
1331 } else 1333 } else
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 1fd5fc6d0fe3..c7e78dcf09df 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -24,6 +24,7 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/jiffies.h>
27 28
28#include <asm/byteorder.h> 29#include <asm/byteorder.h>
29 30
@@ -1017,7 +1018,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
1017 if (Cmnd->device->tagged_supported) { 1018 if (Cmnd->device->tagged_supported) {
1018 if (qpti->cmd_count[Cmnd->device->id] == 0) 1019 if (qpti->cmd_count[Cmnd->device->id] == 0)
1019 qpti->tag_ages[Cmnd->device->id] = jiffies; 1020 qpti->tag_ages[Cmnd->device->id] = jiffies;
1020 if ((jiffies - qpti->tag_ages[Cmnd->device->id]) > (5*HZ)) { 1021 if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
1021 cmd->control_flags = CFLAG_ORDERED_TAG; 1022 cmd->control_flags = CFLAG_ORDERED_TAG;
1022 qpti->tag_ages[Cmnd->device->id] = jiffies; 1023 qpti->tag_ages[Cmnd->device->id] = jiffies;
1023 } else 1024 } else
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ede158d08d9d..8f010a314a3d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void)
1787 sgp->name); 1787 sgp->name);
1788 } 1788 }
1789 1789
1790 sgp->pool = mempool_create(SG_MEMPOOL_SIZE, 1790 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1791 mempool_alloc_slab, mempool_free_slab, 1791 sgp->slab);
1792 sgp->slab);
1793 if (!sgp->pool) { 1792 if (!sgp->pool) {
1794 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1793 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1795 sgp->name); 1794 sgp->name);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 5996d3cd0ed8..674b15c78f68 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1528,7 +1528,7 @@ static int serial8250_startup(struct uart_port *port)
1528 1528
1529 /* 1529 /*
1530 * Clear the FIFO buffers and disable them. 1530 * Clear the FIFO buffers and disable them.
1531 * (they will be reeanbled in set_termios()) 1531 * (they will be reenabled in set_termios())
1532 */ 1532 */
1533 serial8250_clear_fifos(up); 1533 serial8250_clear_fifos(up);
1534 1534
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index b848b7d94412..3bdee64d1a99 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -483,7 +483,7 @@ static int serial_txx9_startup(struct uart_port *port)
483 483
484 /* 484 /*
485 * Clear the FIFO buffers and disable them. 485 * Clear the FIFO buffers and disable them.
486 * (they will be reeanbled in set_termios()) 486 * (they will be reenabled in set_termios())
487 */ 487 */
488 sio_set(up, TXX9_SIFCR, 488 sio_set(up, TXX9_SIFCR,
489 TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); 489 TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 9fe2283d91e5..1c4396c2962d 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -641,7 +641,7 @@ static int sunsu_startup(struct uart_port *port)
641 641
642 /* 642 /*
643 * Clear the FIFO buffers and disable them. 643 * Clear the FIFO buffers and disable them.
644 * (they will be reeanbled in set_termios()) 644 * (they will be reenabled in set_termios())
645 */ 645 */
646 if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) { 646 if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
647 serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); 647 serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index 3c987f49f6b4..e166fffea86b 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -29,6 +29,7 @@
29#include <linux/kmod.h> 29#include <linux/kmod.h>
30#include <linux/sem.h> 30#include <linux/sem.h>
31#include <linux/devfs_fs_kernel.h> 31#include <linux/devfs_fs_kernel.h>
32#include <linux/mutex.h>
32 33
33#define PHONE_NUM_DEVICES 256 34#define PHONE_NUM_DEVICES 256
34 35
@@ -37,7 +38,7 @@
37 */ 38 */
38 39
39static struct phone_device *phone_device[PHONE_NUM_DEVICES]; 40static struct phone_device *phone_device[PHONE_NUM_DEVICES];
40static DECLARE_MUTEX(phone_lock); 41static DEFINE_MUTEX(phone_lock);
41 42
42/* 43/*
43 * Open a phone device. 44 * Open a phone device.
@@ -48,19 +49,19 @@ static int phone_open(struct inode *inode, struct file *file)
48 unsigned int minor = iminor(inode); 49 unsigned int minor = iminor(inode);
49 int err = 0; 50 int err = 0;
50 struct phone_device *p; 51 struct phone_device *p;
51 struct file_operations *old_fops, *new_fops = NULL; 52 const struct file_operations *old_fops, *new_fops = NULL;
52 53
53 if (minor >= PHONE_NUM_DEVICES) 54 if (minor >= PHONE_NUM_DEVICES)
54 return -ENODEV; 55 return -ENODEV;
55 56
56 down(&phone_lock); 57 mutex_lock(&phone_lock);
57 p = phone_device[minor]; 58 p = phone_device[minor];
58 if (p) 59 if (p)
59 new_fops = fops_get(p->f_op); 60 new_fops = fops_get(p->f_op);
60 if (!new_fops) { 61 if (!new_fops) {
61 up(&phone_lock); 62 mutex_unlock(&phone_lock);
62 request_module("char-major-%d-%d", PHONE_MAJOR, minor); 63 request_module("char-major-%d-%d", PHONE_MAJOR, minor);
63 down(&phone_lock); 64 mutex_lock(&phone_lock);
64 p = phone_device[minor]; 65 p = phone_device[minor];
65 if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) 66 if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL)
66 { 67 {
@@ -78,7 +79,7 @@ static int phone_open(struct inode *inode, struct file *file)
78 } 79 }
79 fops_put(old_fops); 80 fops_put(old_fops);
80end: 81end:
81 up(&phone_lock); 82 mutex_unlock(&phone_lock);
82 return err; 83 return err;
83} 84}
84 85
@@ -100,18 +101,18 @@ int phone_register_device(struct phone_device *p, int unit)
100 end = unit + 1; /* enter the loop at least one time */ 101 end = unit + 1; /* enter the loop at least one time */
101 } 102 }
102 103
103 down(&phone_lock); 104 mutex_lock(&phone_lock);
104 for (i = base; i < end; i++) { 105 for (i = base; i < end; i++) {
105 if (phone_device[i] == NULL) { 106 if (phone_device[i] == NULL) {
106 phone_device[i] = p; 107 phone_device[i] = p;
107 p->minor = i; 108 p->minor = i;
108 devfs_mk_cdev(MKDEV(PHONE_MAJOR,i), 109 devfs_mk_cdev(MKDEV(PHONE_MAJOR,i),
109 S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i); 110 S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i);
110 up(&phone_lock); 111 mutex_unlock(&phone_lock);
111 return 0; 112 return 0;
112 } 113 }
113 } 114 }
114 up(&phone_lock); 115 mutex_unlock(&phone_lock);
115 return -ENFILE; 116 return -ENFILE;
116} 117}
117 118
@@ -121,12 +122,12 @@ int phone_register_device(struct phone_device *p, int unit)
121 122
122void phone_unregister_device(struct phone_device *pfd) 123void phone_unregister_device(struct phone_device *pfd)
123{ 124{
124 down(&phone_lock); 125 mutex_lock(&phone_lock);
125 if (phone_device[pfd->minor] != pfd) 126 if (phone_device[pfd->minor] != pfd)
126 panic("phone: bad unregister"); 127 panic("phone: bad unregister");
127 devfs_remove("phone/%d", pfd->minor); 128 devfs_remove("phone/%d", pfd->minor);
128 phone_device[pfd->minor] = NULL; 129 phone_device[pfd->minor] = NULL;
129 up(&phone_lock); 130 mutex_unlock(&phone_lock);
130} 131}
131 132
132 133
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 37b13368c814..b263a54a13c0 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -24,15 +24,15 @@
24#include "usb.h" 24#include "usb.h"
25 25
26#define MAX_USB_MINORS 256 26#define MAX_USB_MINORS 256
27static struct file_operations *usb_minors[MAX_USB_MINORS]; 27static const struct file_operations *usb_minors[MAX_USB_MINORS];
28static DEFINE_SPINLOCK(minor_lock); 28static DEFINE_SPINLOCK(minor_lock);
29 29
30static int usb_open(struct inode * inode, struct file * file) 30static int usb_open(struct inode * inode, struct file * file)
31{ 31{
32 int minor = iminor(inode); 32 int minor = iminor(inode);
33 struct file_operations *c; 33 const struct file_operations *c;
34 int err = -ENODEV; 34 int err = -ENODEV;
35 struct file_operations *old_fops, *new_fops = NULL; 35 const struct file_operations *old_fops, *new_fops = NULL;
36 36
37 spin_lock (&minor_lock); 37 spin_lock (&minor_lock);
38 c = usb_minors[minor]; 38 c = usb_minors[minor];
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 4b55285de9a0..fe0ed54fa0ae 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -16,57 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include "usb.h" 17#include "usb.h"
18 18
19 19static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
20static struct notifier_block *usb_notifier_list;
21static DEFINE_MUTEX(usb_notifier_lock);
22
23static void usb_notifier_chain_register(struct notifier_block **list,
24 struct notifier_block *n)
25{
26 mutex_lock(&usb_notifier_lock);
27 while (*list) {
28 if (n->priority > (*list)->priority)
29 break;
30 list = &((*list)->next);
31 }
32 n->next = *list;
33 *list = n;
34 mutex_unlock(&usb_notifier_lock);
35}
36
37static void usb_notifier_chain_unregister(struct notifier_block **nl,
38 struct notifier_block *n)
39{
40 mutex_lock(&usb_notifier_lock);
41 while ((*nl)!=NULL) {
42 if ((*nl)==n) {
43 *nl = n->next;
44 goto exit;
45 }
46 nl=&((*nl)->next);
47 }
48exit:
49 mutex_unlock(&usb_notifier_lock);
50}
51
52static int usb_notifier_call_chain(struct notifier_block **n,
53 unsigned long val, void *v)
54{
55 int ret=NOTIFY_DONE;
56 struct notifier_block *nb = *n;
57
58 mutex_lock(&usb_notifier_lock);
59 while (nb) {
60 ret = nb->notifier_call(nb,val,v);
61 if (ret&NOTIFY_STOP_MASK) {
62 goto exit;
63 }
64 nb = nb->next;
65 }
66exit:
67 mutex_unlock(&usb_notifier_lock);
68 return ret;
69}
70 20
71/** 21/**
72 * usb_register_notify - register a notifier callback whenever a usb change happens 22 * usb_register_notify - register a notifier callback whenever a usb change happens
@@ -76,7 +26,7 @@ exit:
76 */ 26 */
77void usb_register_notify(struct notifier_block *nb) 27void usb_register_notify(struct notifier_block *nb)
78{ 28{
79 usb_notifier_chain_register(&usb_notifier_list, nb); 29 blocking_notifier_chain_register(&usb_notifier_list, nb);
80} 30}
81EXPORT_SYMBOL_GPL(usb_register_notify); 31EXPORT_SYMBOL_GPL(usb_register_notify);
82 32
@@ -89,27 +39,28 @@ EXPORT_SYMBOL_GPL(usb_register_notify);
89 */ 39 */
90void usb_unregister_notify(struct notifier_block *nb) 40void usb_unregister_notify(struct notifier_block *nb)
91{ 41{
92 usb_notifier_chain_unregister(&usb_notifier_list, nb); 42 blocking_notifier_chain_unregister(&usb_notifier_list, nb);
93} 43}
94EXPORT_SYMBOL_GPL(usb_unregister_notify); 44EXPORT_SYMBOL_GPL(usb_unregister_notify);
95 45
96 46
97void usb_notify_add_device(struct usb_device *udev) 47void usb_notify_add_device(struct usb_device *udev)
98{ 48{
99 usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev); 49 blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
100} 50}
101 51
102void usb_notify_remove_device(struct usb_device *udev) 52void usb_notify_remove_device(struct usb_device *udev)
103{ 53{
104 usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_REMOVE, udev); 54 blocking_notifier_call_chain(&usb_notifier_list,
55 USB_DEVICE_REMOVE, udev);
105} 56}
106 57
107void usb_notify_add_bus(struct usb_bus *ubus) 58void usb_notify_add_bus(struct usb_bus *ubus)
108{ 59{
109 usb_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus); 60 blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
110} 61}
111 62
112void usb_notify_remove_bus(struct usb_bus *ubus) 63void usb_notify_remove_bus(struct usb_bus *ubus)
113{ 64{
114 usb_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus); 65 blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
115} 66}
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index b44cfda76b61..3f618ce6998d 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1581,7 +1581,7 @@ restart:
1581 1581
1582static struct inode * 1582static struct inode *
1583gadgetfs_create_file (struct super_block *sb, char const *name, 1583gadgetfs_create_file (struct super_block *sb, char const *name,
1584 void *data, struct file_operations *fops, 1584 void *data, const struct file_operations *fops,
1585 struct dentry **dentry_p); 1585 struct dentry **dentry_p);
1586 1586
1587static int activate_ep_files (struct dev_data *dev) 1587static int activate_ep_files (struct dev_data *dev)
@@ -1955,7 +1955,7 @@ module_param (default_perm, uint, 0644);
1955 1955
1956static struct inode * 1956static struct inode *
1957gadgetfs_make_inode (struct super_block *sb, 1957gadgetfs_make_inode (struct super_block *sb,
1958 void *data, struct file_operations *fops, 1958 void *data, const struct file_operations *fops,
1959 int mode) 1959 int mode)
1960{ 1960{
1961 struct inode *inode = new_inode (sb); 1961 struct inode *inode = new_inode (sb);
@@ -1979,7 +1979,7 @@ gadgetfs_make_inode (struct super_block *sb,
1979 */ 1979 */
1980static struct inode * 1980static struct inode *
1981gadgetfs_create_file (struct super_block *sb, char const *name, 1981gadgetfs_create_file (struct super_block *sb, char const *name,
1982 void *data, struct file_operations *fops, 1982 void *data, const struct file_operations *fops,
1983 struct dentry **dentry_p) 1983 struct dentry **dentry_p)
1984{ 1984{
1985 struct dentry *dentry; 1985 struct dentry *dentry;
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 372527a83593..682bf2215660 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -158,7 +158,7 @@ static int ohci_s3c2410_hub_control (
158 "s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n", 158 "s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
159 hcd, typeReq, wValue, wIndex, buf, wLength); 159 hcd, typeReq, wValue, wIndex, buf, wLength);
160 160
161 /* if we are only an humble host without any special capabilites 161 /* if we are only an humble host without any special capabilities
162 * process the request straight away and exit */ 162 * process the request straight away and exit */
163 163
164 if (info == NULL) { 164 if (info == NULL) {
diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c
index 9c5ab251370c..f7ac9d6b9856 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/usb/net/zaurus.c
@@ -217,7 +217,7 @@ static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf)
217 * with devices that use it and those that don't. 217 * with devices that use it and those that don't.
218 */ 218 */
219 if ((detail->bDetailData[1] & ~0x02) != 0x01) { 219 if ((detail->bDetailData[1] & ~0x02) != 0x01) {
220 /* bmDataCapabilites == 0 would be fine too, 220 /* bmDataCapabilities == 0 would be fine too,
221 * but framing is minidriver-coupled for now. 221 * but framing is minidriver-coupled for now.
222 */ 222 */
223bad_detail: 223bad_detail:
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index fdebd60a3250..22e9d696fdd2 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -70,6 +70,22 @@ config FB_MACMODES
70 depends on FB 70 depends on FB
71 default n 71 default n
72 72
73config FB_FIRMWARE_EDID
74 bool "Enable firmware EDID"
75 depends on FB
76 default y
77 ---help---
78 This enables access to the EDID transferred from the firmware.
79 On the i386, this is from the Video BIOS. Enable this if DDC/I2C
80 transfers do not work for your driver and if you are using
81 nvidiafb, i810fb or savagefb.
82
83 In general, choosing Y for this option is safe. If you
84 experience extremely long delays while booting before you get
85 something on your display, try setting this to N. Matrox cards in
86 combination with certain motherboards and monitors are known to
87 suffer from this problem.
88
73config FB_MODE_HELPERS 89config FB_MODE_HELPERS
74 bool "Enable Video Mode Handling Helpers" 90 bool "Enable Video Mode Handling Helpers"
75 depends on FB 91 depends on FB
@@ -1202,6 +1218,17 @@ config FB_AU1100
1202 bool "Au1100 LCD Driver" 1218 bool "Au1100 LCD Driver"
1203 depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y 1219 depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y
1204 1220
1221config FB_AU1200
1222 bool "Au1200 LCD Driver"
1223 depends on FB && MIPS && SOC_AU1200
1224 select FB_CFB_FILLRECT
1225 select FB_CFB_COPYAREA
1226 select FB_CFB_IMAGEBLIT
1227 help
1228 This is the framebuffer driver for the AMD Au1200 SOC. It can drive
1229 various panels and CRTs by passing in kernel cmd line option
1230 au1200fb:panel=<name>.
1231
1205source "drivers/video/geode/Kconfig" 1232source "drivers/video/geode/Kconfig"
1206 1233
1207config FB_FFB 1234config FB_FFB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index aa434e725c0d..cb90218515ac 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
86obj-$(CONFIG_FB_PXA) += pxafb.o 86obj-$(CONFIG_FB_PXA) += pxafb.o
87obj-$(CONFIG_FB_W100) += w100fb.o 87obj-$(CONFIG_FB_W100) += w100fb.o
88obj-$(CONFIG_FB_AU1100) += au1100fb.o 88obj-$(CONFIG_FB_AU1100) += au1100fb.o
89obj-$(CONFIG_FB_AU1200) += au1200fb.o
89obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o 90obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o
90obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o 91obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o
91obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o 92obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 76448d6ae896..98baecccb3fd 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -1308,7 +1308,7 @@ static int __init acornfb_probe(struct platform_device *dev)
1308 /* 1308 /*
1309 * Try to select a suitable default mode 1309 * Try to select a suitable default mode
1310 */ 1310 */
1311 for (i = 0; i < sizeof(modedb) / sizeof(*modedb); i++) { 1311 for (i = 0; i < ARRAY_SIZE(modedb); i++) {
1312 unsigned long hs; 1312 unsigned long hs;
1313 1313
1314 hs = modedb[i].refresh * 1314 hs = modedb[i].refresh *
@@ -1380,7 +1380,7 @@ static int __init acornfb_probe(struct platform_device *dev)
1380 */ 1380 */
1381 free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE); 1381 free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
1382#endif 1382#endif
1383 1383
1384 fb_info.fix.smem_len = size; 1384 fb_info.fix.smem_len = size;
1385 current_par.palette_size = VIDC_PALETTE_SIZE; 1385 current_par.palette_size = VIDC_PALETTE_SIZE;
1386 1386
@@ -1391,7 +1391,7 @@ static int __init acornfb_probe(struct platform_device *dev)
1391 */ 1391 */
1392 do { 1392 do {
1393 rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, 1393 rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb,
1394 sizeof(modedb) / sizeof(*modedb), 1394 ARRAY_SIZE(modedb),
1395 &acornfb_default_mode, DEFAULT_BPP); 1395 &acornfb_default_mode, DEFAULT_BPP);
1396 /* 1396 /*
1397 * If we found an exact match, all ok. 1397 * If we found an exact match, all ok.
@@ -1408,7 +1408,7 @@ static int __init acornfb_probe(struct platform_device *dev)
1408 break; 1408 break;
1409 1409
1410 rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, 1410 rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb,
1411 sizeof(modedb) / sizeof(*modedb), 1411 ARRAY_SIZE(modedb),
1412 &acornfb_default_mode, DEFAULT_BPP); 1412 &acornfb_default_mode, DEFAULT_BPP);
1413 if (rc) 1413 if (rc)
1414 break; 1414 break;
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index c924d81f7978..29f9f0dfe3b4 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -353,8 +353,6 @@ struct chips_init_reg {
353 unsigned char data; 353 unsigned char data;
354}; 354};
355 355
356#define N_ELTS(x) (sizeof(x) / sizeof(x[0]))
357
358static struct chips_init_reg chips_init_sr[] = 356static struct chips_init_reg chips_init_sr[] =
359{ 357{
360 {0x00, 0x03}, /* Reset register */ 358 {0x00, 0x03}, /* Reset register */
@@ -460,22 +458,22 @@ static void __devinit chips_hw_init(struct fb_info *p)
460{ 458{
461 int i; 459 int i;
462 460
463 for (i = 0; i < N_ELTS(chips_init_xr); ++i) 461 for (i = 0; i < ARRAY_SIZE(chips_init_xr); ++i)
464 write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); 462 write_xr(chips_init_xr[i].addr, chips_init_xr[i].data);
465 write_xr(0x81, 0x12); 463 write_xr(0x81, 0x12);
466 write_xr(0x82, 0x08); 464 write_xr(0x82, 0x08);
467 write_xr(0x20, 0x00); 465 write_xr(0x20, 0x00);
468 for (i = 0; i < N_ELTS(chips_init_sr); ++i) 466 for (i = 0; i < ARRAY_SIZE(chips_init_sr); ++i)
469 write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); 467 write_sr(chips_init_sr[i].addr, chips_init_sr[i].data);
470 for (i = 0; i < N_ELTS(chips_init_gr); ++i) 468 for (i = 0; i < ARRAY_SIZE(chips_init_gr); ++i)
471 write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); 469 write_gr(chips_init_gr[i].addr, chips_init_gr[i].data);
472 for (i = 0; i < N_ELTS(chips_init_ar); ++i) 470 for (i = 0; i < ARRAY_SIZE(chips_init_ar); ++i)
473 write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); 471 write_ar(chips_init_ar[i].addr, chips_init_ar[i].data);
474 /* Enable video output in attribute index register */ 472 /* Enable video output in attribute index register */
475 writeb(0x20, mmio_base + 0x780); 473 writeb(0x20, mmio_base + 0x780);
476 for (i = 0; i < N_ELTS(chips_init_cr); ++i) 474 for (i = 0; i < ARRAY_SIZE(chips_init_cr); ++i)
477 write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); 475 write_cr(chips_init_cr[i].addr, chips_init_cr[i].data);
478 for (i = 0; i < N_ELTS(chips_init_fr); ++i) 476 for (i = 0; i < ARRAY_SIZE(chips_init_fr); ++i)
479 write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); 477 write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
480} 478}
481 479
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 620c9a934e0e..821c6da8e42c 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1725,9 +1725,9 @@ static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *
1725 strcpy(video_card, "Rage128 XX "); 1725 strcpy(video_card, "Rage128 XX ");
1726 video_card[8] = ent->device >> 8; 1726 video_card[8] = ent->device >> 8;
1727 video_card[9] = ent->device & 0xFF; 1727 video_card[9] = ent->device & 0xFF;
1728 1728
1729 /* range check to make sure */ 1729 /* range check to make sure */
1730 if (ent->driver_data < (sizeof(r128_family)/sizeof(char *))) 1730 if (ent->driver_data < ARRAY_SIZE(r128_family))
1731 strncat(video_card, r128_family[ent->driver_data], sizeof(video_card)); 1731 strncat(video_card, r128_family[ent->driver_data], sizeof(video_card));
1732 1732
1733 printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); 1733 printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 485be386a8ff..e799fcca365a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -434,7 +434,7 @@ static int __devinit correct_chipset(struct atyfb_par *par)
434 const char *name; 434 const char *name;
435 int i; 435 int i;
436 436
437 for (i = sizeof(aty_chips) / sizeof(*aty_chips) - 1; i >= 0; i--) 437 for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
438 if (par->pci_id == aty_chips[i].pci_id) 438 if (par->pci_id == aty_chips[i].pci_id)
439 break; 439 break;
440 440
@@ -2168,10 +2168,10 @@ static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2168 2168
2169 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { 2169 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
2170 refresh_tbl = ragexl_tbl; 2170 refresh_tbl = ragexl_tbl;
2171 size = sizeof(ragexl_tbl)/sizeof(int); 2171 size = ARRAY_SIZE(ragexl_tbl);
2172 } else { 2172 } else {
2173 refresh_tbl = ragepro_tbl; 2173 refresh_tbl = ragepro_tbl;
2174 size = sizeof(ragepro_tbl)/sizeof(int); 2174 size = ARRAY_SIZE(ragepro_tbl);
2175 } 2175 }
2176 2176
2177 for (i=0; i < size; i++) { 2177 for (i=0; i < size; i++) {
@@ -2298,6 +2298,10 @@ static int __init aty_init(struct fb_info *info, const char *name)
2298 case CLK_ATI18818_1: 2298 case CLK_ATI18818_1:
2299 par->pll_ops = &aty_pll_ati18818_1; 2299 par->pll_ops = &aty_pll_ati18818_1;
2300 break; 2300 break;
2301 case CLK_IBMRGB514:
2302 par->pll_ops = &aty_pll_ibm514;
2303 break;
2304#if 0 /* dead code */
2301 case CLK_STG1703: 2305 case CLK_STG1703:
2302 par->pll_ops = &aty_pll_stg1703; 2306 par->pll_ops = &aty_pll_stg1703;
2303 break; 2307 break;
@@ -2307,9 +2311,7 @@ static int __init aty_init(struct fb_info *info, const char *name)
2307 case CLK_ATT20C408: 2311 case CLK_ATT20C408:
2308 par->pll_ops = &aty_pll_att20c408; 2312 par->pll_ops = &aty_pll_att20c408;
2309 break; 2313 break;
2310 case CLK_IBMRGB514: 2314#endif
2311 par->pll_ops = &aty_pll_ibm514;
2312 break;
2313 default: 2315 default:
2314 PRINTKI("aty_init: CLK type not implemented yet!"); 2316 PRINTKI("aty_init: CLK type not implemented yet!");
2315 par->pll_ops = &aty_pll_unsupported; 2317 par->pll_ops = &aty_pll_unsupported;
@@ -3397,7 +3399,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3397 struct atyfb_par *par; 3399 struct atyfb_par *par;
3398 int i, rc = -ENOMEM; 3400 int i, rc = -ENOMEM;
3399 3401
3400 for (i = sizeof(aty_chips) / sizeof(*aty_chips) - 1; i >= 0; i--) 3402 for (i = ARRAY_SIZE(aty_chips); i >= 0; i--)
3401 if (pdev->device == aty_chips[i].pci_id) 3403 if (pdev->device == aty_chips[i].pci_id)
3402 break; 3404 break;
3403 3405
diff --git a/drivers/video/aty/mach64_gx.c b/drivers/video/aty/mach64_gx.c
index 01fdff79483b..2045639cb671 100644
--- a/drivers/video/aty/mach64_gx.c
+++ b/drivers/video/aty/mach64_gx.c
@@ -149,8 +149,7 @@ static int aty_var_to_pll_514(const struct fb_info *info, u32 vclk_per,
149 }; 149 };
150 int i; 150 int i;
151 151
152 for (i = 0; i < sizeof(RGB514_clocks) / sizeof(*RGB514_clocks); 152 for (i = 0; i < ARRAY_SIZE(RGB514_clocks); i++)
153 i++)
154 if (vclk_per <= RGB514_clocks[i].limit) { 153 if (vclk_per <= RGB514_clocks[i].limit) {
155 pll->ibm514.m = RGB514_clocks[i].m; 154 pll->ibm514.m = RGB514_clocks[i].m;
156 pll->ibm514.n = RGB514_clocks[i].n; 155 pll->ibm514.n = RGB514_clocks[i].n;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index c9f0c5a07e6e..9a6b5b39b88e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -1067,7 +1067,7 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
1067 1067
1068 1068
1069 if (regno > 255) 1069 if (regno > 255)
1070 return 1; 1070 return -EINVAL;
1071 1071
1072 red >>= 8; 1072 red >>= 8;
1073 green >>= 8; 1073 green >>= 8;
@@ -1086,9 +1086,9 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
1086 pindex = regno * 8; 1086 pindex = regno * 8;
1087 1087
1088 if (rinfo->depth == 16 && regno > 63) 1088 if (rinfo->depth == 16 && regno > 63)
1089 return 1; 1089 return -EINVAL;
1090 if (rinfo->depth == 15 && regno > 31) 1090 if (rinfo->depth == 15 && regno > 31)
1091 return 1; 1091 return -EINVAL;
1092 1092
1093 /* For 565, the green component is mixed one order 1093 /* For 565, the green component is mixed one order
1094 * below 1094 * below
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
new file mode 100644
index 000000000000..b367de30b98c
--- /dev/null
+++ b/drivers/video/au1200fb.c
@@ -0,0 +1,3844 @@
1/*
2 * BRIEF MODULE DESCRIPTION
3 * Au1200 LCD Driver.
4 *
5 * Copyright 2004-2005 AMD
6 * Author: AMD
7 *
8 * Based on:
9 * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
10 * Created 28 Dec 1997 by Geert Uytterhoeven
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * You should have received a copy of the GNU General Public License along
29 * with this program; if not, write to the Free Software Foundation, Inc.,
30 * 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/platform_device.h>
35#include <linux/kernel.h>
36#include <linux/errno.h>
37#include <linux/string.h>
38#include <linux/mm.h>
39#include <linux/fb.h>
40#include <linux/init.h>
41#include <linux/interrupt.h>
42#include <linux/ctype.h>
43#include <linux/dma-mapping.h>
44
45#include <asm/mach-au1x00/au1000.h>
46#include "au1200fb.h"
47
48#ifdef CONFIG_PM
49#include <asm/mach-au1x00/au1xxx_pm.h>
50#endif
51
52#ifndef CONFIG_FB_AU1200_DEVS
53#define CONFIG_FB_AU1200_DEVS 4
54#endif
55
56#define DRIVER_NAME "au1200fb"
57#define DRIVER_DESC "LCD controller driver for AU1200 processors"
58
59#define DEBUG 1
60
61#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
62#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
63#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg)
64
65#if DEBUG
66#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg)
67#else
68#define print_dbg(f, arg...) do {} while (0)
69#endif
70
71
72#define AU1200_LCD_FB_IOCTL 0x46FF
73
74#define AU1200_LCD_SET_SCREEN 1
75#define AU1200_LCD_GET_SCREEN 2
76#define AU1200_LCD_SET_WINDOW 3
77#define AU1200_LCD_GET_WINDOW 4
78#define AU1200_LCD_SET_PANEL 5
79#define AU1200_LCD_GET_PANEL 6
80
81#define SCREEN_SIZE (1<< 1)
82#define SCREEN_BACKCOLOR (1<< 2)
83#define SCREEN_BRIGHTNESS (1<< 3)
84#define SCREEN_COLORKEY (1<< 4)
85#define SCREEN_MASK (1<< 5)
86
87struct au1200_lcd_global_regs_t {
88 unsigned int flags;
89 unsigned int xsize;
90 unsigned int ysize;
91 unsigned int backcolor;
92 unsigned int brightness;
93 unsigned int colorkey;
94 unsigned int mask;
95 unsigned int panel_choice;
96 char panel_desc[80];
97
98};
99
100#define WIN_POSITION (1<< 0)
101#define WIN_ALPHA_COLOR (1<< 1)
102#define WIN_ALPHA_MODE (1<< 2)
103#define WIN_PRIORITY (1<< 3)
104#define WIN_CHANNEL (1<< 4)
105#define WIN_BUFFER_FORMAT (1<< 5)
106#define WIN_COLOR_ORDER (1<< 6)
107#define WIN_PIXEL_ORDER (1<< 7)
108#define WIN_SIZE (1<< 8)
109#define WIN_COLORKEY_MODE (1<< 9)
110#define WIN_DOUBLE_BUFFER_MODE (1<< 10)
111#define WIN_RAM_ARRAY_MODE (1<< 11)
112#define WIN_BUFFER_SCALE (1<< 12)
113#define WIN_ENABLE (1<< 13)
114
115struct au1200_lcd_window_regs_t {
116 unsigned int flags;
117 unsigned int xpos;
118 unsigned int ypos;
119 unsigned int alpha_color;
120 unsigned int alpha_mode;
121 unsigned int priority;
122 unsigned int channel;
123 unsigned int buffer_format;
124 unsigned int color_order;
125 unsigned int pixel_order;
126 unsigned int xsize;
127 unsigned int ysize;
128 unsigned int colorkey_mode;
129 unsigned int double_buffer_mode;
130 unsigned int ram_array_mode;
131 unsigned int xscale;
132 unsigned int yscale;
133 unsigned int enable;
134};
135
136
137struct au1200_lcd_iodata_t {
138 unsigned int subcmd;
139 struct au1200_lcd_global_regs_t global;
140 struct au1200_lcd_window_regs_t window;
141};
142
143#if defined(__BIG_ENDIAN)
144#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11
145#else
146#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00
147#endif
148#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565
149
150/* Private, per-framebuffer management information (independent of the panel itself) */
151struct au1200fb_device {
152 struct fb_info fb_info; /* FB driver info record */
153
154 int plane;
155 unsigned char* fb_mem; /* FrameBuffer memory map */
156 unsigned int fb_len;
157 dma_addr_t fb_phys;
158};
159
160static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS];
161/********************************************************************/
162
163/* LCD controller restrictions */
164#define AU1200_LCD_MAX_XRES 1280
165#define AU1200_LCD_MAX_YRES 1024
166#define AU1200_LCD_MAX_BPP 32
167#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? */
168#define AU1200_LCD_NBR_PALETTE_ENTRIES 256
169
170/* Default number of visible screen buffer to allocate */
171#define AU1200FB_NBR_VIDEO_BUFFERS 1
172
173/********************************************************************/
174
175static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
176static int window_index = 2; /* default is zero */
177static int panel_index = 2; /* default is zero */
178static struct window_settings *win;
179static struct panel_settings *panel;
180static int noblanking = 1;
181static int nohwcursor = 0;
182
183struct window_settings {
184 unsigned char name[64];
185 uint32 mode_backcolor;
186 uint32 mode_colorkey;
187 uint32 mode_colorkeymsk;
188 struct {
189 int xres;
190 int yres;
191 int xpos;
192 int ypos;
193 uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */
194 uint32 mode_winenable;
195 } w[4];
196};
197
198#if defined(__BIG_ENDIAN)
199#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00
200#else
201#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
202#endif
203
204extern int board_au1200fb_panel_init (void);
205extern int board_au1200fb_panel_shutdown (void);
206
207#ifdef CONFIG_PM
208int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
209 au1xxx_request_t request, void *data);
210au1xxx_power_dev_t *LCD_pm_dev;
211#endif
212
213/*
214 * Default window configurations
215 */
216static struct window_settings windows[] = {
217 { /* Index 0 */
218 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
219 /* mode_backcolor */ 0x006600ff,
220 /* mode_colorkey,msk*/ 0, 0,
221 {
222 {
223 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
224 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
225 LCD_WINCTRL1_PO_16BPP,
226 /* mode_winenable*/ LCD_WINENABLE_WEN0,
227 },
228 {
229 /* xres, yres, xpos, ypos */ 100, 100, 100, 100,
230 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
231 LCD_WINCTRL1_PO_16BPP |
232 LCD_WINCTRL1_PIPE,
233 /* mode_winenable*/ LCD_WINENABLE_WEN1,
234 },
235 {
236 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
237 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
238 LCD_WINCTRL1_PO_16BPP,
239 /* mode_winenable*/ 0,
240 },
241 {
242 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
243 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
244 LCD_WINCTRL1_PO_16BPP |
245 LCD_WINCTRL1_PIPE,
246 /* mode_winenable*/ 0,
247 },
248 },
249 },
250
251 { /* Index 1 */
252 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
253 /* mode_backcolor */ 0x006600ff,
254 /* mode_colorkey,msk*/ 0, 0,
255 {
256 {
257 /* xres, yres, xpos, ypos */ 320, 240, 5, 5,
258 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP |
259 LCD_WINCTRL1_PO_00,
260 /* mode_winenable*/ LCD_WINENABLE_WEN0,
261 },
262 {
263 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
264 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565
265 | LCD_WINCTRL1_PO_16BPP,
266 /* mode_winenable*/ 0,
267 },
268 {
269 /* xres, yres, xpos, ypos */ 100, 100, 0, 0,
270 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
271 LCD_WINCTRL1_PO_16BPP |
272 LCD_WINCTRL1_PIPE,
273 /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
274 },
275 {
276 /* xres, yres, xpos, ypos */ 200, 25, 0, 0,
277 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
278 LCD_WINCTRL1_PO_16BPP |
279 LCD_WINCTRL1_PIPE,
280 /* mode_winenable*/ 0,
281 },
282 },
283 },
284 { /* Index 2 */
285 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
286 /* mode_backcolor */ 0x006600ff,
287 /* mode_colorkey,msk*/ 0, 0,
288 {
289 {
290 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
291 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
292 LCD_WINCTRL1_PO_16BPP,
293 /* mode_winenable*/ LCD_WINENABLE_WEN0,
294 },
295 {
296 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
297 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
298 LCD_WINCTRL1_PO_16BPP,
299 /* mode_winenable*/ 0,
300 },
301 {
302 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
303 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP |
304 LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE,
305 /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
306 },
307 {
308 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
309 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
310 LCD_WINCTRL1_PO_16BPP |
311 LCD_WINCTRL1_PIPE,
312 /* mode_winenable*/ 0,
313 },
314 },
315 },
316 /* Need VGA 640 @ 24bpp, @ 32bpp */
317 /* Need VGA 800 @ 24bpp, @ 32bpp */
318 /* Need VGA 1024 @ 24bpp, @ 32bpp */
319};
320
321/*
322 * Controller configurations for various panels.
323 */
324
325struct panel_settings
326{
327 const char name[25]; /* Full name <vendor>_<model> */
328
329 struct fb_monspecs monspecs; /* FB monitor specs */
330
331 /* panel timings */
332 uint32 mode_screen;
333 uint32 mode_horztiming;
334 uint32 mode_verttiming;
335 uint32 mode_clkcontrol;
336 uint32 mode_pwmdiv;
337 uint32 mode_pwmhi;
338 uint32 mode_outmask;
339 uint32 mode_fifoctrl;
340 uint32 mode_toyclksrc;
341 uint32 mode_backlight;
342 uint32 mode_auxpll;
343 int (*device_init)(void);
344 int (*device_shutdown)(void);
345#define Xres min_xres
346#define Yres min_yres
347 u32 min_xres; /* Minimum horizontal resolution */
348 u32 max_xres; /* Maximum horizontal resolution */
349 u32 min_yres; /* Minimum vertical resolution */
350 u32 max_yres; /* Maximum vertical resolution */
351};
352
353/********************************************************************/
354/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */
355
356/* List of panels known to work with the AU1200 LCD controller.
357 * To add a new panel, enter the same specifications as the
358 * Generic_TFT one, and MAKE SURE that it doesn't conflicts
359 * with the controller restrictions. Restrictions are:
360 *
361 * STN color panels: max_bpp <= 12
362 * STN mono panels: max_bpp <= 4
363 * TFT panels: max_bpp <= 16
364 * max_xres <= 800
365 * max_yres <= 600
366 */
367static struct panel_settings known_lcd_panels[] =
368{
369 [0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */
370 .name = "QVGA_320x240",
371 .monspecs = {
372 .modedb = NULL,
373 .modedb_len = 0,
374 .hfmin = 30000,
375 .hfmax = 70000,
376 .vfmin = 60,
377 .vfmax = 60,
378 .dclkmin = 6000000,
379 .dclkmax = 28000000,
380 .input = FB_DISP_RGB,
381 },
382 .mode_screen = LCD_SCREEN_SX_N(320) |
383 LCD_SCREEN_SY_N(240),
384 .mode_horztiming = 0x00c4623b,
385 .mode_verttiming = 0x00502814,
386 .mode_clkcontrol = 0x00020002, /* /4=24Mhz */
387 .mode_pwmdiv = 0x00000000,
388 .mode_pwmhi = 0x00000000,
389 .mode_outmask = 0x00FFFFFF,
390 .mode_fifoctrl = 0x2f2f2f2f,
391 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
392 .mode_backlight = 0x00000000,
393 .mode_auxpll = 8, /* 96MHz AUXPLL */
394 .device_init = NULL,
395 .device_shutdown = NULL,
396 320, 320,
397 240, 240,
398 },
399
400 [1] = { /* VGA 640x480 H:30.3kHz V:58Hz */
401 .name = "VGA_640x480",
402 .monspecs = {
403 .modedb = NULL,
404 .modedb_len = 0,
405 .hfmin = 30000,
406 .hfmax = 70000,
407 .vfmin = 60,
408 .vfmax = 60,
409 .dclkmin = 6000000,
410 .dclkmax = 28000000,
411 .input = FB_DISP_RGB,
412 },
413 .mode_screen = 0x13f9df80,
414 .mode_horztiming = 0x003c5859,
415 .mode_verttiming = 0x00741201,
416 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
417 .mode_pwmdiv = 0x00000000,
418 .mode_pwmhi = 0x00000000,
419 .mode_outmask = 0x00FFFFFF,
420 .mode_fifoctrl = 0x2f2f2f2f,
421 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
422 .mode_backlight = 0x00000000,
423 .mode_auxpll = 8, /* 96MHz AUXPLL */
424 .device_init = NULL,
425 .device_shutdown = NULL,
426 640, 480,
427 640, 480,
428 },
429
430 [2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */
431 .name = "SVGA_800x600",
432 .monspecs = {
433 .modedb = NULL,
434 .modedb_len = 0,
435 .hfmin = 30000,
436 .hfmax = 70000,
437 .vfmin = 60,
438 .vfmax = 60,
439 .dclkmin = 6000000,
440 .dclkmax = 28000000,
441 .input = FB_DISP_RGB,
442 },
443 .mode_screen = 0x18fa5780,
444 .mode_horztiming = 0x00dc7e77,
445 .mode_verttiming = 0x00584805,
446 .mode_clkcontrol = 0x00020000, /* /2=48Mhz */
447 .mode_pwmdiv = 0x00000000,
448 .mode_pwmhi = 0x00000000,
449 .mode_outmask = 0x00FFFFFF,
450 .mode_fifoctrl = 0x2f2f2f2f,
451 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
452 .mode_backlight = 0x00000000,
453 .mode_auxpll = 8, /* 96MHz AUXPLL */
454 .device_init = NULL,
455 .device_shutdown = NULL,
456 800, 800,
457 600, 600,
458 },
459
460 [3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */
461 .name = "XVGA_1024x768",
462 .monspecs = {
463 .modedb = NULL,
464 .modedb_len = 0,
465 .hfmin = 30000,
466 .hfmax = 70000,
467 .vfmin = 60,
468 .vfmax = 60,
469 .dclkmin = 6000000,
470 .dclkmax = 28000000,
471 .input = FB_DISP_RGB,
472 },
473 .mode_screen = 0x1ffaff80,
474 .mode_horztiming = 0x007d0e57,
475 .mode_verttiming = 0x00740a01,
476 .mode_clkcontrol = 0x000A0000, /* /1 */
477 .mode_pwmdiv = 0x00000000,
478 .mode_pwmhi = 0x00000000,
479 .mode_outmask = 0x00FFFFFF,
480 .mode_fifoctrl = 0x2f2f2f2f,
481 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
482 .mode_backlight = 0x00000000,
483 .mode_auxpll = 6, /* 72MHz AUXPLL */
484 .device_init = NULL,
485 .device_shutdown = NULL,
486 1024, 1024,
487 768, 768,
488 },
489
490 [4] = { /* XVGA XVGA 1280x1024 H:68.5kHz V:65Hz */
491 .name = "XVGA_1280x1024",
492 .monspecs = {
493 .modedb = NULL,
494 .modedb_len = 0,
495 .hfmin = 30000,
496 .hfmax = 70000,
497 .vfmin = 60,
498 .vfmax = 60,
499 .dclkmin = 6000000,
500 .dclkmax = 28000000,
501 .input = FB_DISP_RGB,
502 },
503 .mode_screen = 0x27fbff80,
504 .mode_horztiming = 0x00cdb2c7,
505 .mode_verttiming = 0x00600002,
506 .mode_clkcontrol = 0x000A0000, /* /1 */
507 .mode_pwmdiv = 0x00000000,
508 .mode_pwmhi = 0x00000000,
509 .mode_outmask = 0x00FFFFFF,
510 .mode_fifoctrl = 0x2f2f2f2f,
511 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
512 .mode_backlight = 0x00000000,
513 .mode_auxpll = 10, /* 120MHz AUXPLL */
514 .device_init = NULL,
515 .device_shutdown = NULL,
516 1280, 1280,
517 1024, 1024,
518 },
519
520 [5] = { /* Samsung 1024x768 TFT */
521 .name = "Samsung_1024x768_TFT",
522 .monspecs = {
523 .modedb = NULL,
524 .modedb_len = 0,
525 .hfmin = 30000,
526 .hfmax = 70000,
527 .vfmin = 60,
528 .vfmax = 60,
529 .dclkmin = 6000000,
530 .dclkmax = 28000000,
531 .input = FB_DISP_RGB,
532 },
533 .mode_screen = 0x1ffaff80,
534 .mode_horztiming = 0x018cc677,
535 .mode_verttiming = 0x00241217,
536 .mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */
537 .mode_pwmdiv = 0x8000063f, /* SCB 0x0 */
538 .mode_pwmhi = 0x03400000, /* SCB 0x0 */
539 .mode_outmask = 0x00FFFFFF,
540 .mode_fifoctrl = 0x2f2f2f2f,
541 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
542 .mode_backlight = 0x00000000,
543 .mode_auxpll = 8, /* 96MHz AUXPLL */
544 .device_init = board_au1200fb_panel_init,
545 .device_shutdown = board_au1200fb_panel_shutdown,
546 1024, 1024,
547 768, 768,
548 },
549
550 [6] = { /* Toshiba 640x480 TFT */
551 .name = "Toshiba_640x480_TFT",
552 .monspecs = {
553 .modedb = NULL,
554 .modedb_len = 0,
555 .hfmin = 30000,
556 .hfmax = 70000,
557 .vfmin = 60,
558 .vfmax = 60,
559 .dclkmin = 6000000,
560 .dclkmax = 28000000,
561 .input = FB_DISP_RGB,
562 },
563 .mode_screen = LCD_SCREEN_SX_N(640) |
564 LCD_SCREEN_SY_N(480),
565 .mode_horztiming = LCD_HORZTIMING_HPW_N(96) |
566 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51),
567 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
568 LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32),
569 .mode_clkcontrol = 0x00000000, /* /4=24Mhz */
570 .mode_pwmdiv = 0x8000063f,
571 .mode_pwmhi = 0x03400000,
572 .mode_outmask = 0x00fcfcfc,
573 .mode_fifoctrl = 0x2f2f2f2f,
574 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
575 .mode_backlight = 0x00000000,
576 .mode_auxpll = 8, /* 96MHz AUXPLL */
577 .device_init = board_au1200fb_panel_init,
578 .device_shutdown = board_au1200fb_panel_shutdown,
579 640, 480,
580 640, 480,
581 },
582
583 [7] = { /* Sharp 320x240 TFT */
584 .name = "Sharp_320x240_TFT",
585 .monspecs = {
586 .modedb = NULL,
587 .modedb_len = 0,
588 .hfmin = 12500,
589 .hfmax = 20000,
590 .vfmin = 38,
591 .vfmax = 81,
592 .dclkmin = 4500000,
593 .dclkmax = 6800000,
594 .input = FB_DISP_RGB,
595 },
596 .mode_screen = LCD_SCREEN_SX_N(320) |
597 LCD_SCREEN_SY_N(240),
598 .mode_horztiming = LCD_HORZTIMING_HPW_N(60) |
599 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2),
600 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
601 LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5),
602 .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/
603 .mode_pwmdiv = 0x8000063f,
604 .mode_pwmhi = 0x03400000,
605 .mode_outmask = 0x00fcfcfc,
606 .mode_fifoctrl = 0x2f2f2f2f,
607 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
608 .mode_backlight = 0x00000000,
609 .mode_auxpll = 8, /* 96MHz AUXPLL */
610 .device_init = board_au1200fb_panel_init,
611 .device_shutdown = board_au1200fb_panel_shutdown,
612 320, 320,
613 240, 240,
614 },
615
616 [8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */
617 .name = "Toppoly_TD070WGCB2",
618 .monspecs = {
619 .modedb = NULL,
620 .modedb_len = 0,
621 .hfmin = 30000,
622 .hfmax = 70000,
623 .vfmin = 60,
624 .vfmax = 60,
625 .dclkmin = 6000000,
626 .dclkmax = 28000000,
627 .input = FB_DISP_RGB,
628 },
629 .mode_screen = LCD_SCREEN_SX_N(856) |
630 LCD_SCREEN_SY_N(480),
631 .mode_horztiming = LCD_HORZTIMING_HND2_N(43) |
632 LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114),
633 .mode_verttiming = LCD_VERTTIMING_VND2_N(20) |
634 LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4),
635 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
636 .mode_pwmdiv = 0x8000063f,
637 .mode_pwmhi = 0x03400000,
638 .mode_outmask = 0x00fcfcfc,
639 .mode_fifoctrl = 0x2f2f2f2f,
640 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
641 .mode_backlight = 0x00000000,
642 .mode_auxpll = 8, /* 96MHz AUXPLL */
643 .device_init = board_au1200fb_panel_init,
644 .device_shutdown = board_au1200fb_panel_shutdown,
645 856, 856,
646 480, 480,
647 },
648};
649
650#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
651
652/********************************************************************/
653
654#ifdef CONFIG_PM
655static int set_brightness(unsigned int brightness)
656{
657 unsigned int hi1, divider;
658
659 /* limit brightness pwm duty to >= 30/1600 */
660 if (brightness < 30) {
661 brightness = 30;
662 }
663 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
664 hi1 = (lcd->pwmhi >> 16) + 1;
665 hi1 = (((brightness & 0xFF) + 1) * divider >> 8);
666 lcd->pwmhi &= 0xFFFF;
667 lcd->pwmhi |= (hi1 << 16);
668
669 return brightness;
670}
671#endif /* CONFIG_PM */
672
673static int winbpp (unsigned int winctrl1)
674{
675 int bits = 0;
676
677 /* how many bits are needed for each pixel format */
678 switch (winctrl1 & LCD_WINCTRL1_FRM) {
679 case LCD_WINCTRL1_FRM_1BPP:
680 bits = 1;
681 break;
682 case LCD_WINCTRL1_FRM_2BPP:
683 bits = 2;
684 break;
685 case LCD_WINCTRL1_FRM_4BPP:
686 bits = 4;
687 break;
688 case LCD_WINCTRL1_FRM_8BPP:
689 bits = 8;
690 break;
691 case LCD_WINCTRL1_FRM_12BPP:
692 case LCD_WINCTRL1_FRM_16BPP655:
693 case LCD_WINCTRL1_FRM_16BPP565:
694 case LCD_WINCTRL1_FRM_16BPP556:
695 case LCD_WINCTRL1_FRM_16BPPI1555:
696 case LCD_WINCTRL1_FRM_16BPPI5551:
697 case LCD_WINCTRL1_FRM_16BPPA1555:
698 case LCD_WINCTRL1_FRM_16BPPA5551:
699 bits = 16;
700 break;
701 case LCD_WINCTRL1_FRM_24BPP:
702 case LCD_WINCTRL1_FRM_32BPP:
703 bits = 32;
704 break;
705 }
706
707 return bits;
708}
709
710static int fbinfo2index (struct fb_info *fb_info)
711{
712 int i;
713
714 for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) {
715 if (fb_info == (struct fb_info *)(&_au1200fb_devices[i].fb_info))
716 return i;
717 }
718 printk("au1200fb: ERROR: fbinfo2index failed!\n");
719 return -1;
720}
721
722static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
723 int xpos, int ypos)
724{
725 uint32 winctrl0, winctrl1, winenable, fb_offset = 0;
726 int xsz, ysz;
727
728 /* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */
729
730 winctrl0 = lcd->window[plane].winctrl0;
731 winctrl1 = lcd->window[plane].winctrl1;
732 winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN);
733 winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY);
734
735 /* Check for off-screen adjustments */
736 xsz = win->w[plane].xres;
737 ysz = win->w[plane].yres;
738 if ((xpos + win->w[plane].xres) > panel->Xres) {
739 /* Off-screen to the right */
740 xsz = panel->Xres - xpos; /* off by 1 ??? */
741 /*printk("off screen right\n");*/
742 }
743
744 if ((ypos + win->w[plane].yres) > panel->Yres) {
745 /* Off-screen to the bottom */
746 ysz = panel->Yres - ypos; /* off by 1 ??? */
747 /*printk("off screen bottom\n");*/
748 }
749
750 if (xpos < 0) {
751 /* Off-screen to the left */
752 xsz = win->w[plane].xres + xpos;
753 fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8);
754 xpos = 0;
755 /*printk("off screen left\n");*/
756 }
757
758 if (ypos < 0) {
759 /* Off-screen to the top */
760 ysz = win->w[plane].yres + ypos;
761 /* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */
762 ypos = 0;
763 /*printk("off screen top\n");*/
764 }
765
766 /* record settings */
767 win->w[plane].xpos = xpos;
768 win->w[plane].ypos = ypos;
769
770 xsz -= 1;
771 ysz -= 1;
772 winctrl0 |= (xpos << 21);
773 winctrl0 |= (ypos << 10);
774 winctrl1 |= (xsz << 11);
775 winctrl1 |= (ysz << 0);
776
777 /* Disable the window while making changes, then restore WINEN */
778 winenable = lcd->winenable & (1 << plane);
779 au_sync();
780 lcd->winenable &= ~(1 << plane);
781 lcd->window[plane].winctrl0 = winctrl0;
782 lcd->window[plane].winctrl1 = winctrl1;
783 lcd->window[plane].winbuf0 =
784 lcd->window[plane].winbuf1 = fbdev->fb_phys;
785 lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
786 lcd->winenable |= winenable;
787 au_sync();
788
789 return 0;
790}
791
792static void au1200_setpanel (struct panel_settings *newpanel)
793{
794 /*
795 * Perform global setup/init of LCD controller
796 */
797 uint32 winenable;
798
799 /* Make sure all windows disabled */
800 winenable = lcd->winenable;
801 lcd->winenable = 0;
802 au_sync();
803 /*
804 * Ensure everything is disabled before reconfiguring
805 */
806 if (lcd->screen & LCD_SCREEN_SEN) {
807 /* Wait for vertical sync period */
808 lcd->intstatus = LCD_INT_SS;
809 while ((lcd->intstatus & LCD_INT_SS) == 0) {
810 au_sync();
811 }
812
813 lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/
814
815 do {
816 lcd->intstatus = lcd->intstatus; /*clear interrupts*/
817 au_sync();
818 /*wait for controller to shut down*/
819 } while ((lcd->intstatus & LCD_INT_SD) == 0);
820
821 /* Call shutdown of current panel (if up) */
822 /* this must occur last, because if an external clock is driving
823 the controller, the clock cannot be turned off before first
824 shutting down the controller.
825 */
826 if (panel->device_shutdown != NULL)
827 panel->device_shutdown();
828 }
829
830 /* Newpanel == NULL indicates a shutdown operation only */
831 if (newpanel == NULL)
832 return;
833
834 panel = newpanel;
835
836 printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres);
837
838 /*
839 * Setup clocking if internal LCD clock source (assumes sys_auxpll valid)
840 */
841 if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
842 {
843 uint32 sys_clksrc;
844 au_writel(panel->mode_auxpll, SYS_AUXPLL);
845 sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
846 sys_clksrc |= panel->mode_toyclksrc;
847 au_writel(sys_clksrc, SYS_CLKSRC);
848 }
849
850 /*
851 * Configure panel timings
852 */
853 lcd->screen = panel->mode_screen;
854 lcd->horztiming = panel->mode_horztiming;
855 lcd->verttiming = panel->mode_verttiming;
856 lcd->clkcontrol = panel->mode_clkcontrol;
857 lcd->pwmdiv = panel->mode_pwmdiv;
858 lcd->pwmhi = panel->mode_pwmhi;
859 lcd->outmask = panel->mode_outmask;
860 lcd->fifoctrl = panel->mode_fifoctrl;
861 au_sync();
862
863 /* fixme: Check window settings to make sure still valid
864 * for new geometry */
865#if 0
866 au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos);
867 au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos);
868 au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos);
869 au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos);
870#endif
871 lcd->winenable = winenable;
872
873 /*
874 * Re-enable screen now that it is configured
875 */
876 lcd->screen |= LCD_SCREEN_SEN;
877 au_sync();
878
879 /* Call init of panel */
880 if (panel->device_init != NULL) panel->device_init();
881
882 /* FIX!!!! not appropriate on panel change!!! Global setup/init */
883 lcd->intenable = 0;
884 lcd->intstatus = ~0;
885 lcd->backcolor = win->mode_backcolor;
886
887 /* Setup Color Key - FIX!!! */
888 lcd->colorkey = win->mode_colorkey;
889 lcd->colorkeymsk = win->mode_colorkeymsk;
890
891 /* Setup HWCursor - FIX!!! Need to support this eventually */
892 lcd->hwc.cursorctrl = 0;
893 lcd->hwc.cursorpos = 0;
894 lcd->hwc.cursorcolor0 = 0;
895 lcd->hwc.cursorcolor1 = 0;
896 lcd->hwc.cursorcolor2 = 0;
897 lcd->hwc.cursorcolor3 = 0;
898
899
900#if 0
901#define D(X) printk("%25s: %08X\n", #X, X)
902 D(lcd->screen);
903 D(lcd->horztiming);
904 D(lcd->verttiming);
905 D(lcd->clkcontrol);
906 D(lcd->pwmdiv);
907 D(lcd->pwmhi);
908 D(lcd->outmask);
909 D(lcd->fifoctrl);
910 D(lcd->window[0].winctrl0);
911 D(lcd->window[0].winctrl1);
912 D(lcd->window[0].winctrl2);
913 D(lcd->window[0].winbuf0);
914 D(lcd->window[0].winbuf1);
915 D(lcd->window[0].winbufctrl);
916 D(lcd->window[1].winctrl0);
917 D(lcd->window[1].winctrl1);
918 D(lcd->window[1].winctrl2);
919 D(lcd->window[1].winbuf0);
920 D(lcd->window[1].winbuf1);
921 D(lcd->window[1].winbufctrl);
922 D(lcd->window[2].winctrl0);
923 D(lcd->window[2].winctrl1);
924 D(lcd->window[2].winctrl2);
925 D(lcd->window[2].winbuf0);
926 D(lcd->window[2].winbuf1);
927 D(lcd->window[2].winbufctrl);
928 D(lcd->window[3].winctrl0);
929 D(lcd->window[3].winctrl1);
930 D(lcd->window[3].winctrl2);
931 D(lcd->window[3].winbuf0);
932 D(lcd->window[3].winbuf1);
933 D(lcd->window[3].winbufctrl);
934 D(lcd->winenable);
935 D(lcd->intenable);
936 D(lcd->intstatus);
937 D(lcd->backcolor);
938 D(lcd->winenable);
939 D(lcd->colorkey);
940 D(lcd->colorkeymsk);
941 D(lcd->hwc.cursorctrl);
942 D(lcd->hwc.cursorpos);
943 D(lcd->hwc.cursorcolor0);
944 D(lcd->hwc.cursorcolor1);
945 D(lcd->hwc.cursorcolor2);
946 D(lcd->hwc.cursorcolor3);
947#endif
948}
949
950static void au1200_setmode(struct au1200fb_device *fbdev)
951{
952 int plane = fbdev->plane;
953 /* Window/plane setup */
954 lcd->window[plane].winctrl1 = ( 0
955 | LCD_WINCTRL1_PRI_N(plane)
956 | win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */
957 ) ;
958
959 au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos);
960
961 lcd->window[plane].winctrl2 = ( 0
962 | LCD_WINCTRL2_CKMODE_00
963 | LCD_WINCTRL2_DBM
964 | LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length)
965 | LCD_WINCTRL2_SCX_1
966 | LCD_WINCTRL2_SCY_1
967 ) ;
968 lcd->winenable |= win->w[plane].mode_winenable;
969 au_sync();
970}
971
972
/* Inline helpers */

/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/

/* True for any panel whose screen type field is <= LCD_SCREEN_PT_CDSTN;
 * used below to choose between color and mono framebuffer visuals. */
#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN)
979
/* Bitfields format supported by the controller. */
/* Indexed by the WINCTRL1[FRM] format field (hence the ">> 25" on the
 * designators and at the lookup sites).  Each row lists the fb_bitfield
 * {offset, length, msb_right} for Red, Green, Blue and Transp. */
static struct fb_bitfield rgb_bitfields[][4] = {
	/* Red, Green, Blue, Transp */
	[LCD_WINCTRL1_FRM_16BPP655 >> 25] =
		{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPP565 >> 25] =
		{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPP556 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPI1555 >> 25] =
		{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPI5551 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPA1555 >> 25] =
		{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },

	[LCD_WINCTRL1_FRM_16BPPA5551 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },

	[LCD_WINCTRL1_FRM_24BPP >> 25] =
		{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } },

	/* NOTE(review): 32bpp reports transp length 0 at offset 24 -- the
	 * top byte appears intentionally unused; confirm vs. hardware. */
	[LCD_WINCTRL1_FRM_32BPP >> 25] =
		{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } },
};
1010
1011/*-------------------------------------------------------------------------*/
1012
1013/* Helpers */
1014
1015static void au1200fb_update_fbinfo(struct fb_info *fbi)
1016{
1017 /* FIX!!!! This also needs to take the window pixel format into account!!! */
1018
1019 /* Update var-dependent FB info */
1020 if (panel_is_color(panel)) {
1021 if (fbi->var.bits_per_pixel <= 8) {
1022 /* palettized */
1023 fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR;
1024 fbi->fix.line_length = fbi->var.xres_virtual /
1025 (8/fbi->var.bits_per_pixel);
1026 } else {
1027 /* non-palettized */
1028 fbi->fix.visual = FB_VISUAL_TRUECOLOR;
1029 fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8);
1030 }
1031 } else {
1032 /* mono FIX!!! mono 8 and 4 bits */
1033 fbi->fix.visual = FB_VISUAL_MONO10;
1034 fbi->fix.line_length = fbi->var.xres_virtual / 8;
1035 }
1036
1037 fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual;
1038 print_dbg("line length: %d\n", fbi->fix.line_length);
1039 print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel);
1040}
1041
1042/*-------------------------------------------------------------------------*/
1043
1044/* AU1200 framebuffer driver */
1045
1046/* fb_check_var
1047 * Validate var settings with hardware restrictions and modify it if necessary
1048 */
1049static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
1050 struct fb_info *fbi)
1051{
1052 struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
1053 u32 pixclock;
1054 int screen_size, plane;
1055
1056 plane = fbdev->plane;
1057
1058 /* Make sure that the mode respect all LCD controller and
1059 * panel restrictions. */
1060 var->xres = win->w[plane].xres;
1061 var->yres = win->w[plane].yres;
1062
1063 /* No need for virtual resolution support */
1064 var->xres_virtual = var->xres;
1065 var->yres_virtual = var->yres;
1066
1067 var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1);
1068
1069 screen_size = var->xres_virtual * var->yres_virtual;
1070 if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8);
1071 else screen_size /= (8/var->bits_per_pixel);
1072
1073 if (fbdev->fb_len < screen_size)
1074 return -EINVAL; /* Virtual screen is to big, abort */
1075
1076 /* FIX!!!! what are the implicaitons of ignoring this for windows ??? */
1077 /* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel
1078 * clock can only be obtain by dividing this value by an even integer.
1079 * Fallback to a slower pixel clock if necessary. */
1080 pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
1081 pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2));
1082
1083 if (AU1200_LCD_MAX_CLK % pixclock) {
1084 int diff = AU1200_LCD_MAX_CLK % pixclock;
1085 pixclock -= diff;
1086 }
1087
1088 var->pixclock = KHZ2PICOS(pixclock/1000);
1089#if 0
1090 if (!panel_is_active(panel)) {
1091 int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1;
1092
1093 if (!panel_is_color(panel)
1094 && (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) {
1095 /* STN 8bit mono panel support is up to 6MHz pixclock */
1096 var->pixclock = KHZ2PICOS(6000);
1097 } else if (!pcd) {
1098 /* Other STN panel support is up to 12MHz */
1099 var->pixclock = KHZ2PICOS(12000);
1100 }
1101 }
1102#endif
1103 /* Set bitfield accordingly */
1104 switch (var->bits_per_pixel) {
1105 case 16:
1106 {
1107 /* 16bpp True color.
1108 * These must be set to MATCH WINCTRL[FORM] */
1109 int idx;
1110 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
1111 var->red = rgb_bitfields[idx][0];
1112 var->green = rgb_bitfields[idx][1];
1113 var->blue = rgb_bitfields[idx][2];
1114 var->transp = rgb_bitfields[idx][3];
1115 break;
1116 }
1117
1118 case 32:
1119 {
1120 /* 32bpp True color.
1121 * These must be set to MATCH WINCTRL[FORM] */
1122 int idx;
1123 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
1124 var->red = rgb_bitfields[idx][0];
1125 var->green = rgb_bitfields[idx][1];
1126 var->blue = rgb_bitfields[idx][2];
1127 var->transp = rgb_bitfields[idx][3];
1128 break;
1129 }
1130 default:
1131 print_dbg("Unsupported depth %dbpp", var->bits_per_pixel);
1132 return -EINVAL;
1133 }
1134
1135 return 0;
1136}
1137
/* fb_set_par
 * Apply the current (previously validated) var settings to the
 * hardware: refresh the derived fb_info fields, then program the
 * window registers for this device's plane.  Always returns 0.
 */
static int au1200fb_fb_set_par(struct fb_info *fbi)
{
	struct au1200fb_device *dev = (struct au1200fb_device *)fbi;

	au1200fb_update_fbinfo(fbi);
	au1200_setmode(dev);
	return 0;
}
1151
1152/* fb_setcolreg
1153 * Set color in LCD palette.
1154 */
1155static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
1156 unsigned blue, unsigned transp, struct fb_info *fbi)
1157{
1158 volatile u32 *palette = lcd->palette;
1159 u32 value;
1160
1161 if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
1162 return -EINVAL;
1163
1164 if (fbi->var.grayscale) {
1165 /* Convert color to grayscale */
1166 red = green = blue =
1167 (19595 * red + 38470 * green + 7471 * blue) >> 16;
1168 }
1169
1170 if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
1171 /* Place color in the pseudopalette */
1172 if (regno > 16)
1173 return -EINVAL;
1174
1175 palette = (u32*) fbi->pseudo_palette;
1176
1177 red >>= (16 - fbi->var.red.length);
1178 green >>= (16 - fbi->var.green.length);
1179 blue >>= (16 - fbi->var.blue.length);
1180
1181 value = (red << fbi->var.red.offset) |
1182 (green << fbi->var.green.offset)|
1183 (blue << fbi->var.blue.offset);
1184 value &= 0xFFFF;
1185
1186 } else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
1187 /* COLOR TFT PALLETTIZED (use RGB 565) */
1188 value = (red & 0xF800)|((green >> 5) &
1189 0x07E0)|((blue >> 11) & 0x001F);
1190 value &= 0xFFFF;
1191
1192 } else if (0 /*panel_is_color(fbdev->panel)*/) {
1193 /* COLOR STN MODE */
1194 value = 0x1234;
1195 value &= 0xFFF;
1196 } else {
1197 /* MONOCHROME MODE */
1198 value = (green >> 12) & 0x000F;
1199 value &= 0xF;
1200 }
1201
1202 palette[regno] = value;
1203
1204 return 0;
1205}
1206
1207/* fb_blank
1208 * Blank the screen. Depending on the mode, the screen will be
1209 * activated with the backlight color, or desactivated
1210 */
1211static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
1212{
1213 /* Short-circuit screen blanking */
1214 if (noblanking)
1215 return 0;
1216
1217 switch (blank_mode) {
1218
1219 case FB_BLANK_UNBLANK:
1220 case FB_BLANK_NORMAL:
1221 /* printk("turn on panel\n"); */
1222 au1200_setpanel(panel);
1223 break;
1224 case FB_BLANK_VSYNC_SUSPEND:
1225 case FB_BLANK_HSYNC_SUSPEND:
1226 case FB_BLANK_POWERDOWN:
1227 /* printk("turn off panel\n"); */
1228 au1200_setpanel(NULL);
1229 break;
1230 default:
1231 break;
1232
1233 }
1234
1235 /* FB_BLANK_NORMAL is a soft blank */
1236 return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0;
1237}
1238
1239/* fb_mmap
1240 * Map video memory in user space. We don't use the generic fb_mmap
1241 * method mainly to allow the use of the TLB streaming flag (CCA=6)
1242 */
1243static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1244
1245{
1246 unsigned int len;
1247 unsigned long start=0, off;
1248 struct au1200fb_device *fbdev = (struct au1200fb_device *) info;
1249
1250#ifdef CONFIG_PM
1251 au1xxx_pm_access(LCD_pm_dev);
1252#endif
1253
1254 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
1255 return -EINVAL;
1256 }
1257
1258 start = fbdev->fb_phys & PAGE_MASK;
1259 len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
1260
1261 off = vma->vm_pgoff << PAGE_SHIFT;
1262
1263 if ((vma->vm_end - vma->vm_start + off) > len) {
1264 return -EINVAL;
1265 }
1266
1267 off += start;
1268 vma->vm_pgoff = off >> PAGE_SHIFT;
1269
1270 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1271 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
1272
1273 vma->vm_flags |= VM_IO;
1274
1275 return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1276 vma->vm_end - vma->vm_start,
1277 vma->vm_page_prot);
1278
1279 return 0;
1280}
1281
1282static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
1283{
1284
1285 unsigned int hi1, divider;
1286
1287 /* SCREEN_SIZE: user cannot reset size, must switch panel choice */
1288
1289 if (pdata->flags & SCREEN_BACKCOLOR)
1290 lcd->backcolor = pdata->backcolor;
1291
1292 if (pdata->flags & SCREEN_BRIGHTNESS) {
1293
1294 // limit brightness pwm duty to >= 30/1600
1295 if (pdata->brightness < 30) {
1296 pdata->brightness = 30;
1297 }
1298 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
1299 hi1 = (lcd->pwmhi >> 16) + 1;
1300 hi1 = (((pdata->brightness & 0xFF)+1) * divider >> 8);
1301 lcd->pwmhi &= 0xFFFF;
1302 lcd->pwmhi |= (hi1 << 16);
1303 }
1304
1305 if (pdata->flags & SCREEN_COLORKEY)
1306 lcd->colorkey = pdata->colorkey;
1307
1308 if (pdata->flags & SCREEN_MASK)
1309 lcd->colorkeymsk = pdata->mask;
1310 au_sync();
1311}
1312
/* get_global
 * Report the current global screen state (size, backcolor, colorkey,
 * mask and brightness) back to the ioctl caller.
 */
static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
	unsigned int hi1, divider;

	/* Screen geometry is stored as (size - 1) in the SX/SY fields. */
	pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1;
	pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1;

	pdata->backcolor = lcd->backcolor;
	pdata->colorkey = lcd->colorkey;
	pdata->mask = lcd->colorkeymsk;

	// brightness
	/* Inverse of the pwmhi/pwmdiv mapping used by set_global(). */
	hi1 = (lcd->pwmhi >> 16) + 1;
	divider = (lcd->pwmdiv & 0x3FFFF) + 1;
	pdata->brightness = ((hi1 << 8) / divider) - 1;
	au_sync();
}
1330
/* set_window
 * Apply user-selected per-window settings (position, alpha, format,
 * size, scaling, enable) from an ioctl request to the hardware window
 * registers of the given plane.  Every field is updated with a
 * read-modify-write so unrelated register bits are preserved.
 */
static void set_window(unsigned int plane,
	struct au1200_lcd_window_regs_t *pdata)
{
	unsigned int val, bpp;

	/* Window control register 0 */
	if (pdata->flags & WIN_POSITION) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX |
				LCD_WINCTRL0_OY);
		val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX);
		val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY);
		lcd->window[plane].winctrl0 = val;
	}
	if (pdata->flags & WIN_ALPHA_COLOR) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A);
		val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A);
		lcd->window[plane].winctrl0 = val;
	}
	if (pdata->flags & WIN_ALPHA_MODE) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN);
		val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN);
		lcd->window[plane].winctrl0 = val;
	}

	/* Window control register 1 */
	if (pdata->flags & WIN_PRIORITY) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI);
		val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_CHANNEL) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE);
		val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_BUFFER_FORMAT) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM);
		val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_COLOR_ORDER) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO);
		val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_PIXEL_ORDER) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO);
		val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_SIZE) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX |
				LCD_WINCTRL1_SZY);
		/* ((xsize << 11) - 1) == ((xsize - 1) << 11) | 0x7FF, and
		 * masking with SZX (bits >= 11, cf. get_window) drops the
		 * low bits -- so this stores xsize-1 in the SZX field. */
		val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX);
		val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY);
		lcd->window[plane].winctrl1 = val;
		/* program buffer line width */
		bpp = winbpp(val) / 8;
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX);
		val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX);
		lcd->window[plane].winctrl2 = val;
	}

	/* Window control register 2 */
	if (pdata->flags & WIN_COLORKEY_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE);
		val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE);
		lcd->window[plane].winctrl2 = val;
	}
	if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM);
		val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM);
		lcd->window[plane].winctrl2 = val;
	}
	if (pdata->flags & WIN_RAM_ARRAY_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM);
		val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM);
		lcd->window[plane].winctrl2 = val;
	}

	/* Buffer line width programmed with WIN_SIZE */

	if (pdata->flags & WIN_BUFFER_SCALE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX |
				LCD_WINCTRL2_SCY);
		val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX);
		val |= ((pdata->ysize) & LCD_WINCTRL2_SCY);
		lcd->window[plane].winctrl2 = val;
	}

	if (pdata->flags & WIN_ENABLE) {
		/* Replace only this plane's bit in the enable mask. */
		val = lcd->winenable;
		val &= ~(1<<plane);
		val |= (pdata->enable & 1) << plane;
		lcd->winenable = val;
	}
	au_sync();
}
1429
/* get_window
 * Decode the current hardware window register state for the given
 * plane into the ioctl reply structure (inverse of set_window()).
 */
static void get_window(unsigned int plane,
	struct au1200_lcd_window_regs_t *pdata)
{
	/* Window control register 0 */
	pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21;
	pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10;
	pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2;
	pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1;

	/* Window control register 1 */
	pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30;
	pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29;
	pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25;
	pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24;
	pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22;
	/* SZX/SZY hold (size - 1), hence the "+ 1". */
	pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1;
	pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1;

	/* Window control register 2 */
	pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24;
	pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23;
	pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;

	pdata->enable = (lcd->winenable >> plane) & 1;
	au_sync();
}
1456
1457static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
1458 unsigned long arg)
1459{
1460 int plane;
1461 int val;
1462
1463#ifdef CONFIG_PM
1464 au1xxx_pm_access(LCD_pm_dev);
1465#endif
1466
1467 plane = fbinfo2index(info);
1468 print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
1469
1470 if (cmd == AU1200_LCD_FB_IOCTL) {
1471 struct au1200_lcd_iodata_t iodata;
1472
1473 if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata)))
1474 return -EFAULT;
1475
1476 print_dbg("FB IOCTL called\n");
1477
1478 switch (iodata.subcmd) {
1479 case AU1200_LCD_SET_SCREEN:
1480 print_dbg("AU1200_LCD_SET_SCREEN\n");
1481 set_global(cmd, &iodata.global);
1482 break;
1483
1484 case AU1200_LCD_GET_SCREEN:
1485 print_dbg("AU1200_LCD_GET_SCREEN\n");
1486 get_global(cmd, &iodata.global);
1487 break;
1488
1489 case AU1200_LCD_SET_WINDOW:
1490 print_dbg("AU1200_LCD_SET_WINDOW\n");
1491 set_window(plane, &iodata.window);
1492 break;
1493
1494 case AU1200_LCD_GET_WINDOW:
1495 print_dbg("AU1200_LCD_GET_WINDOW\n");
1496 get_window(plane, &iodata.window);
1497 break;
1498
1499 case AU1200_LCD_SET_PANEL:
1500 print_dbg("AU1200_LCD_SET_PANEL\n");
1501 if ((iodata.global.panel_choice >= 0) &&
1502 (iodata.global.panel_choice <
1503 NUM_PANELS))
1504 {
1505 struct panel_settings *newpanel;
1506 panel_index = iodata.global.panel_choice;
1507 newpanel = &known_lcd_panels[panel_index];
1508 au1200_setpanel(newpanel);
1509 }
1510 break;
1511
1512 case AU1200_LCD_GET_PANEL:
1513 print_dbg("AU1200_LCD_GET_PANEL\n");
1514 iodata.global.panel_choice = panel_index;
1515 break;
1516
1517 default:
1518 return -EINVAL;
1519 }
1520
1521 val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata));
1522 if (val) {
1523 print_dbg("error: could not copy %d bytes\n", val);
1524 return -EFAULT;
1525 }
1526 }
1527
1528 return 0;
1529}
1530
1531
/* Framebuffer operations exported to the fbdev core.  Drawing is done
 * in software via the generic cfb_* helpers; the remaining hooks map
 * to the au1200fb_* handlers defined above. */
static struct fb_ops au1200fb_fb_ops = {
	.owner			= THIS_MODULE,
	.fb_check_var	= au1200fb_fb_check_var,
	.fb_set_par		= au1200fb_fb_set_par,
	.fb_setcolreg	= au1200fb_fb_setcolreg,
	.fb_blank		= au1200fb_fb_blank,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_sync		= NULL, /* no hardware acceleration to wait on */
	.fb_ioctl		= au1200fb_ioctl,
	.fb_mmap		= au1200fb_fb_mmap,
};
1545
1546/*-------------------------------------------------------------------------*/
1547
1548static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id, struct pt_regs *regs)
1549{
1550 /* Nothing to do for now, just clear any pending interrupt */
1551 lcd->intstatus = lcd->intstatus;
1552 au_sync();
1553
1554 return IRQ_HANDLED;
1555}
1556
1557/*-------------------------------------------------------------------------*/
1558
1559/* AU1200 LCD device probe helpers */
1560
1561static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
1562{
1563 struct fb_info *fbi = &fbdev->fb_info;
1564 int bpp;
1565
1566 memset(fbi, 0, sizeof(struct fb_info));
1567 fbi->fbops = &au1200fb_fb_ops;
1568
1569 bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
1570
1571 /* Copy monitor specs from panel data */
1572 /* fixme: we're setting up LCD controller windows, so these dont give a
1573 damn as to what the monitor specs are (the panel itself does, but that
1574 isnt done here...so maybe need a generic catchall monitor setting??? */
1575 memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs));
1576
1577 /* We first try the user mode passed in argument. If that failed,
1578 * or if no one has been specified, we default to the first mode of the
1579 * panel list. Note that after this call, var data will be set */
1580 if (!fb_find_mode(&fbi->var,
1581 fbi,
1582 NULL, /* drv_info.opt_mode, */
1583 fbi->monspecs.modedb,
1584 fbi->monspecs.modedb_len,
1585 fbi->monspecs.modedb,
1586 bpp)) {
1587
1588 print_err("Cannot find valid mode for panel %s", panel->name);
1589 return -EFAULT;
1590 }
1591
1592 fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
1593 if (!fbi->pseudo_palette) {
1594 return -ENOMEM;
1595 }
1596 memset(fbi->pseudo_palette, 0, sizeof(u32) * 16);
1597
1598 if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
1599 print_err("Fail to allocate colormap (%d entries)",
1600 AU1200_LCD_NBR_PALETTE_ENTRIES);
1601 kfree(fbi->pseudo_palette);
1602 return -EFAULT;
1603 }
1604
1605 strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
1606 fbi->fix.smem_start = fbdev->fb_phys;
1607 fbi->fix.smem_len = fbdev->fb_len;
1608 fbi->fix.type = FB_TYPE_PACKED_PIXELS;
1609 fbi->fix.xpanstep = 0;
1610 fbi->fix.ypanstep = 0;
1611 fbi->fix.mmio_start = 0;
1612 fbi->fix.mmio_len = 0;
1613 fbi->fix.accel = FB_ACCEL_NONE;
1614
1615 fbi->screen_base = (char __iomem *) fbdev->fb_mem;
1616
1617 au1200fb_update_fbinfo(fbi);
1618
1619 return 0;
1620}
1621
1622/*-------------------------------------------------------------------------*/
1623
1624/* AU1200 LCD controller device driver */
1625
/* Probe: for each configured plane, allocate a DMA framebuffer, mark
 * its pages reserved so mmap works, initialize and register the fbdev,
 * then hook the shared LCD interrupt.  Returns 0 or a negative errno.
 */
static int au1200fb_drv_probe(struct device *dev)
{
	struct au1200fb_device *fbdev;
	unsigned long page;
	int bpp, plane, ret;

	if (!dev)
		return -EINVAL;

	for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
		bpp = winbpp(win->w[plane].mode_winctrl1);
		/* Windows with no explicit size default to the panel size. */
		if (win->w[plane].xres == 0)
			win->w[plane].xres = panel->Xres;
		if (win->w[plane].yres == 0)
			win->w[plane].yres = panel->Yres;

		fbdev = &_au1200fb_devices[plane];
		memset(fbdev, 0, sizeof(struct au1200fb_device));
		fbdev->plane = plane;

		/* Allocate the framebuffer to the maximum screen size */
		fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;

		fbdev->fb_mem = dma_alloc_noncoherent(dev,
				PAGE_ALIGN(fbdev->fb_len),
				&fbdev->fb_phys, GFP_KERNEL);
		if (!fbdev->fb_mem) {
			print_err("fail to allocate frambuffer (size: %dK))",
				  fbdev->fb_len / 1024);
			/* NOTE(review): returns directly, leaking the
			 * planes already set up -- see also the note at
			 * the "failed" label. */
			return -ENOMEM;
		}

		/*
		 * Set page reserved so that mmap will work. This is necessary
		 * since we'll be remapping normal memory.
		 */
		for (page = (unsigned long)fbdev->fb_phys;
		     page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
				fbdev->fb_len);
		     page += PAGE_SIZE) {
			SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
		}
		print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
		print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);

		/* Init FB data */
		if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
			goto failed;

		/* Register new framebuffer */
		if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) {
			print_err("cannot register new framebuffer");
			goto failed;
		}

		au1200fb_fb_set_par(&fbdev->fb_info);

#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
		if (plane == 0)
			if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) {
				/* Start display and show logo on boot */
				fb_set_cmap(&fbdev->fb_info.cmap,
						&fbdev->fb_info);

				fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR);
			}
#endif
	}

	/* Now hook interrupt too */
	if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq,
		  SA_INTERRUPT | SA_SHIRQ, "lcd", (void *)dev)) < 0) {
		print_err("fail to request interrupt line %d (err: %d)",
			  AU1200_LCD_INT, ret);
		goto failed;
	}

	return 0;

failed:
	/* NOTE: This only does the current plane/window that failed; others are still active */
	/* NOTE(review): if we got here from inside the loop the IRQ was
	 * never requested, yet plane == 0 still calls free_irq; and a
	 * request_irq failure reaches here with fbdev pointing at the
	 * last (successfully set up) plane -- confirm intended cleanup. */
	if (fbdev->fb_mem)
		dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
				fbdev->fb_mem, fbdev->fb_phys);
	if (fbdev->fb_info.cmap.len != 0)
		fb_dealloc_cmap(&fbdev->fb_info.cmap);
	if (fbdev->fb_info.pseudo_palette)
		kfree(fbdev->fb_info.pseudo_palette);
	if (plane == 0)
		free_irq(AU1200_LCD_INT, (void*)dev);
	return ret;
}
1718
1719static int au1200fb_drv_remove(struct device *dev)
1720{
1721 struct au1200fb_device *fbdev;
1722 int plane;
1723
1724 if (!dev)
1725 return -ENODEV;
1726
1727 /* Turn off the panel */
1728 au1200_setpanel(NULL);
1729
1730 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane)
1731 {
1732 fbdev = &_au1200fb_devices[plane];
1733
1734 /* Clean up all probe data */
1735 unregister_framebuffer(&fbdev->fb_info);
1736 if (fbdev->fb_mem)
1737 dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
1738 fbdev->fb_mem, fbdev->fb_phys);
1739 if (fbdev->fb_info.cmap.len != 0)
1740 fb_dealloc_cmap(&fbdev->fb_info.cmap);
1741 if (fbdev->fb_info.pseudo_palette)
1742 kfree(fbdev->fb_info.pseudo_palette);
1743 }
1744
1745 free_irq(AU1200_LCD_INT, (void *)dev);
1746
1747 return 0;
1748}
1749
1750#ifdef CONFIG_PM
/* Driver-model suspend hook -- intentionally a stub for now; actual
 * power handling is done through au1200fb_pm_callback() instead. */
static int au1200fb_drv_suspend(struct device *dev, u32 state, u32 level)
{
	/* TODO */
	return 0;
}
1756
/* Driver-model resume hook -- intentionally a stub for now; actual
 * power handling is done through au1200fb_pm_callback() instead. */
static int au1200fb_drv_resume(struct device *dev, u32 level)
{
	/* TODO */
	return 0;
}
1762#endif /* CONFIG_PM */
1763
/* Platform-bus driver glue; the suspend/resume stubs are only present
 * when power management is configured in. */
static struct device_driver au1200fb_driver = {
	.name		= "au1200-lcd",
	.bus		= &platform_bus_type,
	.probe		= au1200fb_drv_probe,
	.remove		= au1200fb_drv_remove,
#ifdef CONFIG_PM
	.suspend	= au1200fb_drv_suspend,
	.resume		= au1200fb_drv_resume,
#endif
};
1774
1775/*-------------------------------------------------------------------------*/
1776
1777/* Kernel driver */
1778
1779static void au1200fb_setup(void)
1780{
1781 char* options = NULL;
1782 char* this_opt;
1783 int num_panels = ARRAY_SIZE(known_lcd_panels);
1784 int panel_idx = -1;
1785
1786 fb_get_options(DRIVER_NAME, &options);
1787
1788 if (options) {
1789 while ((this_opt = strsep(&options,",")) != NULL) {
1790 /* Panel option - can be panel name,
1791 * "bs" for board-switch, or number/index */
1792 if (!strncmp(this_opt, "panel:", 6)) {
1793 int i;
1794 long int li;
1795 char *endptr;
1796 this_opt += 6;
1797 /* First check for index, which allows
1798 * to short circuit this mess */
1799 li = simple_strtol(this_opt, &endptr, 0);
1800 if (*endptr == '\0') {
1801 panel_idx = (int)li;
1802 }
1803 else if (strcmp(this_opt, "bs") == 0) {
1804 extern int board_au1200fb_panel(void);
1805 panel_idx = board_au1200fb_panel();
1806 }
1807
1808 else
1809 for (i = 0; i < num_panels; i++) {
1810 if (!strcmp(this_opt, known_lcd_panels[i].name)) {
1811 panel_idx = i;
1812 break;
1813 }
1814 }
1815
1816 if ((panel_idx < 0) || (panel_idx >= num_panels)) {
1817 print_warn("Panel %s not supported!", this_opt);
1818 }
1819 else
1820 panel_index = panel_idx;
1821 }
1822
1823 else if (strncmp(this_opt, "nohwcursor", 10) == 0) {
1824 nohwcursor = 1;
1825 }
1826
1827 /* Unsupported option */
1828 else {
1829 print_warn("Unsupported option \"%s\"", this_opt);
1830 }
1831 }
1832 }
1833}
1834
1835#ifdef CONFIG_PM
/* Power-management callback registered with the au1xxx PM layer.
 * Handles panel shutdown on sleep, panel/plane re-init and backlight
 * restore on wakeup, status queries, and panel re-activation on access
 * after sleep.  Returns the device state for GETSTATUS, -1 otherwise. */
static int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
		au1xxx_request_t request, void *data) {
	int retval = -1;
	unsigned int d = 0;
	unsigned int brightness = 0;

	if (request == AU1XXX_PM_SLEEP) {
		board_au1200fb_panel_shutdown();
	}
	else if (request == AU1XXX_PM_WAKEUP) {
		/* Re-program the panel and every plane if we really slept. */
		if(dev->prev_state == SLEEP_STATE)
		{
			int plane;
			au1200_setpanel(panel);
			for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
				struct au1200fb_device *fbdev;
				fbdev = &_au1200fb_devices[plane];
				au1200fb_fb_set_par(&fbdev->fb_info);
			}
		}

		/* Map the requested level (assumed 0..100 in steps of 10 --
		 * TODO confirm caller contract) onto a 0..255 PWM duty;
		 * each step is ~255/10. */
		d = *((unsigned int*)data);
		if(d <=10) brightness = 26;
		else if(d<=20) brightness = 51;
		else if(d<=30) brightness = 77;
		else if(d<=40) brightness = 102;
		else if(d<=50) brightness = 128;
		else if(d<=60) brightness = 153;
		else if(d<=70) brightness = 179;
		else if(d<=80) brightness = 204;
		else if(d<=90) brightness = 230;
		else brightness = 255;
		set_brightness(brightness);
	} else if (request == AU1XXX_PM_GETSTATUS) {
		return dev->cur_state;
	} else if (request == AU1XXX_PM_ACCESS) {
		/* Only wake the panel when coming out of sleep. */
		if (dev->cur_state != SLEEP_STATE)
			return retval;
		else {
			au1200_setpanel(panel);
		}
	} else if (request == AU1XXX_PM_IDLE) {
	} else if (request == AU1XXX_PM_CLEANUP) {
	}

	return retval;
}
1883#endif
1884
1885static int __init au1200fb_init(void)
1886{
1887 print_info("" DRIVER_DESC "");
1888
1889 /* Setup driver with options */
1890 au1200fb_setup();
1891
1892 /* Point to the panel selected */
1893 panel = &known_lcd_panels[panel_index];
1894 win = &windows[window_index];
1895
1896 printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
1897 printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
1898
1899 /* Kickstart the panel, the framebuffers/windows come soon enough */
1900 au1200_setpanel(panel);
1901
1902 #ifdef CONFIG_PM
1903 LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL);
1904 if ( LCD_pm_dev == NULL)
1905 printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n");
1906 else
1907 printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n");
1908 #endif
1909
1910 return driver_register(&au1200fb_driver);
1911}
1912
1913static void __exit au1200fb_cleanup(void)
1914{
1915 driver_unregister(&au1200fb_driver);
1916}
1917
/* Standard module entry/exit hookup and metadata. */
module_init(au1200fb_init);
module_exit(au1200fb_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
1923/*
1924 * BRIEF MODULE DESCRIPTION
1925 * Au1200 LCD Driver.
1926 *
1927 * Copyright 2004-2005 AMD
1928 * Author: AMD
1929 *
1930 * Based on:
1931 * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
1932 * Created 28 Dec 1997 by Geert Uytterhoeven
1933 *
1934 * This program is free software; you can redistribute it and/or modify it
1935 * under the terms of the GNU General Public License as published by the
1936 * Free Software Foundation; either version 2 of the License, or (at your
1937 * option) any later version.
1938 *
1939 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
1940 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1941 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
1942 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1943 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1944 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
1945 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
1946 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1947 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1948 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1949 *
1950 * You should have received a copy of the GNU General Public License along
1951 * with this program; if not, write to the Free Software Foundation, Inc.,
1952 * 675 Mass Ave, Cambridge, MA 02139, USA.
1953 */
1954
1955#include <linux/module.h>
1956#include <linux/platform_device.h>
1957#include <linux/kernel.h>
1958#include <linux/errno.h>
1959#include <linux/string.h>
1960#include <linux/mm.h>
1961#include <linux/fb.h>
1962#include <linux/init.h>
1963#include <linux/interrupt.h>
1964#include <linux/ctype.h>
1965#include <linux/dma-mapping.h>
1966
1967#include <asm/mach-au1x00/au1000.h>
1968#include "au1200fb.h"
1969
1970#ifdef CONFIG_PM
1971#include <asm/mach-au1x00/au1xxx_pm.h>
1972#endif
1973
/* Number of framebuffer devices (one per hardware window/plane). */
#ifndef CONFIG_FB_AU1200_DEVS
#define CONFIG_FB_AU1200_DEVS 4
#endif

#define DRIVER_NAME "au1200fb"
#define DRIVER_DESC "LCD controller driver for AU1200 processors"

/* Set to 0 to compile print_dbg() away entirely. */
#define DEBUG 1

/* Leveled printk wrappers, all prefixed with the driver name. */
#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg)

#if DEBUG
#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg)
#else
#define print_dbg(f, arg...) do {} while (0)
#endif


/* Private ioctl number used to get/set screen, window and panel state. */
#define AU1200_LCD_FB_IOCTL 0x46FF

/* Sub-commands for AU1200_LCD_FB_IOCTL (au1200_lcd_iodata_t.subcmd). */
#define AU1200_LCD_SET_SCREEN 1
#define AU1200_LCD_GET_SCREEN 2
#define AU1200_LCD_SET_WINDOW 3
#define AU1200_LCD_GET_WINDOW 4
#define AU1200_LCD_SET_PANEL 5
#define AU1200_LCD_GET_PANEL 6

/* Flag bits for au1200_lcd_global_regs_t.flags — presumably each bit
 * marks the corresponding field below as valid; confirm against the
 * ioctl handler (not visible in this chunk). */
#define SCREEN_SIZE (1<< 1)
#define SCREEN_BACKCOLOR (1<< 2)
#define SCREEN_BRIGHTNESS (1<< 3)
#define SCREEN_COLORKEY (1<< 4)
#define SCREEN_MASK (1<< 5)

/* Global (whole-screen) parameter block exchanged through the ioctl. */
struct au1200_lcd_global_regs_t {
	unsigned int flags;		/* mask of SCREEN_* bits */
	unsigned int xsize;
	unsigned int ysize;
	unsigned int backcolor;
	unsigned int brightness;
	unsigned int colorkey;
	unsigned int mask;
	unsigned int panel_choice;	/* index into known_lcd_panels[] */
	char panel_desc[80];

};
2021
/* Flag bits for au1200_lcd_window_regs_t.flags — presumably each bit
 * marks the corresponding window field as valid; confirm against the
 * ioctl handler (not visible in this chunk). */
#define WIN_POSITION (1<< 0)
#define WIN_ALPHA_COLOR (1<< 1)
#define WIN_ALPHA_MODE (1<< 2)
#define WIN_PRIORITY (1<< 3)
#define WIN_CHANNEL (1<< 4)
#define WIN_BUFFER_FORMAT (1<< 5)
#define WIN_COLOR_ORDER (1<< 6)
#define WIN_PIXEL_ORDER (1<< 7)
#define WIN_SIZE (1<< 8)
#define WIN_COLORKEY_MODE (1<< 9)
#define WIN_DOUBLE_BUFFER_MODE (1<< 10)
#define WIN_RAM_ARRAY_MODE (1<< 11)
#define WIN_BUFFER_SCALE (1<< 12)
#define WIN_ENABLE (1<< 13)

/* Per-window parameter block exchanged through the ioctl. */
struct au1200_lcd_window_regs_t {
	unsigned int flags;		/* mask of WIN_* bits */
	unsigned int xpos;
	unsigned int ypos;
	unsigned int alpha_color;
	unsigned int alpha_mode;
	unsigned int priority;
	unsigned int channel;
	unsigned int buffer_format;
	unsigned int color_order;
	unsigned int pixel_order;
	unsigned int xsize;
	unsigned int ysize;
	unsigned int colorkey_mode;
	unsigned int double_buffer_mode;
	unsigned int ram_array_mode;
	unsigned int xscale;
	unsigned int yscale;
	unsigned int enable;
};


/* Top-level payload of AU1200_LCD_FB_IOCTL: a sub-command plus both
 * parameter blocks (only the relevant one is used per sub-command). */
struct au1200_lcd_iodata_t {
	unsigned int subcmd;
	struct au1200_lcd_global_regs_t global;
	struct au1200_lcd_window_regs_t window;
};
2064
/* Default pixel-order / small-bpp frame format for this build's byte order. */
#if defined(__BIG_ENDIAN)
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11
#else
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00
#endif
#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565

/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
	struct fb_info fb_info;			/* FB driver info record */

	int					plane;	/* hardware window (0-3) backing this fb */
	unsigned char* 	fb_mem;		/* FrameBuffer memory map */
	unsigned int	fb_len;		/* size of fb_mem in bytes */
	dma_addr_t    	fb_phys;	/* bus address programmed into winbuf0/1 */
};

static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS];
/********************************************************************/

/* LCD controller restrictions */
#define AU1200_LCD_MAX_XRES	1280
#define AU1200_LCD_MAX_YRES	1024
#define AU1200_LCD_MAX_BPP	32
#define AU1200_LCD_MAX_CLK	96000000 /* fixme: this needs to go away ? */
#define AU1200_LCD_NBR_PALETTE_ENTRIES 256

/* Default number of visible screen buffer to allocate */
#define AU1200FB_NBR_VIDEO_BUFFERS 1

/********************************************************************/

/* Memory-mapped LCD controller register block. */
static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
/* NOTE(review): the two comments below say "default is zero" but both
 * indices are initialized to 2 — confirm which is intended. */
static int window_index = 2; /* default is zero */
static int panel_index = 2; /* default is zero */
static struct window_settings *win;	/* = &windows[window_index] */
static struct panel_settings *panel;	/* = &known_lcd_panels[panel_index] */
static int noblanking = 1;
static int nohwcursor = 0;

/* Compile-time configuration of the four hardware windows (planes). */
struct window_settings {
	unsigned char name[64];
	uint32 mode_backcolor;
	uint32 mode_colorkey;
	uint32 mode_colorkeymsk;
	struct {
		int xres;
		int yres;
		int xpos;
		int ypos;
		uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */
		uint32 mode_winenable;
	} w[4];
};

/* 16bpp pixel ordering depends on the build's endianness. */
#if defined(__BIG_ENDIAN)
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00
#else
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
#endif

/* Board-specific panel power/init hooks provided by the platform code. */
extern int board_au1200fb_panel_init (void);
extern int board_au1200fb_panel_shutdown (void);

#ifdef CONFIG_PM
int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
		au1xxx_request_t request, void *data);
au1xxx_power_dev_t *LCD_pm_dev;
#endif
2134
2135/*
2136 * Default window configurations
2137 */
/* Window configuration table; one entry is selected by window_index and
 * each entry configures all four hardware windows w[0]..w[3]. */
static struct window_settings windows[] = {
	{ /* Index 0 */
		"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
		/* mode_backcolor	*/ 0x006600ff,
		/* mode_colorkey,msk*/ 0, 0,
		{
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP,
			/* mode_winenable*/ LCD_WINENABLE_WEN0,
			},
			{
			/* xres, yres, xpos, ypos */ 100, 100, 100, 100,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP |
				LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ LCD_WINENABLE_WEN1,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP,
			/* mode_winenable*/ 0,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP |
				LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ 0,
			},
		},
	},

	{ /* Index 1 */
		"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
		/* mode_backcolor	*/ 0x006600ff,
		/* mode_colorkey,msk*/ 0, 0,
		{
			{
			/* xres, yres, xpos, ypos */ 320, 240, 5, 5,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP |
				LCD_WINCTRL1_PO_00,
			/* mode_winenable*/ LCD_WINENABLE_WEN0,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565
				| LCD_WINCTRL1_PO_16BPP,
			/* mode_winenable*/ 0,
			},
			{
			/* xres, yres, xpos, ypos */ 100, 100, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP |
				LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
			},
			{
			/* xres, yres, xpos, ypos */ 200, 25, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP |
				LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ 0,
			},
		},
	},
	{ /* Index 2 */
		"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
		/* mode_backcolor	*/ 0x006600ff,
		/* mode_colorkey,msk*/ 0, 0,
		{
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP,
			/* mode_winenable*/ LCD_WINENABLE_WEN0,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP,
			/* mode_winenable*/ 0,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP |
				LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
			},
			{
			/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
			/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
				LCD_WINCTRL1_PO_16BPP |
				LCD_WINCTRL1_PIPE,
			/* mode_winenable*/ 0,
			},
		},
	},
	/* Need VGA 640 @ 24bpp, @ 32bpp */
	/* Need VGA 800 @ 24bpp, @ 32bpp */
	/* Need VGA 1024 @ 24bpp, @ 32bpp */
};
2242
2243/*
2244 * Controller configurations for various panels.
2245 */
2246
/* Per-panel controller configuration: register values to program, FB
 * monitor specs, and the supported resolution range. */
struct panel_settings
{
	const char name[25];		/* Full name <vendor>_<model> */

	struct fb_monspecs monspecs;	/* FB monitor specs */

	/* panel timings */
	uint32 mode_screen;		/* screen geometry / panel type */
	uint32 mode_horztiming;
	uint32 mode_verttiming;
	uint32 mode_clkcontrol;		/* dot-clock divider / ext-clock select */
	uint32 mode_pwmdiv;		/* PWM divider (used by set_brightness) */
	uint32 mode_pwmhi;		/* PWM duty (used by set_brightness) */
	uint32 mode_outmask;
	uint32 mode_fifoctrl;
	uint32 mode_toyclksrc;		/* clock source bits for SYS_CLKSRC */
	uint32 mode_backlight;
	uint32 mode_auxpll;		/* AUXPLL multiplier of 12MHz (8 -> 96MHz) */
	int (*device_init)(void);	/* optional board hook, run after enable */
	int (*device_shutdown)(void);	/* optional board hook, run at shutdown */
/* Shorthand used throughout the driver: panel->Xres/Yres are the minima. */
#define Xres min_xres
#define Yres min_yres
	u32	min_xres;		/* Minimum horizontal resolution */
	u32	max_xres;		/* Maximum horizontal resolution */
	u32 	min_yres;		/* Minimum vertical resolution */
	u32 	max_yres;		/* Maximum vertical resolution */
};
2274
2275/********************************************************************/
2276/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */
2277
2278/* List of panels known to work with the AU1200 LCD controller.
2279 * To add a new panel, enter the same specifications as the
 * Generic_TFT one, and MAKE SURE that it doesn't conflict
2281 * with the controller restrictions. Restrictions are:
2282 *
2283 * STN color panels: max_bpp <= 12
2284 * STN mono panels: max_bpp <= 4
2285 * TFT panels: max_bpp <= 16
2286 * max_xres <= 800
2287 * max_yres <= 600
2288 */
2289static struct panel_settings known_lcd_panels[] =
2290{
2291 [0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */
2292 .name = "QVGA_320x240",
2293 .monspecs = {
2294 .modedb = NULL,
2295 .modedb_len = 0,
2296 .hfmin = 30000,
2297 .hfmax = 70000,
2298 .vfmin = 60,
2299 .vfmax = 60,
2300 .dclkmin = 6000000,
2301 .dclkmax = 28000000,
2302 .input = FB_DISP_RGB,
2303 },
2304 .mode_screen = LCD_SCREEN_SX_N(320) |
2305 LCD_SCREEN_SY_N(240),
2306 .mode_horztiming = 0x00c4623b,
2307 .mode_verttiming = 0x00502814,
2308 .mode_clkcontrol = 0x00020002, /* /4=24Mhz */
2309 .mode_pwmdiv = 0x00000000,
2310 .mode_pwmhi = 0x00000000,
2311 .mode_outmask = 0x00FFFFFF,
2312 .mode_fifoctrl = 0x2f2f2f2f,
2313 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2314 .mode_backlight = 0x00000000,
2315 .mode_auxpll = 8, /* 96MHz AUXPLL */
2316 .device_init = NULL,
2317 .device_shutdown = NULL,
2318 320, 320,
2319 240, 240,
2320 },
2321
2322 [1] = { /* VGA 640x480 H:30.3kHz V:58Hz */
2323 .name = "VGA_640x480",
2324 .monspecs = {
2325 .modedb = NULL,
2326 .modedb_len = 0,
2327 .hfmin = 30000,
2328 .hfmax = 70000,
2329 .vfmin = 60,
2330 .vfmax = 60,
2331 .dclkmin = 6000000,
2332 .dclkmax = 28000000,
2333 .input = FB_DISP_RGB,
2334 },
2335 .mode_screen = 0x13f9df80,
2336 .mode_horztiming = 0x003c5859,
2337 .mode_verttiming = 0x00741201,
2338 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
2339 .mode_pwmdiv = 0x00000000,
2340 .mode_pwmhi = 0x00000000,
2341 .mode_outmask = 0x00FFFFFF,
2342 .mode_fifoctrl = 0x2f2f2f2f,
2343 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2344 .mode_backlight = 0x00000000,
2345 .mode_auxpll = 8, /* 96MHz AUXPLL */
2346 .device_init = NULL,
2347 .device_shutdown = NULL,
2348 640, 480,
2349 640, 480,
2350 },
2351
2352 [2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */
2353 .name = "SVGA_800x600",
2354 .monspecs = {
2355 .modedb = NULL,
2356 .modedb_len = 0,
2357 .hfmin = 30000,
2358 .hfmax = 70000,
2359 .vfmin = 60,
2360 .vfmax = 60,
2361 .dclkmin = 6000000,
2362 .dclkmax = 28000000,
2363 .input = FB_DISP_RGB,
2364 },
2365 .mode_screen = 0x18fa5780,
2366 .mode_horztiming = 0x00dc7e77,
2367 .mode_verttiming = 0x00584805,
2368 .mode_clkcontrol = 0x00020000, /* /2=48Mhz */
2369 .mode_pwmdiv = 0x00000000,
2370 .mode_pwmhi = 0x00000000,
2371 .mode_outmask = 0x00FFFFFF,
2372 .mode_fifoctrl = 0x2f2f2f2f,
2373 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2374 .mode_backlight = 0x00000000,
2375 .mode_auxpll = 8, /* 96MHz AUXPLL */
2376 .device_init = NULL,
2377 .device_shutdown = NULL,
2378 800, 800,
2379 600, 600,
2380 },
2381
2382 [3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */
2383 .name = "XVGA_1024x768",
2384 .monspecs = {
2385 .modedb = NULL,
2386 .modedb_len = 0,
2387 .hfmin = 30000,
2388 .hfmax = 70000,
2389 .vfmin = 60,
2390 .vfmax = 60,
2391 .dclkmin = 6000000,
2392 .dclkmax = 28000000,
2393 .input = FB_DISP_RGB,
2394 },
2395 .mode_screen = 0x1ffaff80,
2396 .mode_horztiming = 0x007d0e57,
2397 .mode_verttiming = 0x00740a01,
2398 .mode_clkcontrol = 0x000A0000, /* /1 */
2399 .mode_pwmdiv = 0x00000000,
2400 .mode_pwmhi = 0x00000000,
2401 .mode_outmask = 0x00FFFFFF,
2402 .mode_fifoctrl = 0x2f2f2f2f,
2403 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2404 .mode_backlight = 0x00000000,
2405 .mode_auxpll = 6, /* 72MHz AUXPLL */
2406 .device_init = NULL,
2407 .device_shutdown = NULL,
2408 1024, 1024,
2409 768, 768,
2410 },
2411
2412 [4] = { /* XVGA XVGA 1280x1024 H:68.5kHz V:65Hz */
2413 .name = "XVGA_1280x1024",
2414 .monspecs = {
2415 .modedb = NULL,
2416 .modedb_len = 0,
2417 .hfmin = 30000,
2418 .hfmax = 70000,
2419 .vfmin = 60,
2420 .vfmax = 60,
2421 .dclkmin = 6000000,
2422 .dclkmax = 28000000,
2423 .input = FB_DISP_RGB,
2424 },
2425 .mode_screen = 0x27fbff80,
2426 .mode_horztiming = 0x00cdb2c7,
2427 .mode_verttiming = 0x00600002,
2428 .mode_clkcontrol = 0x000A0000, /* /1 */
2429 .mode_pwmdiv = 0x00000000,
2430 .mode_pwmhi = 0x00000000,
2431 .mode_outmask = 0x00FFFFFF,
2432 .mode_fifoctrl = 0x2f2f2f2f,
2433 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2434 .mode_backlight = 0x00000000,
2435 .mode_auxpll = 10, /* 120MHz AUXPLL */
2436 .device_init = NULL,
2437 .device_shutdown = NULL,
2438 1280, 1280,
2439 1024, 1024,
2440 },
2441
2442 [5] = { /* Samsung 1024x768 TFT */
2443 .name = "Samsung_1024x768_TFT",
2444 .monspecs = {
2445 .modedb = NULL,
2446 .modedb_len = 0,
2447 .hfmin = 30000,
2448 .hfmax = 70000,
2449 .vfmin = 60,
2450 .vfmax = 60,
2451 .dclkmin = 6000000,
2452 .dclkmax = 28000000,
2453 .input = FB_DISP_RGB,
2454 },
2455 .mode_screen = 0x1ffaff80,
2456 .mode_horztiming = 0x018cc677,
2457 .mode_verttiming = 0x00241217,
2458 .mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */
2459 .mode_pwmdiv = 0x8000063f, /* SCB 0x0 */
2460 .mode_pwmhi = 0x03400000, /* SCB 0x0 */
2461 .mode_outmask = 0x00FFFFFF,
2462 .mode_fifoctrl = 0x2f2f2f2f,
2463 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2464 .mode_backlight = 0x00000000,
2465 .mode_auxpll = 8, /* 96MHz AUXPLL */
2466 .device_init = board_au1200fb_panel_init,
2467 .device_shutdown = board_au1200fb_panel_shutdown,
2468 1024, 1024,
2469 768, 768,
2470 },
2471
2472 [6] = { /* Toshiba 640x480 TFT */
2473 .name = "Toshiba_640x480_TFT",
2474 .monspecs = {
2475 .modedb = NULL,
2476 .modedb_len = 0,
2477 .hfmin = 30000,
2478 .hfmax = 70000,
2479 .vfmin = 60,
2480 .vfmax = 60,
2481 .dclkmin = 6000000,
2482 .dclkmax = 28000000,
2483 .input = FB_DISP_RGB,
2484 },
2485 .mode_screen = LCD_SCREEN_SX_N(640) |
2486 LCD_SCREEN_SY_N(480),
2487 .mode_horztiming = LCD_HORZTIMING_HPW_N(96) |
2488 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51),
2489 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
2490 LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32),
2491 .mode_clkcontrol = 0x00000000, /* /4=24Mhz */
2492 .mode_pwmdiv = 0x8000063f,
2493 .mode_pwmhi = 0x03400000,
2494 .mode_outmask = 0x00fcfcfc,
2495 .mode_fifoctrl = 0x2f2f2f2f,
2496 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2497 .mode_backlight = 0x00000000,
2498 .mode_auxpll = 8, /* 96MHz AUXPLL */
2499 .device_init = board_au1200fb_panel_init,
2500 .device_shutdown = board_au1200fb_panel_shutdown,
2501 640, 480,
2502 640, 480,
2503 },
2504
2505 [7] = { /* Sharp 320x240 TFT */
2506 .name = "Sharp_320x240_TFT",
2507 .monspecs = {
2508 .modedb = NULL,
2509 .modedb_len = 0,
2510 .hfmin = 12500,
2511 .hfmax = 20000,
2512 .vfmin = 38,
2513 .vfmax = 81,
2514 .dclkmin = 4500000,
2515 .dclkmax = 6800000,
2516 .input = FB_DISP_RGB,
2517 },
2518 .mode_screen = LCD_SCREEN_SX_N(320) |
2519 LCD_SCREEN_SY_N(240),
2520 .mode_horztiming = LCD_HORZTIMING_HPW_N(60) |
2521 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2),
2522 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
2523 LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5),
2524 .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/
2525 .mode_pwmdiv = 0x8000063f,
2526 .mode_pwmhi = 0x03400000,
2527 .mode_outmask = 0x00fcfcfc,
2528 .mode_fifoctrl = 0x2f2f2f2f,
2529 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2530 .mode_backlight = 0x00000000,
2531 .mode_auxpll = 8, /* 96MHz AUXPLL */
2532 .device_init = board_au1200fb_panel_init,
2533 .device_shutdown = board_au1200fb_panel_shutdown,
2534 320, 320,
2535 240, 240,
2536 },
2537
2538 [8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */
2539 .name = "Toppoly_TD070WGCB2",
2540 .monspecs = {
2541 .modedb = NULL,
2542 .modedb_len = 0,
2543 .hfmin = 30000,
2544 .hfmax = 70000,
2545 .vfmin = 60,
2546 .vfmax = 60,
2547 .dclkmin = 6000000,
2548 .dclkmax = 28000000,
2549 .input = FB_DISP_RGB,
2550 },
2551 .mode_screen = LCD_SCREEN_SX_N(856) |
2552 LCD_SCREEN_SY_N(480),
2553 .mode_horztiming = LCD_HORZTIMING_HND2_N(43) |
2554 LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114),
2555 .mode_verttiming = LCD_VERTTIMING_VND2_N(20) |
2556 LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4),
2557 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
2558 .mode_pwmdiv = 0x8000063f,
2559 .mode_pwmhi = 0x03400000,
2560 .mode_outmask = 0x00fcfcfc,
2561 .mode_fifoctrl = 0x2f2f2f2f,
2562 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2563 .mode_backlight = 0x00000000,
2564 .mode_auxpll = 8, /* 96MHz AUXPLL */
2565 .device_init = board_au1200fb_panel_init,
2566 .device_shutdown = board_au1200fb_panel_shutdown,
2567 856, 856,
2568 480, 480,
2569 },
2570};
2571
2572#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
2573
2574/********************************************************************/
2575
2576#ifdef CONFIG_PM
2577static int set_brightness(unsigned int brightness)
2578{
2579 unsigned int hi1, divider;
2580
2581 /* limit brightness pwm duty to >= 30/1600 */
2582 if (brightness < 30) {
2583 brightness = 30;
2584 }
2585 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
2586 hi1 = (lcd->pwmhi >> 16) + 1;
2587 hi1 = (((brightness & 0xFF) + 1) * divider >> 8);
2588 lcd->pwmhi &= 0xFFFF;
2589 lcd->pwmhi |= (hi1 << 16);
2590
2591 return brightness;
2592}
2593#endif /* CONFIG_PM */
2594
2595static int winbpp (unsigned int winctrl1)
2596{
2597 int bits = 0;
2598
2599 /* how many bits are needed for each pixel format */
2600 switch (winctrl1 & LCD_WINCTRL1_FRM) {
2601 case LCD_WINCTRL1_FRM_1BPP:
2602 bits = 1;
2603 break;
2604 case LCD_WINCTRL1_FRM_2BPP:
2605 bits = 2;
2606 break;
2607 case LCD_WINCTRL1_FRM_4BPP:
2608 bits = 4;
2609 break;
2610 case LCD_WINCTRL1_FRM_8BPP:
2611 bits = 8;
2612 break;
2613 case LCD_WINCTRL1_FRM_12BPP:
2614 case LCD_WINCTRL1_FRM_16BPP655:
2615 case LCD_WINCTRL1_FRM_16BPP565:
2616 case LCD_WINCTRL1_FRM_16BPP556:
2617 case LCD_WINCTRL1_FRM_16BPPI1555:
2618 case LCD_WINCTRL1_FRM_16BPPI5551:
2619 case LCD_WINCTRL1_FRM_16BPPA1555:
2620 case LCD_WINCTRL1_FRM_16BPPA5551:
2621 bits = 16;
2622 break;
2623 case LCD_WINCTRL1_FRM_24BPP:
2624 case LCD_WINCTRL1_FRM_32BPP:
2625 bits = 32;
2626 break;
2627 }
2628
2629 return bits;
2630}
2631
2632static int fbinfo2index (struct fb_info *fb_info)
2633{
2634 int i;
2635
2636 for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) {
2637 if (fb_info == (struct fb_info *)(&_au1200fb_devices[i].fb_info))
2638 return i;
2639 }
2640 printk("au1200fb: ERROR: fbinfo2index failed!\n");
2641 return -1;
2642}
2643
/*
 * Program hardware window @plane's position/size registers, clipping the
 * window rectangle against the panel size (panel->Xres/Yres), and point
 * both of the window's frame buffers at fbdev->fb_phys.  The plane's
 * enable bit is cleared while the registers are rewritten and then
 * restored to its previous state.  Always returns 0.
 */
static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
		int xpos, int ypos)
{
	uint32 winctrl0, winctrl1, winenable, fb_offset = 0;
	int xsz, ysz;

	/* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */

	winctrl0 = lcd->window[plane].winctrl0;
	winctrl1 = lcd->window[plane].winctrl1;
	/* keep only the alpha bits; position/size are rebuilt below */
	winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN);
	winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY);

	/* Check for off-screen adjustments */
	xsz = win->w[plane].xres;
	ysz = win->w[plane].yres;
	if ((xpos + win->w[plane].xres) > panel->Xres) {
		/* Off-screen to the right */
		xsz = panel->Xres - xpos; /* off by 1 ??? */
		/*printk("off screen right\n");*/
	}

	if ((ypos + win->w[plane].yres) > panel->Yres) {
		/* Off-screen to the bottom */
		ysz = panel->Yres - ypos; /* off by 1 ??? */
		/*printk("off screen bottom\n");*/
	}

	if (xpos < 0) {
		/* Off-screen to the left */
		xsz = win->w[plane].xres + xpos;
		/* NOTE(review): fb_offset is computed here but never applied to
		 * the winbuf registers below — left clipping looks unfinished. */
		fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8);
		xpos = 0;
		/*printk("off screen left\n");*/
	}

	if (ypos < 0) {
		/* Off-screen to the top */
		ysz = win->w[plane].yres + ypos;
		/* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */
		ypos = 0;
		/*printk("off screen top\n");*/
	}

	/* record settings */
	win->w[plane].xpos = xpos;
	win->w[plane].ypos = ypos;

	/* hardware size fields hold size-1 */
	xsz -= 1;
	ysz -= 1;
	winctrl0 |= (xpos << 21);
	winctrl0 |= (ypos << 10);
	winctrl1 |= (xsz << 11);
	winctrl1 |= (ysz << 0);

	/* Disable the window while making changes, then restore WINEN */
	winenable = lcd->winenable & (1 << plane);
	au_sync();
	lcd->winenable &= ~(1 << plane);
	lcd->window[plane].winctrl0 = winctrl0;
	lcd->window[plane].winctrl1 = winctrl1;
	lcd->window[plane].winbuf0 =
	lcd->window[plane].winbuf1 = fbdev->fb_phys;
	lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
	lcd->winenable |= winenable;
	au_sync();

	return 0;
}
2713
/*
 * Shut the LCD controller down cleanly and, unless @newpanel is NULL,
 * reprogram it for the new panel and re-enable it.  Updates the
 * file-global 'panel' pointer.  Passing NULL performs shutdown only.
 */
static void au1200_setpanel (struct panel_settings *newpanel)
{
	/*
	 * Perform global setup/init of LCD controller
	 */
	uint32 winenable;

	/* Make sure all windows disabled */
	winenable = lcd->winenable;
	lcd->winenable = 0;
	au_sync();
	/*
	 * Ensure everything is disabled before reconfiguring
	 */
	if (lcd->screen & LCD_SCREEN_SEN) {
		/* Wait for vertical sync period */
		lcd->intstatus = LCD_INT_SS;
		while ((lcd->intstatus & LCD_INT_SS) == 0) {
			au_sync();
		}

		lcd->screen &= ~LCD_SCREEN_SEN;	/*disable the controller*/

		do {
			lcd->intstatus = lcd->intstatus; /*clear interrupts*/
			au_sync();
		/*wait for controller to shut down*/
		} while ((lcd->intstatus & LCD_INT_SD) == 0);

		/* Call shutdown of current panel (if up) */
		/* this must occur last, because if an external clock is driving
		    the controller, the clock cannot be turned off before first
			shutting down the controller.
		 */
		if (panel->device_shutdown != NULL)
			panel->device_shutdown();
	}

	/* Newpanel == NULL indicates a shutdown operation only */
	if (newpanel == NULL)
		return;

	panel = newpanel;

	printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres);

	/*
	 * Setup clocking if internal LCD clock source (assumes sys_auxpll valid)
	 */
	if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
	{
		uint32 sys_clksrc;
		/* program AUXPLL then route it to the LCD via SYS_CLKSRC */
		au_writel(panel->mode_auxpll, SYS_AUXPLL);
		sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
		sys_clksrc |= panel->mode_toyclksrc;
		au_writel(sys_clksrc, SYS_CLKSRC);
	}

	/*
	 * Configure panel timings
	 */
	lcd->screen = panel->mode_screen;
	lcd->horztiming = panel->mode_horztiming;
	lcd->verttiming = panel->mode_verttiming;
	lcd->clkcontrol = panel->mode_clkcontrol;
	lcd->pwmdiv = panel->mode_pwmdiv;
	lcd->pwmhi = panel->mode_pwmhi;
	lcd->outmask = panel->mode_outmask;
	lcd->fifoctrl = panel->mode_fifoctrl;
	au_sync();

	/* fixme: Check window settings to make sure still valid
	 * for new geometry */
#if 0
	au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos);
	au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos);
	au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos);
	au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos);
#endif
	/* restore the window-enable state saved at entry */
	lcd->winenable = winenable;

	/*
	 * Re-enable screen now that it is configured
	 */
	lcd->screen |= LCD_SCREEN_SEN;
	au_sync();

	/* Call init of panel */
	if (panel->device_init != NULL) panel->device_init();

	/* FIX!!!! not appropriate on panel change!!! Global setup/init */
	lcd->intenable = 0;
	lcd->intstatus = ~0;
	lcd->backcolor = win->mode_backcolor;

	/* Setup Color Key - FIX!!! */
	lcd->colorkey = win->mode_colorkey;
	lcd->colorkeymsk = win->mode_colorkeymsk;

	/* Setup HWCursor - FIX!!! Need to support this eventually */
	lcd->hwc.cursorctrl = 0;
	lcd->hwc.cursorpos = 0;
	lcd->hwc.cursorcolor0 = 0;
	lcd->hwc.cursorcolor1 = 0;
	lcd->hwc.cursorcolor2 = 0;
	lcd->hwc.cursorcolor3 = 0;


#if 0
#define D(X) printk("%25s: %08X\n", #X, X)
	D(lcd->screen);
	D(lcd->horztiming);
	D(lcd->verttiming);
	D(lcd->clkcontrol);
	D(lcd->pwmdiv);
	D(lcd->pwmhi);
	D(lcd->outmask);
	D(lcd->fifoctrl);
	D(lcd->window[0].winctrl0);
	D(lcd->window[0].winctrl1);
	D(lcd->window[0].winctrl2);
	D(lcd->window[0].winbuf0);
	D(lcd->window[0].winbuf1);
	D(lcd->window[0].winbufctrl);
	D(lcd->window[1].winctrl0);
	D(lcd->window[1].winctrl1);
	D(lcd->window[1].winctrl2);
	D(lcd->window[1].winbuf0);
	D(lcd->window[1].winbuf1);
	D(lcd->window[1].winbufctrl);
	D(lcd->window[2].winctrl0);
	D(lcd->window[2].winctrl1);
	D(lcd->window[2].winctrl2);
	D(lcd->window[2].winbuf0);
	D(lcd->window[2].winbuf1);
	D(lcd->window[2].winbufctrl);
	D(lcd->window[3].winctrl0);
	D(lcd->window[3].winctrl1);
	D(lcd->window[3].winctrl2);
	D(lcd->window[3].winbuf0);
	D(lcd->window[3].winbuf1);
	D(lcd->window[3].winbufctrl);
	D(lcd->winenable);
	D(lcd->intenable);
	D(lcd->intstatus);
	D(lcd->backcolor);
	D(lcd->winenable);
	D(lcd->colorkey);
	D(lcd->colorkeymsk);
	D(lcd->hwc.cursorctrl);
	D(lcd->hwc.cursorpos);
	D(lcd->hwc.cursorcolor0);
	D(lcd->hwc.cursorcolor1);
	D(lcd->hwc.cursorcolor2);
	D(lcd->hwc.cursorcolor3);
#endif
}
2871
/*
 * Apply the compiled-in window settings for fbdev's plane: pixel format
 * and priority (winctrl1), position (via au1200_setlocation), stride and
 * 1:1 scaling (winctrl2), then set the plane's enable bit if configured.
 */
static void au1200_setmode(struct au1200fb_device *fbdev)
{
	int plane = fbdev->plane;
	/* Window/plane setup */
	lcd->window[plane].winctrl1 = ( 0
		| LCD_WINCTRL1_PRI_N(plane)
		| win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */
		) ;

	au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos);

	lcd->window[plane].winctrl2 = ( 0
		| LCD_WINCTRL2_CKMODE_00
		| LCD_WINCTRL2_DBM
		| LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length)
		| LCD_WINCTRL2_SCX_1
		| LCD_WINCTRL2_SCY_1
		) ;
	lcd->winenable |= win->w[plane].mode_winenable;
	au_sync();
}
2893
2894
/* Inline helpers */

/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/

/* True when the panel-type field of mode_screen selects a color panel. */
#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN)

/* Bitfields format supported by the controller.
 * Indexed by (mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25; see
 * au1200fb_fb_check_var(). */
static struct fb_bitfield rgb_bitfields[][4] = {
  	/*     Red, 	   Green, 	 Blue, 	     Transp   */
	[LCD_WINCTRL1_FRM_16BPP655 >> 25] =
		{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPP565 >> 25] =
		{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPP556 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPI1555 >> 25] =
		{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPI5551 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_16BPPA1555 >> 25] =
		{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },

	[LCD_WINCTRL1_FRM_16BPPA5551 >> 25] =
		{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },

	[LCD_WINCTRL1_FRM_24BPP >> 25] =
		{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } },

	[LCD_WINCTRL1_FRM_32BPP >> 25] =
		{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } },
};
2932
2933/*-------------------------------------------------------------------------*/
2934
2935/* Helpers */
2936
2937static void au1200fb_update_fbinfo(struct fb_info *fbi)
2938{
2939 /* FIX!!!! This also needs to take the window pixel format into account!!! */
2940
2941 /* Update var-dependent FB info */
2942 if (panel_is_color(panel)) {
2943 if (fbi->var.bits_per_pixel <= 8) {
2944 /* palettized */
2945 fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR;
2946 fbi->fix.line_length = fbi->var.xres_virtual /
2947 (8/fbi->var.bits_per_pixel);
2948 } else {
2949 /* non-palettized */
2950 fbi->fix.visual = FB_VISUAL_TRUECOLOR;
2951 fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8);
2952 }
2953 } else {
2954 /* mono FIX!!! mono 8 and 4 bits */
2955 fbi->fix.visual = FB_VISUAL_MONO10;
2956 fbi->fix.line_length = fbi->var.xres_virtual / 8;
2957 }
2958
2959 fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual;
2960 print_dbg("line length: %d\n", fbi->fix.line_length);
2961 print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel);
2962}
2963
2964/*-------------------------------------------------------------------------*/
2965
2966/* AU1200 framebuffer driver */
2967
2968/* fb_check_var
2969 * Validate var settings with hardware restrictions and modify it if necessary
2970 */
2971static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
2972 struct fb_info *fbi)
2973{
2974 struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
2975 u32 pixclock;
2976 int screen_size, plane;
2977
2978 plane = fbdev->plane;
2979
2980 /* Make sure that the mode respect all LCD controller and
2981 * panel restrictions. */
2982 var->xres = win->w[plane].xres;
2983 var->yres = win->w[plane].yres;
2984
2985 /* No need for virtual resolution support */
2986 var->xres_virtual = var->xres;
2987 var->yres_virtual = var->yres;
2988
2989 var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1);
2990
2991 screen_size = var->xres_virtual * var->yres_virtual;
2992 if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8);
2993 else screen_size /= (8/var->bits_per_pixel);
2994
2995 if (fbdev->fb_len < screen_size)
2996 return -EINVAL; /* Virtual screen is to big, abort */
2997
2998 /* FIX!!!! what are the implicaitons of ignoring this for windows ??? */
2999 /* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel
3000 * clock can only be obtain by dividing this value by an even integer.
3001 * Fallback to a slower pixel clock if necessary. */
3002 pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
3003 pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2));
3004
3005 if (AU1200_LCD_MAX_CLK % pixclock) {
3006 int diff = AU1200_LCD_MAX_CLK % pixclock;
3007 pixclock -= diff;
3008 }
3009
3010 var->pixclock = KHZ2PICOS(pixclock/1000);
3011#if 0
3012 if (!panel_is_active(panel)) {
3013 int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1;
3014
3015 if (!panel_is_color(panel)
3016 && (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) {
3017 /* STN 8bit mono panel support is up to 6MHz pixclock */
3018 var->pixclock = KHZ2PICOS(6000);
3019 } else if (!pcd) {
3020 /* Other STN panel support is up to 12MHz */
3021 var->pixclock = KHZ2PICOS(12000);
3022 }
3023 }
3024#endif
3025 /* Set bitfield accordingly */
3026 switch (var->bits_per_pixel) {
3027 case 16:
3028 {
3029 /* 16bpp True color.
3030 * These must be set to MATCH WINCTRL[FORM] */
3031 int idx;
3032 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
3033 var->red = rgb_bitfields[idx][0];
3034 var->green = rgb_bitfields[idx][1];
3035 var->blue = rgb_bitfields[idx][2];
3036 var->transp = rgb_bitfields[idx][3];
3037 break;
3038 }
3039
3040 case 32:
3041 {
3042 /* 32bpp True color.
3043 * These must be set to MATCH WINCTRL[FORM] */
3044 int idx;
3045 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
3046 var->red = rgb_bitfields[idx][0];
3047 var->green = rgb_bitfields[idx][1];
3048 var->blue = rgb_bitfields[idx][2];
3049 var->transp = rgb_bitfields[idx][3];
3050 break;
3051 }
3052 default:
3053 print_dbg("Unsupported depth %dbpp", var->bits_per_pixel);
3054 return -EINVAL;
3055 }
3056
3057 return 0;
3058}
3059
/* fb_set_par
 * Apply the (already validated) var settings to the hardware: refresh the
 * derived fb_info fields first, then program the LCD controller for this
 * plane. Always succeeds.
 */
static int au1200fb_fb_set_par(struct fb_info *fbi)
{
	struct au1200fb_device *dev = (struct au1200fb_device *)fbi;

	au1200fb_update_fbinfo(fbi);
	au1200_setmode(dev);
	return 0;
}
3073
3074/* fb_setcolreg
3075 * Set color in LCD palette.
3076 */
3077static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
3078 unsigned blue, unsigned transp, struct fb_info *fbi)
3079{
3080 volatile u32 *palette = lcd->palette;
3081 u32 value;
3082
3083 if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
3084 return -EINVAL;
3085
3086 if (fbi->var.grayscale) {
3087 /* Convert color to grayscale */
3088 red = green = blue =
3089 (19595 * red + 38470 * green + 7471 * blue) >> 16;
3090 }
3091
3092 if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
3093 /* Place color in the pseudopalette */
3094 if (regno > 16)
3095 return -EINVAL;
3096
3097 palette = (u32*) fbi->pseudo_palette;
3098
3099 red >>= (16 - fbi->var.red.length);
3100 green >>= (16 - fbi->var.green.length);
3101 blue >>= (16 - fbi->var.blue.length);
3102
3103 value = (red << fbi->var.red.offset) |
3104 (green << fbi->var.green.offset)|
3105 (blue << fbi->var.blue.offset);
3106 value &= 0xFFFF;
3107
3108 } else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
3109 /* COLOR TFT PALLETTIZED (use RGB 565) */
3110 value = (red & 0xF800)|((green >> 5) &
3111 0x07E0)|((blue >> 11) & 0x001F);
3112 value &= 0xFFFF;
3113
3114 } else if (0 /*panel_is_color(fbdev->panel)*/) {
3115 /* COLOR STN MODE */
3116 value = 0x1234;
3117 value &= 0xFFF;
3118 } else {
3119 /* MONOCHROME MODE */
3120 value = (green >> 12) & 0x000F;
3121 value &= 0xF;
3122 }
3123
3124 palette[regno] = value;
3125
3126 return 0;
3127}
3128
3129/* fb_blank
3130 * Blank the screen. Depending on the mode, the screen will be
3131 * activated with the backlight color, or desactivated
3132 */
3133static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
3134{
3135 /* Short-circuit screen blanking */
3136 if (noblanking)
3137 return 0;
3138
3139 switch (blank_mode) {
3140
3141 case FB_BLANK_UNBLANK:
3142 case FB_BLANK_NORMAL:
3143 /* printk("turn on panel\n"); */
3144 au1200_setpanel(panel);
3145 break;
3146 case FB_BLANK_VSYNC_SUSPEND:
3147 case FB_BLANK_HSYNC_SUSPEND:
3148 case FB_BLANK_POWERDOWN:
3149 /* printk("turn off panel\n"); */
3150 au1200_setpanel(NULL);
3151 break;
3152 default:
3153 break;
3154
3155 }
3156
3157 /* FB_BLANK_NORMAL is a soft blank */
3158 return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0;
3159}
3160
3161/* fb_mmap
3162 * Map video memory in user space. We don't use the generic fb_mmap
3163 * method mainly to allow the use of the TLB streaming flag (CCA=6)
3164 */
3165static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
3166
3167{
3168 unsigned int len;
3169 unsigned long start=0, off;
3170 struct au1200fb_device *fbdev = (struct au1200fb_device *) info;
3171
3172#ifdef CONFIG_PM
3173 au1xxx_pm_access(LCD_pm_dev);
3174#endif
3175
3176 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
3177 return -EINVAL;
3178 }
3179
3180 start = fbdev->fb_phys & PAGE_MASK;
3181 len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
3182
3183 off = vma->vm_pgoff << PAGE_SHIFT;
3184
3185 if ((vma->vm_end - vma->vm_start + off) > len) {
3186 return -EINVAL;
3187 }
3188
3189 off += start;
3190 vma->vm_pgoff = off >> PAGE_SHIFT;
3191
3192 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3193 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
3194
3195 vma->vm_flags |= VM_IO;
3196
3197 return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
3198 vma->vm_end - vma->vm_start,
3199 vma->vm_page_prot);
3200
3201 return 0;
3202}
3203
3204static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
3205{
3206
3207 unsigned int hi1, divider;
3208
3209 /* SCREEN_SIZE: user cannot reset size, must switch panel choice */
3210
3211 if (pdata->flags & SCREEN_BACKCOLOR)
3212 lcd->backcolor = pdata->backcolor;
3213
3214 if (pdata->flags & SCREEN_BRIGHTNESS) {
3215
3216 // limit brightness pwm duty to >= 30/1600
3217 if (pdata->brightness < 30) {
3218 pdata->brightness = 30;
3219 }
3220 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
3221 hi1 = (lcd->pwmhi >> 16) + 1;
3222 hi1 = (((pdata->brightness & 0xFF)+1) * divider >> 8);
3223 lcd->pwmhi &= 0xFFFF;
3224 lcd->pwmhi |= (hi1 << 16);
3225 }
3226
3227 if (pdata->flags & SCREEN_COLORKEY)
3228 lcd->colorkey = pdata->colorkey;
3229
3230 if (pdata->flags & SCREEN_MASK)
3231 lcd->colorkeymsk = pdata->mask;
3232 au_sync();
3233}
3234
/* Read back the global LCD controller state into *pdata.
 * 'cmd' is unused; kept to mirror set_global()'s signature. */
static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
	unsigned int hi1, divider;

	/* Screen size fields are stored as (size - 1) in hardware */
	pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1;
	pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1;

	pdata->backcolor = lcd->backcolor;
	pdata->colorkey = lcd->colorkey;
	pdata->mask = lcd->colorkeymsk;

	/* brightness: inverse of the PWM duty scaling done in set_global() */
	hi1 = (lcd->pwmhi >> 16) + 1;
	divider = (lcd->pwmdiv & 0x3FFFF) + 1;
	pdata->brightness = ((hi1 << 8) / divider) - 1;
	au_sync();
}
3252
/* Apply user-requested per-window settings in *pdata to LCD window
 * 'plane' (ioctl path). Each field is only touched when its flag bit is
 * set, using a read-modify-write of the relevant window control register. */
static void set_window(unsigned int plane,
	struct au1200_lcd_window_regs_t *pdata)
{
	unsigned int val, bpp;

	/* Window control register 0 */
	if (pdata->flags & WIN_POSITION) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX |
				LCD_WINCTRL0_OY);
		val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX);
		val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY);
		lcd->window[plane].winctrl0 = val;
	}
	if (pdata->flags & WIN_ALPHA_COLOR) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A);
		val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A);
		lcd->window[plane].winctrl0 = val;
	}
	if (pdata->flags & WIN_ALPHA_MODE) {
		val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN);
		val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN);
		lcd->window[plane].winctrl0 = val;
	}

	/* Window control register 1 */
	if (pdata->flags & WIN_PRIORITY) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI);
		val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_CHANNEL) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE);
		val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_BUFFER_FORMAT) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM);
		val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_COLOR_ORDER) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO);
		val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_PIXEL_ORDER) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO);
		val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO);
		lcd->window[plane].winctrl1 = val;
	}
	if (pdata->flags & WIN_SIZE) {
		val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX |
				LCD_WINCTRL1_SZY);
		/* (xsize << 11) - 1, once masked by SZX, equals the
		 * (xsize - 1) << 11 encoding the hardware expects */
		val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX);
		val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY);
		lcd->window[plane].winctrl1 = val;
		/* program buffer line width */
		bpp = winbpp(val) / 8;
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX);
		val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX);
		lcd->window[plane].winctrl2 = val;
	}

	/* Window control register 2 */
	if (pdata->flags & WIN_COLORKEY_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE);
		val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE);
		lcd->window[plane].winctrl2 = val;
	}
	if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM);
		val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM);
		lcd->window[plane].winctrl2 = val;
	}
	if (pdata->flags & WIN_RAM_ARRAY_MODE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM);
		val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM);
		lcd->window[plane].winctrl2 = val;
	}

	/* Buffer line width programmed with WIN_SIZE */

	if (pdata->flags & WIN_BUFFER_SCALE) {
		val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX |
				LCD_WINCTRL2_SCY);
		/* NOTE(review): SCX is a 4-bit field at bits 7:4, so
		 * (xsize << 11) & LCD_WINCTRL2_SCX always yields 0 -- this
		 * looks broken; confirm the intended scale encoding against
		 * the Au1200 databook before relying on WIN_BUFFER_SCALE. */
		val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX);
		val |= ((pdata->ysize) & LCD_WINCTRL2_SCY);
		lcd->window[plane].winctrl2 = val;
	}

	if (pdata->flags & WIN_ENABLE) {
		/* Read-modify-write of the shared per-window enable bits */
		val = lcd->winenable;
		val &= ~(1<<plane);
		val |= (pdata->enable & 1) << plane;
		lcd->winenable = val;
	}
	au_sync();
}
3351
/* Read back the current hardware state of LCD window 'plane' into *pdata.
 * Size fields are stored as (size - 1) in hardware, hence the +1. */
static void get_window(unsigned int plane,
	struct au1200_lcd_window_regs_t *pdata)
{
	/* Window control register 0 */
	pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21;
	pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10;
	pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2;
	pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1;

	/* Window control register 1 */
	pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30;
	pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29;
	pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25;
	pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24;
	pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22;
	pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1;
	pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1;

	/* Window control register 2 */
	pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24;
	pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23;
	pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;

	/* Shared enable register: one bit per window */
	pdata->enable = (lcd->winenable >> plane) & 1;
	au_sync();
}
3378
3379static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
3380 unsigned long arg)
3381{
3382 int plane;
3383 int val;
3384
3385#ifdef CONFIG_PM
3386 au1xxx_pm_access(LCD_pm_dev);
3387#endif
3388
3389 plane = fbinfo2index(info);
3390 print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
3391
3392 if (cmd == AU1200_LCD_FB_IOCTL) {
3393 struct au1200_lcd_iodata_t iodata;
3394
3395 if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata)))
3396 return -EFAULT;
3397
3398 print_dbg("FB IOCTL called\n");
3399
3400 switch (iodata.subcmd) {
3401 case AU1200_LCD_SET_SCREEN:
3402 print_dbg("AU1200_LCD_SET_SCREEN\n");
3403 set_global(cmd, &iodata.global);
3404 break;
3405
3406 case AU1200_LCD_GET_SCREEN:
3407 print_dbg("AU1200_LCD_GET_SCREEN\n");
3408 get_global(cmd, &iodata.global);
3409 break;
3410
3411 case AU1200_LCD_SET_WINDOW:
3412 print_dbg("AU1200_LCD_SET_WINDOW\n");
3413 set_window(plane, &iodata.window);
3414 break;
3415
3416 case AU1200_LCD_GET_WINDOW:
3417 print_dbg("AU1200_LCD_GET_WINDOW\n");
3418 get_window(plane, &iodata.window);
3419 break;
3420
3421 case AU1200_LCD_SET_PANEL:
3422 print_dbg("AU1200_LCD_SET_PANEL\n");
3423 if ((iodata.global.panel_choice >= 0) &&
3424 (iodata.global.panel_choice <
3425 NUM_PANELS))
3426 {
3427 struct panel_settings *newpanel;
3428 panel_index = iodata.global.panel_choice;
3429 newpanel = &known_lcd_panels[panel_index];
3430 au1200_setpanel(newpanel);
3431 }
3432 break;
3433
3434 case AU1200_LCD_GET_PANEL:
3435 print_dbg("AU1200_LCD_GET_PANEL\n");
3436 iodata.global.panel_choice = panel_index;
3437 break;
3438
3439 default:
3440 return -EINVAL;
3441 }
3442
3443 val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata));
3444 if (val) {
3445 print_dbg("error: could not copy %d bytes\n", val);
3446 return -EFAULT;
3447 }
3448 }
3449
3450 return 0;
3451}
3452
3453
3454static struct fb_ops au1200fb_fb_ops = {
3455 .owner = THIS_MODULE,
3456 .fb_check_var = au1200fb_fb_check_var,
3457 .fb_set_par = au1200fb_fb_set_par,
3458 .fb_setcolreg = au1200fb_fb_setcolreg,
3459 .fb_blank = au1200fb_fb_blank,
3460 .fb_fillrect = cfb_fillrect,
3461 .fb_copyarea = cfb_copyarea,
3462 .fb_imageblit = cfb_imageblit,
3463 .fb_sync = NULL,
3464 .fb_ioctl = au1200fb_ioctl,
3465 .fb_mmap = au1200fb_fb_mmap,
3466};
3467
3468/*-------------------------------------------------------------------------*/
3469
/* LCD interrupt handler. No events are acted upon yet; the handler only
 * acknowledges whatever is pending so the line is released. */
static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id, struct pt_regs *regs)
{
	/* Nothing to do for now, just clear any pending interrupt.
	 * NOTE(review): the self-assignment deliberately writes the read
	 * status value back to intstatus -- presumably the bits are
	 * write-one-to-clear; confirm against the Au1200 databook before
	 * "simplifying" this line away. */
	lcd->intstatus = lcd->intstatus;
	au_sync();

	return IRQ_HANDLED;
}
3478
3479/*-------------------------------------------------------------------------*/
3480
3481/* AU1200 LCD device probe helpers */
3482
3483static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
3484{
3485 struct fb_info *fbi = &fbdev->fb_info;
3486 int bpp;
3487
3488 memset(fbi, 0, sizeof(struct fb_info));
3489 fbi->fbops = &au1200fb_fb_ops;
3490
3491 bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
3492
3493 /* Copy monitor specs from panel data */
3494 /* fixme: we're setting up LCD controller windows, so these dont give a
3495 damn as to what the monitor specs are (the panel itself does, but that
3496 isnt done here...so maybe need a generic catchall monitor setting??? */
3497 memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs));
3498
3499 /* We first try the user mode passed in argument. If that failed,
3500 * or if no one has been specified, we default to the first mode of the
3501 * panel list. Note that after this call, var data will be set */
3502 if (!fb_find_mode(&fbi->var,
3503 fbi,
3504 NULL, /* drv_info.opt_mode, */
3505 fbi->monspecs.modedb,
3506 fbi->monspecs.modedb_len,
3507 fbi->monspecs.modedb,
3508 bpp)) {
3509
3510 print_err("Cannot find valid mode for panel %s", panel->name);
3511 return -EFAULT;
3512 }
3513
3514 fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
3515 if (!fbi->pseudo_palette) {
3516 return -ENOMEM;
3517 }
3518 memset(fbi->pseudo_palette, 0, sizeof(u32) * 16);
3519
3520 if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
3521 print_err("Fail to allocate colormap (%d entries)",
3522 AU1200_LCD_NBR_PALETTE_ENTRIES);
3523 kfree(fbi->pseudo_palette);
3524 return -EFAULT;
3525 }
3526
3527 strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
3528 fbi->fix.smem_start = fbdev->fb_phys;
3529 fbi->fix.smem_len = fbdev->fb_len;
3530 fbi->fix.type = FB_TYPE_PACKED_PIXELS;
3531 fbi->fix.xpanstep = 0;
3532 fbi->fix.ypanstep = 0;
3533 fbi->fix.mmio_start = 0;
3534 fbi->fix.mmio_len = 0;
3535 fbi->fix.accel = FB_ACCEL_NONE;
3536
3537 fbi->screen_base = (char __iomem *) fbdev->fb_mem;
3538
3539 au1200fb_update_fbinfo(fbi);
3540
3541 return 0;
3542}
3543
3544/*-------------------------------------------------------------------------*/
3545
3546/* AU1200 LCD controller device driver */
3547
/* Probe: allocate and register one framebuffer per configured plane,
 * then hook the LCD interrupt. Returns 0 or a negative errno.
 * NOTE(review): on failure, only the CURRENT plane's resources are
 * released -- earlier planes' framebuffers stay registered and their
 * memory allocated (see the comment at 'failed'); a fuller unwind loop
 * would be needed to fix that. */
static int au1200fb_drv_probe(struct device *dev)
{
	struct au1200fb_device *fbdev;
	unsigned long page;
	int bpp, plane, ret;

	if (!dev)
		return -EINVAL;

	for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
		bpp = winbpp(win->w[plane].mode_winctrl1);
		/* Windows with no explicit size default to the panel size */
		if (win->w[plane].xres == 0)
			win->w[plane].xres = panel->Xres;
		if (win->w[plane].yres == 0)
			win->w[plane].yres = panel->Yres;

		fbdev = &_au1200fb_devices[plane];
		memset(fbdev, 0, sizeof(struct au1200fb_device));
		fbdev->plane = plane;

		/* Allocate the framebuffer to the maximum screen size */
		fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;

		fbdev->fb_mem = dma_alloc_noncoherent(dev,
				PAGE_ALIGN(fbdev->fb_len),
				&fbdev->fb_phys, GFP_KERNEL);
		if (!fbdev->fb_mem) {
			print_err("fail to allocate frambuffer (size: %dK))",
					fbdev->fb_len / 1024);
			return -ENOMEM;
		}

		/*
		 * Set page reserved so that mmap will work. This is necessary
		 * since we'll be remapping normal memory.
		 */
		for (page = (unsigned long)fbdev->fb_phys;
		     page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
				fbdev->fb_len);
		     page += PAGE_SIZE) {
			SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
		}
		print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
		print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);

		/* Init FB data */
		if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
			goto failed;

		/* Register new framebuffer */
		if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) {
			print_err("cannot register new framebuffer");
			goto failed;
		}

		au1200fb_fb_set_par(&fbdev->fb_info);

#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
		if (plane == 0)
			if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) {
				/* Start display and show logo on boot */
				fb_set_cmap(&fbdev->fb_info.cmap,
						&fbdev->fb_info);

				fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR);
			}
#endif
	}

	/* Now hook interrupt too */
	if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq,
			SA_INTERRUPT | SA_SHIRQ, "lcd", (void *)dev)) < 0) {
		print_err("fail to request interrupt line %d (err: %d)",
				AU1200_LCD_INT, ret);
		goto failed;
	}

	return 0;

failed:
	/* NOTE: This only does the current plane/window that failed; others are still active */
	if (fbdev->fb_mem)
		dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
				fbdev->fb_mem, fbdev->fb_phys);
	if (fbdev->fb_info.cmap.len != 0)
		fb_dealloc_cmap(&fbdev->fb_info.cmap);
	if (fbdev->fb_info.pseudo_palette)
		kfree(fbdev->fb_info.pseudo_palette);
	/* NOTE(review): the IRQ is only requested AFTER the plane loop, so
	 * this plane==0 guard looks like it can never free a held IRQ on
	 * the in-loop failure paths -- verify before relying on it. */
	if (plane == 0)
		free_irq(AU1200_LCD_INT, (void*)dev);
	return ret;
}
3640
3641static int au1200fb_drv_remove(struct device *dev)
3642{
3643 struct au1200fb_device *fbdev;
3644 int plane;
3645
3646 if (!dev)
3647 return -ENODEV;
3648
3649 /* Turn off the panel */
3650 au1200_setpanel(NULL);
3651
3652 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane)
3653 {
3654 fbdev = &_au1200fb_devices[plane];
3655
3656 /* Clean up all probe data */
3657 unregister_framebuffer(&fbdev->fb_info);
3658 if (fbdev->fb_mem)
3659 dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
3660 fbdev->fb_mem, fbdev->fb_phys);
3661 if (fbdev->fb_info.cmap.len != 0)
3662 fb_dealloc_cmap(&fbdev->fb_info.cmap);
3663 if (fbdev->fb_info.pseudo_palette)
3664 kfree(fbdev->fb_info.pseudo_palette);
3665 }
3666
3667 free_irq(AU1200_LCD_INT, (void *)dev);
3668
3669 return 0;
3670}
3671
3672#ifdef CONFIG_PM
/* Driver-model suspend hook: intentionally a stub. Sleep/wake handling
 * appears to go through the Au1xxx PM callback path instead (see
 * au1200fb_pm_callback) -- TODO implement proper driver-model suspend. */
static int au1200fb_drv_suspend(struct device *dev, u32 state, u32 level)
{
	/* TODO */
	return 0;
}
3678
/* Driver-model resume hook: intentionally a stub, mirroring
 * au1200fb_drv_suspend -- TODO implement proper driver-model resume. */
static int au1200fb_drv_resume(struct device *dev, u32 level)
{
	/* TODO */
	return 0;
}
3684#endif /* CONFIG_PM */
3685
/* Platform-bus driver glue: binds to the "au1200-lcd" platform device. */
static struct device_driver au1200fb_driver = {
	.name		= "au1200-lcd",
	.bus		= &platform_bus_type,
	.probe		= au1200fb_drv_probe,
	.remove		= au1200fb_drv_remove,
#ifdef CONFIG_PM
	.suspend	= au1200fb_drv_suspend,
	.resume		= au1200fb_drv_resume,
#endif
};
3696
3697/*-------------------------------------------------------------------------*/
3698
3699/* Kernel driver */
3700
3701static void au1200fb_setup(void)
3702{
3703 char* options = NULL;
3704 char* this_opt;
3705 int num_panels = ARRAY_SIZE(known_lcd_panels);
3706 int panel_idx = -1;
3707
3708 fb_get_options(DRIVER_NAME, &options);
3709
3710 if (options) {
3711 while ((this_opt = strsep(&options,",")) != NULL) {
3712 /* Panel option - can be panel name,
3713 * "bs" for board-switch, or number/index */
3714 if (!strncmp(this_opt, "panel:", 6)) {
3715 int i;
3716 long int li;
3717 char *endptr;
3718 this_opt += 6;
3719 /* First check for index, which allows
3720 * to short circuit this mess */
3721 li = simple_strtol(this_opt, &endptr, 0);
3722 if (*endptr == '\0') {
3723 panel_idx = (int)li;
3724 }
3725 else if (strcmp(this_opt, "bs") == 0) {
3726 extern int board_au1200fb_panel(void);
3727 panel_idx = board_au1200fb_panel();
3728 }
3729
3730 else
3731 for (i = 0; i < num_panels; i++) {
3732 if (!strcmp(this_opt, known_lcd_panels[i].name)) {
3733 panel_idx = i;
3734 break;
3735 }
3736 }
3737
3738 if ((panel_idx < 0) || (panel_idx >= num_panels)) {
3739 print_warn("Panel %s not supported!", this_opt);
3740 }
3741 else
3742 panel_index = panel_idx;
3743 }
3744
3745 else if (strncmp(this_opt, "nohwcursor", 10) == 0) {
3746 nohwcursor = 1;
3747 }
3748
3749 /* Unsupported option */
3750 else {
3751 print_warn("Unsupported option \"%s\"", this_opt);
3752 }
3753 }
3754 }
3755}
3756
3757#ifdef CONFIG_PM
3758static int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
3759 au1xxx_request_t request, void *data) {
3760 int retval = -1;
3761 unsigned int d = 0;
3762 unsigned int brightness = 0;
3763
3764 if (request == AU1XXX_PM_SLEEP) {
3765 board_au1200fb_panel_shutdown();
3766 }
3767 else if (request == AU1XXX_PM_WAKEUP) {
3768 if(dev->prev_state == SLEEP_STATE)
3769 {
3770 int plane;
3771 au1200_setpanel(panel);
3772 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
3773 struct au1200fb_device *fbdev;
3774 fbdev = &_au1200fb_devices[plane];
3775 au1200fb_fb_set_par(&fbdev->fb_info);
3776 }
3777 }
3778
3779 d = *((unsigned int*)data);
3780 if(d <=10) brightness = 26;
3781 else if(d<=20) brightness = 51;
3782 else if(d<=30) brightness = 77;
3783 else if(d<=40) brightness = 102;
3784 else if(d<=50) brightness = 128;
3785 else if(d<=60) brightness = 153;
3786 else if(d<=70) brightness = 179;
3787 else if(d<=80) brightness = 204;
3788 else if(d<=90) brightness = 230;
3789 else brightness = 255;
3790 set_brightness(brightness);
3791 } else if (request == AU1XXX_PM_GETSTATUS) {
3792 return dev->cur_state;
3793 } else if (request == AU1XXX_PM_ACCESS) {
3794 if (dev->cur_state != SLEEP_STATE)
3795 return retval;
3796 else {
3797 au1200_setpanel(panel);
3798 }
3799 } else if (request == AU1XXX_PM_IDLE) {
3800 } else if (request == AU1XXX_PM_CLEANUP) {
3801 }
3802
3803 return retval;
3804}
3805#endif
3806
/* Module init: parse boot options, select the panel/window tables, light
 * the panel, optionally register with the Au1xxx PM framework, and
 * register the platform driver (which triggers au1200fb_drv_probe). */
static int __init au1200fb_init(void)
{
	print_info("" DRIVER_DESC "");

	/* Setup driver with options */
	au1200fb_setup();

	/* Point to the panel selected */
	panel = &known_lcd_panels[panel_index];
	win = &windows[window_index];

	printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
	printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);

	/* Kickstart the panel, the framebuffers/windows come soon enough */
	au1200_setpanel(panel);

	#ifdef CONFIG_PM
	LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL);
	if ( LCD_pm_dev == NULL)
		printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n");
	else
		printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n");
	#endif

	return driver_register(&au1200fb_driver);
}
3834
/* Module exit: unregister the platform driver; the per-plane teardown
 * happens in au1200fb_drv_remove(). */
static void __exit au1200fb_cleanup(void)
{
	driver_unregister(&au1200fb_driver);
}
3839
3840module_init(au1200fb_init);
3841module_exit(au1200fb_cleanup);
3842
3843MODULE_DESCRIPTION(DRIVER_DESC);
3844MODULE_LICENSE("GPL");
diff --git a/drivers/video/au1200fb.h b/drivers/video/au1200fb.h
new file mode 100644
index 000000000000..e2672714d8d4
--- /dev/null
+++ b/drivers/video/au1200fb.h
@@ -0,0 +1,572 @@
1/*
2 * BRIEF MODULE DESCRIPTION
3 * Hardware definitions for the Au1200 LCD controller
4 *
5 * Copyright 2004 AMD
6 * Author: AMD
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef _AU1200LCD_H
30#define _AU1200LCD_H
31
32/********************************************************************/
/* Base (KSEG1, uncached) address of the Au1200 LCD controller registers */
#define AU1200_LCD_ADDR 0xB5000000

/* NOTE(review): these aliases should eventually be replaced with the
 * kernel's u8/u32 from <linux/types.h>; kept as-is because the register
 * struct below depends on them. */
#define uint8 unsigned char
#define uint32 unsigned int
37
/*
 * Register file of the Au1200 LCD controller, intended to be overlaid on
 * AU1200_LCD_ADDR. 'volatile' marks locations the hardware may change or
 * whose accesses have side effects; the reservedN pads keep the following
 * register groups at their hardware offsets.
 * NOTE(review): check field offsets against the Au1200 databook before
 * modifying this layout.
 */
struct au1200_lcd {
	volatile uint32	reserved0;
	volatile uint32	screen;		/* panel size / polarity / type */
	volatile uint32	backcolor;	/* background RGB */
	volatile uint32	horztiming;
	volatile uint32	verttiming;
	volatile uint32	clkcontrol;
	volatile uint32	pwmdiv;		/* backlight PWM divider */
	volatile uint32	pwmhi;		/* backlight PWM duty (HI1/HI0) */
	volatile uint32	reserved1;
	volatile uint32	winenable;	/* one enable bit per window */
	volatile uint32	colorkey;
	volatile uint32	colorkeymsk;
	struct
	{
		volatile uint32	cursorctrl;
		volatile uint32	cursorpos;
		volatile uint32	cursorcolor0;
		volatile uint32	cursorcolor1;
		volatile uint32	cursorcolor2;
		uint32	cursorcolor3;
	} hwc;				/* hardware cursor registers */
	volatile uint32	intstatus;
	volatile uint32	intenable;
	volatile uint32	outmask;
	volatile uint32	fifoctrl;
	uint32	reserved2[(0x0100-0x0058)/4];
	struct
	{
		volatile uint32	winctrl0;
		volatile uint32	winctrl1;
		volatile uint32	winctrl2;
		volatile uint32	winbuf0;
		volatile uint32	winbuf1;
		volatile uint32	winbufctrl;
		uint32	winreserved0;
		uint32	winreserved1;
	} window[4];			/* four overlay windows (planes) */

	uint32	reserved3[(0x0400-0x0180)/4];

	volatile uint32	palette[(0x0800-0x0400)/4];

	volatile uint8	cursorpattern[256];
};
83
/* lcd_screen: panel geometry and type. SX/SY are stored as (size - 1),
 * which the _N() helper macros encode. */
#define LCD_SCREEN_SEN		(1<<31)
#define LCD_SCREEN_SX		(0x07FF<<19)
#define LCD_SCREEN_SY		(0x07FF<< 8)
#define LCD_SCREEN_SWP		(1<<7)
#define LCD_SCREEN_SWD		(1<<6)
#define LCD_SCREEN_PT		(7<<0)
#define LCD_SCREEN_PT_TFT	(0<<0)
#define LCD_SCREEN_SX_N(WIDTH)	((WIDTH-1)<<19)
#define LCD_SCREEN_SY_N(HEIGHT)	((HEIGHT-1)<<8)
#define LCD_SCREEN_PT_CSTN	(1<<0)
#define LCD_SCREEN_PT_CDSTN	(2<<0)
#define LCD_SCREEN_PT_M8STN	(3<<0)
#define LCD_SCREEN_PT_M4STN	(4<<0)

/* lcd_backcolor: background color, one byte per channel */
#define LCD_BACKCOLOR_SBGR		(0xFF<<16)
#define LCD_BACKCOLOR_SBGG		(0xFF<<8)
#define LCD_BACKCOLOR_SBGB		(0xFF<<0)
#define LCD_BACKCOLOR_SBGR_N(N)	((N)<<16)
#define LCD_BACKCOLOR_SBGG_N(N)	((N)<<8)
#define LCD_BACKCOLOR_SBGB_N(N)	((N)<<0)

/* lcd_winenable: one enable bit per overlay window */
#define LCD_WINENABLE_WEN3		(1<<3)
#define LCD_WINENABLE_WEN2		(1<<2)
#define LCD_WINENABLE_WEN1		(1<<1)
#define LCD_WINENABLE_WEN0		(1<<0)

/* lcd_colorkey: color-key value, one byte per channel */
#define LCD_COLORKEY_CKR		(0xFF<<16)
#define LCD_COLORKEY_CKG		(0xFF<<8)
#define LCD_COLORKEY_CKB		(0xFF<<0)
#define LCD_COLORKEY_CKR_N(N)	((N)<<16)
#define LCD_COLORKEY_CKG_N(N)	((N)<<8)
#define LCD_COLORKEY_CKB_N(N)	((N)<<0)

/* lcd_colorkeymsk: per-channel mask applied to the color key */
#define LCD_COLORKEYMSK_CKMR		(0xFF<<16)
#define LCD_COLORKEYMSK_CKMG		(0xFF<<8)
#define LCD_COLORKEYMSK_CKMB		(0xFF<<0)
#define LCD_COLORKEYMSK_CKMR_N(N)	((N)<<16)
#define LCD_COLORKEYMSK_CKMG_N(N)	((N)<<8)
#define LCD_COLORKEYMSK_CKMB_N(N)	((N)<<0)
128
/* lcd windows control 0: window origin (OX/OY), alpha value and enable */
#define LCD_WINCTRL0_OX		(0x07FF<<21)
#define LCD_WINCTRL0_OY		(0x07FF<<10)
#define LCD_WINCTRL0_A		(0x00FF<<2)
#define LCD_WINCTRL0_AEN	(1<<1)
#define LCD_WINCTRL0_OX_N(N)	((N)<<21)
#define LCD_WINCTRL0_OY_N(N)	((N)<<10)
#define LCD_WINCTRL0_A_N(N)	((N)<<2)

/* lcd windows control 1: priority, pipe, pixel format (FRM), color/pixel
 * order and window size. SZX/SZY are stored as (size - 1). */
#define LCD_WINCTRL1_PRI	(3<<30)
#define LCD_WINCTRL1_PIPE	(1<<29)
#define LCD_WINCTRL1_FRM	(0xF<<25)
#define LCD_WINCTRL1_CCO	(1<<24)
#define LCD_WINCTRL1_PO		(3<<22)
#define LCD_WINCTRL1_SZX	(0x07FF<<11)
#define LCD_WINCTRL1_SZY	(0x07FF<<0)
#define LCD_WINCTRL1_FRM_1BPP		(0<<25)
#define LCD_WINCTRL1_FRM_2BPP		(1<<25)
#define LCD_WINCTRL1_FRM_4BPP		(2<<25)
#define LCD_WINCTRL1_FRM_8BPP		(3<<25)
#define LCD_WINCTRL1_FRM_12BPP		(4<<25)
#define LCD_WINCTRL1_FRM_16BPP655	(5<<25)
#define LCD_WINCTRL1_FRM_16BPP565	(6<<25)
#define LCD_WINCTRL1_FRM_16BPP556	(7<<25)
#define LCD_WINCTRL1_FRM_16BPPI1555	(8<<25)
#define LCD_WINCTRL1_FRM_16BPPI5551	(9<<25)
#define LCD_WINCTRL1_FRM_16BPPA1555	(10<<25)
#define LCD_WINCTRL1_FRM_16BPPA5551	(11<<25)
#define LCD_WINCTRL1_FRM_24BPP		(12<<25)
#define LCD_WINCTRL1_FRM_32BPP		(13<<25)
#define LCD_WINCTRL1_PRI_N(N)	((N)<<30)
#define LCD_WINCTRL1_PO_00	(0<<22)
#define LCD_WINCTRL1_PO_01	(1<<22)
#define LCD_WINCTRL1_PO_10	(2<<22)
#define LCD_WINCTRL1_PO_11	(3<<22)
#define LCD_WINCTRL1_SZX_N(N)	((N-1)<<11)
#define LCD_WINCTRL1_SZY_N(N)	((N-1)<<0)

/* lcd windows control 2: colorkey mode, double buffering, RAM usage,
 * buffer line width (BX, bytes) and X/Y scale factors */
#define LCD_WINCTRL2_CKMODE	(3<<24)
#define LCD_WINCTRL2_DBM	(1<<23)
#define LCD_WINCTRL2_RAM	(3<<21)
#define LCD_WINCTRL2_BX		(0x1FFF<<8)
#define LCD_WINCTRL2_SCX	(0xF<<4)
#define LCD_WINCTRL2_SCY	(0xF<<0)
#define LCD_WINCTRL2_CKMODE_00	(0<<24)
#define LCD_WINCTRL2_CKMODE_01	(1<<24)
#define LCD_WINCTRL2_CKMODE_10	(2<<24)
#define LCD_WINCTRL2_CKMODE_11	(3<<24)
#define LCD_WINCTRL2_RAM_NONE		(0<<21)
#define LCD_WINCTRL2_RAM_PALETTE	(1<<21)
#define LCD_WINCTRL2_RAM_GAMMA		(2<<21)
#define LCD_WINCTRL2_RAM_BUFFER		(3<<21)
#define LCD_WINCTRL2_BX_N(N)	((N)<<8)
#define LCD_WINCTRL2_SCX_1	(0<<4)
#define LCD_WINCTRL2_SCX_2	(1<<4)
#define LCD_WINCTRL2_SCX_4	(2<<4)
#define LCD_WINCTRL2_SCY_1	(0<<0)
#define LCD_WINCTRL2_SCY_2	(1<<0)
#define LCD_WINCTRL2_SCY_4	(2<<0)

/* lcd windows buffer control: double-buffer select bits */
#define LCD_WINBUFCTRL_DB	(1<<1)
#define LCD_WINBUFCTRL_DBN	(1<<0)
195/* lcd_intstatus, lcd_intenable */
196#define LCD_INT_IFO (0xF<<14)
197#define LCD_INT_IFU (0xF<<10)
198#define LCD_INT_OFO (1<<9)
199#define LCD_INT_OFU (1<<8)
200#define LCD_INT_WAIT (1<<3)
201#define LCD_INT_SD (1<<2)
202#define LCD_INT_SA (1<<1)
203#define LCD_INT_SS (1<<0)
204
205/* lcd_horztiming */
206#define LCD_HORZTIMING_HND2 (0x1FF<<18)
207#define LCD_HORZTIMING_HND1 (0x1FF<<9)
208#define LCD_HORZTIMING_HPW (0x1FF<<0)
209#define LCD_HORZTIMING_HND2_N(N)(((N)-1)<<18)
210#define LCD_HORZTIMING_HND1_N(N)(((N)-1)<<9)
211#define LCD_HORZTIMING_HPW_N(N) (((N)-1)<<0)
212
213/* lcd_verttiming */
214#define LCD_VERTTIMING_VND2 (0x1FF<<18)
215#define LCD_VERTTIMING_VND1 (0x1FF<<9)
216#define LCD_VERTTIMING_VPW (0x1FF<<0)
217#define LCD_VERTTIMING_VND2_N(N)(((N)-1)<<18)
218#define LCD_VERTTIMING_VND1_N(N)(((N)-1)<<9)
219#define LCD_VERTTIMING_VPW_N(N) (((N)-1)<<0)
220
221/* lcd_clkcontrol */
222#define LCD_CLKCONTROL_EXT (1<<22)
223#define LCD_CLKCONTROL_DELAY (3<<20)
224#define LCD_CLKCONTROL_CDD (1<<19)
225#define LCD_CLKCONTROL_IB (1<<18)
226#define LCD_CLKCONTROL_IC (1<<17)
227#define LCD_CLKCONTROL_IH (1<<16)
228#define LCD_CLKCONTROL_IV (1<<15)
229#define LCD_CLKCONTROL_BF (0x1F<<10)
230#define LCD_CLKCONTROL_PCD (0x3FF<<0)
231#define LCD_CLKCONTROL_BF_N(N) (((N)-1)<<10)
232#define LCD_CLKCONTROL_PCD_N(N) ((N)<<0)
233
234/* lcd_pwmdiv */
235#define LCD_PWMDIV_EN (1<<31)
236#define LCD_PWMDIV_PWMDIV (0x1FFFF<<0)
237#define LCD_PWMDIV_PWMDIV_N(N) ((N)<<0)
238
239/* lcd_pwmhi */
240#define LCD_PWMHI_PWMHI1 (0xFFFF<<16)
241#define LCD_PWMHI_PWMHI0 (0xFFFF<<0)
242#define LCD_PWMHI_PWMHI1_N(N) ((N)<<16)
243#define LCD_PWMHI_PWMHI0_N(N) ((N)<<0)
244
245/* lcd_hwccon */
246#define LCD_HWCCON_EN (1<<0)
247
248/* lcd_cursorpos */
249#define LCD_CURSORPOS_HWCXOFF (0x1F<<27)
250#define LCD_CURSORPOS_HWCXPOS (0x07FF<<16)
251#define LCD_CURSORPOS_HWCYOFF (0x1F<<11)
252#define LCD_CURSORPOS_HWCYPOS (0x07FF<<0)
253#define LCD_CURSORPOS_HWCXOFF_N(N) ((N)<<27)
254#define LCD_CURSORPOS_HWCXPOS_N(N) ((N)<<16)
255#define LCD_CURSORPOS_HWCYOFF_N(N) ((N)<<11)
256#define LCD_CURSORPOS_HWCYPOS_N(N) ((N)<<0)
257
258/* lcd_cursorcolor */
259#define LCD_CURSORCOLOR_HWCA (0xFF<<24)
260#define LCD_CURSORCOLOR_HWCR (0xFF<<16)
261#define LCD_CURSORCOLOR_HWCG (0xFF<<8)
262#define LCD_CURSORCOLOR_HWCB (0xFF<<0)
263#define LCD_CURSORCOLOR_HWCA_N(N) ((N)<<24)
264#define LCD_CURSORCOLOR_HWCR_N(N) ((N)<<16)
265#define LCD_CURSORCOLOR_HWCG_N(N) ((N)<<8)
266#define LCD_CURSORCOLOR_HWCB_N(N) ((N)<<0)
267
/* lcd_fifoctrl */
/*
 * NOTE(review): F2IF, F1IF and F0IF are all defined as (1<<29),
 * identical to F3IF.  With the REQ fields at bits 24/16/8/0 one would
 * expect four distinct interrupt-flag bits, so this looks like a
 * copy/paste carry-over -- verify against the Au1200 data book before
 * relying on the F2/F1/F0 IF masks.  Values left untouched here.
 */
#define LCD_FIFOCTRL_F3IF	(1<<29)
#define LCD_FIFOCTRL_F3REQ	(0x1F<<24)	/* FIFO 3 request threshold */
#define LCD_FIFOCTRL_F2IF	(1<<29)
#define LCD_FIFOCTRL_F2REQ	(0x1F<<16)
#define LCD_FIFOCTRL_F1IF	(1<<29)
#define LCD_FIFOCTRL_F1REQ	(0x1F<<8)
#define LCD_FIFOCTRL_F0IF	(1<<29)
#define LCD_FIFOCTRL_F0REQ	(0x1F<<0)
/* Threshold encoders store N-1; arguments are parenthesized so
 * expression arguments expand correctly. */
#define LCD_FIFOCTRL_F3REQ_N(N)	(((N)-1)<<24)
#define LCD_FIFOCTRL_F2REQ_N(N)	(((N)-1)<<16)
#define LCD_FIFOCTRL_F1REQ_N(N)	(((N)-1)<<8)
#define LCD_FIFOCTRL_F0REQ_N(N)	(((N)-1)<<0)
281
282/* lcd_outmask */
283#define LCD_OUTMASK_MASK (0x00FFFFFF)
284
285/********************************************************************/
286#endif /* _AU1200LCD_H */
287/*
288 * BRIEF MODULE DESCRIPTION
289 * Hardware definitions for the Au1200 LCD controller
290 *
291 * Copyright 2004 AMD
292 * Author: AMD
293 *
294 * This program is free software; you can redistribute it and/or modify it
295 * under the terms of the GNU General Public License as published by the
296 * Free Software Foundation; either version 2 of the License, or (at your
297 * option) any later version.
298 *
299 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
300 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
301 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
302 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
303 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
304 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
305 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
306 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
307 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
308 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
309 *
310 * You should have received a copy of the GNU General Public License along
311 * with this program; if not, write to the Free Software Foundation, Inc.,
312 * 675 Mass Ave, Cambridge, MA 02139, USA.
313 */
314
#ifndef _AU1200LCD_H
#define _AU1200LCD_H

/********************************************************************/
/*
 * Base address of the LCD controller register block.
 * NOTE(review): 0xB5000000 lies in the MIPS KSEG1 (uncached) segment,
 * presumably the uncached MMIO alias -- confirm against the Au1200
 * data book.
 */
#define AU1200_LCD_ADDR 0xB5000000

/*
 * NOTE(review): object-like macros used as type names can silently
 * collide with typedefs of the same name in other headers; a typedef
 * (or the kernel's u8/u32 types) would be safer.  Left as-is because
 * other Au1xxx headers may rely on these exact macros -- TODO confirm.
 */
#define uint8 unsigned char
#define uint32 unsigned int
323
/*
 * Register layout of the Au1200 LCD controller, mapped at
 * AU1200_LCD_ADDR.  The reservedN arrays pad the structure so that
 * later members land at their hardware offsets (offsets inferred from
 * the pad arithmetic: windows at 0x0100, palette at 0x0400).
 * Bit definitions for each register are the LCD_* macros below.
 */
struct au1200_lcd {
	volatile uint32	reserved0;
	volatile uint32	screen;		/* panel size/type control (LCD_SCREEN_*) */
	volatile uint32	backcolor;	/* background colour (LCD_BACKCOLOR_*) */
	volatile uint32	horztiming;	/* horizontal timing (LCD_HORZTIMING_*) */
	volatile uint32	verttiming;	/* vertical timing (LCD_VERTTIMING_*) */
	volatile uint32	clkcontrol;	/* pixel clock control (LCD_CLKCONTROL_*) */
	volatile uint32	pwmdiv;		/* PWM divider (LCD_PWMDIV_*) */
	volatile uint32	pwmhi;		/* PWM high counts (LCD_PWMHI_*) */
	volatile uint32	reserved1;
	volatile uint32	winenable;	/* per-window enable bits (LCD_WINENABLE_*) */
	volatile uint32	colorkey;	/* colour key value (LCD_COLORKEY_*) */
	volatile uint32	colorkeymsk;	/* colour key mask (LCD_COLORKEYMSK_*) */
	struct				/* hardware cursor registers */
	{
		volatile uint32	cursorctrl;	/* see LCD_HWCCON_EN */
		volatile uint32	cursorpos;	/* LCD_CURSORPOS_* */
		volatile uint32	cursorcolor0;	/* LCD_CURSORCOLOR_* */
		volatile uint32	cursorcolor1;
		volatile uint32	cursorcolor2;
		uint32	cursorcolor3;	/* NOTE(review): not volatile, unlike
					 * its siblings -- TODO confirm this is
					 * intentional and not an oversight. */
	} hwc;
	volatile uint32	intstatus;	/* interrupt status (LCD_INT_*) */
	volatile uint32	intenable;	/* interrupt enable (same LCD_INT_* bits) */
	volatile uint32	outmask;	/* output pin mask (LCD_OUTMASK_*) */
	volatile uint32	fifoctrl;	/* FIFO thresholds/flags (LCD_FIFOCTRL_*) */
	uint32	reserved2[(0x0100-0x0058)/4];	/* pad: next member at offset 0x0100 */
	struct				/* one register set per overlay window */
	{
		volatile uint32	winctrl0;	/* LCD_WINCTRL0_* */
		volatile uint32	winctrl1;	/* LCD_WINCTRL1_* */
		volatile uint32	winctrl2;	/* LCD_WINCTRL2_* */
		volatile uint32	winbuf0;	/* window buffer 0 */
		volatile uint32	winbuf1;	/* window buffer 1 (double buffering, see LCD_WINBUFCTRL_*) */
		volatile uint32	winbufctrl;	/* LCD_WINBUFCTRL_* */
		uint32	winreserved0;
		uint32	winreserved1;
	} window[4];

	uint32	reserved3[(0x0400-0x0180)/4];	/* pad: palette at offset 0x0400 */

	volatile uint32	palette[(0x0800-0x0400)/4];	/* palette/gamma RAM (see LCD_WINCTRL2_RAM_*) */

	volatile uint8	cursorpattern[256];	/* hardware cursor pattern memory */
};
369
/* lcd_screen */
#define LCD_SCREEN_SEN	(1U<<31)	/* screen enable; 1U avoids shifting
					 * into the sign bit of int (UB in
					 * ISO C) */
#define LCD_SCREEN_SX	(0x07FF<<19)	/* screen width field */
#define LCD_SCREEN_SY	(0x07FF<< 8)	/* screen height field */
#define LCD_SCREEN_SWP	(1<<7)
#define LCD_SCREEN_SWD	(1<<6)
#define LCD_SCREEN_PT	(7<<0)		/* panel type field, see PT_* below */
#define LCD_SCREEN_PT_TFT	(0<<0)
/* Width/height encoders: the hardware stores size minus one.  Arguments
 * are parenthesized so expression arguments expand correctly. */
#define LCD_SCREEN_SX_N(WIDTH)	(((WIDTH)-1)<<19)
#define LCD_SCREEN_SY_N(HEIGHT)	(((HEIGHT)-1)<<8)
#define LCD_SCREEN_PT_CSTN	(1<<0)
#define LCD_SCREEN_PT_CDSTN	(2<<0)
#define LCD_SCREEN_PT_M8STN	(3<<0)
#define LCD_SCREEN_PT_M4STN	(4<<0)
384
/* lcd_backcolor: background colour, 8 bits per R/G/B component */
#define LCD_BACKCOLOR_SBGR	(0xFF<<16)
#define LCD_BACKCOLOR_SBGG	(0xFF<<8)
#define LCD_BACKCOLOR_SBGB	(0xFF<<0)
#define LCD_BACKCOLOR_SBGR_N(N)	((N)<<16)
#define LCD_BACKCOLOR_SBGG_N(N)	((N)<<8)
#define LCD_BACKCOLOR_SBGB_N(N)	((N)<<0)

/* lcd_winenable: one enable bit per overlay window 0..3 */
#define LCD_WINENABLE_WEN3	(1<<3)
#define LCD_WINENABLE_WEN2	(1<<2)
#define LCD_WINENABLE_WEN1	(1<<1)
#define LCD_WINENABLE_WEN0	(1<<0)

/* lcd_colorkey: colour key value, 8 bits per R/G/B component */
#define LCD_COLORKEY_CKR	(0xFF<<16)
#define LCD_COLORKEY_CKG	(0xFF<<8)
#define LCD_COLORKEY_CKB	(0xFF<<0)
#define LCD_COLORKEY_CKR_N(N)	((N)<<16)
#define LCD_COLORKEY_CKG_N(N)	((N)<<8)
#define LCD_COLORKEY_CKB_N(N)	((N)<<0)

/* lcd_colorkeymsk: per-component mask applied to the colour key */
#define LCD_COLORKEYMSK_CKMR	(0xFF<<16)
#define LCD_COLORKEYMSK_CKMG	(0xFF<<8)
#define LCD_COLORKEYMSK_CKMB	(0xFF<<0)
#define LCD_COLORKEYMSK_CKMR_N(N)	((N)<<16)
#define LCD_COLORKEYMSK_CKMG_N(N)	((N)<<8)
#define LCD_COLORKEYMSK_CKMB_N(N)	((N)<<0)

/* lcd windows control 0: window origin (OX/OY) and alpha (A/AEN) */
#define LCD_WINCTRL0_OX	(0x07FF<<21)
#define LCD_WINCTRL0_OY	(0x07FF<<10)
#define LCD_WINCTRL0_A	(0x00FF<<2)
#define LCD_WINCTRL0_AEN	(1<<1)
#define LCD_WINCTRL0_OX_N(N)	((N)<<21)
#define LCD_WINCTRL0_OY_N(N)	((N)<<10)
#define LCD_WINCTRL0_A_N(N)	((N)<<2)
423
/* lcd windows control 1 */
#define LCD_WINCTRL1_PRI	(3<<30)		/* window priority field */
#define LCD_WINCTRL1_PIPE	(1<<29)
#define LCD_WINCTRL1_FRM	(0xF<<25)	/* pixel format field, see FRM_* */
#define LCD_WINCTRL1_CCO	(1<<24)
#define LCD_WINCTRL1_PO	(3<<22)		/* pixel ordering field, see PO_* */
#define LCD_WINCTRL1_SZX	(0x07FF<<11)	/* window width field */
#define LCD_WINCTRL1_SZY	(0x07FF<<0)	/* window height field */
/* FRM pixel format encodings */
#define LCD_WINCTRL1_FRM_1BPP	(0<<25)
#define LCD_WINCTRL1_FRM_2BPP	(1<<25)
#define LCD_WINCTRL1_FRM_4BPP	(2<<25)
#define LCD_WINCTRL1_FRM_8BPP	(3<<25)
#define LCD_WINCTRL1_FRM_12BPP	(4<<25)
#define LCD_WINCTRL1_FRM_16BPP655	(5<<25)
#define LCD_WINCTRL1_FRM_16BPP565	(6<<25)
#define LCD_WINCTRL1_FRM_16BPP556	(7<<25)
#define LCD_WINCTRL1_FRM_16BPPI1555	(8<<25)
#define LCD_WINCTRL1_FRM_16BPPI5551	(9<<25)
#define LCD_WINCTRL1_FRM_16BPPA1555	(10<<25)
#define LCD_WINCTRL1_FRM_16BPPA5551	(11<<25)
#define LCD_WINCTRL1_FRM_24BPP	(12<<25)
#define LCD_WINCTRL1_FRM_32BPP	(13<<25)
#define LCD_WINCTRL1_PRI_N(N)	((N)<<30)
#define LCD_WINCTRL1_PO_00	(0<<22)
#define LCD_WINCTRL1_PO_01	(1<<22)
#define LCD_WINCTRL1_PO_10	(2<<22)
#define LCD_WINCTRL1_PO_11	(3<<22)
/* Size encoders store size minus one; arguments parenthesized so
 * expression arguments expand correctly (matches LCD_HORZTIMING_*_N). */
#define LCD_WINCTRL1_SZX_N(N)	(((N)-1)<<11)
#define LCD_WINCTRL1_SZY_N(N)	(((N)-1)<<0)
453
/* lcd windows control 2 */
#define LCD_WINCTRL2_CKMODE	(3<<24)		/* colour-key mode field, see CKMODE_* */
#define LCD_WINCTRL2_DBM	(1<<23)
#define LCD_WINCTRL2_RAM	(3<<21)		/* RAM usage field, see RAM_* */
#define LCD_WINCTRL2_BX	(0x1FFF<<8)
#define LCD_WINCTRL2_SCX	(0xF<<4)	/* X scale field, see SCX_* */
#define LCD_WINCTRL2_SCY	(0xF<<0)	/* Y scale field, see SCY_* */
#define LCD_WINCTRL2_CKMODE_00	(0<<24)
#define LCD_WINCTRL2_CKMODE_01	(1<<24)
#define LCD_WINCTRL2_CKMODE_10	(2<<24)
#define LCD_WINCTRL2_CKMODE_11	(3<<24)
#define LCD_WINCTRL2_RAM_NONE	(0<<21)
#define LCD_WINCTRL2_RAM_PALETTE	(1<<21)
#define LCD_WINCTRL2_RAM_GAMMA	(2<<21)
#define LCD_WINCTRL2_RAM_BUFFER	(3<<21)
#define LCD_WINCTRL2_BX_N(N)	((N)<<8)
/* scale selects; the _1/_2/_4 suffixes presumably mean 1x/2x/4x -- verify */
#define LCD_WINCTRL2_SCX_1	(0<<4)
#define LCD_WINCTRL2_SCX_2	(1<<4)
#define LCD_WINCTRL2_SCX_4	(2<<4)
#define LCD_WINCTRL2_SCY_1	(0<<0)
#define LCD_WINCTRL2_SCY_2	(1<<0)
#define LCD_WINCTRL2_SCY_4	(2<<0)

/* lcd windows buffer control: double-buffer select/next */
#define LCD_WINBUFCTRL_DB	(1<<1)
#define LCD_WINBUFCTRL_DBN	(1<<0)

/* lcd_intstatus, lcd_intenable: same bit layout for status and enable */
#define LCD_INT_IFO	(0xF<<14)	/* input FIFO overflow, one bit per FIFO */
#define LCD_INT_IFU	(0xF<<10)	/* input FIFO underflow, one bit per FIFO */
#define LCD_INT_OFO	(1<<9)		/* output FIFO overflow */
#define LCD_INT_OFU	(1<<8)		/* output FIFO underflow */
#define LCD_INT_WAIT	(1<<3)
#define LCD_INT_SD	(1<<2)
#define LCD_INT_SA	(1<<1)
#define LCD_INT_SS	(1<<0)

/* lcd_horztiming: fields store count minus one, hence the _N encoders */
#define LCD_HORZTIMING_HND2	(0x1FF<<18)
#define LCD_HORZTIMING_HND1	(0x1FF<<9)
#define LCD_HORZTIMING_HPW	(0x1FF<<0)
#define LCD_HORZTIMING_HND2_N(N)(((N)-1)<<18)
#define LCD_HORZTIMING_HND1_N(N)(((N)-1)<<9)
#define LCD_HORZTIMING_HPW_N(N)	(((N)-1)<<0)

/* lcd_verttiming: same minus-one encoding as lcd_horztiming */
#define LCD_VERTTIMING_VND2	(0x1FF<<18)
#define LCD_VERTTIMING_VND1	(0x1FF<<9)
#define LCD_VERTTIMING_VPW	(0x1FF<<0)
#define LCD_VERTTIMING_VND2_N(N)(((N)-1)<<18)
#define LCD_VERTTIMING_VND1_N(N)(((N)-1)<<9)
#define LCD_VERTTIMING_VPW_N(N)	(((N)-1)<<0)

/* lcd_clkcontrol */
#define LCD_CLKCONTROL_EXT	(1<<22)
#define LCD_CLKCONTROL_DELAY	(3<<20)
#define LCD_CLKCONTROL_CDD	(1<<19)
#define LCD_CLKCONTROL_IB	(1<<18)
#define LCD_CLKCONTROL_IC	(1<<17)
#define LCD_CLKCONTROL_IH	(1<<16)
#define LCD_CLKCONTROL_IV	(1<<15)
#define LCD_CLKCONTROL_BF	(0x1F<<10)
#define LCD_CLKCONTROL_PCD	(0x3FF<<0)	/* pixel clock divider field */
#define LCD_CLKCONTROL_BF_N(N)	(((N)-1)<<10)
#define LCD_CLKCONTROL_PCD_N(N)	((N)<<0)

/* lcd_pwmdiv */
#define LCD_PWMDIV_EN	(1<<31)	/* NOTE(review): shifts into the sign bit
				 * of int (UB in ISO C); consider (1U<<31).
				 * Value left byte-identical here. */
#define LCD_PWMDIV_PWMDIV	(0x1FFFF<<0)
#define LCD_PWMDIV_PWMDIV_N(N)	((N)<<0)

/* lcd_pwmhi: two 16-bit PWM high-count fields */
#define LCD_PWMHI_PWMHI1	(0xFFFF<<16)
#define LCD_PWMHI_PWMHI0	(0xFFFF<<0)
#define LCD_PWMHI_PWMHI1_N(N)	((N)<<16)
#define LCD_PWMHI_PWMHI0_N(N)	((N)<<0)

/* lcd_hwccon: hardware cursor enable */
#define LCD_HWCCON_EN	(1<<0)

/* lcd_cursorpos: hardware cursor position and hotspot offsets */
#define LCD_CURSORPOS_HWCXOFF	(0x1F<<27)
#define LCD_CURSORPOS_HWCXPOS	(0x07FF<<16)
#define LCD_CURSORPOS_HWCYOFF	(0x1F<<11)
#define LCD_CURSORPOS_HWCYPOS	(0x07FF<<0)
#define LCD_CURSORPOS_HWCXOFF_N(N)	((N)<<27)
#define LCD_CURSORPOS_HWCXPOS_N(N)	((N)<<16)
#define LCD_CURSORPOS_HWCYOFF_N(N)	((N)<<11)
#define LCD_CURSORPOS_HWCYPOS_N(N)	((N)<<0)

/* lcd_cursorcolor: cursor colour, 8 bits each alpha/R/G/B */
#define LCD_CURSORCOLOR_HWCA	(0xFF<<24)
#define LCD_CURSORCOLOR_HWCR	(0xFF<<16)
#define LCD_CURSORCOLOR_HWCG	(0xFF<<8)
#define LCD_CURSORCOLOR_HWCB	(0xFF<<0)
#define LCD_CURSORCOLOR_HWCA_N(N)	((N)<<24)
#define LCD_CURSORCOLOR_HWCR_N(N)	((N)<<16)
#define LCD_CURSORCOLOR_HWCG_N(N)	((N)<<8)
#define LCD_CURSORCOLOR_HWCB_N(N)	((N)<<0)
553
/* lcd_fifoctrl */
/*
 * NOTE(review): F2IF, F1IF and F0IF are all defined as (1<<29),
 * identical to F3IF.  With the REQ fields at bits 24/16/8/0 one would
 * expect four distinct interrupt-flag bits, so this looks like a
 * copy/paste carry-over -- verify against the Au1200 data book before
 * relying on the F2/F1/F0 IF masks.  Values left untouched here.
 */
#define LCD_FIFOCTRL_F3IF	(1<<29)
#define LCD_FIFOCTRL_F3REQ	(0x1F<<24)	/* FIFO 3 request threshold */
#define LCD_FIFOCTRL_F2IF	(1<<29)
#define LCD_FIFOCTRL_F2REQ	(0x1F<<16)
#define LCD_FIFOCTRL_F1IF	(1<<29)
#define LCD_FIFOCTRL_F1REQ	(0x1F<<8)
#define LCD_FIFOCTRL_F0IF	(1<<29)
#define LCD_FIFOCTRL_F0REQ	(0x1F<<0)
/* Threshold encoders store N-1; arguments are parenthesized so
 * expression arguments expand correctly. */
#define LCD_FIFOCTRL_F3REQ_N(N)	(((N)-1)<<24)
#define LCD_FIFOCTRL_F2REQ_N(N)	(((N)-1)<<16)
#define LCD_FIFOCTRL_F1REQ_N(N)	(((N)-1)<<8)
#define LCD_FIFOCTRL_F0REQ_N(N)	(((N)-1)<<0)
567
/* lcd_outmask: mask for the 24 LCD output data lines */
#define LCD_OUTMASK_MASK	(0x00FFFFFF)
570
571/********************************************************************/
572#endif /* _AU1200LCD_H */
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index bc061d4ec786..72ff6bf75e5e 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -178,8 +178,6 @@ struct chips_init_reg {
178 unsigned char data; 178 unsigned char data;
179}; 179};
180 180
181#define N_ELTS(x) (sizeof(x) / sizeof(x[0]))
182
183static struct chips_init_reg chips_init_sr[] = { 181static struct chips_init_reg chips_init_sr[] = {
184 { 0x00, 0x03 }, 182 { 0x00, 0x03 },
185 { 0x01, 0x01 }, 183 { 0x01, 0x01 },
@@ -287,18 +285,18 @@ static void __init chips_hw_init(void)
287{ 285{
288 int i; 286 int i;
289 287
290 for (i = 0; i < N_ELTS(chips_init_xr); ++i) 288 for (i = 0; i < ARRAY_SIZE(chips_init_xr); ++i)
291 write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); 289 write_xr(chips_init_xr[i].addr, chips_init_xr[i].data);
292 outb(0x29, 0x3c2); /* set misc output reg */ 290 outb(0x29, 0x3c2); /* set misc output reg */
293 for (i = 0; i < N_ELTS(chips_init_sr); ++i) 291 for (i = 0; i < ARRAY_SIZE(chips_init_sr); ++i)
294 write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); 292 write_sr(chips_init_sr[i].addr, chips_init_sr[i].data);
295 for (i = 0; i < N_ELTS(chips_init_gr); ++i) 293 for (i = 0; i < ARRAY_SIZE(chips_init_gr); ++i)
296 write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); 294 write_gr(chips_init_gr[i].addr, chips_init_gr[i].data);
297 for (i = 0; i < N_ELTS(chips_init_ar); ++i) 295 for (i = 0; i < ARRAY_SIZE(chips_init_ar); ++i)
298 write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); 296 write_ar(chips_init_ar[i].addr, chips_init_ar[i].data);
299 for (i = 0; i < N_ELTS(chips_init_cr); ++i) 297 for (i = 0; i < ARRAY_SIZE(chips_init_cr); ++i)
300 write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); 298 write_cr(chips_init_cr[i].addr, chips_init_cr[i].data);
301 for (i = 0; i < N_ELTS(chips_init_fr); ++i) 299 for (i = 0; i < ARRAY_SIZE(chips_init_fr); ++i)
302 write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); 300 write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
303} 301}
304 302
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 6ee449858a5c..4444bef68fba 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -26,6 +26,30 @@ config VGA_CONSOLE
26# fi 26# fi
27# fi 27# fi
28 28
29config VGACON_SOFT_SCROLLBACK
30 bool "Enable Scrollback Buffer in System RAM"
31 depends on VGA_CONSOLE
32 default n
33 help
34 The scrollback buffer of the standard VGA console is located in
35 the VGA RAM. The size of this RAM is fixed and is quite small.
36 If you require a larger scrollback buffer, this can be placed in
37 System RAM which is dynamically allocated during intialization.
38 Placing the scrollback buffer in System RAM will slightly slow
39 down the console.
40
41 If you want this feature, say 'Y' here and enter the amount of
42 RAM to allocate for this buffer. If unsure, say 'N'.
43
44config VGACON_SOFT_SCROLLBACK_SIZE
45 int "Scrollback Buffer Size (in KB)"
46 depends on VGACON_SOFT_SCROLLBACK
47 default "64"
48 help
49 Enter the amount of System RAM to allocate for the scrollback
50 buffer. Each 64KB will give you approximately 16 80x25
51 screenfuls of scrollback buffer
52
29config VIDEO_SELECT 53config VIDEO_SELECT
30 bool "Video mode selection support" 54 bool "Video mode selection support"
31 depends on X86 && VGA_CONSOLE 55 depends on X86 && VGA_CONSOLE
diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c
index 4fd07d9eca03..0cc1bfda76a6 100644
--- a/drivers/video/console/fonts.c
+++ b/drivers/video/console/fonts.c
@@ -66,7 +66,7 @@ static const struct font_desc *fonts[] = {
66#endif 66#endif
67}; 67};
68 68
69#define num_fonts (sizeof(fonts)/sizeof(*fonts)) 69#define num_fonts ARRAY_SIZE(fonts)
70 70
71#ifdef NO_FONTS 71#ifdef NO_FONTS
72#error No fonts configured. 72#error No fonts configured.
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 762c7a593141..e99fe30e568c 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -149,7 +149,7 @@ static inline void newport_clear_lines(int ystart, int yend, int ci)
149 newport_clear_screen(0, ystart, 1280 + 63, yend, ci); 149 newport_clear_screen(0, ystart, 1280 + 63, yend, ci);
150} 150}
151 151
152void newport_reset(void) 152static void newport_reset(void)
153{ 153{
154 unsigned short treg; 154 unsigned short treg;
155 int i; 155 int i;
@@ -193,7 +193,7 @@ void newport_reset(void)
193 * calculate the actual screen size by reading 193 * calculate the actual screen size by reading
194 * the video timing out of the VC2 194 * the video timing out of the VC2
195 */ 195 */
196void newport_get_screensize(void) 196static void newport_get_screensize(void)
197{ 197{
198 int i, cols; 198 int i, cols;
199 unsigned short ventry, treg; 199 unsigned short ventry, treg;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 5a86978537d2..d5a04b68c4d4 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -93,7 +93,6 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity,
93static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); 93static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
94static unsigned long vgacon_uni_pagedir[2]; 94static unsigned long vgacon_uni_pagedir[2];
95 95
96
97/* Description of the hardware situation */ 96/* Description of the hardware situation */
98static unsigned long vga_vram_base; /* Base of video memory */ 97static unsigned long vga_vram_base; /* Base of video memory */
99static unsigned long vga_vram_end; /* End of video memory */ 98static unsigned long vga_vram_end; /* End of video memory */
@@ -161,6 +160,201 @@ static inline void write_vga(unsigned char reg, unsigned int val)
161 spin_unlock_irqrestore(&vga_lock, flags); 160 spin_unlock_irqrestore(&vga_lock, flags);
162} 161}
163 162
163static inline void vga_set_mem_top(struct vc_data *c)
164{
165 write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
166}
167
168#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
169#include <linux/bootmem.h>
170/* software scrollback */
171static void *vgacon_scrollback;
172static int vgacon_scrollback_tail;
173static int vgacon_scrollback_size;
174static int vgacon_scrollback_rows;
175static int vgacon_scrollback_cnt;
176static int vgacon_scrollback_cur;
177static int vgacon_scrollback_save;
178static int vgacon_scrollback_restore;
179
180static void vgacon_scrollback_init(int pitch)
181{
182 int rows = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024/pitch;
183
184 if (vgacon_scrollback) {
185 vgacon_scrollback_cnt = 0;
186 vgacon_scrollback_tail = 0;
187 vgacon_scrollback_cur = 0;
188 vgacon_scrollback_rows = rows - 1;
189 vgacon_scrollback_size = rows * pitch;
190 }
191}
192
193static void __init vgacon_scrollback_startup(void)
194{
195 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
196 * 1024);
197 vgacon_scrollback_init(vga_video_num_columns * 2);
198}
199
200static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
201{
202 void *p;
203
204 if (!vgacon_scrollback_size || c->vc_num != fg_console)
205 return;
206
207 p = (void *) (c->vc_origin + t * c->vc_size_row);
208
209 while (count--) {
210 scr_memcpyw(vgacon_scrollback + vgacon_scrollback_tail,
211 p, c->vc_size_row);
212 vgacon_scrollback_cnt++;
213 p += c->vc_size_row;
214 vgacon_scrollback_tail += c->vc_size_row;
215
216 if (vgacon_scrollback_tail >= vgacon_scrollback_size)
217 vgacon_scrollback_tail = 0;
218
219 if (vgacon_scrollback_cnt > vgacon_scrollback_rows)
220 vgacon_scrollback_cnt = vgacon_scrollback_rows;
221
222 vgacon_scrollback_cur = vgacon_scrollback_cnt;
223 }
224}
225
226static void vgacon_restore_screen(struct vc_data *c)
227{
228 vgacon_scrollback_save = 0;
229
230 if (!vga_is_gfx && !vgacon_scrollback_restore) {
231 scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
232 c->vc_screenbuf_size > vga_vram_size ?
233 vga_vram_size : c->vc_screenbuf_size);
234 vgacon_scrollback_restore = 1;
235 vgacon_scrollback_cur = vgacon_scrollback_cnt;
236 }
237}
238
239static int vgacon_scrolldelta(struct vc_data *c, int lines)
240{
241 int start, end, count, soff, diff;
242 void *d, *s;
243
244 if (!lines) {
245 c->vc_visible_origin = c->vc_origin;
246 vga_set_mem_top(c);
247 return 1;
248 }
249
250 if (!vgacon_scrollback)
251 return 1;
252
253 if (!vgacon_scrollback_save) {
254 vgacon_cursor(c, CM_ERASE);
255 vgacon_save_screen(c);
256 vgacon_scrollback_save = 1;
257 }
258
259 vgacon_scrollback_restore = 0;
260 start = vgacon_scrollback_cur + lines;
261 end = start + abs(lines);
262
263 if (start < 0)
264 start = 0;
265
266 if (start > vgacon_scrollback_cnt)
267 start = vgacon_scrollback_cnt;
268
269 if (end < 0)
270 end = 0;
271
272 if (end > vgacon_scrollback_cnt)
273 end = vgacon_scrollback_cnt;
274
275 vgacon_scrollback_cur = start;
276 count = end - start;
277 soff = vgacon_scrollback_tail - ((vgacon_scrollback_cnt - end) *
278 c->vc_size_row);
279 soff -= count * c->vc_size_row;
280
281 if (soff < 0)
282 soff += vgacon_scrollback_size;
283
284 count = vgacon_scrollback_cnt - start;
285
286 if (count > c->vc_rows)
287 count = c->vc_rows;
288
289 diff = c->vc_rows - count;
290
291 d = (void *) c->vc_origin;
292 s = (void *) c->vc_screenbuf;
293
294 while (count--) {
295 scr_memcpyw(d, vgacon_scrollback + soff, c->vc_size_row);
296 d += c->vc_size_row;
297 soff += c->vc_size_row;
298
299 if (soff >= vgacon_scrollback_size)
300 soff = 0;
301 }
302
303 if (diff == c->vc_rows) {
304 vgacon_cursor(c, CM_MOVE);
305 } else {
306 while (diff--) {
307 scr_memcpyw(d, s, c->vc_size_row);
308 d += c->vc_size_row;
309 s += c->vc_size_row;
310 }
311 }
312
313 return 1;
314}
315#else
316#define vgacon_scrollback_startup(...) do { } while (0)
317#define vgacon_scrollback_init(...) do { } while (0)
318#define vgacon_scrollback_update(...) do { } while (0)
319
320static void vgacon_restore_screen(struct vc_data *c)
321{
322 if (c->vc_origin != c->vc_visible_origin)
323 vgacon_scrolldelta(c, 0);
324}
325
326static int vgacon_scrolldelta(struct vc_data *c, int lines)
327{
328 if (!lines) /* Turn scrollback off */
329 c->vc_visible_origin = c->vc_origin;
330 else {
331 int margin = c->vc_size_row * 4;
332 int ul, we, p, st;
333
334 if (vga_rolled_over >
335 (c->vc_scr_end - vga_vram_base) + margin) {
336 ul = c->vc_scr_end - vga_vram_base;
337 we = vga_rolled_over + c->vc_size_row;
338 } else {
339 ul = 0;
340 we = vga_vram_size;
341 }
342 p = (c->vc_visible_origin - vga_vram_base - ul + we) % we +
343 lines * c->vc_size_row;
344 st = (c->vc_origin - vga_vram_base - ul + we) % we;
345 if (st < 2 * margin)
346 margin = 0;
347 if (p < margin)
348 p = 0;
349 if (p > st - margin)
350 p = st;
351 c->vc_visible_origin = vga_vram_base + (p + ul) % we;
352 }
353 vga_set_mem_top(c);
354 return 1;
355}
356#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
357
164static const char __init *vgacon_startup(void) 358static const char __init *vgacon_startup(void)
165{ 359{
166 const char *display_desc = NULL; 360 const char *display_desc = NULL;
@@ -330,7 +524,7 @@ static const char __init *vgacon_startup(void)
330 524
331 vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH; 525 vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH;
332 vgacon_yres = vga_scan_lines; 526 vgacon_yres = vga_scan_lines;
333 527 vgacon_scrollback_startup();
334 return display_desc; 528 return display_desc;
335} 529}
336 530
@@ -357,11 +551,6 @@ static void vgacon_init(struct vc_data *c, int init)
357 con_set_default_unimap(c); 551 con_set_default_unimap(c);
358} 552}
359 553
360static inline void vga_set_mem_top(struct vc_data *c)
361{
362 write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
363}
364
365static void vgacon_deinit(struct vc_data *c) 554static void vgacon_deinit(struct vc_data *c)
366{ 555{
367 /* When closing the last console, reset video origin */ 556 /* When closing the last console, reset video origin */
@@ -433,29 +622,37 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
433 cursor_size_lastto = to; 622 cursor_size_lastto = to;
434 623
435 spin_lock_irqsave(&vga_lock, flags); 624 spin_lock_irqsave(&vga_lock, flags);
436 outb_p(0x0a, vga_video_port_reg); /* Cursor start */ 625 if (vga_video_type >= VIDEO_TYPE_VGAC) {
437 curs = inb_p(vga_video_port_val); 626 outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
438 outb_p(0x0b, vga_video_port_reg); /* Cursor end */ 627 curs = inb_p(vga_video_port_val);
439 cure = inb_p(vga_video_port_val); 628 outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
629 cure = inb_p(vga_video_port_val);
630 } else {
631 curs = 0;
632 cure = 0;
633 }
440 634
441 curs = (curs & 0xc0) | from; 635 curs = (curs & 0xc0) | from;
442 cure = (cure & 0xe0) | to; 636 cure = (cure & 0xe0) | to;
443 637
444 outb_p(0x0a, vga_video_port_reg); /* Cursor start */ 638 outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
445 outb_p(curs, vga_video_port_val); 639 outb_p(curs, vga_video_port_val);
446 outb_p(0x0b, vga_video_port_reg); /* Cursor end */ 640 outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
447 outb_p(cure, vga_video_port_val); 641 outb_p(cure, vga_video_port_val);
448 spin_unlock_irqrestore(&vga_lock, flags); 642 spin_unlock_irqrestore(&vga_lock, flags);
449} 643}
450 644
451static void vgacon_cursor(struct vc_data *c, int mode) 645static void vgacon_cursor(struct vc_data *c, int mode)
452{ 646{
453 if (c->vc_origin != c->vc_visible_origin) 647 vgacon_restore_screen(c);
454 vgacon_scrolldelta(c, 0); 648
455 switch (mode) { 649 switch (mode) {
456 case CM_ERASE: 650 case CM_ERASE:
457 write_vga(14, (c->vc_pos - vga_vram_base) / 2); 651 write_vga(14, (c->vc_pos - vga_vram_base) / 2);
458 vgacon_set_cursor_size(c->vc_x, 31, 30); 652 if (vga_video_type >= VIDEO_TYPE_VGAC)
653 vgacon_set_cursor_size(c->vc_x, 31, 30);
654 else
655 vgacon_set_cursor_size(c->vc_x, 31, 31);
459 break; 656 break;
460 657
461 case CM_MOVE: 658 case CM_MOVE:
@@ -493,7 +690,10 @@ static void vgacon_cursor(struct vc_data *c, int mode)
493 10 ? 1 : 2)); 690 10 ? 1 : 2));
494 break; 691 break;
495 case CUR_NONE: 692 case CUR_NONE:
496 vgacon_set_cursor_size(c->vc_x, 31, 30); 693 if (vga_video_type >= VIDEO_TYPE_VGAC)
694 vgacon_set_cursor_size(c->vc_x, 31, 30);
695 else
696 vgacon_set_cursor_size(c->vc_x, 31, 31);
497 break; 697 break;
498 default: 698 default:
499 vgacon_set_cursor_size(c->vc_x, 1, 699 vgacon_set_cursor_size(c->vc_x, 1,
@@ -595,6 +795,7 @@ static int vgacon_switch(struct vc_data *c)
595 vgacon_doresize(c, c->vc_cols, c->vc_rows); 795 vgacon_doresize(c, c->vc_cols, c->vc_rows);
596 } 796 }
597 797
798 vgacon_scrollback_init(c->vc_size_row);
598 return 0; /* Redrawing not needed */ 799 return 0; /* Redrawing not needed */
599} 800}
600 801
@@ -1062,37 +1263,6 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
1062 return 0; 1263 return 0;
1063} 1264}
1064 1265
1065static int vgacon_scrolldelta(struct vc_data *c, int lines)
1066{
1067 if (!lines) /* Turn scrollback off */
1068 c->vc_visible_origin = c->vc_origin;
1069 else {
1070 int margin = c->vc_size_row * 4;
1071 int ul, we, p, st;
1072
1073 if (vga_rolled_over >
1074 (c->vc_scr_end - vga_vram_base) + margin) {
1075 ul = c->vc_scr_end - vga_vram_base;
1076 we = vga_rolled_over + c->vc_size_row;
1077 } else {
1078 ul = 0;
1079 we = vga_vram_size;
1080 }
1081 p = (c->vc_visible_origin - vga_vram_base - ul + we) % we +
1082 lines * c->vc_size_row;
1083 st = (c->vc_origin - vga_vram_base - ul + we) % we;
1084 if (st < 2 * margin)
1085 margin = 0;
1086 if (p < margin)
1087 p = 0;
1088 if (p > st - margin)
1089 p = st;
1090 c->vc_visible_origin = vga_vram_base + (p + ul) % we;
1091 }
1092 vga_set_mem_top(c);
1093 return 1;
1094}
1095
1096static int vgacon_set_origin(struct vc_data *c) 1266static int vgacon_set_origin(struct vc_data *c)
1097{ 1267{
1098 if (vga_is_gfx || /* We don't play origin tricks in graphic modes */ 1268 if (vga_is_gfx || /* We don't play origin tricks in graphic modes */
@@ -1135,15 +1305,14 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
1135 if (t || b != c->vc_rows || vga_is_gfx) 1305 if (t || b != c->vc_rows || vga_is_gfx)
1136 return 0; 1306 return 0;
1137 1307
1138 if (c->vc_origin != c->vc_visible_origin)
1139 vgacon_scrolldelta(c, 0);
1140
1141 if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) 1308 if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2)
1142 return 0; 1309 return 0;
1143 1310
1311 vgacon_restore_screen(c);
1144 oldo = c->vc_origin; 1312 oldo = c->vc_origin;
1145 delta = lines * c->vc_size_row; 1313 delta = lines * c->vc_size_row;
1146 if (dir == SM_UP) { 1314 if (dir == SM_UP) {
1315 vgacon_scrollback_update(c, t, lines);
1147 if (c->vc_scr_end + delta >= vga_vram_end) { 1316 if (c->vc_scr_end + delta >= vga_vram_end) {
1148 scr_memcpyw((u16 *) vga_vram_base, 1317 scr_memcpyw((u16 *) vga_vram_base,
1149 (u16 *) (oldo + delta), 1318 (u16 *) (oldo + delta),
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index c32a2a50bfa2..1f98392a43b3 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -85,7 +85,7 @@ static struct fb_cmap default_16_colors = {
85 * Allocates memory for a colormap @cmap. @len is the 85 * Allocates memory for a colormap @cmap. @len is the
86 * number of entries in the palette. 86 * number of entries in the palette.
87 * 87 *
88 * Returns -1 errno on error, or zero on success. 88 * Returns negative errno on error, or zero on success.
89 * 89 *
90 */ 90 */
91 91
@@ -116,7 +116,7 @@ int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp)
116 116
117fail: 117fail:
118 fb_dealloc_cmap(cmap); 118 fb_dealloc_cmap(cmap);
119 return -1; 119 return -ENOMEM;
120} 120}
121 121
122/** 122/**
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 07d882b14396..b1a8dca76430 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -55,7 +55,7 @@
55 55
56#define FBPIXMAPSIZE (1024 * 8) 56#define FBPIXMAPSIZE (1024 * 8)
57 57
58static struct notifier_block *fb_notifier_list; 58static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
59struct fb_info *registered_fb[FB_MAX]; 59struct fb_info *registered_fb[FB_MAX];
60int num_registered_fb; 60int num_registered_fb;
61 61
@@ -784,7 +784,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
784 784
785 event.info = info; 785 event.info = info;
786 event.data = &mode1; 786 event.data = &mode1;
787 ret = notifier_call_chain(&fb_notifier_list, 787 ret = blocking_notifier_call_chain(&fb_notifier_list,
788 FB_EVENT_MODE_DELETE, &event); 788 FB_EVENT_MODE_DELETE, &event);
789 } 789 }
790 790
@@ -830,8 +830,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
830 830
831 info->flags &= ~FBINFO_MISC_USEREVENT; 831 info->flags &= ~FBINFO_MISC_USEREVENT;
832 event.info = info; 832 event.info = info;
833 notifier_call_chain(&fb_notifier_list, evnt, 833 blocking_notifier_call_chain(&fb_notifier_list,
834 &event); 834 evnt, &event);
835 } 835 }
836 } 836 }
837 } 837 }
@@ -854,7 +854,8 @@ fb_blank(struct fb_info *info, int blank)
854 854
855 event.info = info; 855 event.info = info;
856 event.data = &blank; 856 event.data = &blank;
857 notifier_call_chain(&fb_notifier_list, FB_EVENT_BLANK, &event); 857 blocking_notifier_call_chain(&fb_notifier_list,
858 FB_EVENT_BLANK, &event);
858 } 859 }
859 860
860 return ret; 861 return ret;
@@ -925,7 +926,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
925 con2fb.framebuffer = -1; 926 con2fb.framebuffer = -1;
926 event.info = info; 927 event.info = info;
927 event.data = &con2fb; 928 event.data = &con2fb;
928 notifier_call_chain(&fb_notifier_list, 929 blocking_notifier_call_chain(&fb_notifier_list,
929 FB_EVENT_GET_CONSOLE_MAP, &event); 930 FB_EVENT_GET_CONSOLE_MAP, &event);
930 return copy_to_user(argp, &con2fb, 931 return copy_to_user(argp, &con2fb,
931 sizeof(con2fb)) ? -EFAULT : 0; 932 sizeof(con2fb)) ? -EFAULT : 0;
@@ -944,7 +945,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
944 return -EINVAL; 945 return -EINVAL;
945 event.info = info; 946 event.info = info;
946 event.data = &con2fb; 947 event.data = &con2fb;
947 return notifier_call_chain(&fb_notifier_list, 948 return blocking_notifier_call_chain(&fb_notifier_list,
948 FB_EVENT_SET_CONSOLE_MAP, 949 FB_EVENT_SET_CONSOLE_MAP,
949 &event); 950 &event);
950 case FBIOBLANK: 951 case FBIOBLANK:
@@ -1324,7 +1325,7 @@ register_framebuffer(struct fb_info *fb_info)
1324 devfs_mk_cdev(MKDEV(FB_MAJOR, i), 1325 devfs_mk_cdev(MKDEV(FB_MAJOR, i),
1325 S_IFCHR | S_IRUGO | S_IWUGO, "fb/%d", i); 1326 S_IFCHR | S_IRUGO | S_IWUGO, "fb/%d", i);
1326 event.info = fb_info; 1327 event.info = fb_info;
1327 notifier_call_chain(&fb_notifier_list, 1328 blocking_notifier_call_chain(&fb_notifier_list,
1328 FB_EVENT_FB_REGISTERED, &event); 1329 FB_EVENT_FB_REGISTERED, &event);
1329 return 0; 1330 return 0;
1330} 1331}
@@ -1366,7 +1367,7 @@ unregister_framebuffer(struct fb_info *fb_info)
1366 */ 1367 */
1367int fb_register_client(struct notifier_block *nb) 1368int fb_register_client(struct notifier_block *nb)
1368{ 1369{
1369 return notifier_chain_register(&fb_notifier_list, nb); 1370 return blocking_notifier_chain_register(&fb_notifier_list, nb);
1370} 1371}
1371 1372
1372/** 1373/**
@@ -1375,7 +1376,7 @@ int fb_register_client(struct notifier_block *nb)
1375 */ 1376 */
1376int fb_unregister_client(struct notifier_block *nb) 1377int fb_unregister_client(struct notifier_block *nb)
1377{ 1378{
1378 return notifier_chain_unregister(&fb_notifier_list, nb); 1379 return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
1379} 1380}
1380 1381
1381/** 1382/**
@@ -1393,11 +1394,13 @@ void fb_set_suspend(struct fb_info *info, int state)
1393 1394
1394 event.info = info; 1395 event.info = info;
1395 if (state) { 1396 if (state) {
1396 notifier_call_chain(&fb_notifier_list, FB_EVENT_SUSPEND, &event); 1397 blocking_notifier_call_chain(&fb_notifier_list,
1398 FB_EVENT_SUSPEND, &event);
1397 info->state = FBINFO_STATE_SUSPENDED; 1399 info->state = FBINFO_STATE_SUSPENDED;
1398 } else { 1400 } else {
1399 info->state = FBINFO_STATE_RUNNING; 1401 info->state = FBINFO_STATE_RUNNING;
1400 notifier_call_chain(&fb_notifier_list, FB_EVENT_RESUME, &event); 1402 blocking_notifier_call_chain(&fb_notifier_list,
1403 FB_EVENT_RESUME, &event);
1401 } 1404 }
1402} 1405}
1403 1406
@@ -1469,7 +1472,7 @@ int fb_new_modelist(struct fb_info *info)
1469 1472
1470 if (!list_empty(&info->modelist)) { 1473 if (!list_empty(&info->modelist)) {
1471 event.info = info; 1474 event.info = info;
1472 err = notifier_call_chain(&fb_notifier_list, 1475 err = blocking_notifier_call_chain(&fb_notifier_list,
1473 FB_EVENT_NEW_MODELIST, 1476 FB_EVENT_NEW_MODELIST,
1474 &event); 1477 &event);
1475 } 1478 }
@@ -1495,7 +1498,7 @@ int fb_con_duit(struct fb_info *info, int event, void *data)
1495 evnt.info = info; 1498 evnt.info = info;
1496 evnt.data = data; 1499 evnt.data = data;
1497 1500
1498 return notifier_call_chain(&fb_notifier_list, event, &evnt); 1501 return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt);
1499} 1502}
1500EXPORT_SYMBOL(fb_con_duit); 1503EXPORT_SYMBOL(fb_con_duit);
1501 1504
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 7c74e7325d95..53beeb4a9998 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -1281,7 +1281,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
1281 -EINVAL : 0; 1281 -EINVAL : 0;
1282} 1282}
1283 1283
1284#if defined(__i386__) 1284#if defined(CONFIG_FB_FIRMWARE_EDID) && defined(__i386__)
1285#include <linux/pci.h> 1285#include <linux/pci.h>
1286 1286
1287/* 1287/*
@@ -1311,11 +1311,11 @@ const unsigned char *fb_firmware_edid(struct device *device)
1311{ 1311{
1312 return NULL; 1312 return NULL;
1313} 1313}
1314#endif /* _i386_ */ 1314#endif
1315EXPORT_SYMBOL(fb_firmware_edid);
1315 1316
1316EXPORT_SYMBOL(fb_parse_edid); 1317EXPORT_SYMBOL(fb_parse_edid);
1317EXPORT_SYMBOL(fb_edid_to_monspecs); 1318EXPORT_SYMBOL(fb_edid_to_monspecs);
1318EXPORT_SYMBOL(fb_firmware_edid);
1319EXPORT_SYMBOL(fb_get_mode); 1319EXPORT_SYMBOL(fb_get_mode);
1320EXPORT_SYMBOL(fb_validate_mode); 1320EXPORT_SYMBOL(fb_validate_mode);
1321EXPORT_SYMBOL(fb_destroy_modedb); 1321EXPORT_SYMBOL(fb_destroy_modedb);
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 6d26057337e2..b72b05250a9d 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -348,7 +348,7 @@ static ssize_t store_cmap(struct class_device *class_device, const char *buf,
348 fb_copy_cmap(&umap, &fb_info->cmap); 348 fb_copy_cmap(&umap, &fb_info->cmap);
349 fb_dealloc_cmap(&umap); 349 fb_dealloc_cmap(&umap);
350 350
351 return rc; 351 return rc ?: count;
352 } 352 }
353 for (i = 0; i < length; i++) { 353 for (i = 0; i < length; i++) {
354 u16 red, blue, green, tsp; 354 u16 red, blue, green, tsp;
@@ -367,7 +367,7 @@ static ssize_t store_cmap(struct class_device *class_device, const char *buf,
367 if (transp) 367 if (transp)
368 fb_info->cmap.transp[i] = tsp; 368 fb_info->cmap.transp[i] = tsp;
369 } 369 }
370 return 0; 370 return count;
371} 371}
372 372
373static ssize_t show_cmap(struct class_device *class_device, char *buf) 373static ssize_t show_cmap(struct class_device *class_device, char *buf)
diff --git a/drivers/video/geode/Kconfig b/drivers/video/geode/Kconfig
index 42fb9a89a792..4e173ef20a7d 100644
--- a/drivers/video/geode/Kconfig
+++ b/drivers/video/geode/Kconfig
@@ -8,9 +8,24 @@ config FB_GEODE
8 Say 'Y' here to allow you to select framebuffer drivers for 8 Say 'Y' here to allow you to select framebuffer drivers for
9 the AMD Geode family of processors. 9 the AMD Geode family of processors.
10 10
11config FB_GEODE_GX
12 tristate "AMD Geode GX framebuffer support (EXPERIMENTAL)"
13 depends on FB && FB_GEODE && EXPERIMENTAL
14 select FB_CFB_FILLRECT
15 select FB_CFB_COPYAREA
16 select FB_CFB_IMAGEBLIT
17 ---help---
18 Framebuffer driver for the display controller integrated into the
19 AMD Geode GX processors.
20
21 To compile this driver as a module, choose M here: the module will be
22 called gxfb.
23
24 If unsure, say N.
25
11config FB_GEODE_GX1 26config FB_GEODE_GX1
12 tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)" 27 tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)"
13 depends on FB_GEODE && EXPERIMENTAL 28 depends on FB && FB_GEODE && EXPERIMENTAL
14 select FB_CFB_FILLRECT 29 select FB_CFB_FILLRECT
15 select FB_CFB_COPYAREA 30 select FB_CFB_COPYAREA
16 select FB_CFB_IMAGEBLIT 31 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/geode/Makefile b/drivers/video/geode/Makefile
index 13ad501ea990..f896565bc312 100644
--- a/drivers/video/geode/Makefile
+++ b/drivers/video/geode/Makefile
@@ -1,5 +1,7 @@
1# Makefile for the Geode family framebuffer drivers 1# Makefile for the Geode family framebuffer drivers
2 2
3obj-$(CONFIG_FB_GEODE_GX1) += gx1fb.o 3obj-$(CONFIG_FB_GEODE_GX1) += gx1fb.o
4obj-$(CONFIG_FB_GEODE_GX) += gxfb.o
4 5
5gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o 6gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o
7gxfb-objs := gxfb_core.o display_gx.o video_gx.o
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c
new file mode 100644
index 000000000000..825c3405f5c2
--- /dev/null
+++ b/drivers/video/geode/display_gx.c
@@ -0,0 +1,156 @@
1/*
2 * Geode GX display controller.
3 *
4 * Copyright (C) 2005 Arcom Control Systems Ltd.
5 *
6 * Portions from AMD's original 2.4 driver:
7 * Copyright (C) 2004 Advanced Micro Devices, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by * the
11 * Free Software Foundation; either version 2 of the License, or * (at your
12 * option) any later version.
13 */
14#include <linux/spinlock.h>
15#include <linux/fb.h>
16#include <linux/delay.h>
17#include <asm/io.h>
18#include <asm/div64.h>
19#include <asm/delay.h>
20
21#include "geodefb.h"
22#include "display_gx.h"
23
24int gx_frame_buffer_size(void)
25{
26 /* Assuming 16 MiB. */
27 return 16*1024*1024;
28}
29
30int gx_line_delta(int xres, int bpp)
31{
32 /* Must be a multiple of 8 bytes. */
33 return (xres * (bpp >> 3) + 7) & ~0x7;
34}
35
36static void gx_set_mode(struct fb_info *info)
37{
38 struct geodefb_par *par = info->par;
39 u32 gcfg, dcfg;
40 int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal;
41 int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
42
43 /* Unlock the display controller registers. */
44 readl(par->dc_regs + DC_UNLOCK);
45 writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
46
47 gcfg = readl(par->dc_regs + DC_GENERAL_CFG);
48 dcfg = readl(par->dc_regs + DC_DISPLAY_CFG);
49
50 /* Disable the timing generator. */
51 dcfg &= ~(DC_DCFG_TGEN);
52 writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
53
54 /* Wait for pending memory requests before disabling the FIFO load. */
55 udelay(100);
56
57 /* Disable FIFO load and compression. */
58 gcfg &= ~(DC_GCFG_DFLE | DC_GCFG_CMPE | DC_GCFG_DECE);
59 writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
60
61 /* Setup DCLK and its divisor. */
62 par->vid_ops->set_dclk(info);
63
64 /*
65 * Setup new mode.
66 */
67
68 /* Clear all unused feature bits. */
69 gcfg &= DC_GCFG_YUVM | DC_GCFG_VDSE;
70 dcfg = 0;
71
72 /* Set FIFO priority (default 6/5) and enable. */
73 /* FIXME: increase fifo priority for 1280x1024 and higher modes? */
74 gcfg |= (6 << DC_GCFG_DFHPEL_POS) | (5 << DC_GCFG_DFHPSL_POS) | DC_GCFG_DFLE;
75
76 /* Framebuffer start offset. */
77 writel(0, par->dc_regs + DC_FB_ST_OFFSET);
78
79 /* Line delta and line buffer length. */
80 writel(info->fix.line_length >> 3, par->dc_regs + DC_GFX_PITCH);
81 writel(((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2,
82 par->dc_regs + DC_LINE_SIZE);
83
84 /* Enable graphics and video data and unmask address lines. */
85 dcfg |= DC_DCFG_GDEN | DC_DCFG_VDEN | DC_DCFG_A20M | DC_DCFG_A18M;
86
87 /* Set pixel format. */
88 switch (info->var.bits_per_pixel) {
89 case 8:
90 dcfg |= DC_DCFG_DISP_MODE_8BPP;
91 break;
92 case 16:
93 dcfg |= DC_DCFG_DISP_MODE_16BPP;
94 dcfg |= DC_DCFG_16BPP_MODE_565;
95 break;
96 case 32:
97 dcfg |= DC_DCFG_DISP_MODE_24BPP;
98 dcfg |= DC_DCFG_PALB;
99 break;
100 }
101
102 /* Enable timing generator. */
103 dcfg |= DC_DCFG_TGEN;
104
105 /* Horizontal and vertical timings. */
106 hactive = info->var.xres;
107 hblankstart = hactive;
108 hsyncstart = hblankstart + info->var.right_margin;
109 hsyncend = hsyncstart + info->var.hsync_len;
110 hblankend = hsyncend + info->var.left_margin;
111 htotal = hblankend;
112
113 vactive = info->var.yres;
114 vblankstart = vactive;
115 vsyncstart = vblankstart + info->var.lower_margin;
116 vsyncend = vsyncstart + info->var.vsync_len;
117 vblankend = vsyncend + info->var.upper_margin;
118 vtotal = vblankend;
119
120 writel((hactive - 1) | ((htotal - 1) << 16), par->dc_regs + DC_H_ACTIVE_TIMING);
121 writel((hblankstart - 1) | ((hblankend - 1) << 16), par->dc_regs + DC_H_BLANK_TIMING);
122 writel((hsyncstart - 1) | ((hsyncend - 1) << 16), par->dc_regs + DC_H_SYNC_TIMING);
123
124 writel((vactive - 1) | ((vtotal - 1) << 16), par->dc_regs + DC_V_ACTIVE_TIMING);
125 writel((vblankstart - 1) | ((vblankend - 1) << 16), par->dc_regs + DC_V_BLANK_TIMING);
126 writel((vsyncstart - 1) | ((vsyncend - 1) << 16), par->dc_regs + DC_V_SYNC_TIMING);
127
128 /* Write final register values. */
129 writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
130 writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
131
132 par->vid_ops->configure_display(info);
133
134 /* Relock display controller registers */
135 writel(0, par->dc_regs + DC_UNLOCK);
136}
137
138static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
139 unsigned red, unsigned green, unsigned blue)
140{
141 struct geodefb_par *par = info->par;
142 int val;
143
144 /* Hardware palette is in RGB 8-8-8 format. */
145 val = (red << 8) & 0xff0000;
146 val |= (green) & 0x00ff00;
147 val |= (blue >> 8) & 0x0000ff;
148
149 writel(regno, par->dc_regs + DC_PAL_ADDRESS);
150 writel(val, par->dc_regs + DC_PAL_DATA);
151}
152
153struct geode_dc_ops gx_dc_ops = {
154 .set_mode = gx_set_mode,
155 .set_palette_reg = gx_set_hw_palette_reg,
156};
diff --git a/drivers/video/geode/display_gx.h b/drivers/video/geode/display_gx.h
new file mode 100644
index 000000000000..86c623361305
--- /dev/null
+++ b/drivers/video/geode/display_gx.h
@@ -0,0 +1,96 @@
1/*
2 * Geode GX display controller
3 *
4 * Copyright (C) 2006 Arcom Control Systems Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef __DISPLAY_GX_H__
12#define __DISPLAY_GX_H__
13
14int gx_frame_buffer_size(void);
15int gx_line_delta(int xres, int bpp);
16
17extern struct geode_dc_ops gx_dc_ops;
18
19/* Display controller registers */
20
21#define DC_UNLOCK 0x00
22# define DC_UNLOCK_CODE 0x00004758
23
24#define DC_GENERAL_CFG 0x04
25# define DC_GCFG_DFLE 0x00000001
26# define DC_GCFG_CURE 0x00000002
27# define DC_GCFG_ICNE 0x00000004
28# define DC_GCFG_VIDE 0x00000008
29# define DC_GCFG_CMPE 0x00000020
30# define DC_GCFG_DECE 0x00000040
31# define DC_GCFG_VGAE 0x00000080
32# define DC_GCFG_DFHPSL_MASK 0x00000F00
33# define DC_GCFG_DFHPSL_POS 8
34# define DC_GCFG_DFHPEL_MASK 0x0000F000
35# define DC_GCFG_DFHPEL_POS 12
36# define DC_GCFG_STFM 0x00010000
37# define DC_GCFG_FDTY 0x00020000
38# define DC_GCFG_VGAFT 0x00040000
39# define DC_GCFG_VDSE 0x00080000
40# define DC_GCFG_YUVM 0x00100000
41# define DC_GCFG_VFSL 0x00800000
42# define DC_GCFG_SIGE 0x01000000
43# define DC_GCFG_SGRE 0x02000000
44# define DC_GCFG_SGFR 0x04000000
45# define DC_GCFG_CRC_MODE 0x08000000
46# define DC_GCFG_DIAG 0x10000000
47# define DC_GCFG_CFRW 0x20000000
48
49#define DC_DISPLAY_CFG 0x08
50# define DC_DCFG_TGEN 0x00000001
51# define DC_DCFG_GDEN 0x00000008
52# define DC_DCFG_VDEN 0x00000010
53# define DC_DCFG_TRUP 0x00000040
54# define DC_DCFG_DISP_MODE_MASK 0x00000300
55# define DC_DCFG_DISP_MODE_8BPP 0x00000000
56# define DC_DCFG_DISP_MODE_16BPP 0x00000100
57# define DC_DCFG_DISP_MODE_24BPP 0x00000200
58# define DC_DCFG_16BPP_MODE_MASK 0x00000c00
59# define DC_DCFG_16BPP_MODE_565 0x00000000
60# define DC_DCFG_16BPP_MODE_555 0x00000100
61# define DC_DCFG_16BPP_MODE_444 0x00000200
62# define DC_DCFG_DCEN 0x00080000
63# define DC_DCFG_PALB 0x02000000
64# define DC_DCFG_FRLK 0x04000000
65# define DC_DCFG_VISL 0x08000000
66# define DC_DCFG_FRSL 0x20000000
67# define DC_DCFG_A18M 0x40000000
68# define DC_DCFG_A20M 0x80000000
69
70#define DC_FB_ST_OFFSET 0x10
71
72#define DC_LINE_SIZE 0x30
73# define DC_LINE_SIZE_FB_LINE_SIZE_MASK 0x000007ff
74# define DC_LINE_SIZE_FB_LINE_SIZE_POS 0
75# define DC_LINE_SIZE_CB_LINE_SIZE_MASK 0x007f0000
76# define DC_LINE_SIZE_CB_LINE_SIZE_POS 16
77# define DC_LINE_SIZE_VID_LINE_SIZE_MASK 0xff000000
78# define DC_LINE_SIZE_VID_LINE_SIZE_POS 24
79
80#define DC_GFX_PITCH 0x34
81# define DC_GFX_PITCH_FB_PITCH_MASK 0x0000ffff
82# define DC_GFX_PITCH_FB_PITCH_POS 0
83# define DC_GFX_PITCH_CB_PITCH_MASK 0xffff0000
84# define DC_GFX_PITCH_CB_PITCH_POS 16
85
86#define DC_H_ACTIVE_TIMING 0x40
87#define DC_H_BLANK_TIMING 0x44
88#define DC_H_SYNC_TIMING 0x48
89#define DC_V_ACTIVE_TIMING 0x50
90#define DC_V_BLANK_TIMING 0x54
91#define DC_V_SYNC_TIMING 0x58
92
93#define DC_PAL_ADDRESS 0x70
94#define DC_PAL_DATA 0x74
95
96#endif /* !__DISPLAY_GX1_H__ */
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
new file mode 100644
index 000000000000..89c34b15f5d4
--- /dev/null
+++ b/drivers/video/geode/gxfb_core.c
@@ -0,0 +1,423 @@
1/*
2 * Geode GX framebuffer driver.
3 *
4 * Copyright (C) 2006 Arcom Control Systems Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 *
12 * This driver assumes that the BIOS has created a virtual PCI device header
13 * for the video device. The PCI header is assumed to contain the following
14 * BARs:
15 *
16 * BAR0 - framebuffer memory
17 * BAR1 - graphics processor registers
18 * BAR2 - display controller registers
19 * BAR3 - video processor and flat panel control registers.
20 *
21 * 16 MiB of framebuffer memory is assumed to be available.
22 */
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/mm.h>
28#include <linux/tty.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/fb.h>
32#include <linux/init.h>
33#include <linux/pci.h>
34
35#include "geodefb.h"
36#include "display_gx.h"
37#include "video_gx.h"
38
39static char mode_option[32] = "640x480-16@60";
40
41/* Modes relevant to the GX (taken from modedb.c) */
42static const struct fb_videomode __initdata gx_modedb[] = {
43 /* 640x480-60 VESA */
44 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
45 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
46 /* 640x480-75 VESA */
47 { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3,
48 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
49 /* 640x480-85 VESA */
50 { NULL, 85, 640, 480, 27777, 80, 56, 25, 01, 56, 3,
51 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
52 /* 800x600-60 VESA */
53 { NULL, 60, 800, 600, 25000, 88, 40, 23, 01, 128, 4,
54 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
55 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
56 /* 800x600-75 VESA */
57 { NULL, 75, 800, 600, 20202, 160, 16, 21, 01, 80, 3,
58 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
59 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
60 /* 800x600-85 VESA */
61 { NULL, 85, 800, 600, 17761, 152, 32, 27, 01, 64, 3,
62 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
63 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
64 /* 1024x768-60 VESA */
65 { NULL, 60, 1024, 768, 15384, 160, 24, 29, 3, 136, 6,
66 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
67 /* 1024x768-75 VESA */
68 { NULL, 75, 1024, 768, 12690, 176, 16, 28, 1, 96, 3,
69 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
70 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
71 /* 1024x768-85 VESA */
72 { NULL, 85, 1024, 768, 10582, 208, 48, 36, 1, 96, 3,
73 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
74 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
75 /* 1280x960-60 VESA */
76 { NULL, 60, 1280, 960, 9259, 312, 96, 36, 1, 112, 3,
77 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
78 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
79 /* 1280x960-85 VESA */
80 { NULL, 85, 1280, 960, 6734, 224, 64, 47, 1, 160, 3,
81 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
82 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
83 /* 1280x1024-60 VESA */
84 { NULL, 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3,
85 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
86 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
87 /* 1280x1024-75 VESA */
88 { NULL, 75, 1280, 1024, 7407, 248, 16, 38, 1, 144, 3,
89 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
90 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
91 /* 1280x1024-85 VESA */
92 { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
93 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
94 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
95 /* 1600x1200-60 VESA */
96 { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
97 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
98 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
99 /* 1600x1200-75 VESA */
100 { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
101 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
102 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
103 /* 1600x1200-85 VESA */
104 { NULL, 85, 1600, 1200, 4357, 304, 64, 46, 1, 192, 3,
105 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
106 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
107};
108
109static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
110{
111 if (var->xres > 1600 || var->yres > 1200)
112 return -EINVAL;
113 if ((var->xres > 1280 || var->yres > 1024) && var->bits_per_pixel > 16)
114 return -EINVAL;
115
116 if (var->bits_per_pixel == 32) {
117 var->red.offset = 16; var->red.length = 8;
118 var->green.offset = 8; var->green.length = 8;
119 var->blue.offset = 0; var->blue.length = 8;
120 } else if (var->bits_per_pixel == 16) {
121 var->red.offset = 11; var->red.length = 5;
122 var->green.offset = 5; var->green.length = 6;
123 var->blue.offset = 0; var->blue.length = 5;
124 } else if (var->bits_per_pixel == 8) {
125 var->red.offset = 0; var->red.length = 8;
126 var->green.offset = 0; var->green.length = 8;
127 var->blue.offset = 0; var->blue.length = 8;
128 } else
129 return -EINVAL;
130 var->transp.offset = 0; var->transp.length = 0;
131
132 /* Enough video memory? */
133 if (gx_line_delta(var->xres, var->bits_per_pixel) * var->yres > info->fix.smem_len)
134 return -EINVAL;
135
136 /* FIXME: Check timing parameters here? */
137
138 return 0;
139}
140
141static int gxfb_set_par(struct fb_info *info)
142{
143 struct geodefb_par *par = info->par;
144
145 if (info->var.bits_per_pixel > 8) {
146 info->fix.visual = FB_VISUAL_TRUECOLOR;
147 fb_dealloc_cmap(&info->cmap);
148 } else {
149 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
150 fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0);
151 }
152
153 info->fix.line_length = gx_line_delta(info->var.xres, info->var.bits_per_pixel);
154
155 par->dc_ops->set_mode(info);
156
157 return 0;
158}
159
160static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
161{
162 chan &= 0xffff;
163 chan >>= 16 - bf->length;
164 return chan << bf->offset;
165}
166
167static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
168 unsigned blue, unsigned transp,
169 struct fb_info *info)
170{
171 struct geodefb_par *par = info->par;
172
173 if (info->var.grayscale) {
174 /* grayscale = 0.30*R + 0.59*G + 0.11*B */
175 red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
176 }
177
178 /* Truecolor has hardware independent palette */
179 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
180 u32 *pal = info->pseudo_palette;
181 u32 v;
182
183 if (regno >= 16)
184 return -EINVAL;
185
186 v = chan_to_field(red, &info->var.red);
187 v |= chan_to_field(green, &info->var.green);
188 v |= chan_to_field(blue, &info->var.blue);
189
190 pal[regno] = v;
191 } else {
192 if (regno >= 256)
193 return -EINVAL;
194
195 par->dc_ops->set_palette_reg(info, regno, red, green, blue);
196 }
197
198 return 0;
199}
200
201static int gxfb_blank(int blank_mode, struct fb_info *info)
202{
203 struct geodefb_par *par = info->par;
204
205 return par->vid_ops->blank_display(info, blank_mode);
206}
207
208static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
209{
210 struct geodefb_par *par = info->par;
211 int fb_len;
212 int ret;
213
214 ret = pci_enable_device(dev);
215 if (ret < 0)
216 return ret;
217
218 ret = pci_request_region(dev, 3, "gxfb (video processor)");
219 if (ret < 0)
220 return ret;
221 par->vid_regs = ioremap(pci_resource_start(dev, 3),
222 pci_resource_len(dev, 3));
223 if (!par->vid_regs)
224 return -ENOMEM;
225
226 ret = pci_request_region(dev, 2, "gxfb (display controller)");
227 if (ret < 0)
228 return ret;
229 par->dc_regs = ioremap(pci_resource_start(dev, 2), pci_resource_len(dev, 2));
230 if (!par->dc_regs)
231 return -ENOMEM;
232
233 ret = pci_request_region(dev, 0, "gxfb (framebuffer)");
234 if (ret < 0)
235 return ret;
236 if ((fb_len = gx_frame_buffer_size()) < 0)
237 return -ENOMEM;
238 info->fix.smem_start = pci_resource_start(dev, 0);
239 info->fix.smem_len = fb_len;
240 info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
241 if (!info->screen_base)
242 return -ENOMEM;
243
244 dev_info(&dev->dev, "%d Kibyte of video memory at 0x%lx\n",
245 info->fix.smem_len / 1024, info->fix.smem_start);
246
247 return 0;
248}
249
250static struct fb_ops gxfb_ops = {
251 .owner = THIS_MODULE,
252 .fb_check_var = gxfb_check_var,
253 .fb_set_par = gxfb_set_par,
254 .fb_setcolreg = gxfb_setcolreg,
255 .fb_blank = gxfb_blank,
256 /* No HW acceleration for now. */
257 .fb_fillrect = cfb_fillrect,
258 .fb_copyarea = cfb_copyarea,
259 .fb_imageblit = cfb_imageblit,
260};
261
262static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
263{
264 struct geodefb_par *par;
265 struct fb_info *info;
266
267 /* Alloc enough space for the pseudo palette. */
268 info = framebuffer_alloc(sizeof(struct geodefb_par) + sizeof(u32) * 16, dev);
269 if (!info)
270 return NULL;
271
272 par = info->par;
273
274 strcpy(info->fix.id, "Geode GX");
275
276 info->fix.type = FB_TYPE_PACKED_PIXELS;
277 info->fix.type_aux = 0;
278 info->fix.xpanstep = 0;
279 info->fix.ypanstep = 0;
280 info->fix.ywrapstep = 0;
281 info->fix.accel = FB_ACCEL_NONE;
282
283 info->var.nonstd = 0;
284 info->var.activate = FB_ACTIVATE_NOW;
285 info->var.height = -1;
286 info->var.width = -1;
287 info->var.accel_flags = 0;
288 info->var.vmode = FB_VMODE_NONINTERLACED;
289
290 info->fbops = &gxfb_ops;
291 info->flags = FBINFO_DEFAULT;
292 info->node = -1;
293
294 info->pseudo_palette = (void *)par + sizeof(struct geodefb_par);
295
296 info->var.grayscale = 0;
297
298 return info;
299}
300
301static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
302{
303 struct geodefb_par *par;
304 struct fb_info *info;
305 int ret;
306
307 info = gxfb_init_fbinfo(&pdev->dev);
308 if (!info)
309 return -ENOMEM;
310 par = info->par;
311
312 /* GX display controller and GX video device. */
313 par->dc_ops = &gx_dc_ops;
314 par->vid_ops = &gx_vid_ops;
315
316 if ((ret = gxfb_map_video_memory(info, pdev)) < 0) {
317 dev_err(&pdev->dev, "failed to map frame buffer or controller registers\n");
318 goto err;
319 }
320
321 ret = fb_find_mode(&info->var, info, mode_option,
322 gx_modedb, ARRAY_SIZE(gx_modedb), NULL, 16);
323 if (ret == 0 || ret == 4) {
324 dev_err(&pdev->dev, "could not find valid video mode\n");
325 ret = -EINVAL;
326 goto err;
327 }
328
329 /* Clear the frame buffer of garbage. */
330 memset_io(info->screen_base, 0, info->fix.smem_len);
331
332 gxfb_check_var(&info->var, info);
333 gxfb_set_par(info);
334
335 if (register_framebuffer(info) < 0) {
336 ret = -EINVAL;
337 goto err;
338 }
339 pci_set_drvdata(pdev, info);
340 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
341 return 0;
342
343 err:
344 if (info->screen_base) {
345 iounmap(info->screen_base);
346 pci_release_region(pdev, 0);
347 }
348 if (par->vid_regs) {
349 iounmap(par->vid_regs);
350 pci_release_region(pdev, 3);
351 }
352 if (par->dc_regs) {
353 iounmap(par->dc_regs);
354 pci_release_region(pdev, 2);
355 }
356
357 pci_disable_device(pdev);
358
359 if (info)
360 framebuffer_release(info);
361 return ret;
362}
363
364static void gxfb_remove(struct pci_dev *pdev)
365{
366 struct fb_info *info = pci_get_drvdata(pdev);
367 struct geodefb_par *par = info->par;
368
369 unregister_framebuffer(info);
370
371 iounmap((void __iomem *)info->screen_base);
372 pci_release_region(pdev, 0);
373
374 iounmap(par->vid_regs);
375 pci_release_region(pdev, 3);
376
377 iounmap(par->dc_regs);
378 pci_release_region(pdev, 2);
379
380 pci_disable_device(pdev);
381 pci_set_drvdata(pdev, NULL);
382
383 framebuffer_release(info);
384}
385
386static struct pci_device_id gxfb_id_table[] = {
387 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO,
388 PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
389 0xff0000, 0 },
390 { 0, }
391};
392
393MODULE_DEVICE_TABLE(pci, gxfb_id_table);
394
395static struct pci_driver gxfb_driver = {
396 .name = "gxfb",
397 .id_table = gxfb_id_table,
398 .probe = gxfb_probe,
399 .remove = gxfb_remove,
400};
401
402static int __init gxfb_init(void)
403{
404#ifndef MODULE
405 if (fb_get_options("gxfb", NULL))
406 return -ENODEV;
407#endif
408 return pci_register_driver(&gxfb_driver);
409}
410
411static void __exit gxfb_cleanup(void)
412{
413 pci_unregister_driver(&gxfb_driver);
414}
415
416module_init(gxfb_init);
417module_exit(gxfb_cleanup);
418
419module_param_string(mode, mode_option, sizeof(mode_option), 0444);
420MODULE_PARM_DESC(mode, "video mode (<x>x<y>[-<bpp>][@<refr>])");
421
422MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode GX");
423MODULE_LICENSE("GPL");
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c
new file mode 100644
index 000000000000..2b2a7880ea75
--- /dev/null
+++ b/drivers/video/geode/video_gx.c
@@ -0,0 +1,262 @@
1/*
2 * Geode GX video processor device.
3 *
4 * Copyright (C) 2006 Arcom Control Systems Ltd.
5 *
6 * Portions from AMD's original 2.4 driver:
7 * Copyright (C) 2004 Advanced Micro Devices, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/fb.h>
15#include <linux/delay.h>
16#include <asm/io.h>
17#include <asm/delay.h>
18#include <asm/msr.h>
19
20#include "geodefb.h"
21#include "video_gx.h"
22
23
24/*
25 * Tables of register settings for various DOTCLKs.
26 */
27struct gx_pll_entry {
28 long pixclock; /* ps */
29 u32 sys_rstpll_bits;
30 u32 dotpll_value;
31};
32
33#define POSTDIV3 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3)
34#define PREMULT2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPREMULT2)
35#define PREDIV2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3)
36
/*
 * DOTPLL settings for a 48 MHz reference clock (GX rev. 2 and later;
 * see gx_set_dclk_frequency()).  Entries are ordered by decreasing
 * pixclock; the trailing comment gives the resulting dot clock in MHz.
 */
static const struct gx_pll_entry gx_pll_table_48MHz[] = {
	{ 40123, POSTDIV3, 0x00000BF2 }, /* 24.9230 */
	{ 39721, 0, 0x00000037 }, /* 25.1750 */
	{ 35308, POSTDIV3|PREMULT2, 0x00000B1A }, /* 28.3220 */
	{ 31746, POSTDIV3, 0x000002D2 }, /* 31.5000 */
	{ 27777, POSTDIV3|PREMULT2, 0x00000FE2 }, /* 36.0000 */
	{ 26666, POSTDIV3, 0x0000057A }, /* 37.5000 */
	{ 25000, POSTDIV3, 0x0000030A }, /* 40.0000 */
	{ 22271, 0, 0x00000063 }, /* 44.9000 */
	{ 20202, 0, 0x0000054B }, /* 49.5000 */
	{ 20000, 0, 0x0000026E }, /* 50.0000 */
	{ 19860, PREMULT2, 0x00000037 }, /* 50.3500 */
	{ 18518, POSTDIV3|PREMULT2, 0x00000B0D }, /* 54.0000 */
	{ 17777, 0, 0x00000577 }, /* 56.2500 */
	{ 17733, 0, 0x000007F7 }, /* 56.3916 */
	{ 17653, 0, 0x0000057B }, /* 56.6444 */
	{ 16949, PREMULT2, 0x00000707 }, /* 59.0000 */
	{ 15873, POSTDIV3|PREMULT2, 0x00000B39 }, /* 63.0000 */
	{ 15384, POSTDIV3|PREMULT2, 0x00000B45 }, /* 65.0000 */
	{ 14814, POSTDIV3|PREMULT2, 0x00000FC1 }, /* 67.5000 */
	{ 14124, POSTDIV3, 0x00000561 }, /* 70.8000 */
	{ 13888, POSTDIV3, 0x000007E1 }, /* 72.0000 */
	{ 13426, PREMULT2, 0x00000F4A }, /* 74.4810 */
	{ 13333, 0, 0x00000052 }, /* 75.0000 */
	{ 12698, 0, 0x00000056 }, /* 78.7500 */
	{ 12500, POSTDIV3|PREMULT2, 0x00000709 }, /* 80.0000 */
	{ 11135, PREMULT2, 0x00000262 }, /* 89.8000 */
	{ 10582, 0, 0x000002D2 }, /* 94.5000 */
	{ 10101, PREMULT2, 0x00000B4A }, /* 99.0000 */
	{ 10000, PREMULT2, 0x00000036 }, /* 100.0000 */
	{  9259, 0, 0x000007E2 }, /* 108.0000 */
	{  8888, 0, 0x000007F6 }, /* 112.5000 */
	{  7692, POSTDIV3|PREMULT2, 0x00000FB0 }, /* 130.0000 */
	{  7407, POSTDIV3|PREMULT2, 0x00000B50 }, /* 135.0000 */
	{  6349, 0, 0x00000055 }, /* 157.5000 */
	{  6172, 0, 0x000009C1 }, /* 162.0000 */
	{  5787, PREMULT2, 0x0000002D }, /* 172.798 */
	{  5698, 0, 0x000002C1 }, /* 175.5000 */
	{  5291, 0, 0x000002D1 }, /* 189.0000 */
	{  4938, 0, 0x00000551 }, /* 202.5000 */
	{  4357, 0, 0x0000057D }, /* 229.5000 */
};

/*
 * DOTPLL settings for rev. 1 parts, which use a 14 MHz reference clock;
 * no extra RSTPLL divider bits are needed for any entry here.
 */
static const struct gx_pll_entry gx_pll_table_14MHz[] = {
	{ 39721, 0, 0x00000037 }, /* 25.1750 */
	{ 35308, 0, 0x00000B7B }, /* 28.3220 */
	{ 31746, 0, 0x000004D3 }, /* 31.5000 */
	{ 27777, 0, 0x00000BE3 }, /* 36.0000 */
	{ 26666, 0, 0x0000074F }, /* 37.5000 */
	{ 25000, 0, 0x0000050B }, /* 40.0000 */
	{ 22271, 0, 0x00000063 }, /* 44.9000 */
	{ 20202, 0, 0x0000054B }, /* 49.5000 */
	{ 20000, 0, 0x0000026E }, /* 50.0000 */
	{ 19860, 0, 0x000007C3 }, /* 50.3500 */
	{ 18518, 0, 0x000007E3 }, /* 54.0000 */
	{ 17777, 0, 0x00000577 }, /* 56.2500 */
	{ 17733, 0, 0x000002FB }, /* 56.3916 */
	{ 17653, 0, 0x0000057B }, /* 56.6444 */
	{ 16949, 0, 0x0000058B }, /* 59.0000 */
	{ 15873, 0, 0x0000095E }, /* 63.0000 */
	{ 15384, 0, 0x0000096A }, /* 65.0000 */
	{ 14814, 0, 0x00000BC2 }, /* 67.5000 */
	{ 14124, 0, 0x0000098A }, /* 70.8000 */
	{ 13888, 0, 0x00000BE2 }, /* 72.0000 */
	{ 13333, 0, 0x00000052 }, /* 75.0000 */
	{ 12698, 0, 0x00000056 }, /* 78.7500 */
	{ 12500, 0, 0x0000050A }, /* 80.0000 */
	{ 11135, 0, 0x0000078E }, /* 89.8000 */
	{ 10582, 0, 0x000002D2 }, /* 94.5000 */
	{ 10101, 0, 0x000011F6 }, /* 99.0000 */
	{ 10000, 0, 0x0000054E }, /* 100.0000 */
	{  9259, 0, 0x000007E2 }, /* 108.0000 */
	{  8888, 0, 0x000002FA }, /* 112.5000 */
	{  7692, 0, 0x00000BB1 }, /* 130.0000 */
	{  7407, 0, 0x00000975 }, /* 135.0000 */
	{  6349, 0, 0x00000055 }, /* 157.5000 */
	{  6172, 0, 0x000009C1 }, /* 162.0000 */
	{  5698, 0, 0x000002C1 }, /* 175.5000 */
	{  5291, 0, 0x00000539 }, /* 189.0000 */
	{  4938, 0, 0x00000551 }, /* 202.5000 */
	{  4357, 0, 0x0000057D }, /* 229.5000 */
};
119
/*
 * Program the Geode GX dot clock: pick the table entry whose pixclock
 * is closest to info->var.pixclock, write its M/N/P value into
 * MSR_GLCP_DOTPLL and its divider bits into MSR_GLCP_SYS_RSTPLL, then
 * wait (bounded) for the PLL to lock.
 */
static void gx_set_dclk_frequency(struct fb_info *info)
{
	const struct gx_pll_entry *pll_table;
	int pll_table_len;
	int i, best_i;
	long min, diff;
	u64 dotpll, sys_rstpll;
	int timeout = 1000;

	/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
	if (cpu_data->x86_mask == 1) {
		pll_table = gx_pll_table_14MHz;
		pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
	} else {
		pll_table = gx_pll_table_48MHz;
		pll_table_len = ARRAY_SIZE(gx_pll_table_48MHz);
	}

	/* Search the table for the closest pixclock. */
	best_i = 0;
	min = abs(pll_table[0].pixclock - info->var.pixclock);
	for (i = 1; i < pll_table_len; i++) {
		diff = abs(pll_table[i].pixclock - info->var.pixclock);
		if (diff < min) {
			min = diff;
			best_i = i;
		}
	}

	rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
	rdmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Program new M, N and P.  The value lives in the upper 32 bits;
	 * hold the PLL in reset while reprogramming and force bypass off. */
	dotpll &= 0x00000000ffffffffull;
	dotpll |= (u64)pll_table[best_i].dotpll_value << 32;
	dotpll |= MSR_GLCP_DOTPLL_DOTRESET;
	dotpll &= ~MSR_GLCP_DOTPLL_BYPASS;

	wrmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Program dividers. */
	sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2
			 | MSR_GLCP_SYS_RSTPLL_DOTPREMULT2
			 | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 );
	sys_rstpll |= pll_table[best_i].sys_rstpll_bits;

	wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);

	/* Clear reset bit to start PLL. */
	dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET);
	wrmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Wait for LOCK bit; give up silently after 1000 reads so a
	 * broken PLL cannot hang the box. */
	do {
		rdmsrl(MSR_GLCP_DOTPLL, dotpll);
	} while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK));
}
177
/*
 * Apply the sync enables/polarities from info->var to the video
 * processor's display configuration register (GX_DCFG) via a
 * read-modify-write, and power up the flat panel (GX_FP_PM).
 */
static void gx_configure_display(struct fb_info *info)
{
	struct geodefb_par *par = info->par;
	u32 dcfg, fp_pm;

	dcfg = readl(par->vid_regs + GX_DCFG);

	/* Clear bits from existing mode. */
	dcfg &= ~(GX_DCFG_CRT_SYNC_SKW_MASK
		  | GX_DCFG_CRT_HSYNC_POL | GX_DCFG_CRT_VSYNC_POL
		  | GX_DCFG_VSYNC_EN | GX_DCFG_HSYNC_EN);

	/* Set default sync skew. */
	dcfg |= GX_DCFG_CRT_SYNC_SKW_DFLT;

	/* Enable hsync and vsync. */
	dcfg |= GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN;

	/* Sync polarities: the POL bits are set for active-high syncs. */
	if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
		dcfg |= GX_DCFG_CRT_HSYNC_POL;
	if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
		dcfg |= GX_DCFG_CRT_VSYNC_POL;

	writel(dcfg, par->vid_regs + GX_DCFG);

	/* Power on flat panel. */
	fp_pm = readl(par->vid_regs + GX_FP_PM);
	fp_pm |= GX_FP_PM_P;
	writel(fp_pm, par->vid_regs + GX_FP_PM);
}
209
210static int gx_blank_display(struct fb_info *info, int blank_mode)
211{
212 struct geodefb_par *par = info->par;
213 u32 dcfg, fp_pm;
214 int blank, hsync, vsync;
215
216 /* CRT power saving modes. */
217 switch (blank_mode) {
218 case FB_BLANK_UNBLANK:
219 blank = 0; hsync = 1; vsync = 1;
220 break;
221 case FB_BLANK_NORMAL:
222 blank = 1; hsync = 1; vsync = 1;
223 break;
224 case FB_BLANK_VSYNC_SUSPEND:
225 blank = 1; hsync = 1; vsync = 0;
226 break;
227 case FB_BLANK_HSYNC_SUSPEND:
228 blank = 1; hsync = 0; vsync = 1;
229 break;
230 case FB_BLANK_POWERDOWN:
231 blank = 1; hsync = 0; vsync = 0;
232 break;
233 default:
234 return -EINVAL;
235 }
236 dcfg = readl(par->vid_regs + GX_DCFG);
237 dcfg &= ~(GX_DCFG_DAC_BL_EN
238 | GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN);
239 if (!blank)
240 dcfg |= GX_DCFG_DAC_BL_EN;
241 if (hsync)
242 dcfg |= GX_DCFG_HSYNC_EN;
243 if (vsync)
244 dcfg |= GX_DCFG_VSYNC_EN;
245 writel(dcfg, par->vid_regs + GX_DCFG);
246
247 /* Power on/off flat panel. */
248 fp_pm = readl(par->vid_regs + GX_FP_PM);
249 if (blank_mode == FB_BLANK_POWERDOWN)
250 fp_pm &= ~GX_FP_PM_P;
251 else
252 fp_pm |= GX_FP_PM_P;
253 writel(fp_pm, par->vid_regs + GX_FP_PM);
254
255 return 0;
256}
257
/* Video-processor ops vector exported to the generic geodefb core
 * (declared extern in video_gx.h). */
struct geode_vid_ops gx_vid_ops = {
	.set_dclk = gx_set_dclk_frequency,
	.configure_display = gx_configure_display,
	.blank_display = gx_blank_display,
};
diff --git a/drivers/video/geode/video_gx.h b/drivers/video/geode/video_gx.h
new file mode 100644
index 000000000000..2d9211f3ed84
--- /dev/null
+++ b/drivers/video/geode/video_gx.h
@@ -0,0 +1,47 @@
1/*
2 * Geode GX video device
3 *
4 * Copyright (C) 2006 Arcom Control Systems Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
#ifndef __VIDEO_GX_H__
#define __VIDEO_GX_H__

/* Ops vector implemented in video_gx.c, consumed by the geodefb core. */
extern struct geode_vid_ops gx_vid_ops;

/* Geode GX video processor registers (byte offsets into the BAR-3
 * register block mapped at par->vid_regs). */

/* Display configuration register. */
#define GX_DCFG		0x0008
#  define GX_DCFG_CRT_EN		0x00000001
#  define GX_DCFG_HSYNC_EN		0x00000002
#  define GX_DCFG_VSYNC_EN		0x00000004
#  define GX_DCFG_DAC_BL_EN		0x00000008
#  define GX_DCFG_CRT_HSYNC_POL		0x00000100
#  define GX_DCFG_CRT_VSYNC_POL		0x00000200
#  define GX_DCFG_CRT_SYNC_SKW_MASK	0x0001C000
#  define GX_DCFG_CRT_SYNC_SKW_DFLT	0x00010000
#  define GX_DCFG_VG_CK			0x00100000
#  define GX_DCFG_GV_GAM		0x00200000
#  define GX_DCFG_DAC_VREF		0x04000000

/* Geode GX flat panel display control registers */
#define GX_FP_PM	0x410
#  define GX_FP_PM_P		0x01000000	/* panel power bit */

/* Geode GX clock control MSRs */

#define MSR_GLCP_SYS_RSTPLL	0x4c000014
#  define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2	(0x0000000000000002ull)
#  define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2	(0x0000000000000004ull)
#  define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3	(0x0000000000000008ull)

#define MSR_GLCP_DOTPLL		0x4c000015
#  define MSR_GLCP_DOTPLL_DOTRESET	(0x0000000000000001ull)
#  define MSR_GLCP_DOTPLL_BYPASS	(0x0000000000008000ull)
#  define MSR_GLCP_DOTPLL_LOCK		(0x0000000002000000ull)

#endif /* !__VIDEO_GX_H__ */
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index e3c8b5f1ca76..3fe3ae1aff12 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -210,8 +210,7 @@ int i810_probe_i2c_connector(struct fb_info *info, u8 **out_edid, int conn)
210 } 210 }
211 } 211 }
212 212
213 if (out_edid) 213 *out_edid = edid;
214 *out_edid = edid;
215 214
216 return (edid) ? 0 : 1; 215 return (edid) ? 0 : 1;
217} 216}
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index 7db42542eb19..f73c642b50c2 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -440,9 +440,9 @@ getclkMHz(struct imstt_par *par)
440static void 440static void
441setclkMHz(struct imstt_par *par, __u32 MHz) 441setclkMHz(struct imstt_par *par, __u32 MHz)
442{ 442{
443 __u32 clk_m, clk_n, clk_p, x, stage, spilled; 443 __u32 clk_m, clk_n, x, stage, spilled;
444 444
445 clk_m = clk_n = clk_p = 0; 445 clk_m = clk_n = 0;
446 stage = spilled = 0; 446 stage = spilled = 0;
447 for (;;) { 447 for (;;) {
448 switch (stage) { 448 switch (stage) {
@@ -453,7 +453,7 @@ setclkMHz(struct imstt_par *par, __u32 MHz)
453 clk_n++; 453 clk_n++;
454 break; 454 break;
455 } 455 }
456 x = 20 * (clk_m + 1) / ((clk_n + 1) * (clk_p ? 2 * clk_p : 1)); 456 x = 20 * (clk_m + 1) / (clk_n + 1);
457 if (x == MHz) 457 if (x == MHz)
458 break; 458 break;
459 if (x > MHz) { 459 if (x > MHz) {
@@ -466,7 +466,7 @@ setclkMHz(struct imstt_par *par, __u32 MHz)
466 466
467 par->init.pclk_m = clk_m; 467 par->init.pclk_m = clk_m;
468 par->init.pclk_n = clk_n; 468 par->init.pclk_n = clk_n;
469 par->init.pclk_p = clk_p; 469 par->init.pclk_p = 0;
470} 470}
471 471
472static struct imstt_regvals * 472static struct imstt_regvals *
@@ -1372,18 +1372,24 @@ init_imstt(struct fb_info *info)
1372 write_reg_le32(par->dc_regs, STGCTL, tmp & ~0x1); 1372 write_reg_le32(par->dc_regs, STGCTL, tmp & ~0x1);
1373 write_reg_le32(par->dc_regs, SSR, 0); 1373 write_reg_le32(par->dc_regs, SSR, 0);
1374 1374
1375 /* set default values for DAC registers */ 1375 /* set default values for DAC registers */
1376 if (par->ramdac == IBM) { 1376 if (par->ramdac == IBM) {
1377 par->cmap_regs[PPMASK] = 0xff; eieio(); 1377 par->cmap_regs[PPMASK] = 0xff;
1378 par->cmap_regs[PIDXHI] = 0; eieio(); 1378 eieio();
1379 for (i = 0; i < sizeof(ibm_initregs) / sizeof(*ibm_initregs); i++) { 1379 par->cmap_regs[PIDXHI] = 0;
1380 par->cmap_regs[PIDXLO] = ibm_initregs[i].addr; eieio(); 1380 eieio();
1381 par->cmap_regs[PIDXDATA] = ibm_initregs[i].value; eieio(); 1381 for (i = 0; i < ARRAY_SIZE(ibm_initregs); i++) {
1382 par->cmap_regs[PIDXLO] = ibm_initregs[i].addr;
1383 eieio();
1384 par->cmap_regs[PIDXDATA] = ibm_initregs[i].value;
1385 eieio();
1382 } 1386 }
1383 } else { 1387 } else {
1384 for (i = 0; i < sizeof(tvp_initregs) / sizeof(*tvp_initregs); i++) { 1388 for (i = 0; i < ARRAY_SIZE(tvp_initregs); i++) {
1385 par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr; eieio(); 1389 par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr;
1386 par->cmap_regs[TVPIDATA] = tvp_initregs[i].value; eieio(); 1390 eieio();
1391 par->cmap_regs[TVPIDATA] = tvp_initregs[i].value;
1392 eieio();
1387 } 1393 }
1388 } 1394 }
1389 1395
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index 2fc71081f7e7..c0385c6f7db5 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -380,7 +380,7 @@ int __init mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
380 if (mode_option && !strncmp(mode_option, "mac", 3)) { 380 if (mode_option && !strncmp(mode_option, "mac", 3)) {
381 mode_option += 3; 381 mode_option += 3;
382 db = mac_modedb; 382 db = mac_modedb;
383 dbsize = sizeof(mac_modedb)/sizeof(*mac_modedb); 383 dbsize = ARRAY_SIZE(mac_modedb);
384 } 384 }
385 return fb_find_mode(var, info, mode_option, db, dbsize, 385 return fb_find_mode(var, info, mode_option, db, dbsize,
386 &mac_modedb[DEFAULT_MODEDB_INDEX], default_bpp); 386 &mac_modedb[DEFAULT_MODEDB_INDEX], default_bpp);
diff --git a/drivers/video/matrox/matroxfb_g450.c b/drivers/video/matrox/matroxfb_g450.c
index c122d8743dd2..4d610b405d45 100644
--- a/drivers/video/matrox/matroxfb_g450.c
+++ b/drivers/video/matrox/matroxfb_g450.c
@@ -59,7 +59,7 @@ static const struct mctl g450_controls[] =
59 }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) }, 59 }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) },
60}; 60};
61 61
62#define G450CTRLS (sizeof(g450_controls)/sizeof(g450_controls[0])) 62#define G450CTRLS ARRAY_SIZE(g450_controls)
63 63
64/* Return: positive number: id found 64/* Return: positive number: id found
65 -EINVAL: id not found, return failure 65 -EINVAL: id not found, return failure
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 6019710dc298..5d29a26b8cdf 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -89,12 +89,12 @@ static const struct mctl maven_controls[] =
89 }, offsetof(struct matrox_fb_info, altout.tvo_params.hue) }, 89 }, offsetof(struct matrox_fb_info, altout.tvo_params.hue) },
90 { { V4L2_CID_GAMMA, V4L2_CTRL_TYPE_INTEGER, 90 { { V4L2_CID_GAMMA, V4L2_CTRL_TYPE_INTEGER,
91 "gamma", 91 "gamma",
92 0, sizeof(maven_gamma)/sizeof(maven_gamma[0])-1, 1, 3, 92 0, ARRAY_SIZE(maven_gamma) - 1, 1, 3,
93 0, 93 0,
94 }, offsetof(struct matrox_fb_info, altout.tvo_params.gamma) }, 94 }, offsetof(struct matrox_fb_info, altout.tvo_params.gamma) },
95 { { MATROXFB_CID_TESTOUT, V4L2_CTRL_TYPE_BOOLEAN, 95 { { MATROXFB_CID_TESTOUT, V4L2_CTRL_TYPE_BOOLEAN,
96 "test output", 96 "test output",
97 0, 1, 1, 0, 97 0, 1, 1, 0,
98 0, 98 0,
99 }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) }, 99 }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) },
100 { { MATROXFB_CID_DEFLICKER, V4L2_CTRL_TYPE_INTEGER, 100 { { MATROXFB_CID_DEFLICKER, V4L2_CTRL_TYPE_INTEGER,
@@ -105,7 +105,7 @@ static const struct mctl maven_controls[] =
105 105
106}; 106};
107 107
108#define MAVCTRLS (sizeof(maven_controls)/sizeof(maven_controls[0])) 108#define MAVCTRLS ARRAY_SIZE(maven_controls)
109 109
110/* Return: positive number: id found 110/* Return: positive number: id found
111 -EINVAL: id not found, return failure 111 -EINVAL: id not found, return failure
@@ -129,7 +129,7 @@ static int get_ctrl_id(__u32 v4l2_id) {
129 129
130struct maven_data { 130struct maven_data {
131 struct matrox_fb_info* primary_head; 131 struct matrox_fb_info* primary_head;
132 struct i2c_client* client; 132 struct i2c_client client;
133 int version; 133 int version;
134}; 134};
135 135
@@ -970,7 +970,7 @@ static inline int maven_compute_timming(struct maven_data* md,
970 970
971static int maven_program_timming(struct maven_data* md, 971static int maven_program_timming(struct maven_data* md,
972 const struct mavenregs* m) { 972 const struct mavenregs* m) {
973 struct i2c_client* c = md->client; 973 struct i2c_client* c = &md->client;
974 974
975 if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { 975 if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) {
976 LR(0x80); 976 LR(0x80);
@@ -1007,7 +1007,7 @@ static int maven_program_timming(struct maven_data* md,
1007} 1007}
1008 1008
1009static inline int maven_resync(struct maven_data* md) { 1009static inline int maven_resync(struct maven_data* md) {
1010 struct i2c_client* c = md->client; 1010 struct i2c_client* c = &md->client;
1011 maven_set_reg(c, 0x95, 0x20); /* start whole thing */ 1011 maven_set_reg(c, 0x95, 0x20); /* start whole thing */
1012 return 0; 1012 return 0;
1013} 1013}
@@ -1065,48 +1065,48 @@ static int maven_set_control (struct maven_data* md,
1065 maven_compute_bwlevel(md, &blacklevel, &whitelevel); 1065 maven_compute_bwlevel(md, &blacklevel, &whitelevel);
1066 blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); 1066 blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8);
1067 whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); 1067 whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8);
1068 maven_set_reg_pair(md->client, 0x0e, blacklevel); 1068 maven_set_reg_pair(&md->client, 0x0e, blacklevel);
1069 maven_set_reg_pair(md->client, 0x1e, whitelevel); 1069 maven_set_reg_pair(&md->client, 0x1e, whitelevel);
1070 } 1070 }
1071 break; 1071 break;
1072 case V4L2_CID_SATURATION: 1072 case V4L2_CID_SATURATION:
1073 { 1073 {
1074 maven_set_reg(md->client, 0x20, p->value); 1074 maven_set_reg(&md->client, 0x20, p->value);
1075 maven_set_reg(md->client, 0x22, p->value); 1075 maven_set_reg(&md->client, 0x22, p->value);
1076 } 1076 }
1077 break; 1077 break;
1078 case V4L2_CID_HUE: 1078 case V4L2_CID_HUE:
1079 { 1079 {
1080 maven_set_reg(md->client, 0x25, p->value); 1080 maven_set_reg(&md->client, 0x25, p->value);
1081 } 1081 }
1082 break; 1082 break;
1083 case V4L2_CID_GAMMA: 1083 case V4L2_CID_GAMMA:
1084 { 1084 {
1085 const struct maven_gamma* g; 1085 const struct maven_gamma* g;
1086 g = maven_compute_gamma(md); 1086 g = maven_compute_gamma(md);
1087 maven_set_reg(md->client, 0x83, g->reg83); 1087 maven_set_reg(&md->client, 0x83, g->reg83);
1088 maven_set_reg(md->client, 0x84, g->reg84); 1088 maven_set_reg(&md->client, 0x84, g->reg84);
1089 maven_set_reg(md->client, 0x85, g->reg85); 1089 maven_set_reg(&md->client, 0x85, g->reg85);
1090 maven_set_reg(md->client, 0x86, g->reg86); 1090 maven_set_reg(&md->client, 0x86, g->reg86);
1091 maven_set_reg(md->client, 0x87, g->reg87); 1091 maven_set_reg(&md->client, 0x87, g->reg87);
1092 maven_set_reg(md->client, 0x88, g->reg88); 1092 maven_set_reg(&md->client, 0x88, g->reg88);
1093 maven_set_reg(md->client, 0x89, g->reg89); 1093 maven_set_reg(&md->client, 0x89, g->reg89);
1094 maven_set_reg(md->client, 0x8a, g->reg8a); 1094 maven_set_reg(&md->client, 0x8a, g->reg8a);
1095 maven_set_reg(md->client, 0x8b, g->reg8b); 1095 maven_set_reg(&md->client, 0x8b, g->reg8b);
1096 } 1096 }
1097 break; 1097 break;
1098 case MATROXFB_CID_TESTOUT: 1098 case MATROXFB_CID_TESTOUT:
1099 { 1099 {
1100 unsigned char val 1100 unsigned char val
1101 = maven_get_reg (md->client,0x8d); 1101 = maven_get_reg(&md->client,0x8d);
1102 if (p->value) val |= 0x10; 1102 if (p->value) val |= 0x10;
1103 else val &= ~0x10; 1103 else val &= ~0x10;
1104 maven_set_reg (md->client, 0x8d, val); 1104 maven_set_reg(&md->client, 0x8d, val);
1105 } 1105 }
1106 break; 1106 break;
1107 case MATROXFB_CID_DEFLICKER: 1107 case MATROXFB_CID_DEFLICKER:
1108 { 1108 {
1109 maven_set_reg(md->client, 0x93, maven_compute_deflicker(md)); 1109 maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md));
1110 } 1110 }
1111 break; 1111 break;
1112 } 1112 }
@@ -1185,7 +1185,6 @@ static int maven_init_client(struct i2c_client* clnt) {
1185 MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); 1185 MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo);
1186 1186
1187 md->primary_head = MINFO; 1187 md->primary_head = MINFO;
1188 md->client = clnt;
1189 down_write(&ACCESS_FBINFO(altout.lock)); 1188 down_write(&ACCESS_FBINFO(altout.lock));
1190 ACCESS_FBINFO(outputs[1]).output = &maven_altout; 1189 ACCESS_FBINFO(outputs[1]).output = &maven_altout;
1191 ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; 1190 ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
@@ -1243,19 +1242,17 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
1243 I2C_FUNC_SMBUS_BYTE_DATA | 1242 I2C_FUNC_SMBUS_BYTE_DATA |
1244 I2C_FUNC_PROTOCOL_MANGLING)) 1243 I2C_FUNC_PROTOCOL_MANGLING))
1245 goto ERROR0; 1244 goto ERROR0;
1246 if (!(new_client = (struct i2c_client*)kmalloc(sizeof(*new_client) + sizeof(*data), 1245 if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
1247 GFP_KERNEL))) {
1248 err = -ENOMEM; 1246 err = -ENOMEM;
1249 goto ERROR0; 1247 goto ERROR0;
1250 } 1248 }
1251 memset(new_client, 0, sizeof(*new_client) + sizeof(*data)); 1249 new_client = &data->client;
1252 data = (struct maven_data*)(new_client + 1);
1253 i2c_set_clientdata(new_client, data); 1250 i2c_set_clientdata(new_client, data);
1254 new_client->addr = address; 1251 new_client->addr = address;
1255 new_client->adapter = adapter; 1252 new_client->adapter = adapter;
1256 new_client->driver = &maven_driver; 1253 new_client->driver = &maven_driver;
1257 new_client->flags = 0; 1254 new_client->flags = 0;
1258 strcpy(new_client->name, "maven client"); 1255 strlcpy(new_client->name, "maven", I2C_NAME_SIZE);
1259 if ((err = i2c_attach_client(new_client))) 1256 if ((err = i2c_attach_client(new_client)))
1260 goto ERROR3; 1257 goto ERROR3;
1261 err = maven_init_client(new_client); 1258 err = maven_init_client(new_client);
@@ -1279,12 +1276,10 @@ static int maven_attach_adapter(struct i2c_adapter* adapter) {
1279static int maven_detach_client(struct i2c_client* client) { 1276static int maven_detach_client(struct i2c_client* client) {
1280 int err; 1277 int err;
1281 1278
1282 if ((err = i2c_detach_client(client))) { 1279 if ((err = i2c_detach_client(client)))
1283 printk(KERN_ERR "maven: Cannot deregister client\n");
1284 return err; 1280 return err;
1285 }
1286 maven_shutdown_client(client); 1281 maven_shutdown_client(client);
1287 kfree(client); 1282 kfree(i2c_get_clientdata(client));
1288 return 0; 1283 return 0;
1289} 1284}
1290 1285
@@ -1297,20 +1292,13 @@ static struct i2c_driver maven_driver={
1297 .detach_client = maven_detach_client, 1292 .detach_client = maven_detach_client,
1298}; 1293};
1299 1294
1300/* ************************** */ 1295static int __init matroxfb_maven_init(void)
1301 1296{
1302static int matroxfb_maven_init(void) { 1297 return i2c_add_driver(&maven_driver);
1303 int err;
1304
1305 err = i2c_add_driver(&maven_driver);
1306 if (err) {
1307 printk(KERN_ERR "maven: Maven driver failed to register (%d).\n", err);
1308 return err;
1309 }
1310 return 0;
1311} 1298}
1312 1299
1313static void matroxfb_maven_exit(void) { 1300static void __exit matroxfb_maven_exit(void)
1301{
1314 i2c_del_driver(&maven_driver); 1302 i2c_del_driver(&maven_driver);
1315} 1303}
1316 1304
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 1da2f84bdc25..26a1c618a205 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -183,6 +183,10 @@ static const struct fb_videomode modedb[] = {
183 NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, 183 NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
184 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED 184 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
185 }, { 185 }, {
186 /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
187 NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
188 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
189 }, {
186 /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ 190 /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
187 NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, 191 NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
188 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED 192 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
@@ -496,7 +500,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
496 /* Set up defaults */ 500 /* Set up defaults */
497 if (!db) { 501 if (!db) {
498 db = modedb; 502 db = modedb;
499 dbsize = sizeof(modedb)/sizeof(*modedb); 503 dbsize = ARRAY_SIZE(modedb);
500 } 504 }
501 if (!default_mode) 505 if (!default_mode)
502 default_mode = &modedb[DEFAULT_MODEDB_INDEX]; 506 default_mode = &modedb[DEFAULT_MODEDB_INDEX];
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index b961d5601bd9..24b12f71d5a8 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -165,20 +165,20 @@ static int neoFindMode(int xres, int yres, int depth)
165 165
166 switch (depth) { 166 switch (depth) {
167 case 8: 167 case 8:
168 size = sizeof(bios8) / sizeof(biosMode); 168 size = ARRAY_SIZE(bios8);
169 mode = bios8; 169 mode = bios8;
170 break; 170 break;
171 case 16: 171 case 16:
172 size = sizeof(bios16) / sizeof(biosMode); 172 size = ARRAY_SIZE(bios16);
173 mode = bios16; 173 mode = bios16;
174 break; 174 break;
175 case 24: 175 case 24:
176 size = sizeof(bios24) / sizeof(biosMode); 176 size = ARRAY_SIZE(bios24);
177 mode = bios24; 177 mode = bios24;
178 break; 178 break;
179#ifdef NO_32BIT_SUPPORT_YET 179#ifdef NO_32BIT_SUPPORT_YET
180 case 32: 180 case 32:
181 size = sizeof(bios32) / sizeof(biosMode); 181 size = ARRAY_SIZE(bios32);
182 mode = bios32; 182 mode = bios32;
183 break; 183 break;
184#endif 184#endif
diff --git a/drivers/video/nvidia/nv_accel.c b/drivers/video/nvidia/nv_accel.c
index f377a29ec97a..4aefb8f41637 100644
--- a/drivers/video/nvidia/nv_accel.c
+++ b/drivers/video/nvidia/nv_accel.c
@@ -300,6 +300,9 @@ int nvidiafb_sync(struct fb_info *info)
300{ 300{
301 struct nvidia_par *par = info->par; 301 struct nvidia_par *par = info->par;
302 302
303 if (info->state != FBINFO_STATE_RUNNING)
304 return 0;
305
303 if (!par->lockup) 306 if (!par->lockup)
304 NVFlush(par); 307 NVFlush(par);
305 308
@@ -313,6 +316,9 @@ void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
313{ 316{
314 struct nvidia_par *par = info->par; 317 struct nvidia_par *par = info->par;
315 318
319 if (info->state != FBINFO_STATE_RUNNING)
320 return;
321
316 if (par->lockup) 322 if (par->lockup)
317 return cfb_copyarea(info, region); 323 return cfb_copyarea(info, region);
318 324
@@ -329,6 +335,9 @@ void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
329 struct nvidia_par *par = info->par; 335 struct nvidia_par *par = info->par;
330 u32 color; 336 u32 color;
331 337
338 if (info->state != FBINFO_STATE_RUNNING)
339 return;
340
332 if (par->lockup) 341 if (par->lockup)
333 return cfb_fillrect(info, rect); 342 return cfb_fillrect(info, rect);
334 343
@@ -412,6 +421,9 @@ void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
412{ 421{
413 struct nvidia_par *par = info->par; 422 struct nvidia_par *par = info->par;
414 423
424 if (info->state != FBINFO_STATE_RUNNING)
425 return;
426
415 if (image->depth == 1 && !par->lockup) 427 if (image->depth == 1 && !par->lockup)
416 nvidiafb_mono_color_expand(info, image); 428 nvidiafb_mono_color_expand(info, image);
417 else 429 else
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index bd9eca05e146..1edb1c432b75 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -218,8 +218,7 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
218 } 218 }
219 } 219 }
220 220
221 if (out_edid) 221 *out_edid = edid;
222 *out_edid = edid;
223 222
224 return (edid) ? 0 : 1; 223 return (edid) ? 0 : 1;
225} 224}
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index e4a5b1da71c4..acdc26693402 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -129,6 +129,7 @@ struct nvidia_par {
129 int fpHeight; 129 int fpHeight;
130 int PanelTweak; 130 int PanelTweak;
131 int paneltweak; 131 int paneltweak;
132 int pm_state;
132 u32 crtcSync_read; 133 u32 crtcSync_read;
133 u32 fpSyncs; 134 u32 fpSyncs;
134 u32 dmaPut; 135 u32 dmaPut;
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index a7c4e5e8ead6..6d3e4890cb43 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -21,6 +21,7 @@
21#include <linux/fb.h> 21#include <linux/fb.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/console.h>
24#ifdef CONFIG_MTRR 25#ifdef CONFIG_MTRR
25#include <asm/mtrr.h> 26#include <asm/mtrr.h>
26#endif 27#endif
@@ -296,6 +297,8 @@ static struct pci_device_id nvidiafb_pci_tbl[] = {
296 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 297 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
297 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT, 298 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
298 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 299 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
300 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280,
301 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
299 {PCI_VENDOR_ID_NVIDIA, 0x0252, 302 {PCI_VENDOR_ID_NVIDIA, 0x0252,
300 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 303 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
301 {PCI_VENDOR_ID_NVIDIA, 0x0313, 304 {PCI_VENDOR_ID_NVIDIA, 0x0313,
@@ -615,6 +618,30 @@ static int nvidia_panel_tweak(struct nvidia_par *par,
615 return tweak; 618 return tweak;
616} 619}
617 620
621static void nvidia_vga_protect(struct nvidia_par *par, int on)
622{
623 unsigned char tmp;
624
625 if (on) {
626 /*
627 * Turn off screen and disable sequencer.
628 */
629 tmp = NVReadSeq(par, 0x01);
630
631 NVWriteSeq(par, 0x00, 0x01); /* Synchronous Reset */
632 NVWriteSeq(par, 0x01, tmp | 0x20); /* disable the display */
633 } else {
634 /*
635 * Reenable sequencer, then turn on screen.
636 */
637
638 tmp = NVReadSeq(par, 0x01);
639
640 NVWriteSeq(par, 0x01, tmp & ~0x20); /* reenable display */
641 NVWriteSeq(par, 0x00, 0x03); /* End Reset */
642 }
643}
644
618static void nvidia_save_vga(struct nvidia_par *par, 645static void nvidia_save_vga(struct nvidia_par *par,
619 struct _riva_hw_state *state) 646 struct _riva_hw_state *state)
620{ 647{
@@ -643,9 +670,9 @@ static void nvidia_save_vga(struct nvidia_par *par,
643 670
644#undef DUMP_REG 671#undef DUMP_REG
645 672
646static void nvidia_write_regs(struct nvidia_par *par) 673static void nvidia_write_regs(struct nvidia_par *par,
674 struct _riva_hw_state *state)
647{ 675{
648 struct _riva_hw_state *state = &par->ModeReg;
649 int i; 676 int i;
650 677
651 NVTRACE_ENTER(); 678 NVTRACE_ENTER();
@@ -694,32 +721,6 @@ static void nvidia_write_regs(struct nvidia_par *par)
694 NVTRACE_LEAVE(); 721 NVTRACE_LEAVE();
695} 722}
696 723
697static void nvidia_vga_protect(struct nvidia_par *par, int on)
698{
699 unsigned char tmp;
700
701 if (on) {
702 /*
703 * Turn off screen and disable sequencer.
704 */
705 tmp = NVReadSeq(par, 0x01);
706
707 NVWriteSeq(par, 0x00, 0x01); /* Synchronous Reset */
708 NVWriteSeq(par, 0x01, tmp | 0x20); /* disable the display */
709 } else {
710 /*
711 * Reenable sequencer, then turn on screen.
712 */
713
714 tmp = NVReadSeq(par, 0x01);
715
716 NVWriteSeq(par, 0x01, tmp & ~0x20); /* reenable display */
717 NVWriteSeq(par, 0x00, 0x03); /* End Reset */
718 }
719}
720
721
722
723static int nvidia_calc_regs(struct fb_info *info) 724static int nvidia_calc_regs(struct fb_info *info)
724{ 725{
725 struct nvidia_par *par = info->par; 726 struct nvidia_par *par = info->par;
@@ -1068,7 +1069,8 @@ static int nvidiafb_set_par(struct fb_info *info)
1068 1069
1069 nvidia_vga_protect(par, 1); 1070 nvidia_vga_protect(par, 1);
1070 1071
1071 nvidia_write_regs(par); 1072 nvidia_write_regs(par, &par->ModeReg);
1073 NVSetStartAddress(par, 0);
1072 1074
1073#if defined (__BIG_ENDIAN) 1075#if defined (__BIG_ENDIAN)
1074 /* turn on LFB swapping */ 1076 /* turn on LFB swapping */
@@ -1377,6 +1379,57 @@ static struct fb_ops nvidia_fb_ops = {
1377 .fb_sync = nvidiafb_sync, 1379 .fb_sync = nvidiafb_sync,
1378}; 1380};
1379 1381
1382#ifdef CONFIG_PM
1383static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t state)
1384{
1385 struct fb_info *info = pci_get_drvdata(dev);
1386 struct nvidia_par *par = info->par;
1387
1388 acquire_console_sem();
1389 par->pm_state = state.event;
1390
1391 if (state.event == PM_EVENT_FREEZE) {
1392 dev->dev.power.power_state = state;
1393 } else {
1394 fb_set_suspend(info, 1);
1395 nvidiafb_blank(FB_BLANK_POWERDOWN, info);
1396 nvidia_write_regs(par, &par->SavedReg);
1397 pci_save_state(dev);
1398 pci_disable_device(dev);
1399 pci_set_power_state(dev, pci_choose_state(dev, state));
1400 }
1401
1402 release_console_sem();
1403 return 0;
1404}
1405
1406static int nvidiafb_resume(struct pci_dev *dev)
1407{
1408 struct fb_info *info = pci_get_drvdata(dev);
1409 struct nvidia_par *par = info->par;
1410
1411 acquire_console_sem();
1412 pci_set_power_state(dev, PCI_D0);
1413
1414 if (par->pm_state != PM_EVENT_FREEZE) {
1415 pci_restore_state(dev);
1416 pci_enable_device(dev);
1417 pci_set_master(dev);
1418 }
1419
1420 par->pm_state = PM_EVENT_ON;
1421 nvidiafb_set_par(info);
1422 fb_set_suspend (info, 0);
1423 nvidiafb_blank(FB_BLANK_UNBLANK, info);
1424
1425 release_console_sem();
1426 return 0;
1427}
1428#else
1429#define nvidiafb_suspend NULL
1430#define nvidiafb_resume NULL
1431#endif
1432
1380static int __devinit nvidia_set_fbinfo(struct fb_info *info) 1433static int __devinit nvidia_set_fbinfo(struct fb_info *info)
1381{ 1434{
1382 struct fb_monspecs *specs = &info->monspecs; 1435 struct fb_monspecs *specs = &info->monspecs;
@@ -1720,8 +1773,6 @@ static void __exit nvidiafb_remove(struct pci_dev *pd)
1720 struct nvidia_par *par = info->par; 1773 struct nvidia_par *par = info->par;
1721 1774
1722 NVTRACE_ENTER(); 1775 NVTRACE_ENTER();
1723 if (!info)
1724 return;
1725 1776
1726 unregister_framebuffer(info); 1777 unregister_framebuffer(info);
1727#ifdef CONFIG_MTRR 1778#ifdef CONFIG_MTRR
@@ -1798,8 +1849,10 @@ static int __devinit nvidiafb_setup(char *options)
1798static struct pci_driver nvidiafb_driver = { 1849static struct pci_driver nvidiafb_driver = {
1799 .name = "nvidiafb", 1850 .name = "nvidiafb",
1800 .id_table = nvidiafb_pci_tbl, 1851 .id_table = nvidiafb_pci_tbl,
1801 .probe = nvidiafb_probe, 1852 .probe = nvidiafb_probe,
1802 .remove = __exit_p(nvidiafb_remove), 1853 .suspend = nvidiafb_suspend,
1854 .resume = nvidiafb_resume,
1855 .remove = __exit_p(nvidiafb_remove),
1803}; 1856};
1804 1857
1805/* ------------------------------------------------------------------------- * 1858/* ------------------------------------------------------------------------- *
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index eeeac924b500..73e2d7d16608 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -228,7 +228,7 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info)
228 228
229 freq1 = (par->osc0 * count1 + count0 / 2) / count0; 229 freq1 = (par->osc0 * count1 + count0 / 2) / count0;
230 par->osc1 = freq1; 230 par->osc1 = freq1;
231 for (i = 0; i < sizeof(pmagbbfb_freqs) / sizeof(*pmagbbfb_freqs); i++) 231 for (i = 0; i < ARRAY_SIZE(pmagbbfb_freqs); i++)
232 if (freq1 >= pmagbbfb_freqs[i] - 232 if (freq1 >= pmagbbfb_freqs[i] -
233 (pmagbbfb_freqs[i] + 128) / 256 && 233 (pmagbbfb_freqs[i] + 128) / 256 &&
234 freq1 <= pmagbbfb_freqs[i] + 234 freq1 <= pmagbbfb_freqs[i] +
diff --git a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c
index db9fb9074dbc..24982adb3aa2 100644
--- a/drivers/video/radeonfb.c
+++ b/drivers/video/radeonfb.c
@@ -759,7 +759,7 @@ static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo)
759 rom = rom_base; 759 rom = rom_base;
760 760
761 for (i = 0; (i < 512) && (stage != 4); i++) { 761 for (i = 0; (i < 512) && (stage != 4); i++) {
762 for(j = 0;j < sizeof(radeon_sig)/sizeof(char *);j++) { 762 for (j = 0; j < ARRAY_SIZE(radeon_sig); j++) {
763 if (radeon_sig[j][0] == *rom) 763 if (radeon_sig[j][0] == *rom)
764 if (strncmp(radeon_sig[j], rom, 764 if (strncmp(radeon_sig[j], rom,
765 strlen(radeon_sig[j])) == 0) { 765 strlen(radeon_sig[j])) == 0) {
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 6c19ab6afb01..f841f013b96f 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2072,8 +2072,6 @@ static void __exit rivafb_remove(struct pci_dev *pd)
2072 struct riva_par *par = info->par; 2072 struct riva_par *par = info->par;
2073 2073
2074 NVTRACE_ENTER(); 2074 NVTRACE_ENTER();
2075 if (!info)
2076 return;
2077 2075
2078#ifdef CONFIG_FB_RIVA_I2C 2076#ifdef CONFIG_FB_RIVA_I2C
2079 riva_delete_i2c_busses(par); 2077 riva_delete_i2c_busses(par);
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 00719a91479f..21debed863ac 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -273,8 +273,7 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid)
273 } 273 }
274 } 274 }
275 275
276 if (out_edid) 276 *out_edid = edid;
277 *out_edid = edid;
278 277
279 return (edid) ? 0 : 1; 278 return (edid) ? 0 : 1;
280} 279}
diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c
index 2d88f908170a..c3e070a6effd 100644
--- a/drivers/video/sis/init301.c
+++ b/drivers/video/sis/init301.c
@@ -8564,11 +8564,9 @@ SiS_ChrontelDoSomething3(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8564static void 8564static void
8565SiS_ChrontelDoSomething2(struct SiS_Private *SiS_Pr) 8565SiS_ChrontelDoSomething2(struct SiS_Private *SiS_Pr)
8566{ 8566{
8567 unsigned short temp,tempcl,tempch; 8567 unsigned short temp;
8568 8568
8569 SiS_LongDelay(SiS_Pr, 1); 8569 SiS_LongDelay(SiS_Pr, 1);
8570 tempcl = 3;
8571 tempch = 0;
8572 8570
8573 do { 8571 do {
8574 temp = SiS_GetCH701x(SiS_Pr,0x66); 8572 temp = SiS_GetCH701x(SiS_Pr,0x66);
@@ -8582,13 +8580,6 @@ SiS_ChrontelDoSomething2(struct SiS_Private *SiS_Pr)
8582 8580
8583 SiS_SetCH701xForLCD(SiS_Pr); 8581 SiS_SetCH701xForLCD(SiS_Pr);
8584 8582
8585 if(tempcl == 0) {
8586 if(tempch == 3) break;
8587 SiS_ChrontelResetDB(SiS_Pr);
8588 tempcl = 3;
8589 tempch++;
8590 }
8591 tempcl--;
8592 temp = SiS_GetCH701x(SiS_Pr,0x76); 8583 temp = SiS_GetCH701x(SiS_Pr,0x76);
8593 temp &= 0xfb; /* Reset PLL */ 8584 temp &= 0xfb; /* Reset PLL */
8594 SiS_SetCH701x(SiS_Pr,0x76,temp); 8585 SiS_SetCH701x(SiS_Pr,0x76,temp);
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 8c1a8b5135c6..c44de90ca12e 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -1194,10 +1194,11 @@ static struct dac_switch dacs[] __devinitdata = {
1194static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *par) 1194static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *par)
1195{ 1195{
1196 int i, ret = 0; 1196 int i, ret = 0;
1197 1197
1198 for (i=0; i<sizeof(dacs)/sizeof(dacs[0]); i++) { 1198 for (i = 0; i < ARRAY_SIZE(dacs); i++) {
1199 ret = dacs[i].detect(info); 1199 ret = dacs[i].detect(info);
1200 if (ret) break; 1200 if (ret)
1201 break;
1201 } 1202 }
1202 if (!ret) 1203 if (!ret)
1203 return 0; 1204 return 0;
@@ -1604,8 +1605,8 @@ static int sstfb_dump_regs(struct fb_info *info)
1604 {FBZMODE,"fbzmode"}, 1605 {FBZMODE,"fbzmode"},
1605 }; 1606 };
1606 1607
1607 const int pci_s = sizeof(pci_regs)/sizeof(pci_regs[0]); 1608 const int pci_s = ARRAY_SIZE(pci_regs);
1608 const int sst_s = sizeof(sst_regs)/sizeof(sst_regs[0]); 1609 const int sst_s = ARRAY_SIZE(sst_regs);
1609 struct sstfb_par *par = info->par; 1610 struct sstfb_par *par = info->par;
1610 struct pci_dev *dev = par->dev; 1611 struct pci_dev *dev = par->dev;
1611 u32 pci_res[pci_s]; 1612 u32 pci_res[pci_s];
diff --git a/drivers/video/virgefb.c b/drivers/video/virgefb.c
index ed78747487e2..5ea2345dab99 100644
--- a/drivers/video/virgefb.c
+++ b/drivers/video/virgefb.c
@@ -616,8 +616,7 @@ static struct {
616#endif 616#endif
617}; 617};
618 618
619#define arraysize(x) (sizeof(x)/sizeof(*(x))) 619#define NUM_TOTAL_MODES ARRAY_SIZE(virgefb_predefined)
620#define NUM_TOTAL_MODES arraysize(virgefb_predefined)
621 620
622/* 621/*
623 * Default to 800x600 for video=virge8:, virge16: or virge32: 622 * Default to 800x600 for video=virge8:, virge16: or virge32:
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 43c9f7de0314..f867b8d3e973 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -39,8 +39,8 @@
39 39
40extern struct file_system_type v9fs_fs_type; 40extern struct file_system_type v9fs_fs_type;
41extern struct address_space_operations v9fs_addr_operations; 41extern struct address_space_operations v9fs_addr_operations;
42extern struct file_operations v9fs_file_operations; 42extern const struct file_operations v9fs_file_operations;
43extern struct file_operations v9fs_dir_operations; 43extern const struct file_operations v9fs_dir_operations;
44extern struct dentry_operations v9fs_dentry_operations; 44extern struct dentry_operations v9fs_dentry_operations;
45 45
46struct inode *v9fs_get_inode(struct super_block *sb, int mode); 46struct inode *v9fs_get_inode(struct super_block *sb, int mode);
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 766f11f1215c..e32d5971039b 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -204,7 +204,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
204 return 0; 204 return 0;
205} 205}
206 206
207struct file_operations v9fs_dir_operations = { 207const struct file_operations v9fs_dir_operations = {
208 .read = generic_read_dir, 208 .read = generic_read_dir,
209 .readdir = v9fs_dir_readdir, 209 .readdir = v9fs_dir_readdir,
210 .open = v9fs_file_open, 210 .open = v9fs_file_open,
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 59e744163407..083dcfcd158e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -266,7 +266,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
266 return total; 266 return total;
267} 267}
268 268
269struct file_operations v9fs_file_operations = { 269const struct file_operations v9fs_file_operations = {
270 .llseek = generic_file_llseek, 270 .llseek = generic_file_llseek,
271 .read = v9fs_file_read, 271 .read = v9fs_file_read,
272 .write = v9fs_file_write, 272 .write = v9fs_file_write,
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index f6cd01352cc8..29217ff36d44 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -85,7 +85,7 @@ void __adfs_error(struct super_block *sb, const char *function,
85 85
86/* dir_*.c */ 86/* dir_*.c */
87extern struct inode_operations adfs_dir_inode_operations; 87extern struct inode_operations adfs_dir_inode_operations;
88extern struct file_operations adfs_dir_operations; 88extern const struct file_operations adfs_dir_operations;
89extern struct dentry_operations adfs_dentry_operations; 89extern struct dentry_operations adfs_dentry_operations;
90extern struct adfs_dir_ops adfs_f_dir_ops; 90extern struct adfs_dir_ops adfs_f_dir_ops;
91extern struct adfs_dir_ops adfs_fplus_dir_ops; 91extern struct adfs_dir_ops adfs_fplus_dir_ops;
@@ -94,7 +94,7 @@ extern int adfs_dir_update(struct super_block *sb, struct object_info *obj);
94 94
95/* file.c */ 95/* file.c */
96extern struct inode_operations adfs_file_inode_operations; 96extern struct inode_operations adfs_file_inode_operations;
97extern struct file_operations adfs_file_operations; 97extern const struct file_operations adfs_file_operations;
98 98
99static inline __u32 signed_asl(__u32 val, signed int shift) 99static inline __u32 signed_asl(__u32 val, signed int shift)
100{ 100{
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 0b4c3a028076..7b075fc397da 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -196,7 +196,7 @@ out:
196 return ret; 196 return ret;
197} 197}
198 198
199struct file_operations adfs_dir_operations = { 199const struct file_operations adfs_dir_operations = {
200 .read = generic_read_dir, 200 .read = generic_read_dir,
201 .readdir = adfs_readdir, 201 .readdir = adfs_readdir,
202 .fsync = file_fsync, 202 .fsync = file_fsync,
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index 6af10885f9d6..1014b9f2117b 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -25,7 +25,7 @@
25 25
26#include "adfs.h" 26#include "adfs.h"
27 27
28struct file_operations adfs_file_operations = { 28const struct file_operations adfs_file_operations = {
29 .llseek = generic_file_llseek, 29 .llseek = generic_file_llseek,
30 .read = generic_file_read, 30 .read = generic_file_read,
31 .mmap = generic_file_mmap, 31 .mmap = generic_file_mmap,
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 0c6799f2137a..a43a876742b8 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -192,9 +192,9 @@ extern void affs_dir_truncate(struct inode *);
192extern struct inode_operations affs_file_inode_operations; 192extern struct inode_operations affs_file_inode_operations;
193extern struct inode_operations affs_dir_inode_operations; 193extern struct inode_operations affs_dir_inode_operations;
194extern struct inode_operations affs_symlink_inode_operations; 194extern struct inode_operations affs_symlink_inode_operations;
195extern struct file_operations affs_file_operations; 195extern const struct file_operations affs_file_operations;
196extern struct file_operations affs_file_operations_ofs; 196extern const struct file_operations affs_file_operations_ofs;
197extern struct file_operations affs_dir_operations; 197extern const struct file_operations affs_dir_operations;
198extern struct address_space_operations affs_symlink_aops; 198extern struct address_space_operations affs_symlink_aops;
199extern struct address_space_operations affs_aops; 199extern struct address_space_operations affs_aops;
200extern struct address_space_operations affs_aops_ofs; 200extern struct address_space_operations affs_aops_ofs;
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index 548efd0ee98c..5d9649fa1814 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -17,7 +17,7 @@
17 17
18static int affs_readdir(struct file *, void *, filldir_t); 18static int affs_readdir(struct file *, void *, filldir_t);
19 19
20struct file_operations affs_dir_operations = { 20const struct file_operations affs_dir_operations = {
21 .read = generic_read_dir, 21 .read = generic_read_dir,
22 .readdir = affs_readdir, 22 .readdir = affs_readdir,
23 .fsync = file_fsync, 23 .fsync = file_fsync,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index f72fb776ecdf..7076262af39b 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -25,7 +25,7 @@ static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
25static int affs_file_open(struct inode *inode, struct file *filp); 25static int affs_file_open(struct inode *inode, struct file *filp);
26static int affs_file_release(struct inode *inode, struct file *filp); 26static int affs_file_release(struct inode *inode, struct file *filp);
27 27
28struct file_operations affs_file_operations = { 28const struct file_operations affs_file_operations = {
29 .llseek = generic_file_llseek, 29 .llseek = generic_file_llseek,
30 .read = generic_file_read, 30 .read = generic_file_read,
31 .write = generic_file_write, 31 .write = generic_file_write,
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 5c61c24dab2a..a6dff6a4f204 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -32,7 +32,7 @@ static int afs_d_delete(struct dentry *dentry);
32static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, 32static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
33 loff_t fpos, ino_t ino, unsigned dtype); 33 loff_t fpos, ino_t ino, unsigned dtype);
34 34
35struct file_operations afs_dir_file_operations = { 35const struct file_operations afs_dir_file_operations = {
36 .open = afs_dir_open, 36 .open = afs_dir_open,
37 .readdir = afs_dir_readdir, 37 .readdir = afs_dir_readdir,
38}; 38};
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 150b19227922..7bb716887e29 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
28#endif 28#endif
29 29
30static int afs_file_readpage(struct file *file, struct page *page); 30static int afs_file_readpage(struct file *file, struct page *page);
31static int afs_file_invalidatepage(struct page *page, unsigned long offset); 31static void afs_file_invalidatepage(struct page *page, unsigned long offset);
32static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); 32static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
33 33
34struct inode_operations afs_file_inode_operations = { 34struct inode_operations afs_file_inode_operations = {
@@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page,
212/* 212/*
213 * invalidate part or all of a page 213 * invalidate part or all of a page
214 */ 214 */
215static int afs_file_invalidatepage(struct page *page, unsigned long offset) 215static void afs_file_invalidatepage(struct page *page, unsigned long offset)
216{ 216{
217 int ret = 1; 217 int ret = 1;
218 218
@@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
238 if (!PageWriteback(page)) 238 if (!PageWriteback(page))
239 ret = page->mapping->a_ops->releasepage(page, 239 ret = page->mapping->a_ops->releasepage(page,
240 0); 240 0);
241 /* possibly should BUG_ON(!ret); - neilb */
241 } 242 }
242 } 243 }
243 244
244 _leave(" = %d", ret); 245 _leave(" = %d", ret);
245 return ret;
246} /* end afs_file_invalidatepage() */ 246} /* end afs_file_invalidatepage() */
247 247
248/*****************************************************************************/ 248/*****************************************************************************/
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index ab8f87c66319..72febdf9a35a 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -64,7 +64,7 @@ extern struct cachefs_index_def afs_cache_cell_index_def;
64 * dir.c 64 * dir.c
65 */ 65 */
66extern struct inode_operations afs_dir_inode_operations; 66extern struct inode_operations afs_dir_inode_operations;
67extern struct file_operations afs_dir_file_operations; 67extern const struct file_operations afs_dir_file_operations;
68 68
69/* 69/*
70 * file.c 70 * file.c
@@ -105,7 +105,7 @@ extern struct cachefs_netfs afs_cache_netfs;
105 * mntpt.c 105 * mntpt.c
106 */ 106 */
107extern struct inode_operations afs_mntpt_inode_operations; 107extern struct inode_operations afs_mntpt_inode_operations;
108extern struct file_operations afs_mntpt_file_operations; 108extern const struct file_operations afs_mntpt_file_operations;
109extern struct afs_timer afs_mntpt_expiry_timer; 109extern struct afs_timer afs_mntpt_expiry_timer;
110extern struct afs_timer_ops afs_mntpt_expiry_timer_ops; 110extern struct afs_timer_ops afs_mntpt_expiry_timer_ops;
111extern unsigned long afs_mntpt_expiry_timeout; 111extern unsigned long afs_mntpt_expiry_timeout;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 31ee06590de5..4e6eeb59b83c 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -32,7 +32,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
32static int afs_mntpt_open(struct inode *inode, struct file *file); 32static int afs_mntpt_open(struct inode *inode, struct file *file);
33static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); 33static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
34 34
35struct file_operations afs_mntpt_file_operations = { 35const struct file_operations afs_mntpt_file_operations = {
36 .open = afs_mntpt_open, 36 .open = afs_mntpt_open,
37}; 37};
38 38
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 9c81b8f7eef0..101d21b6c037 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -37,7 +37,7 @@ static struct seq_operations afs_proc_cells_ops = {
37 .show = afs_proc_cells_show, 37 .show = afs_proc_cells_show,
38}; 38};
39 39
40static struct file_operations afs_proc_cells_fops = { 40static const struct file_operations afs_proc_cells_fops = {
41 .open = afs_proc_cells_open, 41 .open = afs_proc_cells_open,
42 .read = seq_read, 42 .read = seq_read,
43 .write = afs_proc_cells_write, 43 .write = afs_proc_cells_write,
@@ -53,7 +53,7 @@ static ssize_t afs_proc_rootcell_write(struct file *file,
53 const char __user *buf, 53 const char __user *buf,
54 size_t size, loff_t *_pos); 54 size_t size, loff_t *_pos);
55 55
56static struct file_operations afs_proc_rootcell_fops = { 56static const struct file_operations afs_proc_rootcell_fops = {
57 .open = afs_proc_rootcell_open, 57 .open = afs_proc_rootcell_open,
58 .read = afs_proc_rootcell_read, 58 .read = afs_proc_rootcell_read,
59 .write = afs_proc_rootcell_write, 59 .write = afs_proc_rootcell_write,
@@ -77,7 +77,7 @@ static struct seq_operations afs_proc_cell_volumes_ops = {
77 .show = afs_proc_cell_volumes_show, 77 .show = afs_proc_cell_volumes_show,
78}; 78};
79 79
80static struct file_operations afs_proc_cell_volumes_fops = { 80static const struct file_operations afs_proc_cell_volumes_fops = {
81 .open = afs_proc_cell_volumes_open, 81 .open = afs_proc_cell_volumes_open,
82 .read = seq_read, 82 .read = seq_read,
83 .llseek = seq_lseek, 83 .llseek = seq_lseek,
@@ -101,7 +101,7 @@ static struct seq_operations afs_proc_cell_vlservers_ops = {
101 .show = afs_proc_cell_vlservers_show, 101 .show = afs_proc_cell_vlservers_show,
102}; 102};
103 103
104static struct file_operations afs_proc_cell_vlservers_fops = { 104static const struct file_operations afs_proc_cell_vlservers_fops = {
105 .open = afs_proc_cell_vlservers_open, 105 .open = afs_proc_cell_vlservers_open,
106 .read = seq_read, 106 .read = seq_read,
107 .llseek = seq_lseek, 107 .llseek = seq_lseek,
@@ -124,7 +124,7 @@ static struct seq_operations afs_proc_cell_servers_ops = {
124 .show = afs_proc_cell_servers_show, 124 .show = afs_proc_cell_servers_show,
125}; 125};
126 126
127static struct file_operations afs_proc_cell_servers_fops = { 127static const struct file_operations afs_proc_cell_servers_fops = {
128 .open = afs_proc_cell_servers_open, 128 .open = afs_proc_cell_servers_open,
129 .read = seq_read, 129 .read = seq_read,
130 .llseek = seq_lseek, 130 .llseek = seq_lseek,
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 990c28da5aec..a62327f1bdff 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -146,7 +146,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *,struct autofs_sb_info
146 146
147extern struct inode_operations autofs_root_inode_operations; 147extern struct inode_operations autofs_root_inode_operations;
148extern struct inode_operations autofs_symlink_inode_operations; 148extern struct inode_operations autofs_symlink_inode_operations;
149extern struct file_operations autofs_root_operations; 149extern const struct file_operations autofs_root_operations;
150 150
151/* Initializing function */ 151/* Initializing function */
152 152
diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c
index 5ccfcf26310d..3fded389d06b 100644
--- a/fs/autofs/dirhash.c
+++ b/fs/autofs/dirhash.c
@@ -92,7 +92,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
92 ; 92 ;
93 dput(dentry); 93 dput(dentry);
94 94
95 if ( may_umount(mnt) == 0 ) { 95 if ( may_umount(mnt) ) {
96 mntput(mnt); 96 mntput(mnt);
97 DPRINTK(("autofs: signaling expire on %s\n", ent->name)); 97 DPRINTK(("autofs: signaling expire on %s\n", ent->name));
98 return ent; /* Expirable! */ 98 return ent; /* Expirable! */
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 870e2cf33016..9cac08d6a873 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -26,7 +26,7 @@ static int autofs_root_rmdir(struct inode *,struct dentry *);
26static int autofs_root_mkdir(struct inode *,struct dentry *,int); 26static int autofs_root_mkdir(struct inode *,struct dentry *,int);
27static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); 27static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
28 28
29struct file_operations autofs_root_operations = { 29const struct file_operations autofs_root_operations = {
30 .read = generic_read_dir, 30 .read = generic_read_dir,
31 .readdir = autofs_root_readdir, 31 .readdir = autofs_root_readdir,
32 .ioctl = autofs_root_ioctl, 32 .ioctl = autofs_root_ioctl,
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index f54c5b21f876..57c4903614e5 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -3,6 +3,7 @@
3 * linux/fs/autofs/autofs_i.h 3 * linux/fs/autofs/autofs_i.h
4 * 4 *
5 * Copyright 1997-1998 Transmeta Corporation - All Rights Reserved 5 * Copyright 1997-1998 Transmeta Corporation - All Rights Reserved
6 * Copyright 2005-2006 Ian Kent <raven@themaw.net>
6 * 7 *
7 * This file is part of the Linux kernel and is made available under 8 * This file is part of the Linux kernel and is made available under
8 * the terms of the GNU General Public License, version 2, or at your 9 * the terms of the GNU General Public License, version 2, or at your
@@ -41,14 +42,6 @@
41 42
42#define AUTOFS_SUPER_MAGIC 0x0187 43#define AUTOFS_SUPER_MAGIC 0x0187
43 44
44/*
45 * If the daemon returns a negative response (AUTOFS_IOC_FAIL) then the
46 * kernel will keep the negative response cached for up to the time given
47 * here, although the time can be shorter if the kernel throws the dcache
48 * entry away. This probably should be settable from user space.
49 */
50#define AUTOFS_NEGATIVE_TIMEOUT (60*HZ) /* 1 minute */
51
52/* Unified info structure. This is pointed to by both the dentry and 45/* Unified info structure. This is pointed to by both the dentry and
53 inode structures. Each file in the filesystem has an instance of this 46 inode structures. Each file in the filesystem has an instance of this
54 structure. It holds a reference to the dentry, so dentries are never 47 structure. It holds a reference to the dentry, so dentries are never
@@ -63,6 +56,7 @@ struct autofs_info {
63 56
64 struct autofs_sb_info *sbi; 57 struct autofs_sb_info *sbi;
65 unsigned long last_used; 58 unsigned long last_used;
59 atomic_t count;
66 60
67 mode_t mode; 61 mode_t mode;
68 size_t size; 62 size_t size;
@@ -83,23 +77,37 @@ struct autofs_wait_queue {
83 int hash; 77 int hash;
84 int len; 78 int len;
85 char *name; 79 char *name;
80 u32 dev;
81 u64 ino;
82 uid_t uid;
83 gid_t gid;
84 pid_t pid;
85 pid_t tgid;
86 /* This is for status reporting upon return */ 86 /* This is for status reporting upon return */
87 int status; 87 int status;
88 atomic_t notified; 88 atomic_t notify;
89 atomic_t wait_ctr; 89 atomic_t wait_ctr;
90}; 90};
91 91
92#define AUTOFS_SBI_MAGIC 0x6d4a556d 92#define AUTOFS_SBI_MAGIC 0x6d4a556d
93 93
94#define AUTOFS_TYPE_INDIRECT 0x0001
95#define AUTOFS_TYPE_DIRECT 0x0002
96#define AUTOFS_TYPE_OFFSET 0x0004
97
94struct autofs_sb_info { 98struct autofs_sb_info {
95 u32 magic; 99 u32 magic;
96 struct dentry *root; 100 struct dentry *root;
101 int pipefd;
97 struct file *pipe; 102 struct file *pipe;
98 pid_t oz_pgrp; 103 pid_t oz_pgrp;
99 int catatonic; 104 int catatonic;
100 int version; 105 int version;
101 int sub_version; 106 int sub_version;
107 int min_proto;
108 int max_proto;
102 unsigned long exp_timeout; 109 unsigned long exp_timeout;
110 unsigned int type;
103 int reghost_enabled; 111 int reghost_enabled;
104 int needs_reghost; 112 int needs_reghost;
105 struct super_block *sb; 113 struct super_block *sb;
@@ -166,8 +174,10 @@ int autofs4_expire_multi(struct super_block *, struct vfsmount *,
166extern struct inode_operations autofs4_symlink_inode_operations; 174extern struct inode_operations autofs4_symlink_inode_operations;
167extern struct inode_operations autofs4_dir_inode_operations; 175extern struct inode_operations autofs4_dir_inode_operations;
168extern struct inode_operations autofs4_root_inode_operations; 176extern struct inode_operations autofs4_root_inode_operations;
169extern struct file_operations autofs4_dir_operations; 177extern struct inode_operations autofs4_indirect_root_inode_operations;
170extern struct file_operations autofs4_root_operations; 178extern struct inode_operations autofs4_direct_root_inode_operations;
179extern const struct file_operations autofs4_dir_operations;
180extern const struct file_operations autofs4_root_operations;
171 181
172/* Initializing function */ 182/* Initializing function */
173 183
@@ -176,13 +186,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info
176 186
177/* Queue management functions */ 187/* Queue management functions */
178 188
179enum autofs_notify
180{
181 NFY_NONE,
182 NFY_MOUNT,
183 NFY_EXPIRE
184};
185
186int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify); 189int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
187int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int); 190int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int);
188void autofs4_catatonic_mode(struct autofs_sb_info *); 191void autofs4_catatonic_mode(struct autofs_sb_info *);
@@ -200,12 +203,22 @@ static inline int autofs4_follow_mount(struct vfsmount **mnt, struct dentry **de
200 return res; 203 return res;
201} 204}
202 205
206static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi)
207{
208 return new_encode_dev(sbi->sb->s_dev);
209}
210
211static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi)
212{
213 return sbi->sb->s_root->d_inode->i_ino;
214}
215
203static inline int simple_positive(struct dentry *dentry) 216static inline int simple_positive(struct dentry *dentry)
204{ 217{
205 return dentry->d_inode && !d_unhashed(dentry); 218 return dentry->d_inode && !d_unhashed(dentry);
206} 219}
207 220
208static inline int simple_empty_nolock(struct dentry *dentry) 221static inline int __simple_empty(struct dentry *dentry)
209{ 222{
210 struct dentry *child; 223 struct dentry *child;
211 int ret = 0; 224 int ret = 0;
@@ -217,3 +230,6 @@ static inline int simple_empty_nolock(struct dentry *dentry)
217out: 230out:
218 return ret; 231 return ret;
219} 232}
233
234void autofs4_dentry_release(struct dentry *);
235
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index dc39589df165..b8ce02607d66 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved 5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
6 * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> 6 * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
7 * Copyright 2001-2003 Ian Kent <raven@themaw.net> 7 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
8 * 8 *
9 * This file is part of the Linux kernel and is made available under 9 * This file is part of the Linux kernel and is made available under
10 * the terms of the GNU General Public License, version 2, or at your 10 * the terms of the GNU General Public License, version 2, or at your
@@ -16,7 +16,7 @@
16 16
17static unsigned long now; 17static unsigned long now;
18 18
19/* Check if a dentry can be expired return 1 if it can else return 0 */ 19/* Check if a dentry can be expired */
20static inline int autofs4_can_expire(struct dentry *dentry, 20static inline int autofs4_can_expire(struct dentry *dentry,
21 unsigned long timeout, int do_now) 21 unsigned long timeout, int do_now)
22{ 22{
@@ -41,14 +41,14 @@ static inline int autofs4_can_expire(struct dentry *dentry,
41 attempts if expire fails the first time */ 41 attempts if expire fails the first time */
42 ino->last_used = now; 42 ino->last_used = now;
43 } 43 }
44
45 return 1; 44 return 1;
46} 45}
47 46
48/* Check a mount point for busyness return 1 if not busy, otherwise */ 47/* Check a mount point for busyness */
49static int autofs4_check_mount(struct vfsmount *mnt, struct dentry *dentry) 48static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
50{ 49{
51 int status = 0; 50 struct dentry *top = dentry;
51 int status = 1;
52 52
53 DPRINTK("dentry %p %.*s", 53 DPRINTK("dentry %p %.*s",
54 dentry, (int)dentry->d_name.len, dentry->d_name.name); 54 dentry, (int)dentry->d_name.len, dentry->d_name.name);
@@ -63,9 +63,14 @@ static int autofs4_check_mount(struct vfsmount *mnt, struct dentry *dentry)
63 if (is_autofs4_dentry(dentry)) 63 if (is_autofs4_dentry(dentry))
64 goto done; 64 goto done;
65 65
66 /* The big question */ 66 /* Update the expiry counter if fs is busy */
67 if (may_umount_tree(mnt) == 0) 67 if (!may_umount_tree(mnt)) {
68 status = 1; 68 struct autofs_info *ino = autofs4_dentry_ino(top);
69 ino->last_used = jiffies;
70 goto done;
71 }
72
73 status = 0;
69done: 74done:
70 DPRINTK("returning = %d", status); 75 DPRINTK("returning = %d", status);
71 mntput(mnt); 76 mntput(mnt);
@@ -73,78 +78,124 @@ done:
73 return status; 78 return status;
74} 79}
75 80
81/*
82 * Calculate next entry in top down tree traversal.
83 * From next_mnt in namespace.c - elegant.
84 */
85static struct dentry *next_dentry(struct dentry *p, struct dentry *root)
86{
87 struct list_head *next = p->d_subdirs.next;
88
89 if (next == &p->d_subdirs) {
90 while (1) {
91 if (p == root)
92 return NULL;
93 next = p->d_u.d_child.next;
94 if (next != &p->d_parent->d_subdirs)
95 break;
96 p = p->d_parent;
97 }
98 }
99 return list_entry(next, struct dentry, d_u.d_child);
100}
101
102/*
103 * Check a direct mount point for busyness.
104 * Direct mounts have similar expiry semantics to tree mounts.
105 * The tree is not busy iff no mountpoints are busy and there are no
106 * autofs submounts.
107 */
108static int autofs4_direct_busy(struct vfsmount *mnt,
109 struct dentry *top,
110 unsigned long timeout,
111 int do_now)
112{
113 DPRINTK("top %p %.*s",
114 top, (int) top->d_name.len, top->d_name.name);
115
116 /* If it's busy update the expiry counters */
117 if (!may_umount_tree(mnt)) {
118 struct autofs_info *ino = autofs4_dentry_ino(top);
119 if (ino)
120 ino->last_used = jiffies;
121 return 1;
122 }
123
124 /* Timeout of a direct mount is determined by its top dentry */
125 if (!autofs4_can_expire(top, timeout, do_now))
126 return 1;
127
128 return 0;
129}
130
76/* Check a directory tree of mount points for busyness 131/* Check a directory tree of mount points for busyness
77 * The tree is not busy iff no mountpoints are busy 132 * The tree is not busy iff no mountpoints are busy
78 * Return 1 if the tree is busy or 0 otherwise
79 */ 133 */
80static int autofs4_check_tree(struct vfsmount *mnt, 134static int autofs4_tree_busy(struct vfsmount *mnt,
81 struct dentry *top, 135 struct dentry *top,
82 unsigned long timeout, 136 unsigned long timeout,
83 int do_now) 137 int do_now)
84{ 138{
85 struct dentry *this_parent = top; 139 struct autofs_info *top_ino = autofs4_dentry_ino(top);
86 struct list_head *next; 140 struct dentry *p;
87 141
88 DPRINTK("parent %p %.*s", 142 DPRINTK("top %p %.*s",
89 top, (int)top->d_name.len, top->d_name.name); 143 top, (int)top->d_name.len, top->d_name.name);
90 144
91 /* Negative dentry - give up */ 145 /* Negative dentry - give up */
92 if (!simple_positive(top)) 146 if (!simple_positive(top))
93 return 0; 147 return 1;
94
95 /* Timeout of a tree mount is determined by its top dentry */
96 if (!autofs4_can_expire(top, timeout, do_now))
97 return 0;
98
99 /* Is someone visiting anywhere in the tree ? */
100 if (may_umount_tree(mnt))
101 return 0;
102 148
103 spin_lock(&dcache_lock); 149 spin_lock(&dcache_lock);
104repeat: 150 for (p = top; p; p = next_dentry(p, top)) {
105 next = this_parent->d_subdirs.next;
106resume:
107 while (next != &this_parent->d_subdirs) {
108 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
109
110 /* Negative dentry - give up */ 151 /* Negative dentry - give up */
111 if (!simple_positive(dentry)) { 152 if (!simple_positive(p))
112 next = next->next;
113 continue; 153 continue;
114 }
115 154
116 DPRINTK("dentry %p %.*s", 155 DPRINTK("dentry %p %.*s",
117 dentry, (int)dentry->d_name.len, dentry->d_name.name); 156 p, (int) p->d_name.len, p->d_name.name);
118
119 if (!simple_empty_nolock(dentry)) {
120 this_parent = dentry;
121 goto repeat;
122 }
123 157
124 dentry = dget(dentry); 158 p = dget(p);
125 spin_unlock(&dcache_lock); 159 spin_unlock(&dcache_lock);
126 160
127 if (d_mountpoint(dentry)) { 161 /*
128 /* First busy => tree busy */ 162 * Is someone visiting anywhere in the subtree ?
129 if (!autofs4_check_mount(mnt, dentry)) { 163 * If there's no mount we need to check the usage
130 dput(dentry); 164 * count for the autofs dentry.
131 return 0; 165 * If the fs is busy update the expiry counter.
166 */
167 if (d_mountpoint(p)) {
168 if (autofs4_mount_busy(mnt, p)) {
169 top_ino->last_used = jiffies;
170 dput(p);
171 return 1;
172 }
173 } else {
174 struct autofs_info *ino = autofs4_dentry_ino(p);
175 unsigned int ino_count = atomic_read(&ino->count);
176
177 /* allow for dget above and top is already dgot */
178 if (p == top)
179 ino_count += 2;
180 else
181 ino_count++;
182
183 if (atomic_read(&p->d_count) > ino_count) {
184 top_ino->last_used = jiffies;
185 dput(p);
186 return 1;
132 } 187 }
133 } 188 }
134 189 dput(p);
135 dput(dentry);
136 spin_lock(&dcache_lock); 190 spin_lock(&dcache_lock);
137 next = next->next;
138 }
139
140 if (this_parent != top) {
141 next = this_parent->d_u.d_child.next;
142 this_parent = this_parent->d_parent;
143 goto resume;
144 } 191 }
145 spin_unlock(&dcache_lock); 192 spin_unlock(&dcache_lock);
146 193
147 return 1; 194 /* Timeout of a tree mount is ultimately determined by its top dentry */
195 if (!autofs4_can_expire(top, timeout, do_now))
196 return 1;
197
198 return 0;
148} 199}
149 200
150static struct dentry *autofs4_check_leaves(struct vfsmount *mnt, 201static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
@@ -152,58 +203,68 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt,
152 unsigned long timeout, 203 unsigned long timeout,
153 int do_now) 204 int do_now)
154{ 205{
155 struct dentry *this_parent = parent; 206 struct dentry *p;
156 struct list_head *next;
157 207
158 DPRINTK("parent %p %.*s", 208 DPRINTK("parent %p %.*s",
159 parent, (int)parent->d_name.len, parent->d_name.name); 209 parent, (int)parent->d_name.len, parent->d_name.name);
160 210
161 spin_lock(&dcache_lock); 211 spin_lock(&dcache_lock);
162repeat: 212 for (p = parent; p; p = next_dentry(p, parent)) {
163 next = this_parent->d_subdirs.next;
164resume:
165 while (next != &this_parent->d_subdirs) {
166 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
167
168 /* Negative dentry - give up */ 213 /* Negative dentry - give up */
169 if (!simple_positive(dentry)) { 214 if (!simple_positive(p))
170 next = next->next;
171 continue; 215 continue;
172 }
173 216
174 DPRINTK("dentry %p %.*s", 217 DPRINTK("dentry %p %.*s",
175 dentry, (int)dentry->d_name.len, dentry->d_name.name); 218 p, (int) p->d_name.len, p->d_name.name);
176
177 if (!list_empty(&dentry->d_subdirs)) {
178 this_parent = dentry;
179 goto repeat;
180 }
181 219
182 dentry = dget(dentry); 220 p = dget(p);
183 spin_unlock(&dcache_lock); 221 spin_unlock(&dcache_lock);
184 222
185 if (d_mountpoint(dentry)) { 223 if (d_mountpoint(p)) {
186 /* Can we expire this guy */
187 if (!autofs4_can_expire(dentry, timeout, do_now))
188 goto cont;
189
190 /* Can we umount this guy */ 224 /* Can we umount this guy */
191 if (autofs4_check_mount(mnt, dentry)) 225 if (autofs4_mount_busy(mnt, p))
192 return dentry; 226 goto cont;
193 227
228 /* Can we expire this guy */
229 if (autofs4_can_expire(p, timeout, do_now))
230 return p;
194 } 231 }
195cont: 232cont:
196 dput(dentry); 233 dput(p);
197 spin_lock(&dcache_lock); 234 spin_lock(&dcache_lock);
198 next = next->next;
199 } 235 }
236 spin_unlock(&dcache_lock);
237 return NULL;
238}
239
240/* Check if we can expire a direct mount (possibly a tree) */
241static struct dentry *autofs4_expire_direct(struct super_block *sb,
242 struct vfsmount *mnt,
243 struct autofs_sb_info *sbi,
244 int how)
245{
246 unsigned long timeout;
247 struct dentry *root = dget(sb->s_root);
248 int do_now = how & AUTOFS_EXP_IMMEDIATE;
249
250 if (!sbi->exp_timeout || !root)
251 return NULL;
252
253 now = jiffies;
254 timeout = sbi->exp_timeout;
255
256 /* Lock the tree as we must expire as a whole */
257 spin_lock(&sbi->fs_lock);
258 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
259 struct autofs_info *ino = autofs4_dentry_ino(root);
200 260
201 if (this_parent != parent) { 261 /* Set this flag early to catch sys_chdir and the like */
202 next = this_parent->d_u.d_child.next; 262 ino->flags |= AUTOFS_INF_EXPIRING;
203 this_parent = this_parent->d_parent; 263 spin_unlock(&sbi->fs_lock);
204 goto resume; 264 return root;
205 } 265 }
206 spin_unlock(&dcache_lock); 266 spin_unlock(&sbi->fs_lock);
267 dput(root);
207 268
208 return NULL; 269 return NULL;
209} 270}
@@ -214,10 +275,10 @@ cont:
214 * - it is unused by any user process 275 * - it is unused by any user process
215 * - it has been unused for exp_timeout time 276 * - it has been unused for exp_timeout time
216 */ 277 */
217static struct dentry *autofs4_expire(struct super_block *sb, 278static struct dentry *autofs4_expire_indirect(struct super_block *sb,
218 struct vfsmount *mnt, 279 struct vfsmount *mnt,
219 struct autofs_sb_info *sbi, 280 struct autofs_sb_info *sbi,
220 int how) 281 int how)
221{ 282{
222 unsigned long timeout; 283 unsigned long timeout;
223 struct dentry *root = sb->s_root; 284 struct dentry *root = sb->s_root;
@@ -241,7 +302,7 @@ static struct dentry *autofs4_expire(struct super_block *sb,
241 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); 302 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
242 303
243 /* Negative dentry - give up */ 304 /* Negative dentry - give up */
244 if ( !simple_positive(dentry) ) { 305 if (!simple_positive(dentry)) {
245 next = next->next; 306 next = next->next;
246 continue; 307 continue;
247 } 308 }
@@ -249,31 +310,36 @@ static struct dentry *autofs4_expire(struct super_block *sb,
249 dentry = dget(dentry); 310 dentry = dget(dentry);
250 spin_unlock(&dcache_lock); 311 spin_unlock(&dcache_lock);
251 312
252 /* Case 1: indirect mount or top level direct mount */ 313 /*
314 * Case 1: (i) indirect mount or top level pseudo direct mount
315 * (autofs-4.1).
316 * (ii) indirect mount with offset mount, check the "/"
317 * offset (autofs-5.0+).
318 */
253 if (d_mountpoint(dentry)) { 319 if (d_mountpoint(dentry)) {
254 DPRINTK("checking mountpoint %p %.*s", 320 DPRINTK("checking mountpoint %p %.*s",
255 dentry, (int)dentry->d_name.len, dentry->d_name.name); 321 dentry, (int)dentry->d_name.len, dentry->d_name.name);
256 322
257 /* Can we expire this guy */ 323 /* Can we umount this guy */
258 if (!autofs4_can_expire(dentry, timeout, do_now)) 324 if (autofs4_mount_busy(mnt, dentry))
259 goto next; 325 goto next;
260 326
261 /* Can we umount this guy */ 327 /* Can we expire this guy */
262 if (autofs4_check_mount(mnt, dentry)) { 328 if (autofs4_can_expire(dentry, timeout, do_now)) {
263 expired = dentry; 329 expired = dentry;
264 break; 330 break;
265 } 331 }
266 goto next; 332 goto next;
267 } 333 }
268 334
269 if ( simple_empty(dentry) ) 335 if (simple_empty(dentry))
270 goto next; 336 goto next;
271 337
272 /* Case 2: tree mount, expire iff entire tree is not busy */ 338 /* Case 2: tree mount, expire iff entire tree is not busy */
273 if (!exp_leaves) { 339 if (!exp_leaves) {
274 /* Lock the tree as we must expire as a whole */ 340 /* Lock the tree as we must expire as a whole */
275 spin_lock(&sbi->fs_lock); 341 spin_lock(&sbi->fs_lock);
276 if (autofs4_check_tree(mnt, dentry, timeout, do_now)) { 342 if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
277 struct autofs_info *inf = autofs4_dentry_ino(dentry); 343 struct autofs_info *inf = autofs4_dentry_ino(dentry);
278 344
279 /* Set this flag early to catch sys_chdir and the like */ 345 /* Set this flag early to catch sys_chdir and the like */
@@ -283,7 +349,10 @@ static struct dentry *autofs4_expire(struct super_block *sb,
283 break; 349 break;
284 } 350 }
285 spin_unlock(&sbi->fs_lock); 351 spin_unlock(&sbi->fs_lock);
286 /* Case 3: direct mount, expire individual leaves */ 352 /*
353 * Case 3: pseudo direct mount, expire individual leaves
354 * (autofs-4.1).
355 */
287 } else { 356 } else {
288 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now); 357 expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
289 if (expired) { 358 if (expired) {
@@ -297,7 +366,7 @@ next:
297 next = next->next; 366 next = next->next;
298 } 367 }
299 368
300 if ( expired ) { 369 if (expired) {
301 DPRINTK("returning %p %.*s", 370 DPRINTK("returning %p %.*s",
302 expired, (int)expired->d_name.len, expired->d_name.name); 371 expired, (int)expired->d_name.len, expired->d_name.name);
303 spin_lock(&dcache_lock); 372 spin_lock(&dcache_lock);
@@ -325,7 +394,7 @@ int autofs4_expire_run(struct super_block *sb,
325 pkt.hdr.proto_version = sbi->version; 394 pkt.hdr.proto_version = sbi->version;
326 pkt.hdr.type = autofs_ptype_expire; 395 pkt.hdr.type = autofs_ptype_expire;
327 396
328 if ((dentry = autofs4_expire(sb, mnt, sbi, 0)) == NULL) 397 if ((dentry = autofs4_expire_indirect(sb, mnt, sbi, 0)) == NULL)
329 return -EAGAIN; 398 return -EAGAIN;
330 399
331 pkt.len = dentry->d_name.len; 400 pkt.len = dentry->d_name.len;
@@ -351,17 +420,22 @@ int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt,
351 if (arg && get_user(do_now, arg)) 420 if (arg && get_user(do_now, arg))
352 return -EFAULT; 421 return -EFAULT;
353 422
354 if ((dentry = autofs4_expire(sb, mnt, sbi, do_now)) != NULL) { 423 if (sbi->type & AUTOFS_TYPE_DIRECT)
355 struct autofs_info *de_info = autofs4_dentry_ino(dentry); 424 dentry = autofs4_expire_direct(sb, mnt, sbi, do_now);
425 else
426 dentry = autofs4_expire_indirect(sb, mnt, sbi, do_now);
427
428 if (dentry) {
429 struct autofs_info *ino = autofs4_dentry_ino(dentry);
356 430
357 /* This is synchronous because it makes the daemon a 431 /* This is synchronous because it makes the daemon a
358 little easier */ 432 little easier */
359 de_info->flags |= AUTOFS_INF_EXPIRING; 433 ino->flags |= AUTOFS_INF_EXPIRING;
360 ret = autofs4_wait(sbi, dentry, NFY_EXPIRE); 434 ret = autofs4_wait(sbi, dentry, NFY_EXPIRE);
361 de_info->flags &= ~AUTOFS_INF_EXPIRING; 435 ino->flags &= ~AUTOFS_INF_EXPIRING;
362 dput(dentry); 436 dput(dentry);
363 } 437 }
364 438
365 return ret; 439 return ret;
366} 440}
367 441
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 1ad98d48e550..fde78b110ddd 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -3,6 +3,7 @@
3 * linux/fs/autofs/inode.c 3 * linux/fs/autofs/inode.c
4 * 4 *
5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved 5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
6 * Copyright 2005-2006 Ian Kent <raven@themaw.net>
6 * 7 *
7 * This file is part of the Linux kernel and is made available under 8 * This file is part of the Linux kernel and is made available under
8 * the terms of the GNU General Public License, version 2, or at your 9 * the terms of the GNU General Public License, version 2, or at your
@@ -13,6 +14,7 @@
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/file.h> 16#include <linux/file.h>
17#include <linux/seq_file.h>
16#include <linux/pagemap.h> 18#include <linux/pagemap.h>
17#include <linux/parser.h> 19#include <linux/parser.h>
18#include <linux/bitops.h> 20#include <linux/bitops.h>
@@ -46,6 +48,7 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
46 ino->size = 0; 48 ino->size = 0;
47 49
48 ino->last_used = jiffies; 50 ino->last_used = jiffies;
51 atomic_set(&ino->count, 0);
49 52
50 ino->sbi = sbi; 53 ino->sbi = sbi;
51 54
@@ -64,10 +67,19 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
64 67
65void autofs4_free_ino(struct autofs_info *ino) 68void autofs4_free_ino(struct autofs_info *ino)
66{ 69{
70 struct autofs_info *p_ino;
71
67 if (ino->dentry) { 72 if (ino->dentry) {
68 ino->dentry->d_fsdata = NULL; 73 ino->dentry->d_fsdata = NULL;
69 if (ino->dentry->d_inode) 74 if (ino->dentry->d_inode) {
75 struct dentry *parent = ino->dentry->d_parent;
76 if (atomic_dec_and_test(&ino->count)) {
77 p_ino = autofs4_dentry_ino(parent);
78 if (p_ino && parent != ino->dentry)
79 atomic_dec(&p_ino->count);
80 }
70 dput(ino->dentry); 81 dput(ino->dentry);
82 }
71 ino->dentry = NULL; 83 ino->dentry = NULL;
72 } 84 }
73 if (ino->free) 85 if (ino->free)
@@ -145,20 +157,44 @@ static void autofs4_put_super(struct super_block *sb)
145 autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */ 157 autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */
146 158
147 /* Clean up and release dangling references */ 159 /* Clean up and release dangling references */
148 if (sbi) 160 autofs4_force_release(sbi);
149 autofs4_force_release(sbi);
150 161
151 kfree(sbi); 162 kfree(sbi);
152 163
153 DPRINTK("shutting down"); 164 DPRINTK("shutting down");
154} 165}
155 166
167static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
168{
169 struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb);
170
171 if (!sbi)
172 return 0;
173
174 seq_printf(m, ",fd=%d", sbi->pipefd);
175 seq_printf(m, ",pgrp=%d", sbi->oz_pgrp);
176 seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ);
177 seq_printf(m, ",minproto=%d", sbi->min_proto);
178 seq_printf(m, ",maxproto=%d", sbi->max_proto);
179
180 if (sbi->type & AUTOFS_TYPE_OFFSET)
181 seq_printf(m, ",offset");
182 else if (sbi->type & AUTOFS_TYPE_DIRECT)
183 seq_printf(m, ",direct");
184 else
185 seq_printf(m, ",indirect");
186
187 return 0;
188}
189
156static struct super_operations autofs4_sops = { 190static struct super_operations autofs4_sops = {
157 .put_super = autofs4_put_super, 191 .put_super = autofs4_put_super,
158 .statfs = simple_statfs, 192 .statfs = simple_statfs,
193 .show_options = autofs4_show_options,
159}; 194};
160 195
161enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto}; 196enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
197 Opt_indirect, Opt_direct, Opt_offset};
162 198
163static match_table_t tokens = { 199static match_table_t tokens = {
164 {Opt_fd, "fd=%u"}, 200 {Opt_fd, "fd=%u"},
@@ -167,11 +203,15 @@ static match_table_t tokens = {
167 {Opt_pgrp, "pgrp=%u"}, 203 {Opt_pgrp, "pgrp=%u"},
168 {Opt_minproto, "minproto=%u"}, 204 {Opt_minproto, "minproto=%u"},
169 {Opt_maxproto, "maxproto=%u"}, 205 {Opt_maxproto, "maxproto=%u"},
206 {Opt_indirect, "indirect"},
207 {Opt_direct, "direct"},
208 {Opt_offset, "offset"},
170 {Opt_err, NULL} 209 {Opt_err, NULL}
171}; 210};
172 211
173static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, 212static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid,
174 pid_t *pgrp, int *minproto, int *maxproto) 213 pid_t *pgrp, unsigned int *type,
214 int *minproto, int *maxproto)
175{ 215{
176 char *p; 216 char *p;
177 substring_t args[MAX_OPT_ARGS]; 217 substring_t args[MAX_OPT_ARGS];
@@ -225,6 +265,15 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid,
225 return 1; 265 return 1;
226 *maxproto = option; 266 *maxproto = option;
227 break; 267 break;
268 case Opt_indirect:
269 *type = AUTOFS_TYPE_INDIRECT;
270 break;
271 case Opt_direct:
272 *type = AUTOFS_TYPE_DIRECT;
273 break;
274 case Opt_offset:
275 *type = AUTOFS_TYPE_DIRECT | AUTOFS_TYPE_OFFSET;
276 break;
228 default: 277 default:
229 return 1; 278 return 1;
230 } 279 }
@@ -243,6 +292,10 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
243 return ino; 292 return ino;
244} 293}
245 294
295static struct dentry_operations autofs4_sb_dentry_operations = {
296 .d_release = autofs4_dentry_release,
297};
298
246int autofs4_fill_super(struct super_block *s, void *data, int silent) 299int autofs4_fill_super(struct super_block *s, void *data, int silent)
247{ 300{
248 struct inode * root_inode; 301 struct inode * root_inode;
@@ -251,7 +304,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
251 int pipefd; 304 int pipefd;
252 struct autofs_sb_info *sbi; 305 struct autofs_sb_info *sbi;
253 struct autofs_info *ino; 306 struct autofs_info *ino;
254 int minproto, maxproto;
255 307
256 sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL); 308 sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL);
257 if ( !sbi ) 309 if ( !sbi )
@@ -263,12 +315,16 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
263 s->s_fs_info = sbi; 315 s->s_fs_info = sbi;
264 sbi->magic = AUTOFS_SBI_MAGIC; 316 sbi->magic = AUTOFS_SBI_MAGIC;
265 sbi->root = NULL; 317 sbi->root = NULL;
318 sbi->pipefd = -1;
266 sbi->catatonic = 0; 319 sbi->catatonic = 0;
267 sbi->exp_timeout = 0; 320 sbi->exp_timeout = 0;
268 sbi->oz_pgrp = process_group(current); 321 sbi->oz_pgrp = process_group(current);
269 sbi->sb = s; 322 sbi->sb = s;
270 sbi->version = 0; 323 sbi->version = 0;
271 sbi->sub_version = 0; 324 sbi->sub_version = 0;
325 sbi->type = 0;
326 sbi->min_proto = 0;
327 sbi->max_proto = 0;
272 mutex_init(&sbi->wq_mutex); 328 mutex_init(&sbi->wq_mutex);
273 spin_lock_init(&sbi->fs_lock); 329 spin_lock_init(&sbi->fs_lock);
274 sbi->queues = NULL; 330 sbi->queues = NULL;
@@ -285,38 +341,46 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
285 if (!ino) 341 if (!ino)
286 goto fail_free; 342 goto fail_free;
287 root_inode = autofs4_get_inode(s, ino); 343 root_inode = autofs4_get_inode(s, ino);
288 kfree(ino);
289 if (!root_inode) 344 if (!root_inode)
290 goto fail_free; 345 goto fail_ino;
291 346
292 root_inode->i_op = &autofs4_root_inode_operations;
293 root_inode->i_fop = &autofs4_root_operations;
294 root = d_alloc_root(root_inode); 347 root = d_alloc_root(root_inode);
295 pipe = NULL;
296
297 if (!root) 348 if (!root)
298 goto fail_iput; 349 goto fail_iput;
350 pipe = NULL;
351
352 root->d_op = &autofs4_sb_dentry_operations;
353 root->d_fsdata = ino;
299 354
300 /* Can this call block? */ 355 /* Can this call block? */
301 if (parse_options(data, &pipefd, 356 if (parse_options(data, &pipefd,
302 &root_inode->i_uid, &root_inode->i_gid, 357 &root_inode->i_uid, &root_inode->i_gid,
303 &sbi->oz_pgrp, 358 &sbi->oz_pgrp, &sbi->type,
304 &minproto, &maxproto)) { 359 &sbi->min_proto, &sbi->max_proto)) {
305 printk("autofs: called with bogus options\n"); 360 printk("autofs: called with bogus options\n");
306 goto fail_dput; 361 goto fail_dput;
307 } 362 }
308 363
364 root_inode->i_fop = &autofs4_root_operations;
365 root_inode->i_op = sbi->type & AUTOFS_TYPE_DIRECT ?
366 &autofs4_direct_root_inode_operations :
367 &autofs4_indirect_root_inode_operations;
368
309 /* Couldn't this be tested earlier? */ 369 /* Couldn't this be tested earlier? */
310 if (maxproto < AUTOFS_MIN_PROTO_VERSION || 370 if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
311 minproto > AUTOFS_MAX_PROTO_VERSION) { 371 sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
312 printk("autofs: kernel does not match daemon version " 372 printk("autofs: kernel does not match daemon version "
313 "daemon (%d, %d) kernel (%d, %d)\n", 373 "daemon (%d, %d) kernel (%d, %d)\n",
314 minproto, maxproto, 374 sbi->min_proto, sbi->max_proto,
315 AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION); 375 AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
316 goto fail_dput; 376 goto fail_dput;
317 } 377 }
318 378
319 sbi->version = maxproto > AUTOFS_MAX_PROTO_VERSION ? AUTOFS_MAX_PROTO_VERSION : maxproto; 379 /* Establish highest kernel protocol version */
380 if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
381 sbi->version = AUTOFS_MAX_PROTO_VERSION;
382 else
383 sbi->version = sbi->max_proto;
320 sbi->sub_version = AUTOFS_PROTO_SUBVERSION; 384 sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
321 385
322 DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp); 386 DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp);
@@ -329,6 +393,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
329 if ( !pipe->f_op || !pipe->f_op->write ) 393 if ( !pipe->f_op || !pipe->f_op->write )
330 goto fail_fput; 394 goto fail_fput;
331 sbi->pipe = pipe; 395 sbi->pipe = pipe;
396 sbi->pipefd = pipefd;
332 397
333 /* 398 /*
334 * Take a reference to the root dentry so we get a chance to 399 * Take a reference to the root dentry so we get a chance to
@@ -356,6 +421,8 @@ fail_dput:
356fail_iput: 421fail_iput:
357 printk("autofs: get root dentry failed\n"); 422 printk("autofs: get root dentry failed\n");
358 iput(root_inode); 423 iput(root_inode);
424fail_ino:
425 kfree(ino);
359fail_free: 426fail_free:
360 kfree(sbi); 427 kfree(sbi);
361fail_unlock: 428fail_unlock:
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 62d8d4acb8bb..84e030c8ddd0 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved 5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
6 * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> 6 * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
7 * Copyright 2001-2003 Ian Kent <raven@themaw.net> 7 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
8 * 8 *
9 * This file is part of the Linux kernel and is made available under 9 * This file is part of the Linux kernel and is made available under
10 * the terms of the GNU General Public License, version 2, or at your 10 * the terms of the GNU General Public License, version 2, or at your
@@ -30,9 +30,9 @@ static int autofs4_dir_close(struct inode *inode, struct file *file);
30static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir); 30static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir);
31static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir); 31static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir);
32static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); 32static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
33static int autofs4_dcache_readdir(struct file *, void *, filldir_t); 33static void *autofs4_follow_link(struct dentry *, struct nameidata *);
34 34
35struct file_operations autofs4_root_operations = { 35const struct file_operations autofs4_root_operations = {
36 .open = dcache_dir_open, 36 .open = dcache_dir_open,
37 .release = dcache_dir_close, 37 .release = dcache_dir_close,
38 .read = generic_read_dir, 38 .read = generic_read_dir,
@@ -40,14 +40,14 @@ struct file_operations autofs4_root_operations = {
40 .ioctl = autofs4_root_ioctl, 40 .ioctl = autofs4_root_ioctl,
41}; 41};
42 42
43struct file_operations autofs4_dir_operations = { 43const struct file_operations autofs4_dir_operations = {
44 .open = autofs4_dir_open, 44 .open = autofs4_dir_open,
45 .release = autofs4_dir_close, 45 .release = autofs4_dir_close,
46 .read = generic_read_dir, 46 .read = generic_read_dir,
47 .readdir = autofs4_dir_readdir, 47 .readdir = autofs4_dir_readdir,
48}; 48};
49 49
50struct inode_operations autofs4_root_inode_operations = { 50struct inode_operations autofs4_indirect_root_inode_operations = {
51 .lookup = autofs4_lookup, 51 .lookup = autofs4_lookup,
52 .unlink = autofs4_dir_unlink, 52 .unlink = autofs4_dir_unlink,
53 .symlink = autofs4_dir_symlink, 53 .symlink = autofs4_dir_symlink,
@@ -55,6 +55,14 @@ struct inode_operations autofs4_root_inode_operations = {
55 .rmdir = autofs4_dir_rmdir, 55 .rmdir = autofs4_dir_rmdir,
56}; 56};
57 57
58struct inode_operations autofs4_direct_root_inode_operations = {
59 .lookup = autofs4_lookup,
60 .unlink = autofs4_dir_unlink,
61 .mkdir = autofs4_dir_mkdir,
62 .rmdir = autofs4_dir_rmdir,
63 .follow_link = autofs4_follow_link,
64};
65
58struct inode_operations autofs4_dir_inode_operations = { 66struct inode_operations autofs4_dir_inode_operations = {
59 .lookup = autofs4_lookup, 67 .lookup = autofs4_lookup,
60 .unlink = autofs4_dir_unlink, 68 .unlink = autofs4_dir_unlink,
@@ -82,87 +90,7 @@ static int autofs4_root_readdir(struct file *file, void *dirent,
82 90
83 DPRINTK("needs_reghost = %d", sbi->needs_reghost); 91 DPRINTK("needs_reghost = %d", sbi->needs_reghost);
84 92
85 return autofs4_dcache_readdir(file, dirent, filldir); 93 return dcache_readdir(file, dirent, filldir);
86}
87
88/* Update usage from here to top of tree, so that scan of
89 top-level directories will give a useful result */
90static void autofs4_update_usage(struct vfsmount *mnt, struct dentry *dentry)
91{
92 struct dentry *top = dentry->d_sb->s_root;
93
94 spin_lock(&dcache_lock);
95 for(; dentry != top; dentry = dentry->d_parent) {
96 struct autofs_info *ino = autofs4_dentry_ino(dentry);
97
98 if (ino) {
99 touch_atime(mnt, dentry);
100 ino->last_used = jiffies;
101 }
102 }
103 spin_unlock(&dcache_lock);
104}
105
106/*
107 * From 2.4 kernel readdir.c
108 */
109static int autofs4_dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
110{
111 int i;
112 struct dentry *dentry = filp->f_dentry;
113
114 i = filp->f_pos;
115 switch (i) {
116 case 0:
117 if (filldir(dirent, ".", 1, i, dentry->d_inode->i_ino, DT_DIR) < 0)
118 break;
119 i++;
120 filp->f_pos++;
121 /* fallthrough */
122 case 1:
123 if (filldir(dirent, "..", 2, i, dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
124 break;
125 i++;
126 filp->f_pos++;
127 /* fallthrough */
128 default: {
129 struct list_head *list;
130 int j = i-2;
131
132 spin_lock(&dcache_lock);
133 list = dentry->d_subdirs.next;
134
135 for (;;) {
136 if (list == &dentry->d_subdirs) {
137 spin_unlock(&dcache_lock);
138 return 0;
139 }
140 if (!j)
141 break;
142 j--;
143 list = list->next;
144 }
145
146 while(1) {
147 struct dentry *de = list_entry(list,
148 struct dentry, d_u.d_child);
149
150 if (!d_unhashed(de) && de->d_inode) {
151 spin_unlock(&dcache_lock);
152 if (filldir(dirent, de->d_name.name, de->d_name.len, filp->f_pos, de->d_inode->i_ino, DT_UNKNOWN) < 0)
153 break;
154 spin_lock(&dcache_lock);
155 }
156 filp->f_pos++;
157 list = list->next;
158 if (list != &dentry->d_subdirs)
159 continue;
160 spin_unlock(&dcache_lock);
161 break;
162 }
163 }
164 }
165 return 0;
166} 94}
167 95
168static int autofs4_dir_open(struct inode *inode, struct file *file) 96static int autofs4_dir_open(struct inode *inode, struct file *file)
@@ -170,8 +98,16 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
170 struct dentry *dentry = file->f_dentry; 98 struct dentry *dentry = file->f_dentry;
171 struct vfsmount *mnt = file->f_vfsmnt; 99 struct vfsmount *mnt = file->f_vfsmnt;
172 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 100 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
101 struct dentry *cursor;
173 int status; 102 int status;
174 103
104 status = dcache_dir_open(inode, file);
105 if (status)
106 goto out;
107
108 cursor = file->private_data;
109 cursor->d_fsdata = NULL;
110
175 DPRINTK("file=%p dentry=%p %.*s", 111 DPRINTK("file=%p dentry=%p %.*s",
176 file, dentry, dentry->d_name.len, dentry->d_name.name); 112 file, dentry, dentry->d_name.len, dentry->d_name.name);
177 113
@@ -180,12 +116,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
180 116
181 if (autofs4_ispending(dentry)) { 117 if (autofs4_ispending(dentry)) {
182 DPRINTK("dentry busy"); 118 DPRINTK("dentry busy");
183 return -EBUSY; 119 dcache_dir_close(inode, file);
120 status = -EBUSY;
121 goto out;
184 } 122 }
185 123
124 status = -ENOENT;
186 if (!d_mountpoint(dentry) && dentry->d_op && dentry->d_op->d_revalidate) { 125 if (!d_mountpoint(dentry) && dentry->d_op && dentry->d_op->d_revalidate) {
187 struct nameidata nd; 126 struct nameidata nd;
188 int empty; 127 int empty, ret;
189 128
190 /* In case there are stale directory dentrys from a failed mount */ 129 /* In case there are stale directory dentrys from a failed mount */
191 spin_lock(&dcache_lock); 130 spin_lock(&dcache_lock);
@@ -195,13 +134,13 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
195 if (!empty) 134 if (!empty)
196 d_invalidate(dentry); 135 d_invalidate(dentry);
197 136
198 nd.dentry = dentry;
199 nd.mnt = mnt;
200 nd.flags = LOOKUP_DIRECTORY; 137 nd.flags = LOOKUP_DIRECTORY;
201 status = (dentry->d_op->d_revalidate)(dentry, &nd); 138 ret = (dentry->d_op->d_revalidate)(dentry, &nd);
202 139
203 if (!status) 140 if (!ret) {
204 return -ENOENT; 141 dcache_dir_close(inode, file);
142 goto out;
143 }
205 } 144 }
206 145
207 if (d_mountpoint(dentry)) { 146 if (d_mountpoint(dentry)) {
@@ -212,25 +151,29 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
212 if (!autofs4_follow_mount(&fp_mnt, &fp_dentry)) { 151 if (!autofs4_follow_mount(&fp_mnt, &fp_dentry)) {
213 dput(fp_dentry); 152 dput(fp_dentry);
214 mntput(fp_mnt); 153 mntput(fp_mnt);
215 return -ENOENT; 154 dcache_dir_close(inode, file);
155 goto out;
216 } 156 }
217 157
218 fp = dentry_open(fp_dentry, fp_mnt, file->f_flags); 158 fp = dentry_open(fp_dentry, fp_mnt, file->f_flags);
219 status = PTR_ERR(fp); 159 status = PTR_ERR(fp);
220 if (IS_ERR(fp)) { 160 if (IS_ERR(fp)) {
221 file->private_data = NULL; 161 dcache_dir_close(inode, file);
222 return status; 162 goto out;
223 } 163 }
224 file->private_data = fp; 164 cursor->d_fsdata = fp;
225 } 165 }
226out:
227 return 0; 166 return 0;
167out:
168 return status;
228} 169}
229 170
230static int autofs4_dir_close(struct inode *inode, struct file *file) 171static int autofs4_dir_close(struct inode *inode, struct file *file)
231{ 172{
232 struct dentry *dentry = file->f_dentry; 173 struct dentry *dentry = file->f_dentry;
233 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 174 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
175 struct dentry *cursor = file->private_data;
176 int status = 0;
234 177
235 DPRINTK("file=%p dentry=%p %.*s", 178 DPRINTK("file=%p dentry=%p %.*s",
236 file, dentry, dentry->d_name.len, dentry->d_name.name); 179 file, dentry, dentry->d_name.len, dentry->d_name.name);
@@ -240,26 +183,28 @@ static int autofs4_dir_close(struct inode *inode, struct file *file)
240 183
241 if (autofs4_ispending(dentry)) { 184 if (autofs4_ispending(dentry)) {
242 DPRINTK("dentry busy"); 185 DPRINTK("dentry busy");
243 return -EBUSY; 186 status = -EBUSY;
187 goto out;
244 } 188 }
245 189
246 if (d_mountpoint(dentry)) { 190 if (d_mountpoint(dentry)) {
247 struct file *fp = file->private_data; 191 struct file *fp = cursor->d_fsdata;
248 192 if (!fp) {
249 if (!fp) 193 status = -ENOENT;
250 return -ENOENT; 194 goto out;
251 195 }
252 filp_close(fp, current->files); 196 filp_close(fp, current->files);
253 file->private_data = NULL;
254 } 197 }
255out: 198out:
256 return 0; 199 dcache_dir_close(inode, file);
200 return status;
257} 201}
258 202
259static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldir) 203static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldir)
260{ 204{
261 struct dentry *dentry = file->f_dentry; 205 struct dentry *dentry = file->f_dentry;
262 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 206 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
207 struct dentry *cursor = file->private_data;
263 int status; 208 int status;
264 209
265 DPRINTK("file=%p dentry=%p %.*s", 210 DPRINTK("file=%p dentry=%p %.*s",
@@ -274,7 +219,7 @@ static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldi
274 } 219 }
275 220
276 if (d_mountpoint(dentry)) { 221 if (d_mountpoint(dentry)) {
277 struct file *fp = file->private_data; 222 struct file *fp = cursor->d_fsdata;
278 223
279 if (!fp) 224 if (!fp)
280 return -ENOENT; 225 return -ENOENT;
@@ -289,27 +234,26 @@ static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldi
289 return status; 234 return status;
290 } 235 }
291out: 236out:
292 return autofs4_dcache_readdir(file, dirent, filldir); 237 return dcache_readdir(file, dirent, filldir);
293} 238}
294 239
295static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int flags) 240static int try_to_fill_dentry(struct dentry *dentry, int flags)
296{ 241{
297 struct super_block *sb = mnt->mnt_sb; 242 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
298 struct autofs_sb_info *sbi = autofs4_sbi(sb); 243 struct autofs_info *ino = autofs4_dentry_ino(dentry);
299 struct autofs_info *de_info = autofs4_dentry_ino(dentry);
300 int status = 0; 244 int status = 0;
301 245
302 /* Block on any pending expiry here; invalidate the dentry 246 /* Block on any pending expiry here; invalidate the dentry
303 when expiration is done to trigger mount request with a new 247 when expiration is done to trigger mount request with a new
304 dentry */ 248 dentry */
305 if (de_info && (de_info->flags & AUTOFS_INF_EXPIRING)) { 249 if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
306 DPRINTK("waiting for expire %p name=%.*s", 250 DPRINTK("waiting for expire %p name=%.*s",
307 dentry, dentry->d_name.len, dentry->d_name.name); 251 dentry, dentry->d_name.len, dentry->d_name.name);
308 252
309 status = autofs4_wait(sbi, dentry, NFY_NONE); 253 status = autofs4_wait(sbi, dentry, NFY_NONE);
310 254
311 DPRINTK("expire done status=%d", status); 255 DPRINTK("expire done status=%d", status);
312 256
313 /* 257 /*
314 * If the directory still exists the mount request must 258 * If the directory still exists the mount request must
315 * continue otherwise it can't be followed at the right 259 * continue otherwise it can't be followed at the right
@@ -317,34 +261,36 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f
317 */ 261 */
318 status = d_invalidate(dentry); 262 status = d_invalidate(dentry);
319 if (status != -EBUSY) 263 if (status != -EBUSY)
320 return 0; 264 return -ENOENT;
321 } 265 }
322 266
323 DPRINTK("dentry=%p %.*s ino=%p", 267 DPRINTK("dentry=%p %.*s ino=%p",
324 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 268 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
325 269
326 /* Wait for a pending mount, triggering one if there isn't one already */ 270 /*
271 * Wait for a pending mount, triggering one if there
272 * isn't one already
273 */
327 if (dentry->d_inode == NULL) { 274 if (dentry->d_inode == NULL) {
328 DPRINTK("waiting for mount name=%.*s", 275 DPRINTK("waiting for mount name=%.*s",
329 dentry->d_name.len, dentry->d_name.name); 276 dentry->d_name.len, dentry->d_name.name);
330 277
331 status = autofs4_wait(sbi, dentry, NFY_MOUNT); 278 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
332 279
333 DPRINTK("mount done status=%d", status); 280 DPRINTK("mount done status=%d", status);
334 281
335 if (status && dentry->d_inode) 282 if (status && dentry->d_inode)
336 return 0; /* Try to get the kernel to invalidate this dentry */ 283 return status; /* Try to get the kernel to invalidate this dentry */
337 284
338 /* Turn this into a real negative dentry? */ 285 /* Turn this into a real negative dentry? */
339 if (status == -ENOENT) { 286 if (status == -ENOENT) {
340 dentry->d_time = jiffies + AUTOFS_NEGATIVE_TIMEOUT;
341 spin_lock(&dentry->d_lock); 287 spin_lock(&dentry->d_lock);
342 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 288 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
343 spin_unlock(&dentry->d_lock); 289 spin_unlock(&dentry->d_lock);
344 return 1; 290 return status;
345 } else if (status) { 291 } else if (status) {
346 /* Return a negative dentry, but leave it "pending" */ 292 /* Return a negative dentry, but leave it "pending" */
347 return 1; 293 return status;
348 } 294 }
349 /* Trigger mount for path component or follow link */ 295 /* Trigger mount for path component or follow link */
350 } else if (flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) || 296 } else if (flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) ||
@@ -363,19 +309,87 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f
363 spin_lock(&dentry->d_lock); 309 spin_lock(&dentry->d_lock);
364 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 310 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
365 spin_unlock(&dentry->d_lock); 311 spin_unlock(&dentry->d_lock);
366 return 0; 312 return status;
367 } 313 }
368 } 314 }
369 315
370 /* We don't update the usages for the autofs daemon itself, this 316 /* Initialize expiry counter after successful mount */
371 is necessary for recursive autofs mounts */ 317 if (ino)
372 if (!autofs4_oz_mode(sbi)) 318 ino->last_used = jiffies;
373 autofs4_update_usage(mnt, dentry);
374 319
375 spin_lock(&dentry->d_lock); 320 spin_lock(&dentry->d_lock);
376 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 321 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
377 spin_unlock(&dentry->d_lock); 322 spin_unlock(&dentry->d_lock);
378 return 1; 323 return status;
324}
325
326/* For autofs direct mounts the follow link triggers the mount */
327static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
328{
329 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
330 int oz_mode = autofs4_oz_mode(sbi);
331 unsigned int lookup_type;
332 int status;
333
334 DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d",
335 dentry, dentry->d_name.len, dentry->d_name.name, oz_mode,
336 nd->flags);
337
338 /* If it's our master or we shouldn't trigger a mount we're done */
339 lookup_type = nd->flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY);
340 if (oz_mode || !lookup_type)
341 goto done;
342
343 /*
344 * If a request is pending wait for it.
345 * If it's a mount then it won't be expired till at least
346 * a liitle later and if it's an expire then we might need
347 * to mount it again.
348 */
349 if (autofs4_ispending(dentry)) {
350 DPRINTK("waiting for active request %p name=%.*s",
351 dentry, dentry->d_name.len, dentry->d_name.name);
352
353 status = autofs4_wait(sbi, dentry, NFY_NONE);
354
355 DPRINTK("request done status=%d", status);
356 }
357
358 /*
359 * If the dentry contains directories then it is an
360 * autofs multi-mount with no root mount offset. So
361 * don't try to mount it again.
362 */
363 spin_lock(&dcache_lock);
364 if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
365 spin_unlock(&dcache_lock);
366
367 status = try_to_fill_dentry(dentry, 0);
368 if (status)
369 goto out_error;
370
371 /*
372 * The mount succeeded but if there is no root mount
373 * it must be an autofs multi-mount with no root offset
374 * so we don't need to follow the mount.
375 */
376 if (d_mountpoint(dentry)) {
377 if (!autofs4_follow_mount(&nd->mnt, &nd->dentry)) {
378 status = -ENOENT;
379 goto out_error;
380 }
381 }
382
383 goto done;
384 }
385 spin_unlock(&dcache_lock);
386
387done:
388 return NULL;
389
390out_error:
391 path_release(nd);
392 return ERR_PTR(status);
379} 393}
380 394
381/* 395/*
@@ -384,47 +398,43 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f
384 * yet completely filled in, and revalidate has to delay such 398 * yet completely filled in, and revalidate has to delay such
385 * lookups.. 399 * lookups..
386 */ 400 */
387static int autofs4_revalidate(struct dentry * dentry, struct nameidata *nd) 401static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
388{ 402{
389 struct inode * dir = dentry->d_parent->d_inode; 403 struct inode *dir = dentry->d_parent->d_inode;
390 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 404 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
391 int oz_mode = autofs4_oz_mode(sbi); 405 int oz_mode = autofs4_oz_mode(sbi);
392 int flags = nd ? nd->flags : 0; 406 int flags = nd ? nd->flags : 0;
393 int status = 1; 407 int status = 0;
394 408
395 /* Pending dentry */ 409 /* Pending dentry */
396 if (autofs4_ispending(dentry)) { 410 if (autofs4_ispending(dentry)) {
397 if (!oz_mode) 411 if (!oz_mode)
398 status = try_to_fill_dentry(nd->mnt, dentry, flags); 412 status = try_to_fill_dentry(dentry, flags);
399 return status; 413 return !status;
400 } 414 }
401 415
402 /* Negative dentry.. invalidate if "old" */ 416 /* Negative dentry.. invalidate if "old" */
403 if (dentry->d_inode == NULL) 417 if (dentry->d_inode == NULL)
404 return (dentry->d_time - jiffies <= AUTOFS_NEGATIVE_TIMEOUT); 418 return 0;
405 419
406 /* Check for a non-mountpoint directory with no contents */ 420 /* Check for a non-mountpoint directory with no contents */
407 spin_lock(&dcache_lock); 421 spin_lock(&dcache_lock);
408 if (S_ISDIR(dentry->d_inode->i_mode) && 422 if (S_ISDIR(dentry->d_inode->i_mode) &&
409 !d_mountpoint(dentry) && 423 !d_mountpoint(dentry) &&
410 list_empty(&dentry->d_subdirs)) { 424 __simple_empty(dentry)) {
411 DPRINTK("dentry=%p %.*s, emptydir", 425 DPRINTK("dentry=%p %.*s, emptydir",
412 dentry, dentry->d_name.len, dentry->d_name.name); 426 dentry, dentry->d_name.len, dentry->d_name.name);
413 spin_unlock(&dcache_lock); 427 spin_unlock(&dcache_lock);
414 if (!oz_mode) 428 if (!oz_mode)
415 status = try_to_fill_dentry(nd->mnt, dentry, flags); 429 status = try_to_fill_dentry(dentry, flags);
416 return status; 430 return !status;
417 } 431 }
418 spin_unlock(&dcache_lock); 432 spin_unlock(&dcache_lock);
419 433
420 /* Update the usage list */
421 if (!oz_mode)
422 autofs4_update_usage(nd->mnt, dentry);
423
424 return 1; 434 return 1;
425} 435}
426 436
427static void autofs4_dentry_release(struct dentry *de) 437void autofs4_dentry_release(struct dentry *de)
428{ 438{
429 struct autofs_info *inf; 439 struct autofs_info *inf;
430 440
@@ -462,12 +472,13 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
462 DPRINTK("name = %.*s", 472 DPRINTK("name = %.*s",
463 dentry->d_name.len, dentry->d_name.name); 473 dentry->d_name.len, dentry->d_name.name);
464 474
475 /* File name too long to exist */
465 if (dentry->d_name.len > NAME_MAX) 476 if (dentry->d_name.len > NAME_MAX)
466 return ERR_PTR(-ENAMETOOLONG);/* File name too long to exist */ 477 return ERR_PTR(-ENAMETOOLONG);
467 478
468 sbi = autofs4_sbi(dir->i_sb); 479 sbi = autofs4_sbi(dir->i_sb);
469
470 oz_mode = autofs4_oz_mode(sbi); 480 oz_mode = autofs4_oz_mode(sbi);
481
471 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 482 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
472 current->pid, process_group(current), sbi->catatonic, oz_mode); 483 current->pid, process_group(current), sbi->catatonic, oz_mode);
473 484
@@ -519,7 +530,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
519 * doesn't do the right thing for all system calls, but it should 530 * doesn't do the right thing for all system calls, but it should
520 * be OK for the operations we permit from an autofs. 531 * be OK for the operations we permit from an autofs.
521 */ 532 */
522 if ( dentry->d_inode && d_unhashed(dentry) ) 533 if (dentry->d_inode && d_unhashed(dentry))
523 return ERR_PTR(-ENOENT); 534 return ERR_PTR(-ENOENT);
524 535
525 return NULL; 536 return NULL;
@@ -531,6 +542,7 @@ static int autofs4_dir_symlink(struct inode *dir,
531{ 542{
532 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 543 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
533 struct autofs_info *ino = autofs4_dentry_ino(dentry); 544 struct autofs_info *ino = autofs4_dentry_ino(dentry);
545 struct autofs_info *p_ino;
534 struct inode *inode; 546 struct inode *inode;
535 char *cp; 547 char *cp;
536 548
@@ -564,6 +576,10 @@ static int autofs4_dir_symlink(struct inode *dir,
564 576
565 dentry->d_fsdata = ino; 577 dentry->d_fsdata = ino;
566 ino->dentry = dget(dentry); 578 ino->dentry = dget(dentry);
579 atomic_inc(&ino->count);
580 p_ino = autofs4_dentry_ino(dentry->d_parent);
581 if (p_ino && dentry->d_parent != dentry)
582 atomic_inc(&p_ino->count);
567 ino->inode = inode; 583 ino->inode = inode;
568 584
569 dir->i_mtime = CURRENT_TIME; 585 dir->i_mtime = CURRENT_TIME;
@@ -590,11 +606,17 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
590{ 606{
591 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 607 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
592 struct autofs_info *ino = autofs4_dentry_ino(dentry); 608 struct autofs_info *ino = autofs4_dentry_ino(dentry);
609 struct autofs_info *p_ino;
593 610
594 /* This allows root to remove symlinks */ 611 /* This allows root to remove symlinks */
595 if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) 612 if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) )
596 return -EACCES; 613 return -EACCES;
597 614
615 if (atomic_dec_and_test(&ino->count)) {
616 p_ino = autofs4_dentry_ino(dentry->d_parent);
617 if (p_ino && dentry->d_parent != dentry)
618 atomic_dec(&p_ino->count);
619 }
598 dput(ino->dentry); 620 dput(ino->dentry);
599 621
600 dentry->d_inode->i_size = 0; 622 dentry->d_inode->i_size = 0;
@@ -611,6 +633,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
611{ 633{
612 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 634 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
613 struct autofs_info *ino = autofs4_dentry_ino(dentry); 635 struct autofs_info *ino = autofs4_dentry_ino(dentry);
636 struct autofs_info *p_ino;
614 637
615 if (!autofs4_oz_mode(sbi)) 638 if (!autofs4_oz_mode(sbi))
616 return -EACCES; 639 return -EACCES;
@@ -625,8 +648,12 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
625 spin_unlock(&dentry->d_lock); 648 spin_unlock(&dentry->d_lock);
626 spin_unlock(&dcache_lock); 649 spin_unlock(&dcache_lock);
627 650
651 if (atomic_dec_and_test(&ino->count)) {
652 p_ino = autofs4_dentry_ino(dentry->d_parent);
653 if (p_ino && dentry->d_parent != dentry)
654 atomic_dec(&p_ino->count);
655 }
628 dput(ino->dentry); 656 dput(ino->dentry);
629
630 dentry->d_inode->i_size = 0; 657 dentry->d_inode->i_size = 0;
631 dentry->d_inode->i_nlink = 0; 658 dentry->d_inode->i_nlink = 0;
632 659
@@ -640,6 +667,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
640{ 667{
641 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 668 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
642 struct autofs_info *ino = autofs4_dentry_ino(dentry); 669 struct autofs_info *ino = autofs4_dentry_ino(dentry);
670 struct autofs_info *p_ino;
643 struct inode *inode; 671 struct inode *inode;
644 672
645 if ( !autofs4_oz_mode(sbi) ) 673 if ( !autofs4_oz_mode(sbi) )
@@ -662,6 +690,10 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
662 690
663 dentry->d_fsdata = ino; 691 dentry->d_fsdata = ino;
664 ino->dentry = dget(dentry); 692 ino->dentry = dget(dentry);
693 atomic_inc(&ino->count);
694 p_ino = autofs4_dentry_ino(dentry->d_parent);
695 if (p_ino && dentry->d_parent != dentry)
696 atomic_inc(&p_ino->count);
665 ino->inode = inode; 697 ino->inode = inode;
666 dir->i_nlink++; 698 dir->i_nlink++;
667 dir->i_mtime = CURRENT_TIME; 699 dir->i_mtime = CURRENT_TIME;
@@ -745,7 +777,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
745{ 777{
746 int status = 0; 778 int status = 0;
747 779
748 if (may_umount(mnt) == 0) 780 if (may_umount(mnt))
749 status = 1; 781 status = 1;
750 782
751 DPRINTK("returning %d", status); 783 DPRINTK("returning %d", status);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index be78e9378c03..142ab6aa2aa1 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -3,7 +3,7 @@
3 * linux/fs/autofs/waitq.c 3 * linux/fs/autofs/waitq.c
4 * 4 *
5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved 5 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
6 * Copyright 2001-2003 Ian Kent <raven@themaw.net> 6 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
7 * 7 *
8 * This file is part of the Linux kernel and is made available under 8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your 9 * the terms of the GNU General Public License, version 2, or at your
@@ -33,7 +33,7 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
33 sbi->catatonic = 1; 33 sbi->catatonic = 1;
34 wq = sbi->queues; 34 wq = sbi->queues;
35 sbi->queues = NULL; /* Erase all wait queues */ 35 sbi->queues = NULL; /* Erase all wait queues */
36 while ( wq ) { 36 while (wq) {
37 nwq = wq->next; 37 nwq = wq->next;
38 wq->status = -ENOENT; /* Magic is gone - report failure */ 38 wq->status = -ENOENT; /* Magic is gone - report failure */
39 kfree(wq->name); 39 kfree(wq->name);
@@ -45,7 +45,6 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
45 fput(sbi->pipe); /* Close the pipe */ 45 fput(sbi->pipe); /* Close the pipe */
46 sbi->pipe = NULL; 46 sbi->pipe = NULL;
47 } 47 }
48
49 shrink_dcache_sb(sbi->sb); 48 shrink_dcache_sb(sbi->sb);
50} 49}
51 50
@@ -98,7 +97,10 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
98 97
99 pkt.hdr.proto_version = sbi->version; 98 pkt.hdr.proto_version = sbi->version;
100 pkt.hdr.type = type; 99 pkt.hdr.type = type;
101 if (type == autofs_ptype_missing) { 100 switch (type) {
101 /* Kernel protocol v4 missing and expire packets */
102 case autofs_ptype_missing:
103 {
102 struct autofs_packet_missing *mp = &pkt.missing; 104 struct autofs_packet_missing *mp = &pkt.missing;
103 105
104 pktsz = sizeof(*mp); 106 pktsz = sizeof(*mp);
@@ -107,7 +109,10 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
107 mp->len = wq->len; 109 mp->len = wq->len;
108 memcpy(mp->name, wq->name, wq->len); 110 memcpy(mp->name, wq->name, wq->len);
109 mp->name[wq->len] = '\0'; 111 mp->name[wq->len] = '\0';
110 } else if (type == autofs_ptype_expire_multi) { 112 break;
113 }
114 case autofs_ptype_expire_multi:
115 {
111 struct autofs_packet_expire_multi *ep = &pkt.expire_multi; 116 struct autofs_packet_expire_multi *ep = &pkt.expire_multi;
112 117
113 pktsz = sizeof(*ep); 118 pktsz = sizeof(*ep);
@@ -116,7 +121,34 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
116 ep->len = wq->len; 121 ep->len = wq->len;
117 memcpy(ep->name, wq->name, wq->len); 122 memcpy(ep->name, wq->name, wq->len);
118 ep->name[wq->len] = '\0'; 123 ep->name[wq->len] = '\0';
119 } else { 124 break;
125 }
126 /*
127 * Kernel protocol v5 packet for handling indirect and direct
128 * mount missing and expire requests
129 */
130 case autofs_ptype_missing_indirect:
131 case autofs_ptype_expire_indirect:
132 case autofs_ptype_missing_direct:
133 case autofs_ptype_expire_direct:
134 {
135 struct autofs_v5_packet *packet = &pkt.v5_packet;
136
137 pktsz = sizeof(*packet);
138
139 packet->wait_queue_token = wq->wait_queue_token;
140 packet->len = wq->len;
141 memcpy(packet->name, wq->name, wq->len);
142 packet->name[wq->len] = '\0';
143 packet->dev = wq->dev;
144 packet->ino = wq->ino;
145 packet->uid = wq->uid;
146 packet->gid = wq->gid;
147 packet->pid = wq->pid;
148 packet->tgid = wq->tgid;
149 break;
150 }
151 default:
120 printk("autofs4_notify_daemon: bad type %d!\n", type); 152 printk("autofs4_notify_daemon: bad type %d!\n", type);
121 return; 153 return;
122 } 154 }
@@ -162,21 +194,29 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
162{ 194{
163 struct autofs_wait_queue *wq; 195 struct autofs_wait_queue *wq;
164 char *name; 196 char *name;
165 int len, status; 197 unsigned int len = 0;
198 unsigned int hash = 0;
199 int status;
166 200
167 /* In catatonic mode, we don't wait for nobody */ 201 /* In catatonic mode, we don't wait for nobody */
168 if ( sbi->catatonic ) 202 if (sbi->catatonic)
169 return -ENOENT; 203 return -ENOENT;
170 204
171 name = kmalloc(NAME_MAX + 1, GFP_KERNEL); 205 name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
172 if (!name) 206 if (!name)
173 return -ENOMEM; 207 return -ENOMEM;
174 208
175 len = autofs4_getpath(sbi, dentry, &name); 209 /* If this is a direct mount request create a dummy name */
176 if (!len) { 210 if (IS_ROOT(dentry) && (sbi->type & AUTOFS_TYPE_DIRECT))
177 kfree(name); 211 len = sprintf(name, "%p", dentry);
178 return -ENOENT; 212 else {
213 len = autofs4_getpath(sbi, dentry, &name);
214 if (!len) {
215 kfree(name);
216 return -ENOENT;
217 }
179 } 218 }
219 hash = full_name_hash(name, len);
180 220
181 if (mutex_lock_interruptible(&sbi->wq_mutex)) { 221 if (mutex_lock_interruptible(&sbi->wq_mutex)) {
182 kfree(name); 222 kfree(name);
@@ -190,7 +230,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
190 break; 230 break;
191 } 231 }
192 232
193 if ( !wq ) { 233 if (!wq) {
194 /* Can't wait for an expire if there's no mount */ 234 /* Can't wait for an expire if there's no mount */
195 if (notify == NFY_NONE && !d_mountpoint(dentry)) { 235 if (notify == NFY_NONE && !d_mountpoint(dentry)) {
196 kfree(name); 236 kfree(name);
@@ -200,7 +240,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
200 240
201 /* Create a new wait queue */ 241 /* Create a new wait queue */
202 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); 242 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
203 if ( !wq ) { 243 if (!wq) {
204 kfree(name); 244 kfree(name);
205 mutex_unlock(&sbi->wq_mutex); 245 mutex_unlock(&sbi->wq_mutex);
206 return -ENOMEM; 246 return -ENOMEM;
@@ -212,12 +252,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
212 wq->next = sbi->queues; 252 wq->next = sbi->queues;
213 sbi->queues = wq; 253 sbi->queues = wq;
214 init_waitqueue_head(&wq->queue); 254 init_waitqueue_head(&wq->queue);
215 wq->hash = dentry->d_name.hash; 255 wq->hash = hash;
216 wq->name = name; 256 wq->name = name;
217 wq->len = len; 257 wq->len = len;
258 wq->dev = autofs4_get_dev(sbi);
259 wq->ino = autofs4_get_ino(sbi);
260 wq->uid = current->uid;
261 wq->gid = current->gid;
262 wq->pid = current->pid;
263 wq->tgid = current->tgid;
218 wq->status = -EINTR; /* Status return if interrupted */ 264 wq->status = -EINTR; /* Status return if interrupted */
219 atomic_set(&wq->wait_ctr, 2); 265 atomic_set(&wq->wait_ctr, 2);
220 atomic_set(&wq->notified, 1); 266 atomic_set(&wq->notify, 1);
221 mutex_unlock(&sbi->wq_mutex); 267 mutex_unlock(&sbi->wq_mutex);
222 } else { 268 } else {
223 atomic_inc(&wq->wait_ctr); 269 atomic_inc(&wq->wait_ctr);
@@ -227,9 +273,26 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
227 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 273 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
228 } 274 }
229 275
230 if (notify != NFY_NONE && atomic_dec_and_test(&wq->notified)) { 276 if (notify != NFY_NONE && atomic_read(&wq->notify)) {
231 int type = (notify == NFY_MOUNT ? 277 int type;
232 autofs_ptype_missing : autofs_ptype_expire_multi); 278
279 atomic_dec(&wq->notify);
280
281 if (sbi->version < 5) {
282 if (notify == NFY_MOUNT)
283 type = autofs_ptype_missing;
284 else
285 type = autofs_ptype_expire_multi;
286 } else {
287 if (notify == NFY_MOUNT)
288 type = (sbi->type & AUTOFS_TYPE_DIRECT) ?
289 autofs_ptype_missing_direct :
290 autofs_ptype_missing_indirect;
291 else
292 type = (sbi->type & AUTOFS_TYPE_DIRECT) ?
293 autofs_ptype_expire_direct :
294 autofs_ptype_expire_indirect;
295 }
233 296
234 DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", 297 DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
235 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 298 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
@@ -240,14 +303,14 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
240 303
241 /* wq->name is NULL if and only if the lock is already released */ 304 /* wq->name is NULL if and only if the lock is already released */
242 305
243 if ( sbi->catatonic ) { 306 if (sbi->catatonic) {
244 /* We might have slept, so check again for catatonic mode */ 307 /* We might have slept, so check again for catatonic mode */
245 wq->status = -ENOENT; 308 wq->status = -ENOENT;
246 kfree(wq->name); 309 kfree(wq->name);
247 wq->name = NULL; 310 wq->name = NULL;
248 } 311 }
249 312
250 if ( wq->name ) { 313 if (wq->name) {
251 /* Block all but "shutdown" signals while waiting */ 314 /* Block all but "shutdown" signals while waiting */
252 sigset_t oldset; 315 sigset_t oldset;
253 unsigned long irqflags; 316 unsigned long irqflags;
@@ -283,12 +346,12 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
283 struct autofs_wait_queue *wq, **wql; 346 struct autofs_wait_queue *wq, **wql;
284 347
285 mutex_lock(&sbi->wq_mutex); 348 mutex_lock(&sbi->wq_mutex);
286 for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { 349 for (wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next) {
287 if ( wq->wait_queue_token == wait_queue_token ) 350 if (wq->wait_queue_token == wait_queue_token)
288 break; 351 break;
289 } 352 }
290 353
291 if ( !wq ) { 354 if (!wq) {
292 mutex_unlock(&sbi->wq_mutex); 355 mutex_unlock(&sbi->wq_mutex);
293 return -EINVAL; 356 return -EINVAL;
294 } 357 }
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index e172180a1d8c..80599ae33966 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -22,7 +22,7 @@ static int return_EIO(void)
22 22
23#define EIO_ERROR ((void *) (return_EIO)) 23#define EIO_ERROR ((void *) (return_EIO))
24 24
25static struct file_operations bad_file_ops = 25static const struct file_operations bad_file_ops =
26{ 26{
27 .llseek = EIO_ERROR, 27 .llseek = EIO_ERROR,
28 .aio_read = EIO_ERROR, 28 .aio_read = EIO_ERROR,
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 044a59587829..68ebd10f345d 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -64,7 +64,7 @@ static const struct super_operations befs_sops = {
64/* slab cache for befs_inode_info objects */ 64/* slab cache for befs_inode_info objects */
65static kmem_cache_t *befs_inode_cachep; 65static kmem_cache_t *befs_inode_cachep;
66 66
67static struct file_operations befs_dir_operations = { 67static const struct file_operations befs_dir_operations = {
68 .read = generic_read_dir, 68 .read = generic_read_dir,
69 .readdir = befs_readdir, 69 .readdir = befs_readdir,
70}; 70};
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
index 1fbc53f14aba..9d791004b21c 100644
--- a/fs/bfs/bfs.h
+++ b/fs/bfs/bfs.h
@@ -49,11 +49,11 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode)
49 49
50/* file.c */ 50/* file.c */
51extern struct inode_operations bfs_file_inops; 51extern struct inode_operations bfs_file_inops;
52extern struct file_operations bfs_file_operations; 52extern const struct file_operations bfs_file_operations;
53extern struct address_space_operations bfs_aops; 53extern struct address_space_operations bfs_aops;
54 54
55/* dir.c */ 55/* dir.c */
56extern struct inode_operations bfs_dir_inops; 56extern struct inode_operations bfs_dir_inops;
57extern struct file_operations bfs_dir_operations; 57extern const struct file_operations bfs_dir_operations;
58 58
59#endif /* _FS_BFS_BFS_H */ 59#endif /* _FS_BFS_BFS_H */
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 5af928fa0449..26fad9621738 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -70,7 +70,7 @@ static int bfs_readdir(struct file * f, void * dirent, filldir_t filldir)
70 return 0; 70 return 0;
71} 71}
72 72
73struct file_operations bfs_dir_operations = { 73const struct file_operations bfs_dir_operations = {
74 .read = generic_read_dir, 74 .read = generic_read_dir,
75 .readdir = bfs_readdir, 75 .readdir = bfs_readdir,
76 .fsync = file_fsync, 76 .fsync = file_fsync,
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 807723b65daf..d83cd74a2e4e 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -17,7 +17,7 @@
17#define dprintf(x...) 17#define dprintf(x...)
18#endif 18#endif
19 19
20struct file_operations bfs_file_operations = { 20const struct file_operations bfs_file_operations = {
21 .llseek = generic_file_llseek, 21 .llseek = generic_file_llseek,
22 .read = generic_file_read, 22 .read = generic_file_read,
23 .write = generic_file_write, 23 .write = generic_file_write,
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 6a7b730c206b..d73d75591a39 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -600,7 +600,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
600 return count; 600 return count;
601} 601}
602 602
603static struct file_operations bm_entry_operations = { 603static const struct file_operations bm_entry_operations = {
604 .read = bm_entry_read, 604 .read = bm_entry_read,
605 .write = bm_entry_write, 605 .write = bm_entry_write,
606}; 606};
@@ -668,7 +668,7 @@ out:
668 return count; 668 return count;
669} 669}
670 670
671static struct file_operations bm_register_operations = { 671static const struct file_operations bm_register_operations = {
672 .write = bm_register_write, 672 .write = bm_register_write,
673}; 673};
674 674
@@ -715,7 +715,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
715 return count; 715 return count;
716} 716}
717 717
718static struct file_operations bm_status_operations = { 718static const struct file_operations bm_status_operations = {
719 .read = bm_status_read, 719 .read = bm_status_read,
720 .write = bm_status_write, 720 .write = bm_status_write,
721}; 721};
diff --git a/fs/bio.c b/fs/bio.c
index 73e664c01d30..eb8fbc53f2cd 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
30 30
31#define BIO_POOL_SIZE 256 31#define BIO_POOL_SIZE 256
32 32
33static kmem_cache_t *bio_slab; 33static kmem_cache_t *bio_slab __read_mostly;
34 34
35#define BIOVEC_NR_POOLS 6 35#define BIOVEC_NR_POOLS 6
36 36
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
39 * basically we just need to survive 39 * basically we just need to survive
40 */ 40 */
41#define BIO_SPLIT_ENTRIES 8 41#define BIO_SPLIT_ENTRIES 8
42mempool_t *bio_split_pool; 42mempool_t *bio_split_pool __read_mostly;
43 43
44struct biovec_slab { 44struct biovec_slab {
45 int nr_vecs; 45 int nr_vecs;
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
1125 return bp; 1125 return bp;
1126} 1126}
1127 1127
1128static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
1129{
1130 return kmalloc(sizeof(struct bio_pair), gfp_flags);
1131}
1132
1133static void bio_pair_free(void *bp, void *data)
1134{
1135 kfree(bp);
1136}
1137
1138 1128
1139/* 1129/*
1140 * create memory pools for biovec's in a bio_set. 1130 * create memory pools for biovec's in a bio_set.
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
1151 if (i >= scale) 1141 if (i >= scale)
1152 pool_entries >>= 1; 1142 pool_entries >>= 1;
1153 1143
1154 *bvp = mempool_create(pool_entries, mempool_alloc_slab, 1144 *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
1155 mempool_free_slab, bp->slab);
1156 if (!*bvp) 1145 if (!*bvp)
1157 return -ENOMEM; 1146 return -ENOMEM;
1158 } 1147 }
@@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
1189 if (!bs) 1178 if (!bs)
1190 return NULL; 1179 return NULL;
1191 1180
1192 bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab, 1181 bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
1193 mempool_free_slab, bio_slab);
1194
1195 if (!bs->bio_pool) 1182 if (!bs->bio_pool)
1196 goto bad; 1183 goto bad;
1197 1184
@@ -1254,8 +1241,8 @@ static int __init init_bio(void)
1254 if (!fs_bio_set) 1241 if (!fs_bio_set)
1255 panic("bio: can't allocate bios\n"); 1242 panic("bio: can't allocate bios\n");
1256 1243
1257 bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, 1244 bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
1258 bio_pair_alloc, bio_pair_free, NULL); 1245 sizeof(struct bio_pair));
1259 if (!bio_split_pool) 1246 if (!bio_split_pool)
1260 panic("bio: can't create split pool\n"); 1247 panic("bio: can't create split pool\n");
1261 1248
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67a..af88c43043d5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
131 131
132static int 132static int
133blkdev_get_blocks(struct inode *inode, sector_t iblock, 133blkdev_get_blocks(struct inode *inode, sector_t iblock,
134 unsigned long max_blocks, struct buffer_head *bh, int create) 134 struct buffer_head *bh, int create)
135{ 135{
136 sector_t end_block = max_block(I_BDEV(inode)); 136 sector_t end_block = max_block(I_BDEV(inode));
137 unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
137 138
138 if ((iblock + max_blocks) > end_block) { 139 if ((iblock + max_blocks) > end_block) {
139 max_blocks = end_block - iblock; 140 max_blocks = end_block - iblock;
@@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
234 */ 235 */
235 236
236static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); 237static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
237static kmem_cache_t * bdev_cachep; 238static kmem_cache_t * bdev_cachep __read_mostly;
238 239
239static struct inode *bdev_alloc_inode(struct super_block *sb) 240static struct inode *bdev_alloc_inode(struct super_block *sb)
240{ 241{
@@ -265,6 +266,9 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
265 mutex_init(&bdev->bd_mount_mutex); 266 mutex_init(&bdev->bd_mount_mutex);
266 INIT_LIST_HEAD(&bdev->bd_inodes); 267 INIT_LIST_HEAD(&bdev->bd_inodes);
267 INIT_LIST_HEAD(&bdev->bd_list); 268 INIT_LIST_HEAD(&bdev->bd_list);
269#ifdef CONFIG_SYSFS
270 INIT_LIST_HEAD(&bdev->bd_holder_list);
271#endif
268 inode_init_once(&ei->vfs_inode); 272 inode_init_once(&ei->vfs_inode);
269 } 273 }
270} 274}
@@ -308,7 +312,7 @@ static struct file_system_type bd_type = {
308 .kill_sb = kill_anon_super, 312 .kill_sb = kill_anon_super,
309}; 313};
310 314
311static struct vfsmount *bd_mnt; 315static struct vfsmount *bd_mnt __read_mostly;
312struct super_block *blockdev_superblock; 316struct super_block *blockdev_superblock;
313 317
314void __init bdev_cache_init(void) 318void __init bdev_cache_init(void)
@@ -489,6 +493,300 @@ void bd_release(struct block_device *bdev)
489 493
490EXPORT_SYMBOL(bd_release); 494EXPORT_SYMBOL(bd_release);
491 495
496#ifdef CONFIG_SYSFS
497/*
498 * Functions for bd_claim_by_kobject / bd_release_from_kobject
499 *
500 * If a kobject is passed to bd_claim_by_kobject()
501 * and the kobject has a parent directory,
502 * following symlinks are created:
503 * o from the kobject to the claimed bdev
504 * o from "holders" directory of the bdev to the parent of the kobject
505 * bd_release_from_kobject() removes these symlinks.
506 *
507 * Example:
508 * If /dev/dm-0 maps to /dev/sda, kobject corresponding to
509 * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
510 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
511 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
512 */
513
514static struct kobject *bdev_get_kobj(struct block_device *bdev)
515{
516 if (bdev->bd_contains != bdev)
517 return kobject_get(&bdev->bd_part->kobj);
518 else
519 return kobject_get(&bdev->bd_disk->kobj);
520}
521
522static struct kobject *bdev_get_holder(struct block_device *bdev)
523{
524 if (bdev->bd_contains != bdev)
525 return kobject_get(bdev->bd_part->holder_dir);
526 else
527 return kobject_get(bdev->bd_disk->holder_dir);
528}
529
530static void add_symlink(struct kobject *from, struct kobject *to)
531{
532 if (!from || !to)
533 return;
534 sysfs_create_link(from, to, kobject_name(to));
535}
536
537static void del_symlink(struct kobject *from, struct kobject *to)
538{
539 if (!from || !to)
540 return;
541 sysfs_remove_link(from, kobject_name(to));
542}
543
544/*
545 * 'struct bd_holder' contains pointers to kobjects symlinked by
546 * bd_claim_by_kobject.
547 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
548 */
549struct bd_holder {
550 struct list_head list; /* chain of holders of the bdev */
551 int count; /* references from the holder */
552 struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */
553 struct kobject *hdev; /* e.g. "/block/dm-0" */
554 struct kobject *hdir; /* e.g. "/block/sda/holders" */
555 struct kobject *sdev; /* e.g. "/block/sda" */
556};
557
558/*
559 * Get references of related kobjects at once.
560 * Returns 1 on success. 0 on failure.
561 *
562 * Should call bd_holder_release_dirs() after successful use.
563 */
564static int bd_holder_grab_dirs(struct block_device *bdev,
565 struct bd_holder *bo)
566{
567 if (!bdev || !bo)
568 return 0;
569
570 bo->sdir = kobject_get(bo->sdir);
571 if (!bo->sdir)
572 return 0;
573
574 bo->hdev = kobject_get(bo->sdir->parent);
575 if (!bo->hdev)
576 goto fail_put_sdir;
577
578 bo->sdev = bdev_get_kobj(bdev);
579 if (!bo->sdev)
580 goto fail_put_hdev;
581
582 bo->hdir = bdev_get_holder(bdev);
583 if (!bo->hdir)
584 goto fail_put_sdev;
585
586 return 1;
587
588fail_put_sdev:
589 kobject_put(bo->sdev);
590fail_put_hdev:
591 kobject_put(bo->hdev);
592fail_put_sdir:
593 kobject_put(bo->sdir);
594
595 return 0;
596}
597
598/* Put references of related kobjects at once. */
599static void bd_holder_release_dirs(struct bd_holder *bo)
600{
601 kobject_put(bo->hdir);
602 kobject_put(bo->sdev);
603 kobject_put(bo->hdev);
604 kobject_put(bo->sdir);
605}
606
607static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
608{
609 struct bd_holder *bo;
610
611 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
612 if (!bo)
613 return NULL;
614
615 bo->count = 1;
616 bo->sdir = kobj;
617
618 return bo;
619}
620
621static void free_bd_holder(struct bd_holder *bo)
622{
623 kfree(bo);
624}
625
626/**
627 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
628 *
629 * @bdev: block device to be bd_claimed
630 * @bo: preallocated and initialized by alloc_bd_holder()
631 *
632 * If there is no matching entry with @bo in @bdev->bd_holder_list,
633 * add @bo to the list, create symlinks.
634 *
635 * Returns 1 if @bo was added to the list.
636 * Returns 0 if @bo wasn't used by any reason and should be freed.
637 */
638static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
639{
640 struct bd_holder *tmp;
641
642 if (!bo)
643 return 0;
644
645 list_for_each_entry(tmp, &bdev->bd_holder_list, list) {
646 if (tmp->sdir == bo->sdir) {
647 tmp->count++;
648 return 0;
649 }
650 }
651
652 if (!bd_holder_grab_dirs(bdev, bo))
653 return 0;
654
655 add_symlink(bo->sdir, bo->sdev);
656 add_symlink(bo->hdir, bo->hdev);
657 list_add_tail(&bo->list, &bdev->bd_holder_list);
658 return 1;
659}
660
661/**
662 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
663 *
664 * @bdev: block device to be bd_claimed
665 * @kobj: holder's kobject
666 *
667 * If there is matching entry with @kobj in @bdev->bd_holder_list
668 * and no other bd_claim() from the same kobject,
669 * remove the struct bd_holder from the list, delete symlinks for it.
670 *
671 * Returns a pointer to the struct bd_holder when it's removed from the list
672 * and ready to be freed.
673 * Returns NULL if matching claim isn't found or there is other bd_claim()
674 * by the same kobject.
675 */
676static struct bd_holder *del_bd_holder(struct block_device *bdev,
677 struct kobject *kobj)
678{
679 struct bd_holder *bo;
680
681 list_for_each_entry(bo, &bdev->bd_holder_list, list) {
682 if (bo->sdir == kobj) {
683 bo->count--;
684 BUG_ON(bo->count < 0);
685 if (!bo->count) {
686 list_del(&bo->list);
687 del_symlink(bo->sdir, bo->sdev);
688 del_symlink(bo->hdir, bo->hdev);
689 bd_holder_release_dirs(bo);
690 return bo;
691 }
692 break;
693 }
694 }
695
696 return NULL;
697}
698
699/**
700 * bd_claim_by_kobject - bd_claim() with additional kobject signature
701 *
702 * @bdev: block device to be claimed
703 * @holder: holder's signature
704 * @kobj: holder's kobject
705 *
706 * Do bd_claim() and if it succeeds, create sysfs symlinks between
707 * the bdev and the holder's kobject.
708 * Use bd_release_from_kobject() when relesing the claimed bdev.
709 *
710 * Returns 0 on success. (same as bd_claim())
711 * Returns errno on failure.
712 */
713static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
714 struct kobject *kobj)
715{
716 int res;
717 struct bd_holder *bo;
718
719 if (!kobj)
720 return -EINVAL;
721
722 bo = alloc_bd_holder(kobj);
723 if (!bo)
724 return -ENOMEM;
725
726 mutex_lock(&bdev->bd_mutex);
727 res = bd_claim(bdev, holder);
728 if (res || !add_bd_holder(bdev, bo))
729 free_bd_holder(bo);
730 mutex_unlock(&bdev->bd_mutex);
731
732 return res;
733}
734
735/**
736 * bd_release_from_kobject - bd_release() with additional kobject signature
737 *
738 * @bdev: block device to be released
739 * @kobj: holder's kobject
740 *
741 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
742 */
743static void bd_release_from_kobject(struct block_device *bdev,
744 struct kobject *kobj)
745{
746 struct bd_holder *bo;
747
748 if (!kobj)
749 return;
750
751 mutex_lock(&bdev->bd_mutex);
752 bd_release(bdev);
753 if ((bo = del_bd_holder(bdev, kobj)))
754 free_bd_holder(bo);
755 mutex_unlock(&bdev->bd_mutex);
756}
757
758/**
759 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
760 *
761 * @bdev: block device to be claimed
762 * @holder: holder's signature
763 * @disk: holder's gendisk
764 *
765 * Call bd_claim_by_kobject() with getting @disk->slave_dir.
766 */
767int bd_claim_by_disk(struct block_device *bdev, void *holder,
768 struct gendisk *disk)
769{
770 return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
771}
772EXPORT_SYMBOL_GPL(bd_claim_by_disk);
773
774/**
775 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
776 *
777 * @bdev: block device to be claimed
778 * @disk: holder's gendisk
779 *
780 * Call bd_release_from_kobject() and put @disk->slave_dir.
781 */
782void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
783{
784 bd_release_from_kobject(bdev, disk->slave_dir);
785 kobject_put(disk->slave_dir);
786}
787EXPORT_SYMBOL_GPL(bd_release_from_disk);
788#endif
789
492/* 790/*
493 * Tries to open block device by device number. Use it ONLY if you 791 * Tries to open block device by device number. Use it ONLY if you
494 * really do not have anything better - i.e. when you are behind a 792 * really do not have anything better - i.e. when you are behind a
@@ -789,7 +1087,7 @@ struct address_space_operations def_blk_aops = {
789 .direct_IO = blkdev_direct_IO, 1087 .direct_IO = blkdev_direct_IO,
790}; 1088};
791 1089
792struct file_operations def_blk_fops = { 1090const struct file_operations def_blk_fops = {
793 .open = blkdev_open, 1091 .open = blkdev_open,
794 .release = blkdev_close, 1092 .release = blkdev_close,
795 .llseek = block_llseek, 1093 .llseek = block_llseek,
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b3ab5281920..23f1f3a68077 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
426 if (all_mapped) { 426 if (all_mapped) {
427 printk("__find_get_block_slow() failed. " 427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n", 428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block, (unsigned long long)bh->b_blocknr); 429 (unsigned long long)block,
430 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size); 430 (unsigned long long)bh->b_blocknr);
431 printk("b_state=0x%08lx, b_size=%zu\n",
432 bh->b_state, bh->b_size);
431 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); 433 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
432 } 434 }
433out_unlock: 435out_unlock:
@@ -491,7 +493,7 @@ static void free_more_memory(void)
491 wakeup_pdflush(1024); 493 wakeup_pdflush(1024);
492 yield(); 494 yield();
493 495
494 for_each_pgdat(pgdat) { 496 for_each_online_pgdat(pgdat) {
495 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; 497 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
496 if (*zones) 498 if (*zones)
497 try_to_free_pages(zones, GFP_NOFS); 499 try_to_free_pages(zones, GFP_NOFS);
@@ -796,8 +798,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
796 if (!mapping->assoc_mapping) { 798 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping; 799 mapping->assoc_mapping = buffer_mapping;
798 } else { 800 } else {
799 if (mapping->assoc_mapping != buffer_mapping) 801 BUG_ON(mapping->assoc_mapping != buffer_mapping);
800 BUG();
801 } 802 }
802 if (list_empty(&bh->b_assoc_buffers)) { 803 if (list_empty(&bh->b_assoc_buffers)) {
803 spin_lock(&buffer_mapping->private_lock); 804 spin_lock(&buffer_mapping->private_lock);
@@ -1114,8 +1115,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
1114 if (!page) 1115 if (!page)
1115 return NULL; 1116 return NULL;
1116 1117
1117 if (!PageLocked(page)) 1118 BUG_ON(!PageLocked(page));
1118 BUG();
1119 1119
1120 if (page_has_buffers(page)) { 1120 if (page_has_buffers(page)) {
1121 bh = page_buffers(page); 1121 bh = page_buffers(page);
@@ -1522,8 +1522,7 @@ void set_bh_page(struct buffer_head *bh,
1522 struct page *page, unsigned long offset) 1522 struct page *page, unsigned long offset)
1523{ 1523{
1524 bh->b_page = page; 1524 bh->b_page = page;
1525 if (offset >= PAGE_SIZE) 1525 BUG_ON(offset >= PAGE_SIZE);
1526 BUG();
1527 if (PageHighMem(page)) 1526 if (PageHighMem(page))
1528 /* 1527 /*
1529 * This catches illegal uses and preserves the offset: 1528 * This catches illegal uses and preserves the offset:
@@ -1593,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
1593 * point. Because the caller is about to free (and possibly reuse) those 1592 * point. Because the caller is about to free (and possibly reuse) those
1594 * blocks on-disk. 1593 * blocks on-disk.
1595 */ 1594 */
1596int block_invalidatepage(struct page *page, unsigned long offset) 1595void block_invalidatepage(struct page *page, unsigned long offset)
1597{ 1596{
1598 struct buffer_head *head, *bh, *next; 1597 struct buffer_head *head, *bh, *next;
1599 unsigned int curr_off = 0; 1598 unsigned int curr_off = 0;
1600 int ret = 1;
1601 1599
1602 BUG_ON(!PageLocked(page)); 1600 BUG_ON(!PageLocked(page));
1603 if (!page_has_buffers(page)) 1601 if (!page_has_buffers(page))
@@ -1624,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
1624 * so real IO is not possible anymore. 1622 * so real IO is not possible anymore.
1625 */ 1623 */
1626 if (offset == 0) 1624 if (offset == 0)
1627 ret = try_to_release_page(page, 0); 1625 try_to_release_page(page, 0);
1628out: 1626out:
1629 return ret; 1627 return;
1630} 1628}
1631EXPORT_SYMBOL(block_invalidatepage); 1629EXPORT_SYMBOL(block_invalidatepage);
1632 1630
1633int do_invalidatepage(struct page *page, unsigned long offset) 1631void do_invalidatepage(struct page *page, unsigned long offset)
1634{ 1632{
1635 int (*invalidatepage)(struct page *, unsigned long); 1633 void (*invalidatepage)(struct page *, unsigned long);
1636 invalidatepage = page->mapping->a_ops->invalidatepage; 1634 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1637 if (invalidatepage == NULL) 1635 block_invalidatepage;
1638 invalidatepage = block_invalidatepage; 1636 (*invalidatepage)(page, offset);
1639 return (*invalidatepage)(page, offset);
1640} 1637}
1641 1638
1642/* 1639/*
@@ -1738,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1738 sector_t block; 1735 sector_t block;
1739 sector_t last_block; 1736 sector_t last_block;
1740 struct buffer_head *bh, *head; 1737 struct buffer_head *bh, *head;
1738 const unsigned blocksize = 1 << inode->i_blkbits;
1741 int nr_underway = 0; 1739 int nr_underway = 0;
1742 1740
1743 BUG_ON(!PageLocked(page)); 1741 BUG_ON(!PageLocked(page));
@@ -1745,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1745 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 1743 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1746 1744
1747 if (!page_has_buffers(page)) { 1745 if (!page_has_buffers(page)) {
1748 create_empty_buffers(page, 1 << inode->i_blkbits, 1746 create_empty_buffers(page, blocksize,
1749 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1747 (1 << BH_Dirty)|(1 << BH_Uptodate));
1750 } 1748 }
1751 1749
@@ -1780,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1780 clear_buffer_dirty(bh); 1778 clear_buffer_dirty(bh);
1781 set_buffer_uptodate(bh); 1779 set_buffer_uptodate(bh);
1782 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { 1780 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1781 WARN_ON(bh->b_size != blocksize);
1783 err = get_block(inode, block, bh, 1); 1782 err = get_block(inode, block, bh, 1);
1784 if (err) 1783 if (err)
1785 goto recover; 1784 goto recover;
@@ -1933,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
1933 if (buffer_new(bh)) 1932 if (buffer_new(bh))
1934 clear_buffer_new(bh); 1933 clear_buffer_new(bh);
1935 if (!buffer_mapped(bh)) { 1934 if (!buffer_mapped(bh)) {
1935 WARN_ON(bh->b_size != blocksize);
1936 err = get_block(inode, block, bh, 1); 1936 err = get_block(inode, block, bh, 1);
1937 if (err) 1937 if (err)
1938 break; 1938 break;
@@ -2088,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
2088 2088
2089 fully_mapped = 0; 2089 fully_mapped = 0;
2090 if (iblock < lblock) { 2090 if (iblock < lblock) {
2091 WARN_ON(bh->b_size != blocksize);
2091 err = get_block(inode, iblock, bh, 0); 2092 err = get_block(inode, iblock, bh, 0);
2092 if (err) 2093 if (err)
2093 SetPageError(page); 2094 SetPageError(page);
@@ -2409,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2409 create = 1; 2410 create = 1;
2410 if (block_start >= to) 2411 if (block_start >= to)
2411 create = 0; 2412 create = 0;
2413 map_bh.b_size = blocksize;
2412 ret = get_block(inode, block_in_file + block_in_page, 2414 ret = get_block(inode, block_in_file + block_in_page,
2413 &map_bh, create); 2415 &map_bh, create);
2414 if (ret) 2416 if (ret)
@@ -2669,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
2669 2671
2670 err = 0; 2672 err = 0;
2671 if (!buffer_mapped(bh)) { 2673 if (!buffer_mapped(bh)) {
2674 WARN_ON(bh->b_size != blocksize);
2672 err = get_block(inode, iblock, bh, 0); 2675 err = get_block(inode, iblock, bh, 0);
2673 if (err) 2676 if (err)
2674 goto unlock; 2677 goto unlock;
@@ -2755,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2755 struct inode *inode = mapping->host; 2758 struct inode *inode = mapping->host;
2756 tmp.b_state = 0; 2759 tmp.b_state = 0;
2757 tmp.b_blocknr = 0; 2760 tmp.b_blocknr = 0;
2761 tmp.b_size = 1 << inode->i_blkbits;
2758 get_block(inode, block, &tmp, 0); 2762 get_block(inode, block, &tmp, 0);
2759 return tmp.b_blocknr; 2763 return tmp.b_blocknr;
2760} 2764}
@@ -3007,7 +3011,7 @@ out:
3007} 3011}
3008EXPORT_SYMBOL(try_to_free_buffers); 3012EXPORT_SYMBOL(try_to_free_buffers);
3009 3013
3010int block_sync_page(struct page *page) 3014void block_sync_page(struct page *page)
3011{ 3015{
3012 struct address_space *mapping; 3016 struct address_space *mapping;
3013 3017
@@ -3015,7 +3019,6 @@ int block_sync_page(struct page *page)
3015 mapping = page_mapping(page); 3019 mapping = page_mapping(page);
3016 if (mapping) 3020 if (mapping)
3017 blk_run_backing_dev(mapping->backing_dev_info, page); 3021 blk_run_backing_dev(mapping->backing_dev_info, page);
3018 return 0;
3019} 3022}
3020 3023
3021/* 3024/*
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 8c6eb04d31e2..4e1b849f912f 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -250,7 +250,7 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
250} 250}
251 251
252int register_chrdev(unsigned int major, const char *name, 252int register_chrdev(unsigned int major, const char *name,
253 struct file_operations *fops) 253 const struct file_operations *fops)
254{ 254{
255 struct char_device_struct *cd; 255 struct char_device_struct *cd;
256 struct cdev *cdev; 256 struct cdev *cdev;
@@ -406,7 +406,7 @@ static void cdev_purge(struct cdev *cdev)
406 * is contain the open that then fills in the correct operations 406 * is contain the open that then fills in the correct operations
407 * depending on the special file... 407 * depending on the special file...
408 */ 408 */
409struct file_operations def_chr_fops = { 409const struct file_operations def_chr_fops = {
410 .open = chrdev_open, 410 .open = chrdev_open,
411}; 411};
412 412
@@ -473,7 +473,7 @@ struct cdev *cdev_alloc(void)
473 return p; 473 return p;
474} 474}
475 475
476void cdev_init(struct cdev *cdev, struct file_operations *fops) 476void cdev_init(struct cdev *cdev, const struct file_operations *fops)
477{ 477{
478 memset(cdev, 0, sizeof *cdev); 478 memset(cdev, 0, sizeof *cdev);
479 INIT_LIST_HEAD(&cdev->list); 479 INIT_LIST_HEAD(&cdev->list);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 221b3334b737..4bbc544857bc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -583,7 +583,7 @@ struct inode_operations cifs_symlink_inode_ops = {
583#endif 583#endif
584}; 584};
585 585
586struct file_operations cifs_file_ops = { 586const struct file_operations cifs_file_ops = {
587 .read = do_sync_read, 587 .read = do_sync_read,
588 .write = do_sync_write, 588 .write = do_sync_write,
589 .readv = generic_file_readv, 589 .readv = generic_file_readv,
@@ -607,7 +607,7 @@ struct file_operations cifs_file_ops = {
607#endif /* CONFIG_CIFS_EXPERIMENTAL */ 607#endif /* CONFIG_CIFS_EXPERIMENTAL */
608}; 608};
609 609
610struct file_operations cifs_file_direct_ops = { 610const struct file_operations cifs_file_direct_ops = {
611 /* no mmap, no aio, no readv - 611 /* no mmap, no aio, no readv -
612 BB reevaluate whether they can be done with directio, no cache */ 612 BB reevaluate whether they can be done with directio, no cache */
613 .read = cifs_user_read, 613 .read = cifs_user_read,
@@ -626,7 +626,7 @@ struct file_operations cifs_file_direct_ops = {
626 .dir_notify = cifs_dir_notify, 626 .dir_notify = cifs_dir_notify,
627#endif /* CONFIG_CIFS_EXPERIMENTAL */ 627#endif /* CONFIG_CIFS_EXPERIMENTAL */
628}; 628};
629struct file_operations cifs_file_nobrl_ops = { 629const struct file_operations cifs_file_nobrl_ops = {
630 .read = do_sync_read, 630 .read = do_sync_read,
631 .write = do_sync_write, 631 .write = do_sync_write,
632 .readv = generic_file_readv, 632 .readv = generic_file_readv,
@@ -649,7 +649,7 @@ struct file_operations cifs_file_nobrl_ops = {
649#endif /* CONFIG_CIFS_EXPERIMENTAL */ 649#endif /* CONFIG_CIFS_EXPERIMENTAL */
650}; 650};
651 651
652struct file_operations cifs_file_direct_nobrl_ops = { 652const struct file_operations cifs_file_direct_nobrl_ops = {
653 /* no mmap, no aio, no readv - 653 /* no mmap, no aio, no readv -
654 BB reevaluate whether they can be done with directio, no cache */ 654 BB reevaluate whether they can be done with directio, no cache */
655 .read = cifs_user_read, 655 .read = cifs_user_read,
@@ -668,7 +668,7 @@ struct file_operations cifs_file_direct_nobrl_ops = {
668#endif /* CONFIG_CIFS_EXPERIMENTAL */ 668#endif /* CONFIG_CIFS_EXPERIMENTAL */
669}; 669};
670 670
671struct file_operations cifs_dir_ops = { 671const struct file_operations cifs_dir_ops = {
672 .readdir = cifs_readdir, 672 .readdir = cifs_readdir,
673 .release = cifs_closedir, 673 .release = cifs_closedir,
674 .read = generic_read_dir, 674 .read = generic_read_dir,
@@ -738,10 +738,8 @@ cifs_init_request_bufs(void)
738 cERROR(1,("cifs_min_rcv set to maximum (64)")); 738 cERROR(1,("cifs_min_rcv set to maximum (64)"));
739 } 739 }
740 740
741 cifs_req_poolp = mempool_create(cifs_min_rcv, 741 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
742 mempool_alloc_slab, 742 cifs_req_cachep);
743 mempool_free_slab,
744 cifs_req_cachep);
745 743
746 if(cifs_req_poolp == NULL) { 744 if(cifs_req_poolp == NULL) {
747 kmem_cache_destroy(cifs_req_cachep); 745 kmem_cache_destroy(cifs_req_cachep);
@@ -771,10 +769,8 @@ cifs_init_request_bufs(void)
771 cFYI(1,("cifs_min_small set to maximum (256)")); 769 cFYI(1,("cifs_min_small set to maximum (256)"));
772 } 770 }
773 771
774 cifs_sm_req_poolp = mempool_create(cifs_min_small, 772 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
775 mempool_alloc_slab, 773 cifs_sm_req_cachep);
776 mempool_free_slab,
777 cifs_sm_req_cachep);
778 774
779 if(cifs_sm_req_poolp == NULL) { 775 if(cifs_sm_req_poolp == NULL) {
780 mempool_destroy(cifs_req_poolp); 776 mempool_destroy(cifs_req_poolp);
@@ -808,10 +804,8 @@ cifs_init_mids(void)
808 if (cifs_mid_cachep == NULL) 804 if (cifs_mid_cachep == NULL)
809 return -ENOMEM; 805 return -ENOMEM;
810 806
811 cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */, 807 /* 3 is a reasonable minimum number of simultaneous operations */
812 mempool_alloc_slab, 808 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
813 mempool_free_slab,
814 cifs_mid_cachep);
815 if(cifs_mid_poolp == NULL) { 809 if(cifs_mid_poolp == NULL) {
816 kmem_cache_destroy(cifs_mid_cachep); 810 kmem_cache_destroy(cifs_mid_cachep);
817 return -ENOMEM; 811 return -ENOMEM;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 821a8eb22559..74f405ae4da3 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,10 +61,10 @@ extern struct inode_operations cifs_file_inode_ops;
61extern struct inode_operations cifs_symlink_inode_ops; 61extern struct inode_operations cifs_symlink_inode_ops;
62 62
63/* Functions related to files and directories */ 63/* Functions related to files and directories */
64extern struct file_operations cifs_file_ops; 64extern const struct file_operations cifs_file_ops;
65extern struct file_operations cifs_file_direct_ops; /* if directio mount */ 65extern const struct file_operations cifs_file_direct_ops; /* if directio mount */
66extern struct file_operations cifs_file_nobrl_ops; 66extern const struct file_operations cifs_file_nobrl_ops;
67extern struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */ 67extern const struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */
68extern int cifs_open(struct inode *inode, struct file *file); 68extern int cifs_open(struct inode *inode, struct file *file);
69extern int cifs_close(struct inode *inode, struct file *file); 69extern int cifs_close(struct inode *inode, struct file *file);
70extern int cifs_closedir(struct inode *inode, struct file *file); 70extern int cifs_closedir(struct inode *inode, struct file *file);
@@ -76,7 +76,7 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
76extern int cifs_fsync(struct file *, struct dentry *, int); 76extern int cifs_fsync(struct file *, struct dentry *, int);
77extern int cifs_flush(struct file *); 77extern int cifs_flush(struct file *);
78extern int cifs_file_mmap(struct file * , struct vm_area_struct *); 78extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
79extern struct file_operations cifs_dir_ops; 79extern const struct file_operations cifs_dir_ops;
80extern int cifs_dir_open(struct inode *inode, struct file *file); 80extern int cifs_dir_open(struct inode *inode, struct file *file);
81extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); 81extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
82extern int cifs_dir_notify(struct file *, unsigned long arg); 82extern int cifs_dir_notify(struct file *, unsigned long arg);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 165d67426381..fb49aef1f2ec 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1339 return rc; 1339 return rc;
1340} 1340}
1341 1341
1342/* static int cifs_sync_page(struct page *page) 1342/* static void cifs_sync_page(struct page *page)
1343{ 1343{
1344 struct address_space *mapping; 1344 struct address_space *mapping;
1345 struct inode *inode; 1345 struct inode *inode;
@@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1353 return 0; 1353 return 0;
1354 inode = mapping->host; 1354 inode = mapping->host;
1355 if (!inode) 1355 if (!inode)
1356 return 0; */ 1356 return; */
1357 1357
1358/* fill in rpages then 1358/* fill in rpages then
1359 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ 1359 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1360 1360
1361/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); 1361/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1362 1362
1363#if 0
1363 if (rc < 0) 1364 if (rc < 0)
1364 return rc; 1365 return rc;
1365 return 0; 1366 return 0;
1367#endif
1366} */ 1368} */
1367 1369
1368/* 1370/*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ff93a9f81d1c..598eec9778f6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
163 163
164 if (num_of_bytes < end_of_file) 164 if (num_of_bytes < end_of_file)
165 cFYI(1, ("allocation size less than end of file")); 165 cFYI(1, ("allocation size less than end of file"));
166 cFYI(1, 166 cFYI(1, ("Size %ld and blocks %llu",
167 ("Size %ld and blocks %ld", 167 (unsigned long) inode->i_size,
168 (unsigned long) inode->i_size, inode->i_blocks)); 168 (unsigned long long)inode->i_blocks));
169 if (S_ISREG(inode->i_mode)) { 169 if (S_ISREG(inode->i_mode)) {
170 cFYI(1, ("File inode")); 170 cFYI(1, ("File inode"));
171 inode->i_op = &cifs_file_inode_ops; 171 inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index edb3b6eb34bc..488bd0d81dcf 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode,
197 197
198 if (allocation_size < end_of_file) 198 if (allocation_size < end_of_file)
199 cFYI(1, ("May be sparse file, allocation less than file size")); 199 cFYI(1, ("May be sparse file, allocation less than file size"));
200 cFYI(1, 200 cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
201 ("File Size %ld and blocks %ld and blocksize %ld", 201 (unsigned long)tmp_inode->i_size,
202 (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks, 202 (unsigned long long)tmp_inode->i_blocks,
203 tmp_inode->i_blksize)); 203 tmp_inode->i_blksize));
204 if (S_ISREG(tmp_inode->i_mode)) { 204 if (S_ISREG(tmp_inode->i_mode)) {
205 cFYI(1, ("File inode")); 205 cFYI(1, ("File inode"));
206 tmp_inode->i_op = &cifs_file_inode_ops; 206 tmp_inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 54f76de8a686..71f2ea632e53 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -82,7 +82,7 @@ struct inode_operations coda_dir_inode_operations =
82 .setattr = coda_setattr, 82 .setattr = coda_setattr,
83}; 83};
84 84
85struct file_operations coda_dir_operations = { 85const struct file_operations coda_dir_operations = {
86 .llseek = generic_file_llseek, 86 .llseek = generic_file_llseek,
87 .read = generic_read_dir, 87 .read = generic_read_dir,
88 .readdir = coda_readdir, 88 .readdir = coda_readdir,
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 146a991d6eb5..7c2642431fa5 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -288,7 +288,7 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
288 return err; 288 return err;
289} 289}
290 290
291struct file_operations coda_file_operations = { 291const struct file_operations coda_file_operations = {
292 .llseek = generic_file_llseek, 292 .llseek = generic_file_llseek,
293 .read = coda_file_read, 293 .read = coda_file_read,
294 .write = coda_file_write, 294 .write = coda_file_write,
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 127714936c66..214822be87bd 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -36,7 +36,7 @@ struct inode_operations coda_ioctl_inode_operations =
36 .setattr = coda_setattr, 36 .setattr = coda_setattr,
37}; 37};
38 38
39struct file_operations coda_ioctl_operations = { 39const struct file_operations coda_ioctl_operations = {
40 .owner = THIS_MODULE, 40 .owner = THIS_MODULE,
41 .ioctl = coda_pioctl, 41 .ioctl = coda_pioctl,
42}; 42};
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 98c74fe2e139..6c6771db36da 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -342,7 +342,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
342} 342}
343 343
344 344
345static struct file_operations coda_psdev_fops = { 345static const struct file_operations coda_psdev_fops = {
346 .owner = THIS_MODULE, 346 .owner = THIS_MODULE,
347 .read = coda_psdev_read, 347 .read = coda_psdev_read,
348 .write = coda_psdev_write, 348 .write = coda_psdev_write,
diff --git a/fs/compat.c b/fs/compat.c
index ef5a0771592d..7f8e26ea427c 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1639,15 +1639,6 @@ void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1639 * This is a virtual copy of sys_select from fs/select.c and probably 1639 * This is a virtual copy of sys_select from fs/select.c and probably
1640 * should be compared to it from time to time 1640 * should be compared to it from time to time
1641 */ 1641 */
1642static void *select_bits_alloc(int size)
1643{
1644 return kmalloc(6 * size, GFP_KERNEL);
1645}
1646
1647static void select_bits_free(void *bits, int size)
1648{
1649 kfree(bits);
1650}
1651 1642
1652/* 1643/*
1653 * We can actually return ERESTARTSYS instead of EINTR, but I'd 1644 * We can actually return ERESTARTSYS instead of EINTR, but I'd
@@ -1686,7 +1677,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1686 */ 1677 */
1687 ret = -ENOMEM; 1678 ret = -ENOMEM;
1688 size = FDS_BYTES(n); 1679 size = FDS_BYTES(n);
1689 bits = select_bits_alloc(size); 1680 bits = kmalloc(6 * size, GFP_KERNEL);
1690 if (!bits) 1681 if (!bits)
1691 goto out_nofds; 1682 goto out_nofds;
1692 fds.in = (unsigned long *) bits; 1683 fds.in = (unsigned long *) bits;
@@ -1720,7 +1711,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1720 compat_set_fd_set(n, exp, fds.res_ex); 1711 compat_set_fd_set(n, exp, fds.res_ex);
1721 1712
1722out: 1713out:
1723 select_bits_free(bits, size); 1714 kfree(bits);
1724out_nofds: 1715out_nofds:
1725 return ret; 1716 return ret;
1726} 1717}
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index f70e46951b37..3f4ff7a242b9 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -72,9 +72,9 @@ extern void configfs_release_fs(void);
72 72
73extern struct rw_semaphore configfs_rename_sem; 73extern struct rw_semaphore configfs_rename_sem;
74extern struct super_block * configfs_sb; 74extern struct super_block * configfs_sb;
75extern struct file_operations configfs_dir_operations; 75extern const struct file_operations configfs_dir_operations;
76extern struct file_operations configfs_file_operations; 76extern const struct file_operations configfs_file_operations;
77extern struct file_operations bin_fops; 77extern const struct file_operations bin_fops;
78extern struct inode_operations configfs_dir_inode_operations; 78extern struct inode_operations configfs_dir_inode_operations;
79extern struct inode_operations configfs_symlink_inode_operations; 79extern struct inode_operations configfs_symlink_inode_operations;
80 80
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index ca60e3abef45..8ed9b06a9828 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1027,7 +1027,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
1027 return offset; 1027 return offset;
1028} 1028}
1029 1029
1030struct file_operations configfs_dir_operations = { 1030const struct file_operations configfs_dir_operations = {
1031 .open = configfs_dir_open, 1031 .open = configfs_dir_open,
1032 .release = configfs_dir_close, 1032 .release = configfs_dir_close,
1033 .llseek = configfs_dir_lseek, 1033 .llseek = configfs_dir_lseek,
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 3921920d8716..f499803743e0 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -322,7 +322,7 @@ static int configfs_release(struct inode * inode, struct file * filp)
322 return 0; 322 return 0;
323} 323}
324 324
325struct file_operations configfs_file_operations = { 325const struct file_operations configfs_file_operations = {
326 .read = configfs_read_file, 326 .read = configfs_read_file,
327 .write = configfs_write_file, 327 .write = configfs_write_file,
328 .llseek = generic_file_llseek, 328 .llseek = generic_file_llseek,
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 8ad52f5bf255..9efcc3a164e8 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -22,16 +22,17 @@
22#include <linux/cramfs_fs_sb.h> 22#include <linux/cramfs_fs_sb.h>
23#include <linux/buffer_head.h> 23#include <linux/buffer_head.h>
24#include <linux/vfs.h> 24#include <linux/vfs.h>
25#include <linux/mutex.h>
25#include <asm/semaphore.h> 26#include <asm/semaphore.h>
26 27
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28 29
29static struct super_operations cramfs_ops; 30static struct super_operations cramfs_ops;
30static struct inode_operations cramfs_dir_inode_operations; 31static struct inode_operations cramfs_dir_inode_operations;
31static struct file_operations cramfs_directory_operations; 32static const struct file_operations cramfs_directory_operations;
32static struct address_space_operations cramfs_aops; 33static struct address_space_operations cramfs_aops;
33 34
34static DECLARE_MUTEX(read_mutex); 35static DEFINE_MUTEX(read_mutex);
35 36
36 37
37/* These two macros may change in future, to provide better st_ino 38/* These two macros may change in future, to provide better st_ino
@@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
250 memset(sbi, 0, sizeof(struct cramfs_sb_info)); 251 memset(sbi, 0, sizeof(struct cramfs_sb_info));
251 252
252 /* Invalidate the read buffers on mount: think disk change.. */ 253 /* Invalidate the read buffers on mount: think disk change.. */
253 down(&read_mutex); 254 mutex_lock(&read_mutex);
254 for (i = 0; i < READ_BUFFERS; i++) 255 for (i = 0; i < READ_BUFFERS; i++)
255 buffer_blocknr[i] = -1; 256 buffer_blocknr[i] = -1;
256 257
257 /* Read the first block and get the superblock from it */ 258 /* Read the first block and get the superblock from it */
258 memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super)); 259 memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
259 up(&read_mutex); 260 mutex_unlock(&read_mutex);
260 261
261 /* Do sanity checks on the superblock */ 262 /* Do sanity checks on the superblock */
262 if (super.magic != CRAMFS_MAGIC) { 263 if (super.magic != CRAMFS_MAGIC) {
263 /* check at 512 byte offset */ 264 /* check at 512 byte offset */
264 down(&read_mutex); 265 mutex_lock(&read_mutex);
265 memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super)); 266 memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
266 up(&read_mutex); 267 mutex_unlock(&read_mutex);
267 if (super.magic != CRAMFS_MAGIC) { 268 if (super.magic != CRAMFS_MAGIC) {
268 if (!silent) 269 if (!silent)
269 printk(KERN_ERR "cramfs: wrong magic\n"); 270 printk(KERN_ERR "cramfs: wrong magic\n");
@@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
366 mode_t mode; 367 mode_t mode;
367 int namelen, error; 368 int namelen, error;
368 369
369 down(&read_mutex); 370 mutex_lock(&read_mutex);
370 de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256); 371 de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
371 name = (char *)(de+1); 372 name = (char *)(de+1);
372 373
@@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
379 memcpy(buf, name, namelen); 380 memcpy(buf, name, namelen);
380 ino = CRAMINO(de); 381 ino = CRAMINO(de);
381 mode = de->mode; 382 mode = de->mode;
382 up(&read_mutex); 383 mutex_unlock(&read_mutex);
383 nextoffset = offset + sizeof(*de) + namelen; 384 nextoffset = offset + sizeof(*de) + namelen;
384 for (;;) { 385 for (;;) {
385 if (!namelen) { 386 if (!namelen) {
@@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
410 unsigned int offset = 0; 411 unsigned int offset = 0;
411 int sorted; 412 int sorted;
412 413
413 down(&read_mutex); 414 mutex_lock(&read_mutex);
414 sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS; 415 sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
415 while (offset < dir->i_size) { 416 while (offset < dir->i_size) {
416 struct cramfs_inode *de; 417 struct cramfs_inode *de;
@@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
433 434
434 for (;;) { 435 for (;;) {
435 if (!namelen) { 436 if (!namelen) {
436 up(&read_mutex); 437 mutex_unlock(&read_mutex);
437 return ERR_PTR(-EIO); 438 return ERR_PTR(-EIO);
438 } 439 }
439 if (name[namelen-1]) 440 if (name[namelen-1])
@@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
447 continue; 448 continue;
448 if (!retval) { 449 if (!retval) {
449 struct cramfs_inode entry = *de; 450 struct cramfs_inode entry = *de;
450 up(&read_mutex); 451 mutex_unlock(&read_mutex);
451 d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); 452 d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
452 return NULL; 453 return NULL;
453 } 454 }
@@ -455,7 +456,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
455 if (sorted) 456 if (sorted)
456 break; 457 break;
457 } 458 }
458 up(&read_mutex); 459 mutex_unlock(&read_mutex);
459 d_add(dentry, NULL); 460 d_add(dentry, NULL);
460 return NULL; 461 return NULL;
461} 462}
@@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page)
474 u32 start_offset, compr_len; 475 u32 start_offset, compr_len;
475 476
476 start_offset = OFFSET(inode) + maxblock*4; 477 start_offset = OFFSET(inode) + maxblock*4;
477 down(&read_mutex); 478 mutex_lock(&read_mutex);
478 if (page->index) 479 if (page->index)
479 start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4); 480 start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4);
480 compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset); 481 compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
481 up(&read_mutex); 482 mutex_unlock(&read_mutex);
482 pgdata = kmap(page); 483 pgdata = kmap(page);
483 if (compr_len == 0) 484 if (compr_len == 0)
484 ; /* hole */ 485 ; /* hole */
485 else { 486 else {
486 down(&read_mutex); 487 mutex_lock(&read_mutex);
487 bytes_filled = cramfs_uncompress_block(pgdata, 488 bytes_filled = cramfs_uncompress_block(pgdata,
488 PAGE_CACHE_SIZE, 489 PAGE_CACHE_SIZE,
489 cramfs_read(sb, start_offset, compr_len), 490 cramfs_read(sb, start_offset, compr_len),
490 compr_len); 491 compr_len);
491 up(&read_mutex); 492 mutex_unlock(&read_mutex);
492 } 493 }
493 } else 494 } else
494 pgdata = kmap(page); 495 pgdata = kmap(page);
@@ -511,7 +512,7 @@ static struct address_space_operations cramfs_aops = {
511/* 512/*
512 * A directory can only readdir 513 * A directory can only readdir
513 */ 514 */
514static struct file_operations cramfs_directory_operations = { 515static const struct file_operations cramfs_directory_operations = {
515 .llseek = generic_file_llseek, 516 .llseek = generic_file_llseek,
516 .read = generic_read_dir, 517 .read = generic_read_dir,
517 .readdir = cramfs_readdir, 518 .readdir = cramfs_readdir,
diff --git a/fs/dcache.c b/fs/dcache.c
index 939584648504..19458d399502 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -34,9 +34,8 @@
34#include <linux/swap.h> 34#include <linux/swap.h>
35#include <linux/bootmem.h> 35#include <linux/bootmem.h>
36 36
37/* #define DCACHE_DEBUG 1 */
38 37
39int sysctl_vfs_cache_pressure = 100; 38int sysctl_vfs_cache_pressure __read_mostly = 100;
40EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 39EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
41 40
42 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); 41 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -44,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
44 43
45EXPORT_SYMBOL(dcache_lock); 44EXPORT_SYMBOL(dcache_lock);
46 45
47static kmem_cache_t *dentry_cache; 46static kmem_cache_t *dentry_cache __read_mostly;
48 47
49#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) 48#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
50 49
@@ -59,9 +58,9 @@ static kmem_cache_t *dentry_cache;
59#define D_HASHBITS d_hash_shift 58#define D_HASHBITS d_hash_shift
60#define D_HASHMASK d_hash_mask 59#define D_HASHMASK d_hash_mask
61 60
62static unsigned int d_hash_mask; 61static unsigned int d_hash_mask __read_mostly;
63static unsigned int d_hash_shift; 62static unsigned int d_hash_shift __read_mostly;
64static struct hlist_head *dentry_hashtable; 63static struct hlist_head *dentry_hashtable __read_mostly;
65static LIST_HEAD(dentry_unused); 64static LIST_HEAD(dentry_unused);
66 65
67/* Statistics gathering. */ 66/* Statistics gathering. */
@@ -603,10 +602,6 @@ resume:
603 */ 602 */
604 if (!list_empty(&dentry->d_subdirs)) { 603 if (!list_empty(&dentry->d_subdirs)) {
605 this_parent = dentry; 604 this_parent = dentry;
606#ifdef DCACHE_DEBUG
607printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
608dentry->d_parent->d_name.name, dentry->d_name.name, found);
609#endif
610 goto repeat; 605 goto repeat;
611 } 606 }
612 } 607 }
@@ -616,10 +611,6 @@ dentry->d_parent->d_name.name, dentry->d_name.name, found);
616 if (this_parent != parent) { 611 if (this_parent != parent) {
617 next = this_parent->d_u.d_child.next; 612 next = this_parent->d_u.d_child.next;
618 this_parent = this_parent->d_parent; 613 this_parent = this_parent->d_parent;
619#ifdef DCACHE_DEBUG
620printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
621this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
622#endif
623 goto resume; 614 goto resume;
624 } 615 }
625out: 616out:
@@ -798,7 +789,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
798 789
799void d_instantiate(struct dentry *entry, struct inode * inode) 790void d_instantiate(struct dentry *entry, struct inode * inode)
800{ 791{
801 if (!list_empty(&entry->d_alias)) BUG(); 792 BUG_ON(!list_empty(&entry->d_alias));
802 spin_lock(&dcache_lock); 793 spin_lock(&dcache_lock);
803 if (inode) 794 if (inode)
804 list_add(&entry->d_alias, &inode->i_dentry); 795 list_add(&entry->d_alias, &inode->i_dentry);
@@ -1719,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages)
1719} 1710}
1720 1711
1721/* SLAB cache for __getname() consumers */ 1712/* SLAB cache for __getname() consumers */
1722kmem_cache_t *names_cachep; 1713kmem_cache_t *names_cachep __read_mostly;
1723 1714
1724/* SLAB cache for file structures */ 1715/* SLAB cache for file structures */
1725kmem_cache_t *filp_cachep; 1716kmem_cache_t *filp_cachep __read_mostly;
1726 1717
1727EXPORT_SYMBOL(d_genocide); 1718EXPORT_SYMBOL(d_genocide);
1728 1719
diff --git a/fs/dcookies.c b/fs/dcookies.c
index f8274a8f83bd..8749339bf4f6 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/dcookies.h> 25#include <linux/dcookies.h>
26#include <linux/mutex.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27 28
28/* The dcookies are allocated from a kmem_cache and 29/* The dcookies are allocated from a kmem_cache and
@@ -36,10 +37,10 @@ struct dcookie_struct {
36}; 37};
37 38
38static LIST_HEAD(dcookie_users); 39static LIST_HEAD(dcookie_users);
39static DECLARE_MUTEX(dcookie_sem); 40static DEFINE_MUTEX(dcookie_mutex);
40static kmem_cache_t * dcookie_cache; 41static kmem_cache_t *dcookie_cache __read_mostly;
41static struct list_head * dcookie_hashtable; 42static struct list_head *dcookie_hashtable __read_mostly;
42static size_t hash_size; 43static size_t hash_size __read_mostly;
43 44
44static inline int is_live(void) 45static inline int is_live(void)
45{ 46{
@@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
114 int err = 0; 115 int err = 0;
115 struct dcookie_struct * dcs; 116 struct dcookie_struct * dcs;
116 117
117 down(&dcookie_sem); 118 mutex_lock(&dcookie_mutex);
118 119
119 if (!is_live()) { 120 if (!is_live()) {
120 err = -EINVAL; 121 err = -EINVAL;
@@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
134 *cookie = dcookie_value(dcs); 135 *cookie = dcookie_value(dcs);
135 136
136out: 137out:
137 up(&dcookie_sem); 138 mutex_unlock(&dcookie_mutex);
138 return err; 139 return err;
139} 140}
140 141
@@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
157 if (!capable(CAP_SYS_ADMIN)) 158 if (!capable(CAP_SYS_ADMIN))
158 return -EPERM; 159 return -EPERM;
159 160
160 down(&dcookie_sem); 161 mutex_lock(&dcookie_mutex);
161 162
162 if (!is_live()) { 163 if (!is_live()) {
163 err = -EINVAL; 164 err = -EINVAL;
@@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
192out_free: 193out_free:
193 kfree(kbuf); 194 kfree(kbuf);
194out: 195out:
195 up(&dcookie_sem); 196 mutex_unlock(&dcookie_mutex);
196 return err; 197 return err;
197} 198}
198 199
@@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void)
290{ 291{
291 struct dcookie_user * user; 292 struct dcookie_user * user;
292 293
293 down(&dcookie_sem); 294 mutex_lock(&dcookie_mutex);
294 295
295 user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL); 296 user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
296 if (!user) 297 if (!user)
@@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void)
302 list_add(&user->next, &dcookie_users); 303 list_add(&user->next, &dcookie_users);
303 304
304out: 305out:
305 up(&dcookie_sem); 306 mutex_unlock(&dcookie_mutex);
306 return user; 307 return user;
307out_free: 308out_free:
308 kfree(user); 309 kfree(user);
@@ -313,7 +314,7 @@ out_free:
313 314
314void dcookie_unregister(struct dcookie_user * user) 315void dcookie_unregister(struct dcookie_user * user)
315{ 316{
316 down(&dcookie_sem); 317 mutex_lock(&dcookie_mutex);
317 318
318 list_del(&user->next); 319 list_del(&user->next);
319 kfree(user); 320 kfree(user);
@@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user)
321 if (!is_live()) 322 if (!is_live())
322 dcookie_exit(); 323 dcookie_exit();
323 324
324 up(&dcookie_sem); 325 mutex_unlock(&dcookie_mutex);
325} 326}
326 327
327EXPORT_SYMBOL_GPL(dcookie_register); 328EXPORT_SYMBOL_GPL(dcookie_register);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 40c4fc973fad..66a505422e5c 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -39,7 +39,7 @@ static int default_open(struct inode *inode, struct file *file)
39 return 0; 39 return 0;
40} 40}
41 41
42struct file_operations debugfs_file_operations = { 42const struct file_operations debugfs_file_operations = {
43 .read = default_read_file, 43 .read = default_read_file,
44 .write = default_write_file, 44 .write = default_write_file,
45 .open = default_open, 45 .open = default_open,
@@ -213,7 +213,7 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf,
213 return count; 213 return count;
214} 214}
215 215
216static struct file_operations fops_bool = { 216static const struct file_operations fops_bool = {
217 .read = read_file_bool, 217 .read = read_file_bool,
218 .write = write_file_bool, 218 .write = write_file_bool,
219 .open = default_open, 219 .open = default_open,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d4f1a2cddd47..85d166cdcae4 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -191,7 +191,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
191 */ 191 */
192struct dentry *debugfs_create_file(const char *name, mode_t mode, 192struct dentry *debugfs_create_file(const char *name, mode_t mode,
193 struct dentry *parent, void *data, 193 struct dentry *parent, void *data,
194 struct file_operations *fops) 194 const struct file_operations *fops)
195{ 195{
196 struct dentry *dentry = NULL; 196 struct dentry *dentry = NULL;
197 int error; 197 int error;
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index b621521e09d4..52f5059c4f31 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -856,14 +856,14 @@ static int devfsd_close(struct inode *inode, struct file *file);
856#ifdef CONFIG_DEVFS_DEBUG 856#ifdef CONFIG_DEVFS_DEBUG
857static ssize_t stat_read(struct file *file, char __user *buf, size_t len, 857static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
858 loff_t * ppos); 858 loff_t * ppos);
859static struct file_operations stat_fops = { 859static const struct file_operations stat_fops = {
860 .open = nonseekable_open, 860 .open = nonseekable_open,
861 .read = stat_read, 861 .read = stat_read,
862}; 862};
863#endif 863#endif
864 864
865/* Devfs daemon file operations */ 865/* Devfs daemon file operations */
866static struct file_operations devfsd_fops = { 866static const struct file_operations devfsd_fops = {
867 .open = nonseekable_open, 867 .open = nonseekable_open,
868 .read = devfsd_read, 868 .read = devfsd_read,
869 .ioctl = devfsd_ioctl, 869 .ioctl = devfsd_ioctl,
@@ -1842,8 +1842,8 @@ static int try_modload(struct devfs_entry *parent, struct fs_info *fs_info,
1842 1842
1843static struct inode_operations devfs_iops; 1843static struct inode_operations devfs_iops;
1844static struct inode_operations devfs_dir_iops; 1844static struct inode_operations devfs_dir_iops;
1845static struct file_operations devfs_fops; 1845static const struct file_operations devfs_fops;
1846static struct file_operations devfs_dir_fops; 1846static const struct file_operations devfs_dir_fops;
1847static struct inode_operations devfs_symlink_iops; 1847static struct inode_operations devfs_symlink_iops;
1848 1848
1849static int devfs_notify_change(struct dentry *dentry, struct iattr *iattr) 1849static int devfs_notify_change(struct dentry *dentry, struct iattr *iattr)
@@ -2061,11 +2061,11 @@ static int devfs_open(struct inode *inode, struct file *file)
2061 return err; 2061 return err;
2062} /* End Function devfs_open */ 2062} /* End Function devfs_open */
2063 2063
2064static struct file_operations devfs_fops = { 2064static const struct file_operations devfs_fops = {
2065 .open = devfs_open, 2065 .open = devfs_open,
2066}; 2066};
2067 2067
2068static struct file_operations devfs_dir_fops = { 2068static const struct file_operations devfs_dir_fops = {
2069 .read = generic_read_dir, 2069 .read = generic_read_dir,
2070 .readdir = devfs_readdir, 2070 .readdir = devfs_readdir,
2071}; 2071};
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 235ed8d1f11e..9d1d2aa73e42 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -86,12 +86,12 @@ struct dio {
86 unsigned first_block_in_page; /* doesn't change, Used only once */ 86 unsigned first_block_in_page; /* doesn't change, Used only once */
87 int boundary; /* prev block is at a boundary */ 87 int boundary; /* prev block is at a boundary */
88 int reap_counter; /* rate limit reaping */ 88 int reap_counter; /* rate limit reaping */
89 get_blocks_t *get_blocks; /* block mapping function */ 89 get_block_t *get_block; /* block mapping function */
90 dio_iodone_t *end_io; /* IO completion function */ 90 dio_iodone_t *end_io; /* IO completion function */
91 sector_t final_block_in_bio; /* current final block in bio + 1 */ 91 sector_t final_block_in_bio; /* current final block in bio + 1 */
92 sector_t next_block_for_io; /* next block to be put under IO, 92 sector_t next_block_for_io; /* next block to be put under IO,
93 in dio_blocks units */ 93 in dio_blocks units */
94 struct buffer_head map_bh; /* last get_blocks() result */ 94 struct buffer_head map_bh; /* last get_block() result */
95 95
96 /* 96 /*
97 * Deferred addition of a page to the dio. These variables are 97 * Deferred addition of a page to the dio. These variables are
@@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio)
211 211
212/* 212/*
213 * Called when all DIO BIO I/O has been completed - let the filesystem 213 * Called when all DIO BIO I/O has been completed - let the filesystem
214 * know, if it registered an interest earlier via get_blocks. Pass the 214 * know, if it registered an interest earlier via get_block. Pass the
215 * private field of the map buffer_head so that filesystems can use it 215 * private field of the map buffer_head so that filesystems can use it
216 * to hold additional state between get_blocks calls and dio_complete. 216 * to hold additional state between get_block calls and dio_complete.
217 */ 217 */
218static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) 218static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
219{ 219{
@@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio)
493 * The fs is allowed to map lots of blocks at once. If it wants to do that, 493 * The fs is allowed to map lots of blocks at once. If it wants to do that,
494 * it uses the passed inode-relative block number as the file offset, as usual. 494 * it uses the passed inode-relative block number as the file offset, as usual.
495 * 495 *
496 * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io 496 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
497 * has remaining to do. The fs should not map more than this number of blocks. 497 * has remaining to do. The fs should not map more than this number of blocks.
498 * 498 *
499 * If the fs has mapped a lot of blocks, it should populate bh->b_size to 499 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
@@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio)
506 * In the case of filesystem holes: the fs may return an arbitrarily-large 506 * In the case of filesystem holes: the fs may return an arbitrarily-large
507 * hole by returning an appropriate value in b_size and by clearing 507 * hole by returning an appropriate value in b_size and by clearing
508 * buffer_mapped(). However the direct-io code will only process holes one 508 * buffer_mapped(). However the direct-io code will only process holes one
509 * block at a time - it will repeatedly call get_blocks() as it walks the hole. 509 * block at a time - it will repeatedly call get_block() as it walks the hole.
510 */ 510 */
511static int get_more_blocks(struct dio *dio) 511static int get_more_blocks(struct dio *dio)
512{ 512{
@@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio)
548 * at a higher level for inside-i_size block-instantiating 548 * at a higher level for inside-i_size block-instantiating
549 * writes. 549 * writes.
550 */ 550 */
551 ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count, 551 map_bh->b_size = fs_count << dio->blkbits;
552 ret = (*dio->get_block)(dio->inode, fs_startblk,
552 map_bh, create); 553 map_bh, create);
553 } 554 }
554 return ret; 555 return ret;
@@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end)
783 * happily perform page-sized but 512-byte aligned IOs. It is important that 784 * happily perform page-sized but 512-byte aligned IOs. It is important that
784 * blockdev IO be able to have fine alignment and large sizes. 785 * blockdev IO be able to have fine alignment and large sizes.
785 * 786 *
786 * So what we do is to permit the ->get_blocks function to populate bh.b_size 787 * So what we do is to permit the ->get_block function to populate bh.b_size
787 * with the size of IO which is permitted at this offset and this i_blkbits. 788 * with the size of IO which is permitted at this offset and this i_blkbits.
788 * 789 *
789 * For best results, the blockdev should be set up with 512-byte i_blkbits and 790 * For best results, the blockdev should be set up with 512-byte i_blkbits and
790 * it should set b_size to PAGE_SIZE or more inside get_blocks(). This gives 791 * it should set b_size to PAGE_SIZE or more inside get_block(). This gives
791 * fine alignment but still allows this function to work in PAGE_SIZE units. 792 * fine alignment but still allows this function to work in PAGE_SIZE units.
792 */ 793 */
793static int do_direct_IO(struct dio *dio) 794static int do_direct_IO(struct dio *dio)
@@ -947,7 +948,7 @@ out:
947static ssize_t 948static ssize_t
948direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 949direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
949 const struct iovec *iov, loff_t offset, unsigned long nr_segs, 950 const struct iovec *iov, loff_t offset, unsigned long nr_segs,
950 unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io, 951 unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
951 struct dio *dio) 952 struct dio *dio)
952{ 953{
953 unsigned long user_addr; 954 unsigned long user_addr;
@@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
969 970
970 dio->boundary = 0; 971 dio->boundary = 0;
971 dio->reap_counter = 0; 972 dio->reap_counter = 0;
972 dio->get_blocks = get_blocks; 973 dio->get_block = get_block;
973 dio->end_io = end_io; 974 dio->end_io = end_io;
974 dio->map_bh.b_private = NULL; 975 dio->map_bh.b_private = NULL;
975 dio->final_block_in_bio = -1; 976 dio->final_block_in_bio = -1;
@@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1177ssize_t 1178ssize_t
1178__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 1179__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1179 struct block_device *bdev, const struct iovec *iov, loff_t offset, 1180 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1180 unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, 1181 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1181 int dio_lock_type) 1182 int dio_lock_type)
1182{ 1183{
1183 int seg; 1184 int seg;
@@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1273 (end > i_size_read(inode))); 1274 (end > i_size_read(inode)));
1274 1275
1275 retval = direct_io_worker(rw, iocb, inode, iov, offset, 1276 retval = direct_io_worker(rw, iocb, inode, iov, offset,
1276 nr_segs, blkbits, get_blocks, end_io, dio); 1277 nr_segs, blkbits, get_block, end_io, dio);
1277 1278
1278 if (rw == READ && dio_lock_type == DIO_LOCKING) 1279 if (rw == READ && dio_lock_type == DIO_LOCKING)
1279 release_i_mutex = 0; 1280 release_i_mutex = 0;
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d11..f932591df5a4 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24int dir_notify_enable = 1; 24int dir_notify_enable __read_mostly = 1;
25 25
26static kmem_cache_t *dn_cache; 26static kmem_cache_t *dn_cache __read_mostly;
27 27
28static void redo_inode_mask(struct inode *inode) 28static void redo_inode_mask(struct inode *inode)
29{ 29{
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
index 777c614ff360..17f5b2d3c16a 100644
--- a/fs/efs/dir.c
+++ b/fs/efs/dir.c
@@ -10,7 +10,7 @@
10 10
11static int efs_readdir(struct file *, void *, filldir_t); 11static int efs_readdir(struct file *, void *, filldir_t);
12 12
13struct file_operations efs_dir_operations = { 13const struct file_operations efs_dir_operations = {
14 .read = generic_read_dir, 14 .read = generic_read_dir,
15 .readdir = efs_readdir, 15 .readdir = efs_readdir,
16}; 16};
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03e..242fe1a66ce5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,16 +281,16 @@ static struct mutex epmutex;
281static struct poll_safewake psw; 281static struct poll_safewake psw;
282 282
283/* Slab cache used to allocate "struct epitem" */ 283/* Slab cache used to allocate "struct epitem" */
284static kmem_cache_t *epi_cache; 284static kmem_cache_t *epi_cache __read_mostly;
285 285
286/* Slab cache used to allocate "struct eppoll_entry" */ 286/* Slab cache used to allocate "struct eppoll_entry" */
287static kmem_cache_t *pwq_cache; 287static kmem_cache_t *pwq_cache __read_mostly;
288 288
289/* Virtual fs used to allocate inodes for eventpoll files */ 289/* Virtual fs used to allocate inodes for eventpoll files */
290static struct vfsmount *eventpoll_mnt; 290static struct vfsmount *eventpoll_mnt __read_mostly;
291 291
292/* File callbacks that implement the eventpoll file behaviour */ 292/* File callbacks that implement the eventpoll file behaviour */
293static struct file_operations eventpoll_fops = { 293static const struct file_operations eventpoll_fops = {
294 .release = ep_eventpoll_close, 294 .release = ep_eventpoll_close,
295 .poll = ep_eventpoll_poll 295 .poll = ep_eventpoll_poll
296}; 296};
diff --git a/fs/exec.c b/fs/exec.c
index 995cba3c62b8..c7397c46ad6d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk)
632 * synchronize with any firing (by calling del_timer_sync) 632 * synchronize with any firing (by calling del_timer_sync)
633 * before we can safely let the old group leader die. 633 * before we can safely let the old group leader die.
634 */ 634 */
635 sig->real_timer.data = current; 635 sig->tsk = current;
636 spin_unlock_irq(lock); 636 spin_unlock_irq(lock);
637 if (hrtimer_cancel(&sig->real_timer)) 637 if (hrtimer_cancel(&sig->real_timer))
638 hrtimer_restart(&sig->real_timer); 638 hrtimer_restart(&sig->real_timer);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index b3dbd716cd3a..d672aa9f4061 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -416,8 +416,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
416 416
417 lock_page(page); 417 lock_page(page);
418 err = page->mapping->a_ops->prepare_write(NULL, page, from, to); 418 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
419 if (err) 419 BUG_ON(err);
420 BUG();
421 de->inode = cpu_to_le32(inode->i_ino); 420 de->inode = cpu_to_le32(inode->i_ino);
422 ext2_set_de_type (de, inode); 421 ext2_set_de_type (de, inode);
423 err = ext2_commit_chunk(page, from, to); 422 err = ext2_commit_chunk(page, from, to);
@@ -554,8 +553,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
554 from = (char*)pde - (char*)page_address(page); 553 from = (char*)pde - (char*)page_address(page);
555 lock_page(page); 554 lock_page(page);
556 err = mapping->a_ops->prepare_write(NULL, page, from, to); 555 err = mapping->a_ops->prepare_write(NULL, page, from, to);
557 if (err) 556 BUG_ON(err);
558 BUG();
559 if (pde) 557 if (pde)
560 pde->rec_len = cpu_to_le16(to-from); 558 pde->rec_len = cpu_to_le16(to-from);
561 dir->inode = 0; 559 dir->inode = 0;
@@ -660,7 +658,7 @@ not_empty:
660 return 0; 658 return 0;
661} 659}
662 660
663struct file_operations ext2_dir_operations = { 661const struct file_operations ext2_dir_operations = {
664 .llseek = generic_file_llseek, 662 .llseek = generic_file_llseek,
665 .read = generic_read_dir, 663 .read = generic_read_dir,
666 .readdir = ext2_readdir, 664 .readdir = ext2_readdir,
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 11035ac7986f..9f74a62be555 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -154,12 +154,12 @@ extern void ext2_write_super (struct super_block *);
154 */ 154 */
155 155
156/* dir.c */ 156/* dir.c */
157extern struct file_operations ext2_dir_operations; 157extern const struct file_operations ext2_dir_operations;
158 158
159/* file.c */ 159/* file.c */
160extern struct inode_operations ext2_file_inode_operations; 160extern struct inode_operations ext2_file_inode_operations;
161extern struct file_operations ext2_file_operations; 161extern const struct file_operations ext2_file_operations;
162extern struct file_operations ext2_xip_file_operations; 162extern const struct file_operations ext2_xip_file_operations;
163 163
164/* inode.c */ 164/* inode.c */
165extern struct address_space_operations ext2_aops; 165extern struct address_space_operations ext2_aops;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a484412fc782..509cceca04db 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -39,7 +39,7 @@ static int ext2_release_file (struct inode * inode, struct file * filp)
39 * We have mostly NULL's here: the current defaults are ok for 39 * We have mostly NULL's here: the current defaults are ok for
40 * the ext2 filesystem. 40 * the ext2 filesystem.
41 */ 41 */
42struct file_operations ext2_file_operations = { 42const struct file_operations ext2_file_operations = {
43 .llseek = generic_file_llseek, 43 .llseek = generic_file_llseek,
44 .read = generic_file_read, 44 .read = generic_file_read,
45 .write = generic_file_write, 45 .write = generic_file_write,
@@ -56,7 +56,7 @@ struct file_operations ext2_file_operations = {
56}; 56};
57 57
58#ifdef CONFIG_EXT2_FS_XIP 58#ifdef CONFIG_EXT2_FS_XIP
59struct file_operations ext2_xip_file_operations = { 59const struct file_operations ext2_xip_file_operations = {
60 .llseek = generic_file_llseek, 60 .llseek = generic_file_llseek,
61 .read = xip_file_read, 61 .read = xip_file_read,
62 .write = xip_file_write, 62 .write = xip_file_write,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index a717837f272e..04af9c45dce2 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
667 return generic_block_bmap(mapping,block,ext2_get_block); 667 return generic_block_bmap(mapping,block,ext2_get_block);
668} 668}
669 669
670static int
671ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
672 struct buffer_head *bh_result, int create)
673{
674 int ret;
675
676 ret = ext2_get_block(inode, iblock, bh_result, create);
677 if (ret == 0)
678 bh_result->b_size = (1 << inode->i_blkbits);
679 return ret;
680}
681
682static ssize_t 670static ssize_t
683ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 671ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
684 loff_t offset, unsigned long nr_segs) 672 loff_t offset, unsigned long nr_segs)
@@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
687 struct inode *inode = file->f_mapping->host; 675 struct inode *inode = file->f_mapping->host;
688 676
689 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 677 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
690 offset, nr_segs, ext2_get_blocks, NULL); 678 offset, nr_segs, ext2_get_block, NULL);
691} 679}
692 680
693static int 681static int
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 46623f77666b..77927d6938f6 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
653 */ 653 */
654static int 654static int
655ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, 655ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
656 struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv) 656 struct buffer_head *bitmap_bh, int goal,
657 unsigned long *count, struct ext3_reserve_window *my_rsv)
657{ 658{
658 int group_first_block, start, end; 659 int group_first_block, start, end;
660 unsigned long num = 0;
659 661
660 /* we do allocation within the reservation window if we have a window */ 662 /* we do allocation within the reservation window if we have a window */
661 if (my_rsv) { 663 if (my_rsv) {
@@ -713,8 +715,18 @@ repeat:
713 goto fail_access; 715 goto fail_access;
714 goto repeat; 716 goto repeat;
715 } 717 }
716 return goal; 718 num++;
719 goal++;
720 while (num < *count && goal < end
721 && ext3_test_allocatable(goal, bitmap_bh)
722 && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
723 num++;
724 goal++;
725 }
726 *count = num;
727 return goal - num;
717fail_access: 728fail_access:
729 *count = num;
718 return -1; 730 return -1;
719} 731}
720 732
@@ -999,6 +1011,31 @@ retry:
999 goto retry; 1011 goto retry;
1000} 1012}
1001 1013
1014static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1015 struct super_block *sb, int size)
1016{
1017 struct ext3_reserve_window_node *next_rsv;
1018 struct rb_node *next;
1019 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1020
1021 if (!spin_trylock(rsv_lock))
1022 return;
1023
1024 next = rb_next(&my_rsv->rsv_node);
1025
1026 if (!next)
1027 my_rsv->rsv_end += size;
1028 else {
1029 next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
1030
1031 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1032 my_rsv->rsv_end += size;
1033 else
1034 my_rsv->rsv_end = next_rsv->rsv_start - 1;
1035 }
1036 spin_unlock(rsv_lock);
1037}
1038
1002/* 1039/*
1003 * This is the main function used to allocate a new block and its reservation 1040 * This is the main function used to allocate a new block and its reservation
1004 * window. 1041 * window.
@@ -1024,11 +1061,12 @@ static int
1024ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, 1061ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1025 unsigned int group, struct buffer_head *bitmap_bh, 1062 unsigned int group, struct buffer_head *bitmap_bh,
1026 int goal, struct ext3_reserve_window_node * my_rsv, 1063 int goal, struct ext3_reserve_window_node * my_rsv,
1027 int *errp) 1064 unsigned long *count, int *errp)
1028{ 1065{
1029 unsigned long group_first_block; 1066 unsigned long group_first_block;
1030 int ret = 0; 1067 int ret = 0;
1031 int fatal; 1068 int fatal;
1069 unsigned long num = *count;
1032 1070
1033 *errp = 0; 1071 *errp = 0;
1034 1072
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1051 * or last attempt to allocate a block with reservation turned on failed 1089 * or last attempt to allocate a block with reservation turned on failed
1052 */ 1090 */
1053 if (my_rsv == NULL ) { 1091 if (my_rsv == NULL ) {
1054 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); 1092 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1093 goal, count, NULL);
1055 goto out; 1094 goto out;
1056 } 1095 }
1057 /* 1096 /*
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1081 while (1) { 1120 while (1) {
1082 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || 1121 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1083 !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { 1122 !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
1123 if (my_rsv->rsv_goal_size < *count)
1124 my_rsv->rsv_goal_size = *count;
1084 ret = alloc_new_reservation(my_rsv, goal, sb, 1125 ret = alloc_new_reservation(my_rsv, goal, sb,
1085 group, bitmap_bh); 1126 group, bitmap_bh);
1086 if (ret < 0) 1127 if (ret < 0)
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1088 1129
1089 if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) 1130 if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
1090 goal = -1; 1131 goal = -1;
1091 } 1132 } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
1133 try_to_extend_reservation(my_rsv, sb,
1134 *count-my_rsv->rsv_end + goal - 1);
1135
1092 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) 1136 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
1093 || (my_rsv->rsv_end < group_first_block)) 1137 || (my_rsv->rsv_end < group_first_block))
1094 BUG(); 1138 BUG();
1095 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, 1139 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
1096 &my_rsv->rsv_window); 1140 &num, &my_rsv->rsv_window);
1097 if (ret >= 0) { 1141 if (ret >= 0) {
1098 my_rsv->rsv_alloc_hit++; 1142 my_rsv->rsv_alloc_hit += num;
1143 *count = num;
1099 break; /* succeed */ 1144 break; /* succeed */
1100 } 1145 }
1146 num = *count;
1101 } 1147 }
1102out: 1148out:
1103 if (ret >= 0) { 1149 if (ret >= 0) {
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1154 * bitmap, and then for any free bit if that fails. 1200 * bitmap, and then for any free bit if that fails.
1155 * This function also updates quota and i_blocks field. 1201 * This function also updates quota and i_blocks field.
1156 */ 1202 */
1157int ext3_new_block(handle_t *handle, struct inode *inode, 1203int ext3_new_blocks(handle_t *handle, struct inode *inode,
1158 unsigned long goal, int *errp) 1204 unsigned long goal, unsigned long *count, int *errp)
1159{ 1205{
1160 struct buffer_head *bitmap_bh = NULL; 1206 struct buffer_head *bitmap_bh = NULL;
1161 struct buffer_head *gdp_bh; 1207 struct buffer_head *gdp_bh;
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
1178 static int goal_hits, goal_attempts; 1224 static int goal_hits, goal_attempts;
1179#endif 1225#endif
1180 unsigned long ngroups; 1226 unsigned long ngroups;
1227 unsigned long num = *count;
1181 1228
1182 *errp = -ENOSPC; 1229 *errp = -ENOSPC;
1183 sb = inode->i_sb; 1230 sb = inode->i_sb;
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
1189 /* 1236 /*
1190 * Check quota for allocation of this block. 1237 * Check quota for allocation of this block.
1191 */ 1238 */
1192 if (DQUOT_ALLOC_BLOCK(inode, 1)) { 1239 if (DQUOT_ALLOC_BLOCK(inode, num)) {
1193 *errp = -EDQUOT; 1240 *errp = -EDQUOT;
1194 return 0; 1241 return 0;
1195 } 1242 }
@@ -1244,7 +1291,7 @@ retry:
1244 if (!bitmap_bh) 1291 if (!bitmap_bh)
1245 goto io_error; 1292 goto io_error;
1246 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, 1293 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
1247 bitmap_bh, ret_block, my_rsv, &fatal); 1294 bitmap_bh, ret_block, my_rsv, &num, &fatal);
1248 if (fatal) 1295 if (fatal)
1249 goto out; 1296 goto out;
1250 if (ret_block >= 0) 1297 if (ret_block >= 0)
@@ -1281,7 +1328,7 @@ retry:
1281 if (!bitmap_bh) 1328 if (!bitmap_bh)
1282 goto io_error; 1329 goto io_error;
1283 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, 1330 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
1284 bitmap_bh, -1, my_rsv, &fatal); 1331 bitmap_bh, -1, my_rsv, &num, &fatal);
1285 if (fatal) 1332 if (fatal)
1286 goto out; 1333 goto out;
1287 if (ret_block >= 0) 1334 if (ret_block >= 0)
@@ -1316,13 +1363,15 @@ allocated:
1316 target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) 1363 target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
1317 + le32_to_cpu(es->s_first_data_block); 1364 + le32_to_cpu(es->s_first_data_block);
1318 1365
1319 if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || 1366 if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
1320 target_block == le32_to_cpu(gdp->bg_inode_bitmap) || 1367 in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
1321 in_range(target_block, le32_to_cpu(gdp->bg_inode_table), 1368 in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
1369 EXT3_SB(sb)->s_itb_per_group) ||
1370 in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1322 EXT3_SB(sb)->s_itb_per_group)) 1371 EXT3_SB(sb)->s_itb_per_group))
1323 ext3_error(sb, "ext3_new_block", 1372 ext3_error(sb, "ext3_new_block",
1324 "Allocating block in system zone - " 1373 "Allocating block in system zone - "
1325 "block = %u", target_block); 1374 "blocks from %u, length %lu", target_block, num);
1326 1375
1327 performed_allocation = 1; 1376 performed_allocation = 1;
1328 1377
@@ -1341,10 +1390,14 @@ allocated:
1341 jbd_lock_bh_state(bitmap_bh); 1390 jbd_lock_bh_state(bitmap_bh);
1342 spin_lock(sb_bgl_lock(sbi, group_no)); 1391 spin_lock(sb_bgl_lock(sbi, group_no));
1343 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { 1392 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1344 if (ext3_test_bit(ret_block, 1393 int i;
1345 bh2jh(bitmap_bh)->b_committed_data)) { 1394
1346 printk("%s: block was unexpectedly set in " 1395 for (i = 0; i < num; i++) {
1347 "b_committed_data\n", __FUNCTION__); 1396 if (ext3_test_bit(ret_block,
1397 bh2jh(bitmap_bh)->b_committed_data)) {
1398 printk("%s: block was unexpectedly set in "
1399 "b_committed_data\n", __FUNCTION__);
1400 }
1348 } 1401 }
1349 } 1402 }
1350 ext3_debug("found bit %d\n", ret_block); 1403 ext3_debug("found bit %d\n", ret_block);
@@ -1355,7 +1408,7 @@ allocated:
1355 /* ret_block was blockgroup-relative. Now it becomes fs-relative */ 1408 /* ret_block was blockgroup-relative. Now it becomes fs-relative */
1356 ret_block = target_block; 1409 ret_block = target_block;
1357 1410
1358 if (ret_block >= le32_to_cpu(es->s_blocks_count)) { 1411 if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
1359 ext3_error(sb, "ext3_new_block", 1412 ext3_error(sb, "ext3_new_block",
1360 "block(%d) >= blocks count(%d) - " 1413 "block(%d) >= blocks count(%d) - "
1361 "block_group = %d, es == %p ", ret_block, 1414 "block_group = %d, es == %p ", ret_block,
@@ -1373,9 +1426,9 @@ allocated:
1373 1426
1374 spin_lock(sb_bgl_lock(sbi, group_no)); 1427 spin_lock(sb_bgl_lock(sbi, group_no));
1375 gdp->bg_free_blocks_count = 1428 gdp->bg_free_blocks_count =
1376 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); 1429 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
1377 spin_unlock(sb_bgl_lock(sbi, group_no)); 1430 spin_unlock(sb_bgl_lock(sbi, group_no));
1378 percpu_counter_mod(&sbi->s_freeblocks_counter, -1); 1431 percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
1379 1432
1380 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); 1433 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1381 err = ext3_journal_dirty_metadata(handle, gdp_bh); 1434 err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1388,6 +1441,8 @@ allocated:
1388 1441
1389 *errp = 0; 1442 *errp = 0;
1390 brelse(bitmap_bh); 1443 brelse(bitmap_bh);
1444 DQUOT_FREE_BLOCK(inode, *count-num);
1445 *count = num;
1391 return ret_block; 1446 return ret_block;
1392 1447
1393io_error: 1448io_error:
@@ -1401,11 +1456,19 @@ out:
1401 * Undo the block allocation 1456 * Undo the block allocation
1402 */ 1457 */
1403 if (!performed_allocation) 1458 if (!performed_allocation)
1404 DQUOT_FREE_BLOCK(inode, 1); 1459 DQUOT_FREE_BLOCK(inode, *count);
1405 brelse(bitmap_bh); 1460 brelse(bitmap_bh);
1406 return 0; 1461 return 0;
1407} 1462}
1408 1463
1464int ext3_new_block(handle_t *handle, struct inode *inode,
1465 unsigned long goal, int *errp)
1466{
1467 unsigned long count = 1;
1468
1469 return ext3_new_blocks(handle, inode, goal, &count, errp);
1470}
1471
1409unsigned long ext3_count_free_blocks(struct super_block *sb) 1472unsigned long ext3_count_free_blocks(struct super_block *sb)
1410{ 1473{
1411 unsigned long desc_count; 1474 unsigned long desc_count;
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 773459164bb2..f37528ed222e 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -39,7 +39,7 @@ static int ext3_dx_readdir(struct file * filp,
39static int ext3_release_dir (struct inode * inode, 39static int ext3_release_dir (struct inode * inode,
40 struct file * filp); 40 struct file * filp);
41 41
42struct file_operations ext3_dir_operations = { 42const struct file_operations ext3_dir_operations = {
43 .llseek = generic_file_llseek, 43 .llseek = generic_file_llseek,
44 .read = generic_read_dir, 44 .read = generic_read_dir,
45 .readdir = ext3_readdir, /* we take BKL. needed?*/ 45 .readdir = ext3_readdir, /* we take BKL. needed?*/
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
131 struct buffer_head *bh = NULL; 131 struct buffer_head *bh = NULL;
132 132
133 map_bh.b_state = 0; 133 map_bh.b_state = 0;
134 err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); 134 err = ext3_get_blocks_handle(NULL, inode, blk, 1,
135 if (!err) { 135 &map_bh, 0, 0);
136 if (err > 0) {
136 page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, 137 page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
137 &filp->f_ra, 138 &filp->f_ra,
138 filp, 139 filp,
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 59098ea56711..783a796220bb 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -105,7 +105,7 @@ force_commit:
105 return ret; 105 return ret;
106} 106}
107 107
108struct file_operations ext3_file_operations = { 108const struct file_operations ext3_file_operations = {
109 .llseek = generic_file_llseek, 109 .llseek = generic_file_llseek,
110 .read = do_sync_read, 110 .read = do_sync_read,
111 .write = do_sync_write, 111 .write = do_sync_write,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2c361377e0a5..48ae0339af17 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
44/* 44/*
45 * Test whether an inode is a fast symlink. 45 * Test whether an inode is a fast symlink.
46 */ 46 */
47static inline int ext3_inode_is_fast_symlink(struct inode *inode) 47static int ext3_inode_is_fast_symlink(struct inode *inode)
48{ 48{
49 int ea_blocks = EXT3_I(inode)->i_file_acl ? 49 int ea_blocks = EXT3_I(inode)->i_file_acl ?
50 (inode->i_sb->s_blocksize >> 9) : 0; 50 (inode->i_sb->s_blocksize >> 9) : 0;
51 51
52 return (S_ISLNK(inode->i_mode) && 52 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
53 inode->i_blocks - ea_blocks == 0);
54} 53}
55 54
56/* The ext3 forget function must perform a revoke if we are freeing data 55/*
56 * The ext3 forget function must perform a revoke if we are freeing data
57 * which has been journaled. Metadata (eg. indirect blocks) must be 57 * which has been journaled. Metadata (eg. indirect blocks) must be
58 * revoked in all cases. 58 * revoked in all cases.
59 * 59 *
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode)
61 * but there may still be a record of it in the journal, and that record 61 * but there may still be a record of it in the journal, and that record
62 * still needs to be revoked. 62 * still needs to be revoked.
63 */ 63 */
64 64int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
65int ext3_forget(handle_t *handle, int is_metadata, 65 struct buffer_head *bh, int blocknr)
66 struct inode *inode, struct buffer_head *bh,
67 int blocknr)
68{ 66{
69 int err; 67 int err;
70 68
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata,
104} 102}
105 103
106/* 104/*
107 * Work out how many blocks we need to progress with the next chunk of a 105 * Work out how many blocks we need to proceed with the next chunk of a
108 * truncate transaction. 106 * truncate transaction.
109 */ 107 */
110
111static unsigned long blocks_for_truncate(struct inode *inode) 108static unsigned long blocks_for_truncate(struct inode *inode)
112{ 109{
113 unsigned long needed; 110 unsigned long needed;
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode)
141 * extend fails, we need to propagate the failure up and restart the 138 * extend fails, we need to propagate the failure up and restart the
142 * transaction in the top-level truncate loop. --sct 139 * transaction in the top-level truncate loop. --sct
143 */ 140 */
144
145static handle_t *start_transaction(struct inode *inode) 141static handle_t *start_transaction(struct inode *inode)
146{ 142{
147 handle_t *result; 143 handle_t *result;
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode)
194 190
195 handle = start_transaction(inode); 191 handle = start_transaction(inode);
196 if (IS_ERR(handle)) { 192 if (IS_ERR(handle)) {
197 /* If we're going to skip the normal cleanup, we still 193 /*
198 * need to make sure that the in-core orphan linked list 194 * If we're going to skip the normal cleanup, we still need to
199 * is properly cleaned up. */ 195 * make sure that the in-core orphan linked list is properly
196 * cleaned up.
197 */
200 ext3_orphan_del(NULL, inode); 198 ext3_orphan_del(NULL, inode);
201 goto no_delete; 199 goto no_delete;
202 } 200 }
@@ -235,16 +233,6 @@ no_delete:
235 clear_inode(inode); /* We must guarantee clearing of inode... */ 233 clear_inode(inode); /* We must guarantee clearing of inode... */
236} 234}
237 235
238static int ext3_alloc_block (handle_t *handle,
239 struct inode * inode, unsigned long goal, int *err)
240{
241 unsigned long result;
242
243 result = ext3_new_block(handle, inode, goal, err);
244 return result;
245}
246
247
248typedef struct { 236typedef struct {
249 __le32 *p; 237 __le32 *p;
250 __le32 key; 238 __le32 key;
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
257 p->bh = bh; 245 p->bh = bh;
258} 246}
259 247
260static inline int verify_chain(Indirect *from, Indirect *to) 248static int verify_chain(Indirect *from, Indirect *to)
261{ 249{
262 while (from <= to && from->key == *from->p) 250 while (from <= to && from->key == *from->p)
263 from++; 251 from++;
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode,
327 offsets[n++] = i_block & (ptrs - 1); 315 offsets[n++] = i_block & (ptrs - 1);
328 final = ptrs; 316 final = ptrs;
329 } else { 317 } else {
330 ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big"); 318 ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
331 } 319 }
332 if (boundary) 320 if (boundary)
333 *boundary = (i_block & (ptrs - 1)) == (final - 1); 321 *boundary = final - 1 - (i_block & (ptrs - 1));
334 return n; 322 return n;
335} 323}
336 324
@@ -419,7 +407,6 @@ no_block:
419 * 407 *
420 * Caller must make sure that @ind is valid and will stay that way. 408 * Caller must make sure that @ind is valid and will stay that way.
421 */ 409 */
422
423static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) 410static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
424{ 411{
425 struct ext3_inode_info *ei = EXT3_I(inode); 412 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
429 unsigned long colour; 416 unsigned long colour;
430 417
431 /* Try to find previous block */ 418 /* Try to find previous block */
432 for (p = ind->p - 1; p >= start; p--) 419 for (p = ind->p - 1; p >= start; p--) {
433 if (*p) 420 if (*p)
434 return le32_to_cpu(*p); 421 return le32_to_cpu(*p);
422 }
435 423
436 /* No such thing, so let's try location of indirect block */ 424 /* No such thing, so let's try location of indirect block */
437 if (ind->bh) 425 if (ind->bh)
438 return ind->bh->b_blocknr; 426 return ind->bh->b_blocknr;
439 427
440 /* 428 /*
441 * It is going to be refered from inode itself? OK, just put it into 429 * It is going to be referred to from the inode itself? OK, just put it
442 * the same cylinder group then. 430 * into the same cylinder group then.
443 */ 431 */
444 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + 432 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
445 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); 433 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
463static unsigned long ext3_find_goal(struct inode *inode, long block, 451static unsigned long ext3_find_goal(struct inode *inode, long block,
464 Indirect chain[4], Indirect *partial) 452 Indirect chain[4], Indirect *partial)
465{ 453{
466 struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; 454 struct ext3_block_alloc_info *block_i;
455
456 block_i = EXT3_I(inode)->i_block_alloc_info;
467 457
468 /* 458 /*
469 * try the heuristic for sequential allocation, 459 * try the heuristic for sequential allocation,
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
478} 468}
479 469
480/** 470/**
471 * ext3_blks_to_allocate: Look up the block map and count the number
472 * of direct blocks need to be allocated for the given branch.
473 *
474 * @branch: chain of indirect blocks
475 * @k: number of blocks need for indirect blocks
476 * @blks: number of data blocks to be mapped.
477 * @blocks_to_boundary: the offset in the indirect block
478 *
479 * return the total number of blocks to be allocate, including the
480 * direct and indirect blocks.
481 */
482static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
483 int blocks_to_boundary)
484{
485 unsigned long count = 0;
486
487 /*
488 * Simple case, [t,d]Indirect block(s) has not allocated yet
489 * then it's clear blocks on that path have not allocated
490 */
491 if (k > 0) {
492 /* right now we don't handle cross boundary allocation */
493 if (blks < blocks_to_boundary + 1)
494 count += blks;
495 else
496 count += blocks_to_boundary + 1;
497 return count;
498 }
499
500 count++;
501 while (count < blks && count <= blocks_to_boundary &&
502 le32_to_cpu(*(branch[0].p + count)) == 0) {
503 count++;
504 }
505 return count;
506}
507
508/**
509 * ext3_alloc_blocks: multiple allocate blocks needed for a branch
510 * @indirect_blks: the number of blocks need to allocate for indirect
511 * blocks
512 *
513 * @new_blocks: on return it will store the new block numbers for
514 * the indirect blocks(if needed) and the first direct block,
515 * @blks: on return it will store the total number of allocated
516 * direct blocks
517 */
518static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
519 unsigned long goal, int indirect_blks, int blks,
520 unsigned long long new_blocks[4], int *err)
521{
522 int target, i;
523 unsigned long count = 0;
524 int index = 0;
525 unsigned long current_block = 0;
526 int ret = 0;
527
528 /*
529 * Here we try to allocate the requested multiple blocks at once,
530 * on a best-effort basis.
531 * To build a branch, we should allocate blocks for
532 * the indirect blocks(if not allocated yet), and at least
533 * the first direct block of this branch. That's the
534 * minimum number of blocks need to allocate(required)
535 */
536 target = blks + indirect_blks;
537
538 while (1) {
539 count = target;
540 /* allocating blocks for indirect blocks and direct blocks */
541 current_block = ext3_new_blocks(handle,inode,goal,&count,err);
542 if (*err)
543 goto failed_out;
544
545 target -= count;
546 /* allocate blocks for indirect blocks */
547 while (index < indirect_blks && count) {
548 new_blocks[index++] = current_block++;
549 count--;
550 }
551
552 if (count > 0)
553 break;
554 }
555
556 /* save the new block number for the first direct block */
557 new_blocks[index] = current_block;
558
559 /* total number of blocks allocated for direct blocks */
560 ret = count;
561 *err = 0;
562 return ret;
563failed_out:
564 for (i = 0; i <index; i++)
565 ext3_free_blocks(handle, inode, new_blocks[i], 1);
566 return ret;
567}
568
569/**
481 * ext3_alloc_branch - allocate and set up a chain of blocks. 570 * ext3_alloc_branch - allocate and set up a chain of blocks.
482 * @inode: owner 571 * @inode: owner
483 * @num: depth of the chain (number of blocks to allocate) 572 * @indirect_blks: number of allocated indirect blocks
573 * @blks: number of allocated direct blocks
484 * @offsets: offsets (in the blocks) to store the pointers to next. 574 * @offsets: offsets (in the blocks) to store the pointers to next.
485 * @branch: place to store the chain in. 575 * @branch: place to store the chain in.
486 * 576 *
487 * This function allocates @num blocks, zeroes out all but the last one, 577 * This function allocates blocks, zeroes out all but the last one,
488 * links them into chain and (if we are synchronous) writes them to disk. 578 * links them into chain and (if we are synchronous) writes them to disk.
489 * In other words, it prepares a branch that can be spliced onto the 579 * In other words, it prepares a branch that can be spliced onto the
490 * inode. It stores the information about that chain in the branch[], in 580 * inode. It stores the information about that chain in the branch[], in
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
501 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain 591 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
502 * as described above and return 0. 592 * as described above and return 0.
503 */ 593 */
504
505static int ext3_alloc_branch(handle_t *handle, struct inode *inode, 594static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
506 int num, 595 int indirect_blks, int *blks, unsigned long goal,
507 unsigned long goal, 596 int *offsets, Indirect *branch)
508 int *offsets,
509 Indirect *branch)
510{ 597{
511 int blocksize = inode->i_sb->s_blocksize; 598 int blocksize = inode->i_sb->s_blocksize;
512 int n = 0, keys = 0; 599 int i, n = 0;
513 int err = 0; 600 int err = 0;
514 int i; 601 struct buffer_head *bh;
515 int parent = ext3_alloc_block(handle, inode, goal, &err); 602 int num;
516 603 unsigned long long new_blocks[4];
517 branch[0].key = cpu_to_le32(parent); 604 unsigned long long current_block;
518 if (parent) {
519 for (n = 1; n < num; n++) {
520 struct buffer_head *bh;
521 /* Allocate the next block */
522 int nr = ext3_alloc_block(handle, inode, parent, &err);
523 if (!nr)
524 break;
525 branch[n].key = cpu_to_le32(nr);
526 605
527 /* 606 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
528 * Get buffer_head for parent block, zero it out 607 *blks, new_blocks, &err);
529 * and set the pointer to new one, then send 608 if (err)
530 * parent to disk. 609 return err;
531 */
532 bh = sb_getblk(inode->i_sb, parent);
533 if (!bh)
534 break;
535 keys = n+1;
536 branch[n].bh = bh;
537 lock_buffer(bh);
538 BUFFER_TRACE(bh, "call get_create_access");
539 err = ext3_journal_get_create_access(handle, bh);
540 if (err) {
541 unlock_buffer(bh);
542 brelse(bh);
543 break;
544 }
545 610
546 memset(bh->b_data, 0, blocksize); 611 branch[0].key = cpu_to_le32(new_blocks[0]);
547 branch[n].p = (__le32*) bh->b_data + offsets[n]; 612 /*
548 *branch[n].p = branch[n].key; 613 * metadata blocks and data blocks are allocated.
549 BUFFER_TRACE(bh, "marking uptodate"); 614 */
550 set_buffer_uptodate(bh); 615 for (n = 1; n <= indirect_blks; n++) {
616 /*
617 * Get buffer_head for parent block, zero it out
618 * and set the pointer to new one, then send
619 * parent to disk.
620 */
621 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
622 branch[n].bh = bh;
623 lock_buffer(bh);
624 BUFFER_TRACE(bh, "call get_create_access");
625 err = ext3_journal_get_create_access(handle, bh);
626 if (err) {
551 unlock_buffer(bh); 627 unlock_buffer(bh);
628 brelse(bh);
629 goto failed;
630 }
552 631
553 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 632 memset(bh->b_data, 0, blocksize);
554 err = ext3_journal_dirty_metadata(handle, bh); 633 branch[n].p = (__le32 *) bh->b_data + offsets[n];
555 if (err) 634 branch[n].key = cpu_to_le32(new_blocks[n]);
556 break; 635 *branch[n].p = branch[n].key;
557 636 if ( n == indirect_blks) {
558 parent = nr; 637 current_block = new_blocks[n];
638 /*
639 * End of chain, update the last new metablock of
640 * the chain to point to the new allocated
641 * data blocks numbers
642 */
643 for (i=1; i < num; i++)
644 *(branch[n].p + i) = cpu_to_le32(++current_block);
559 } 645 }
560 } 646 BUFFER_TRACE(bh, "marking uptodate");
561 if (n == num) 647 set_buffer_uptodate(bh);
562 return 0; 648 unlock_buffer(bh);
563 649
650 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
651 err = ext3_journal_dirty_metadata(handle, bh);
652 if (err)
653 goto failed;
654 }
655 *blks = num;
656 return err;
657failed:
564 /* Allocation failed, free what we already allocated */ 658 /* Allocation failed, free what we already allocated */
565 for (i = 1; i < keys; i++) { 659 for (i = 1; i <= n ; i++) {
566 BUFFER_TRACE(branch[i].bh, "call journal_forget"); 660 BUFFER_TRACE(branch[i].bh, "call journal_forget");
567 ext3_journal_forget(handle, branch[i].bh); 661 ext3_journal_forget(handle, branch[i].bh);
568 } 662 }
569 for (i = 0; i < keys; i++) 663 for (i = 0; i <indirect_blks; i++)
570 ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1); 664 ext3_free_blocks(handle, inode, new_blocks[i], 1);
665
666 ext3_free_blocks(handle, inode, new_blocks[i], num);
667
571 return err; 668 return err;
572} 669}
573 670
574/** 671/**
575 * ext3_splice_branch - splice the allocated branch onto inode. 672 * ext3_splice_branch - splice the allocated branch onto inode.
576 * @inode: owner 673 * @inode: owner
577 * @block: (logical) number of block we are adding 674 * @block: (logical) number of block we are adding
578 * @chain: chain of indirect blocks (with a missing link - see 675 * @chain: chain of indirect blocks (with a missing link - see
579 * ext3_alloc_branch) 676 * ext3_alloc_branch)
580 * @where: location of missing link 677 * @where: location of missing link
581 * @num: number of blocks we are adding 678 * @num: number of indirect blocks we are adding
582 * 679 * @blks: number of direct blocks we are adding
583 * This function fills the missing link and does all housekeeping needed in 680 *
584 * inode (->i_blocks, etc.). In case of success we end up with the full 681 * This function fills the missing link and does all housekeeping needed in
585 * chain to new block and return 0. 682 * inode (->i_blocks, etc.). In case of success we end up with the full
683 * chain to new block and return 0.
586 */ 684 */
587 685static int ext3_splice_branch(handle_t *handle, struct inode *inode,
588static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, 686 long block, Indirect *where, int num, int blks)
589 Indirect chain[4], Indirect *where, int num)
590{ 687{
591 int i; 688 int i;
592 int err = 0; 689 int err = 0;
593 struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; 690 struct ext3_block_alloc_info *block_i;
691 unsigned long current_block;
594 692
693 block_i = EXT3_I(inode)->i_block_alloc_info;
595 /* 694 /*
596 * If we're splicing into a [td]indirect block (as opposed to the 695 * If we're splicing into a [td]indirect block (as opposed to the
597 * inode) then we need to get write access to the [td]indirect block 696 * inode) then we need to get write access to the [td]indirect block
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
608 *where->p = where->key; 707 *where->p = where->key;
609 708
610 /* 709 /*
710 * Update the host buffer_head or inode to point to more just allocated
711 * direct blocks blocks
712 */
713 if (num == 0 && blks > 1) {
714 current_block = le32_to_cpu(where->key + 1);
715 for (i = 1; i < blks; i++)
716 *(where->p + i ) = cpu_to_le32(current_block++);
717 }
718
719 /*
611 * update the most recently allocated logical & physical block 720 * update the most recently allocated logical & physical block
612 * in i_block_alloc_info, to assist find the proper goal block for next 721 * in i_block_alloc_info, to assist find the proper goal block for next
613 * allocation 722 * allocation
614 */ 723 */
615 if (block_i) { 724 if (block_i) {
616 block_i->last_alloc_logical_block = block; 725 block_i->last_alloc_logical_block = block + blks - 1;
617 block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key); 726 block_i->last_alloc_physical_block =
727 le32_to_cpu(where[num].key + blks - 1);
618 } 728 }
619 729
620 /* We are done with atomic stuff, now do the rest of housekeeping */ 730 /* We are done with atomic stuff, now do the rest of housekeeping */
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
625 /* had we spliced it onto indirect block? */ 735 /* had we spliced it onto indirect block? */
626 if (where->bh) { 736 if (where->bh) {
627 /* 737 /*
628 * akpm: If we spliced it onto an indirect block, we haven't 738 * If we spliced it onto an indirect block, we haven't
629 * altered the inode. Note however that if it is being spliced 739 * altered the inode. Note however that if it is being spliced
630 * onto an indirect block at the very end of the file (the 740 * onto an indirect block at the very end of the file (the
631 * file is growing) then we *will* alter the inode to reflect 741 * file is growing) then we *will* alter the inode to reflect
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
647 return err; 757 return err;
648 758
649err_out: 759err_out:
650 for (i = 1; i < num; i++) { 760 for (i = 1; i <= num; i++) {
651 BUFFER_TRACE(where[i].bh, "call journal_forget"); 761 BUFFER_TRACE(where[i].bh, "call journal_forget");
652 ext3_journal_forget(handle, where[i].bh); 762 ext3_journal_forget(handle, where[i].bh);
763 ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
653 } 764 }
765 ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
766
654 return err; 767 return err;
655} 768}
656 769
@@ -666,26 +779,33 @@ err_out:
666 * allocations is needed - we simply release blocks and do not touch anything 779 * allocations is needed - we simply release blocks and do not touch anything
667 * reachable from inode. 780 * reachable from inode.
668 * 781 *
669 * akpm: `handle' can be NULL if create == 0. 782 * `handle' can be NULL if create == 0.
670 * 783 *
671 * The BKL may not be held on entry here. Be sure to take it early. 784 * The BKL may not be held on entry here. Be sure to take it early.
785 * return > 0, # of blocks mapped or allocated.
786 * return = 0, if plain lookup failed.
787 * return < 0, error case.
672 */ 788 */
673 789int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
674int 790 sector_t iblock, unsigned long maxblocks,
675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, 791 struct buffer_head *bh_result,
676 struct buffer_head *bh_result, int create, int extend_disksize) 792 int create, int extend_disksize)
677{ 793{
678 int err = -EIO; 794 int err = -EIO;
679 int offsets[4]; 795 int offsets[4];
680 Indirect chain[4]; 796 Indirect chain[4];
681 Indirect *partial; 797 Indirect *partial;
682 unsigned long goal; 798 unsigned long goal;
683 int left; 799 int indirect_blks;
684 int boundary = 0; 800 int blocks_to_boundary = 0;
685 const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary); 801 int depth;
686 struct ext3_inode_info *ei = EXT3_I(inode); 802 struct ext3_inode_info *ei = EXT3_I(inode);
803 int count = 0;
804 unsigned long first_block = 0;
805
687 806
688 J_ASSERT(handle != NULL || create == 0); 807 J_ASSERT(handle != NULL || create == 0);
808 depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
689 809
690 if (depth == 0) 810 if (depth == 0)
691 goto out; 811 goto out;
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
694 814
695 /* Simplest case - block found, no allocation needed */ 815 /* Simplest case - block found, no allocation needed */
696 if (!partial) { 816 if (!partial) {
817 first_block = chain[depth - 1].key;
697 clear_buffer_new(bh_result); 818 clear_buffer_new(bh_result);
698 goto got_it; 819 count++;
820 /*map more blocks*/
821 while (count < maxblocks && count <= blocks_to_boundary) {
822 if (!verify_chain(chain, partial)) {
823 /*
824 * Indirect block might be removed by
825 * truncate while we were reading it.
826 * Handling of that case: forget what we've
827 * got now. Flag the err as EAGAIN, so it
828 * will reread.
829 */
830 err = -EAGAIN;
831 count = 0;
832 break;
833 }
834 if (le32_to_cpu(*(chain[depth-1].p+count) ==
835 (first_block + count)))
836 count++;
837 else
838 break;
839 }
840 if (err != -EAGAIN)
841 goto got_it;
699 } 842 }
700 843
701 /* Next simple case - plain lookup or failed read of indirect block */ 844 /* Next simple case - plain lookup or failed read of indirect block */
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
723 } 866 }
724 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 867 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
725 if (!partial) { 868 if (!partial) {
869 count++;
726 mutex_unlock(&ei->truncate_mutex); 870 mutex_unlock(&ei->truncate_mutex);
727 if (err) 871 if (err)
728 goto cleanup; 872 goto cleanup;
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
740 884
741 goal = ext3_find_goal(inode, iblock, chain, partial); 885 goal = ext3_find_goal(inode, iblock, chain, partial);
742 886
743 left = (chain + depth) - partial; 887 /* the number of blocks need to allocate for [d,t]indirect blocks */
888 indirect_blks = (chain + depth) - partial - 1;
744 889
745 /* 890 /*
891 * Next look up the indirect map to count the totoal number of
892 * direct blocks to allocate for this branch.
893 */
894 count = ext3_blks_to_allocate(partial, indirect_blks,
895 maxblocks, blocks_to_boundary);
896 /*
746 * Block out ext3_truncate while we alter the tree 897 * Block out ext3_truncate while we alter the tree
747 */ 898 */
748 err = ext3_alloc_branch(handle, inode, left, goal, 899 err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
749 offsets + (partial - chain), partial); 900 offsets + (partial - chain), partial);
750 901
751 /* 902 /*
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
756 * may need to return -EAGAIN upwards in the worst case. --sct 907 * may need to return -EAGAIN upwards in the worst case. --sct
757 */ 908 */
758 if (!err) 909 if (!err)
759 err = ext3_splice_branch(handle, inode, iblock, chain, 910 err = ext3_splice_branch(handle, inode, iblock,
760 partial, left); 911 partial, indirect_blks, count);
761 /* 912 /*
762 * i_disksize growing is protected by truncate_mutex. Don't forget to 913 * i_disksize growing is protected by truncate_mutex. Don't forget to
763 * protect it if you're about to implement concurrent 914 * protect it if you're about to implement concurrent
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
772 set_buffer_new(bh_result); 923 set_buffer_new(bh_result);
773got_it: 924got_it:
774 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); 925 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
775 if (boundary) 926 if (blocks_to_boundary == 0)
776 set_buffer_boundary(bh_result); 927 set_buffer_boundary(bh_result);
928 err = count;
777 /* Clean up and exit */ 929 /* Clean up and exit */
778 partial = chain + depth - 1; /* the whole chain */ 930 partial = chain + depth - 1; /* the whole chain */
779cleanup: 931cleanup:
@@ -787,34 +939,21 @@ out:
787 return err; 939 return err;
788} 940}
789 941
790static int ext3_get_block(struct inode *inode, sector_t iblock,
791 struct buffer_head *bh_result, int create)
792{
793 handle_t *handle = NULL;
794 int ret;
795
796 if (create) {
797 handle = ext3_journal_current_handle();
798 J_ASSERT(handle != 0);
799 }
800 ret = ext3_get_block_handle(handle, inode, iblock,
801 bh_result, create, 1);
802 return ret;
803}
804
805#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) 942#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
806 943
807static int 944static int ext3_get_block(struct inode *inode, sector_t iblock,
808ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, 945 struct buffer_head *bh_result, int create)
809 unsigned long max_blocks, struct buffer_head *bh_result,
810 int create)
811{ 946{
812 handle_t *handle = journal_current_handle(); 947 handle_t *handle = journal_current_handle();
813 int ret = 0; 948 int ret = 0;
949 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
814 950
815 if (!handle) 951 if (!create)
816 goto get_block; /* A read */ 952 goto get_block; /* A read */
817 953
954 if (max_blocks == 1)
955 goto get_block; /* A single block get */
956
818 if (handle->h_transaction->t_state == T_LOCKED) { 957 if (handle->h_transaction->t_state == T_LOCKED) {
819 /* 958 /*
820 * Huge direct-io writes can hold off commits for long 959 * Huge direct-io writes can hold off commits for long
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
841 } 980 }
842 981
843get_block: 982get_block:
844 if (ret == 0) 983 if (ret == 0) {
845 ret = ext3_get_block_handle(handle, inode, iblock, 984 ret = ext3_get_blocks_handle(handle, inode, iblock,
846 bh_result, create, 0); 985 max_blocks, bh_result, create, 0);
847 bh_result->b_size = (1 << inode->i_blkbits); 986 if (ret > 0) {
987 bh_result->b_size = (ret << inode->i_blkbits);
988 ret = 0;
989 }
990 }
848 return ret; 991 return ret;
849} 992}
850 993
851/* 994/*
852 * `handle' can be NULL if create is zero 995 * `handle' can be NULL if create is zero
853 */ 996 */
854struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, 997struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
855 long block, int create, int * errp) 998 long block, int create, int *errp)
856{ 999{
857 struct buffer_head dummy; 1000 struct buffer_head dummy;
858 int fatal = 0, err; 1001 int fatal = 0, err;
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
862 dummy.b_state = 0; 1005 dummy.b_state = 0;
863 dummy.b_blocknr = -1000; 1006 dummy.b_blocknr = -1000;
864 buffer_trace_init(&dummy.b_history); 1007 buffer_trace_init(&dummy.b_history);
865 *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1); 1008 err = ext3_get_blocks_handle(handle, inode, block, 1,
866 if (!*errp && buffer_mapped(&dummy)) { 1009 &dummy, create, 1);
1010 if (err == 1) {
1011 err = 0;
1012 } else if (err >= 0) {
1013 WARN_ON(1);
1014 err = -EIO;
1015 }
1016 *errp = err;
1017 if (!err && buffer_mapped(&dummy)) {
867 struct buffer_head *bh; 1018 struct buffer_head *bh;
868 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1019 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
869 if (!bh) { 1020 if (!bh) {
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
874 J_ASSERT(create != 0); 1025 J_ASSERT(create != 0);
875 J_ASSERT(handle != 0); 1026 J_ASSERT(handle != 0);
876 1027
877 /* Now that we do not always journal data, we 1028 /*
878 should keep in mind whether this should 1029 * Now that we do not always journal data, we should
879 always journal the new buffer as metadata. 1030 * keep in mind whether this should always journal the
880 For now, regular file writes use 1031 * new buffer as metadata. For now, regular file
881 ext3_get_block instead, so it's not a 1032 * writes use ext3_get_block instead, so it's not a
882 problem. */ 1033 * problem.
1034 */
883 lock_buffer(bh); 1035 lock_buffer(bh);
884 BUFFER_TRACE(bh, "call get_create_access"); 1036 BUFFER_TRACE(bh, "call get_create_access");
885 fatal = ext3_journal_get_create_access(handle, bh); 1037 fatal = ext3_journal_get_create_access(handle, bh);
886 if (!fatal && !buffer_uptodate(bh)) { 1038 if (!fatal && !buffer_uptodate(bh)) {
887 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1039 memset(bh->b_data,0,inode->i_sb->s_blocksize);
888 set_buffer_uptodate(bh); 1040 set_buffer_uptodate(bh);
889 } 1041 }
890 unlock_buffer(bh); 1042 unlock_buffer(bh);
@@ -906,7 +1058,7 @@ err:
906 return NULL; 1058 return NULL;
907} 1059}
908 1060
909struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode, 1061struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
910 int block, int create, int *err) 1062 int block, int create, int *err)
911{ 1063{
912 struct buffer_head * bh; 1064 struct buffer_head * bh;
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle,
982 * is elevated. We'll still have enough credits for the tiny quotafile 1134 * is elevated. We'll still have enough credits for the tiny quotafile
983 * write. 1135 * write.
984 */ 1136 */
985 1137static int do_journal_get_write_access(handle_t *handle,
986static int do_journal_get_write_access(handle_t *handle, 1138 struct buffer_head *bh)
987 struct buffer_head *bh)
988{ 1139{
989 if (!buffer_mapped(bh) || buffer_freed(bh)) 1140 if (!buffer_mapped(bh) || buffer_freed(bh))
990 return 0; 1141 return 0;
@@ -1025,8 +1176,7 @@ out:
1025 return ret; 1176 return ret;
1026} 1177}
1027 1178
1028int 1179int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1029ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1030{ 1180{
1031 int err = journal_dirty_data(handle, bh); 1181 int err = journal_dirty_data(handle, bh);
1032 if (err) 1182 if (err)
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
1051 * ext3 never places buffers on inode->i_mapping->private_list. metadata 1201 * ext3 never places buffers on inode->i_mapping->private_list. metadata
1052 * buffers are managed internally. 1202 * buffers are managed internally.
1053 */ 1203 */
1054
1055static int ext3_ordered_commit_write(struct file *file, struct page *page, 1204static int ext3_ordered_commit_write(struct file *file, struct page *page,
1056 unsigned from, unsigned to) 1205 unsigned from, unsigned to)
1057{ 1206{
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1261 * we don't need to open a transaction here. 1410 * we don't need to open a transaction here.
1262 */ 1411 */
1263static int ext3_ordered_writepage(struct page *page, 1412static int ext3_ordered_writepage(struct page *page,
1264 struct writeback_control *wbc) 1413 struct writeback_control *wbc)
1265{ 1414{
1266 struct inode *inode = page->mapping->host; 1415 struct inode *inode = page->mapping->host;
1267 struct buffer_head *page_bufs; 1416 struct buffer_head *page_bufs;
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping,
1430 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); 1579 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1431} 1580}
1432 1581
1433static int ext3_invalidatepage(struct page *page, unsigned long offset) 1582static void ext3_invalidatepage(struct page *page, unsigned long offset)
1434{ 1583{
1435 journal_t *journal = EXT3_JOURNAL(page->mapping->host); 1584 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1436 1585
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
1440 if (offset == 0) 1589 if (offset == 0)
1441 ClearPageChecked(page); 1590 ClearPageChecked(page);
1442 1591
1443 return journal_invalidatepage(journal, page, offset); 1592 journal_invalidatepage(journal, page, offset);
1444} 1593}
1445 1594
1446static int ext3_releasepage(struct page *page, gfp_t wait) 1595static int ext3_releasepage(struct page *page, gfp_t wait)
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1492 1641
1493 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1642 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1494 offset, nr_segs, 1643 offset, nr_segs,
1495 ext3_direct_io_get_blocks, NULL); 1644 ext3_get_block, NULL);
1496 1645
1497 /* 1646 /*
1498 * Reacquire the handle: ext3_direct_io_get_block() can restart the 1647 * Reacquire the handle: ext3_get_block() can restart the transaction
1499 * transaction
1500 */ 1648 */
1501 handle = journal_current_handle(); 1649 handle = journal_current_handle();
1502 1650
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
1752 * c) free the subtrees growing from the inode past the @chain[0]. 1900 * c) free the subtrees growing from the inode past the @chain[0].
1753 * (no partially truncated stuff there). */ 1901 * (no partially truncated stuff there). */
1754 1902
1755static Indirect *ext3_find_shared(struct inode *inode, 1903static Indirect *ext3_find_shared(struct inode *inode, int depth,
1756 int depth, 1904 int offsets[4], Indirect chain[4], __le32 *top)
1757 int offsets[4],
1758 Indirect chain[4],
1759 __le32 *top)
1760{ 1905{
1761 Indirect *partial, *p; 1906 Indirect *partial, *p;
1762 int k, err; 1907 int k, err;
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
1795 } 1940 }
1796 /* Writer: end */ 1941 /* Writer: end */
1797 1942
1798 while(partial > p) 1943 while(partial > p) {
1799 {
1800 brelse(partial->bh); 1944 brelse(partial->bh);
1801 partial--; 1945 partial--;
1802 } 1946 }
@@ -1812,10 +1956,9 @@ no_top:
1812 * We release `count' blocks on disk, but (last - first) may be greater 1956 * We release `count' blocks on disk, but (last - first) may be greater
1813 * than `count' because there can be holes in there. 1957 * than `count' because there can be holes in there.
1814 */ 1958 */
1815static void 1959static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
1816ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, 1960 struct buffer_head *bh, unsigned long block_to_free,
1817 unsigned long block_to_free, unsigned long count, 1961 unsigned long count, __le32 *first, __le32 *last)
1818 __le32 *first, __le32 *last)
1819{ 1962{
1820 __le32 *p; 1963 __le32 *p;
1821 if (try_to_extend_transaction(handle, inode)) { 1964 if (try_to_extend_transaction(handle, inode)) {
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2076 * that's fine - as long as they are linked from the inode, the post-crash 2219 * that's fine - as long as they are linked from the inode, the post-crash
2077 * ext3_truncate() run will find them and release them. 2220 * ext3_truncate() run will find them and release them.
2078 */ 2221 */
2079 2222void ext3_truncate(struct inode *inode)
2080void ext3_truncate(struct inode * inode)
2081{ 2223{
2082 handle_t *handle; 2224 handle_t *handle;
2083 struct ext3_inode_info *ei = EXT3_I(inode); 2225 struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode)
2201do_indirects: 2343do_indirects:
2202 /* Kill the remaining (whole) subtrees */ 2344 /* Kill the remaining (whole) subtrees */
2203 switch (offsets[0]) { 2345 switch (offsets[0]) {
2204 default: 2346 default:
2205 nr = i_data[EXT3_IND_BLOCK]; 2347 nr = i_data[EXT3_IND_BLOCK];
2206 if (nr) { 2348 if (nr) {
2207 ext3_free_branches(handle, inode, NULL, 2349 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2208 &nr, &nr+1, 1); 2350 i_data[EXT3_IND_BLOCK] = 0;
2209 i_data[EXT3_IND_BLOCK] = 0; 2351 }
2210 } 2352 case EXT3_IND_BLOCK:
2211 case EXT3_IND_BLOCK: 2353 nr = i_data[EXT3_DIND_BLOCK];
2212 nr = i_data[EXT3_DIND_BLOCK]; 2354 if (nr) {
2213 if (nr) { 2355 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2214 ext3_free_branches(handle, inode, NULL, 2356 i_data[EXT3_DIND_BLOCK] = 0;
2215 &nr, &nr+1, 2); 2357 }
2216 i_data[EXT3_DIND_BLOCK] = 0; 2358 case EXT3_DIND_BLOCK:
2217 } 2359 nr = i_data[EXT3_TIND_BLOCK];
2218 case EXT3_DIND_BLOCK: 2360 if (nr) {
2219 nr = i_data[EXT3_TIND_BLOCK]; 2361 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2220 if (nr) { 2362 i_data[EXT3_TIND_BLOCK] = 0;
2221 ext3_free_branches(handle, inode, NULL, 2363 }
2222 &nr, &nr+1, 3); 2364 case EXT3_TIND_BLOCK:
2223 i_data[EXT3_TIND_BLOCK] = 0; 2365 ;
2224 }
2225 case EXT3_TIND_BLOCK:
2226 ;
2227 } 2366 }
2228 2367
2229 ext3_discard_reservation(inode); 2368 ext3_discard_reservation(inode);
@@ -2232,8 +2371,10 @@ do_indirects:
2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2371 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2233 ext3_mark_inode_dirty(handle, inode); 2372 ext3_mark_inode_dirty(handle, inode);
2234 2373
2235 /* In a multi-transaction truncate, we only make the final 2374 /*
2236 * transaction synchronous */ 2375 * In a multi-transaction truncate, we only make the final transaction
2376 * synchronous
2377 */
2237 if (IS_SYNC(inode)) 2378 if (IS_SYNC(inode))
2238 handle->h_sync = 1; 2379 handle->h_sync = 1;
2239out_stop: 2380out_stop:
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
2259 struct ext3_group_desc * gdp; 2400 struct ext3_group_desc * gdp;
2260 2401
2261 2402
2262 if ((ino != EXT3_ROOT_INO && 2403 if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO &&
2263 ino != EXT3_JOURNAL_INO && 2404 ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) ||
2264 ino != EXT3_RESIZE_INO && 2405 ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) {
2265 ino < EXT3_FIRST_INO(sb)) || 2406 ext3_error(sb, "ext3_get_inode_block",
2266 ino > le32_to_cpu(
2267 EXT3_SB(sb)->s_es->s_inodes_count)) {
2268 ext3_error (sb, "ext3_get_inode_block",
2269 "bad inode number: %lu", ino); 2407 "bad inode number: %lu", ino);
2270 return 0; 2408 return 0;
2271 } 2409 }
2272 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2410 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2273 if (block_group >= EXT3_SB(sb)->s_groups_count) { 2411 if (block_group >= EXT3_SB(sb)->s_groups_count) {
2274 ext3_error (sb, "ext3_get_inode_block", 2412 ext3_error(sb,"ext3_get_inode_block","group >= groups count");
2275 "group >= groups count");
2276 return 0; 2413 return 0;
2277 } 2414 }
2278 smp_rmb(); 2415 smp_rmb();
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
2285 return 0; 2422 return 0;
2286 } 2423 }
2287 2424
2288 gdp = (struct ext3_group_desc *) bh->b_data; 2425 gdp = (struct ext3_group_desc *)bh->b_data;
2289 /* 2426 /*
2290 * Figure out the offset within the block group inode table 2427 * Figure out the offset within the block group inode table
2291 */ 2428 */
@@ -2834,7 +2971,7 @@ err_out:
2834 2971
2835 2972
2836/* 2973/*
2837 * akpm: how many blocks doth make a writepage()? 2974 * How many blocks doth make a writepage()?
2838 * 2975 *
2839 * With N blocks per page, it may be: 2976 * With N blocks per page, it may be:
2840 * N data blocks 2977 * N data blocks
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
2924} 3061}
2925 3062
2926/* 3063/*
2927 * akpm: What we do here is to mark the in-core inode as clean 3064 * What we do here is to mark the in-core inode as clean with respect to inode
2928 * with respect to inode dirtiness (it may still be data-dirty). 3065 * dirtiness (it may still be data-dirty).
2929 * This means that the in-core inode may be reaped by prune_icache 3066 * This means that the in-core inode may be reaped by prune_icache
2930 * without having to perform any I/O. This is a very good thing, 3067 * without having to perform any I/O. This is a very good thing,
2931 * because *any* task may call prune_icache - even ones which 3068 * because *any* task may call prune_icache - even ones which
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
2957} 3094}
2958 3095
2959/* 3096/*
2960 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() 3097 * ext3_dirty_inode() is called from __mark_inode_dirty()
2961 * 3098 *
2962 * We're really interested in the case where a file is being extended. 3099 * We're really interested in the case where a file is being extended.
2963 * i_size has been changed by generic_commit_write() and we thus need 3100 * i_size has been changed by generic_commit_write() and we thus need
@@ -2993,7 +3130,7 @@ out:
2993 return; 3130 return;
2994} 3131}
2995 3132
2996#ifdef AKPM 3133#if 0
2997/* 3134/*
2998 * Bind an inode's backing buffer_head into this transaction, to prevent 3135 * Bind an inode's backing buffer_head into this transaction, to prevent
2999 * it from being flushed to disk early. Unlike 3136 * it from being flushed to disk early. Unlike
@@ -3001,8 +3138,7 @@ out:
3001 * returns no iloc structure, so the caller needs to repeat the iloc 3138 * returns no iloc structure, so the caller needs to repeat the iloc
3002 * lookup to mark the inode dirty later. 3139 * lookup to mark the inode dirty later.
3003 */ 3140 */
3004static inline int 3141static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3005ext3_pin_inode(handle_t *handle, struct inode *inode)
3006{ 3142{
3007 struct ext3_iloc iloc; 3143 struct ext3_iloc iloc;
3008 3144
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 86e443182de4..f8a5266ea1ff 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1678 } 1678 }
1679 1679
1680 if (test_opt(sb, NOBH)) { 1680 if (test_opt(sb, NOBH)) {
1681 if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) {
1682 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option "
1683 "since filesystem blocksize doesn't match "
1684 "pagesize\n");
1685 clear_opt(sbi->s_mount_opt, NOBH);
1686 }
1687 if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { 1681 if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
1688 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " 1682 printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
1689 "its supported only with writeback mode\n"); 1683 "its supported only with writeback mode\n");
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4095bc149eb1..698b85bb1dd4 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -741,7 +741,7 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
741 return ret; 741 return ret;
742} 742}
743 743
744struct file_operations fat_dir_operations = { 744const struct file_operations fat_dir_operations = {
745 .read = generic_read_dir, 745 .read = generic_read_dir,
746 .readdir = fat_readdir, 746 .readdir = fat_readdir,
747 .ioctl = fat_dir_ioctl, 747 .ioctl = fat_dir_ioctl,
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 88aa1ae13f9f..1ee25232e6af 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -112,7 +112,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
112 } 112 }
113} 113}
114 114
115struct file_operations fat_file_operations = { 115const struct file_operations fat_file_operations = {
116 .llseek = generic_file_llseek, 116 .llseek = generic_file_llseek,
117 .read = do_sync_read, 117 .read = do_sync_read,
118 .write = do_sync_write, 118 .write = do_sync_write,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 297300fe81c2..c1ce284f8a94 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
101} 101}
102 102
103static int fat_get_blocks(struct inode *inode, sector_t iblock, 103static int fat_get_blocks(struct inode *inode, sector_t iblock,
104 unsigned long max_blocks,
105 struct buffer_head *bh_result, int create) 104 struct buffer_head *bh_result, int create)
106{ 105{
107 struct super_block *sb = inode->i_sb; 106 struct super_block *sb = inode->i_sb;
108 int err; 107 int err;
108 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
109 109
110 err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); 110 err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
111 if (err) 111 if (err)
@@ -1435,9 +1435,6 @@ out_fail:
1435 1435
1436EXPORT_SYMBOL_GPL(fat_fill_super); 1436EXPORT_SYMBOL_GPL(fat_fill_super);
1437 1437
1438int __init fat_cache_init(void);
1439void fat_cache_destroy(void);
1440
1441static int __init init_fat_fs(void) 1438static int __init init_fat_fs(void)
1442{ 1439{
1443 int err; 1440 int err;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb8..2a2479196f96 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
412 412
413/* Table to convert sigio signal codes into poll band bitmaps */ 413/* Table to convert sigio signal codes into poll band bitmaps */
414 414
415static long band_table[NSIGPOLL] = { 415static const long band_table[NSIGPOLL] = {
416 POLLIN | POLLRDNORM, /* POLL_IN */ 416 POLLIN | POLLRDNORM, /* POLL_IN */
417 POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ 417 POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
418 POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ 418 POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
531} 531}
532 532
533static DEFINE_RWLOCK(fasync_lock); 533static DEFINE_RWLOCK(fasync_lock);
534static kmem_cache_t *fasync_cache; 534static kmem_cache_t *fasync_cache __read_mostly;
535 535
536/* 536/*
537 * fasync_helper() is used by some character device drivers (mainly mice) 537 * fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/fifo.c b/fs/fifo.c
index d13fcd3ec803..889f722ee36d 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -145,6 +145,6 @@ err_nocleanup:
145 * is contain the open that then fills in the correct operations 145 * is contain the open that then fills in the correct operations
146 * depending on the access mode of the file... 146 * depending on the access mode of the file...
147 */ 147 */
148struct file_operations def_fifo_fops = { 148const struct file_operations def_fifo_fops = {
149 .open = fifo_open, /* will set read or write pipe_fops */ 149 .open = fifo_open, /* will set read or write pipe_fops */
150}; 150};
diff --git a/fs/file.c b/fs/file.c
index bbc743314730..55f4e7022563 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -373,6 +373,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
373void __init files_defer_init(void) 373void __init files_defer_init(void)
374{ 374{
375 int i; 375 int i;
376 for_each_cpu(i) 376 for_each_possible_cpu(i)
377 fdtable_defer_list_init(i); 377 fdtable_defer_list_init(i);
378} 378}
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index 927acf70c591..1cf1fe8466a2 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -63,7 +63,7 @@ extern void vxfs_clear_inode(struct inode *);
63 63
64/* vxfs_lookup.c */ 64/* vxfs_lookup.c */
65extern struct inode_operations vxfs_dir_inode_ops; 65extern struct inode_operations vxfs_dir_inode_ops;
66extern struct file_operations vxfs_dir_operations; 66extern const struct file_operations vxfs_dir_operations;
67 67
68/* vxfs_olt.c */ 68/* vxfs_olt.c */
69extern int vxfs_read_olt(struct super_block *, u_long); 69extern int vxfs_read_olt(struct super_block *, u_long);
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 554eb455722c..29cce456c7ce 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -56,7 +56,7 @@ struct inode_operations vxfs_dir_inode_ops = {
56 .lookup = vxfs_lookup, 56 .lookup = vxfs_lookup,
57}; 57};
58 58
59struct file_operations vxfs_dir_operations = { 59const struct file_operations vxfs_dir_operations = {
60 .readdir = vxfs_readdir, 60 .readdir = vxfs_readdir,
61}; 61};
62 62
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 0c9a2ee54c91..23d1f52eb1b8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -922,7 +922,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
922 return 0; 922 return 0;
923} 923}
924 924
925struct file_operations fuse_dev_operations = { 925const struct file_operations fuse_dev_operations = {
926 .owner = THIS_MODULE, 926 .owner = THIS_MODULE,
927 .llseek = no_llseek, 927 .llseek = no_llseek,
928 .read = fuse_dev_read, 928 .read = fuse_dev_read,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c72a8a97935c..256355b80256 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1170,7 +1170,7 @@ static struct inode_operations fuse_dir_inode_operations = {
1170 .removexattr = fuse_removexattr, 1170 .removexattr = fuse_removexattr,
1171}; 1171};
1172 1172
1173static struct file_operations fuse_dir_operations = { 1173static const struct file_operations fuse_dir_operations = {
1174 .llseek = generic_file_llseek, 1174 .llseek = generic_file_llseek,
1175 .read = generic_read_dir, 1175 .read = generic_read_dir,
1176 .readdir = fuse_readdir, 1176 .readdir = fuse_readdir,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6f05379b0a0d..975f2697e866 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -12,7 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14 14
15static struct file_operations fuse_direct_io_file_operations; 15static const struct file_operations fuse_direct_io_file_operations;
16 16
17static int fuse_send_open(struct inode *inode, struct file *file, int isdir, 17static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
18 struct fuse_open_out *outargp) 18 struct fuse_open_out *outargp)
@@ -611,7 +611,7 @@ static int fuse_set_page_dirty(struct page *page)
611 return 0; 611 return 0;
612} 612}
613 613
614static struct file_operations fuse_file_operations = { 614static const struct file_operations fuse_file_operations = {
615 .llseek = generic_file_llseek, 615 .llseek = generic_file_llseek,
616 .read = generic_file_read, 616 .read = generic_file_read,
617 .write = generic_file_write, 617 .write = generic_file_write,
@@ -623,7 +623,7 @@ static struct file_operations fuse_file_operations = {
623 .sendfile = generic_file_sendfile, 623 .sendfile = generic_file_sendfile,
624}; 624};
625 625
626static struct file_operations fuse_direct_io_file_operations = { 626static const struct file_operations fuse_direct_io_file_operations = {
627 .llseek = generic_file_llseek, 627 .llseek = generic_file_llseek,
628 .read = fuse_direct_read, 628 .read = fuse_direct_read,
629 .write = fuse_direct_write, 629 .write = fuse_direct_write,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 4a83adfec968..a16a04fcf41e 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -346,7 +346,7 @@ static inline u64 get_node_id(struct inode *inode)
346} 346}
347 347
348/** Device operations */ 348/** Device operations */
349extern struct file_operations fuse_dev_operations; 349extern const struct file_operations fuse_dev_operations;
350 350
351/** 351/**
352 * This is the single global spinlock which protects FUSE's structures 352 * This is the single global spinlock which protects FUSE's structures
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index a7a7d77f3fd3..1e44dcfe49c4 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -306,8 +306,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
306 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 306 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
307 *p && *p != node; p = &(*p)->next_hash) 307 *p && *p != node; p = &(*p)->next_hash)
308 ; 308 ;
309 if (!*p) 309 BUG_ON(!*p);
310 BUG();
311 *p = node->next_hash; 310 *p = node->next_hash;
312 node->tree->node_hash_cnt--; 311 node->tree->node_hash_cnt--;
313} 312}
@@ -415,8 +414,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
415 spin_lock(&tree->hash_lock); 414 spin_lock(&tree->hash_lock);
416 node = hfs_bnode_findhash(tree, num); 415 node = hfs_bnode_findhash(tree, num);
417 spin_unlock(&tree->hash_lock); 416 spin_unlock(&tree->hash_lock);
418 if (node) 417 BUG_ON(node);
419 BUG();
420 node = __hfs_bnode_create(tree, num); 418 node = __hfs_bnode_create(tree, num);
421 if (!node) 419 if (!node)
422 return ERR_PTR(-ENOMEM); 420 return ERR_PTR(-ENOMEM);
@@ -459,8 +457,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
459 457
460 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 458 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
461 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 459 node->tree->cnid, node->this, atomic_read(&node->refcnt));
462 if (!atomic_read(&node->refcnt)) 460 BUG_ON(!atomic_read(&node->refcnt));
463 BUG();
464 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) 461 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
465 return; 462 return;
466 for (i = 0; i < tree->pages_per_bnode; i++) { 463 for (i = 0; i < tree->pages_per_bnode; i++) {
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 7bb11edd1488..d20131ce4b95 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -36,8 +36,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
36 tree->inode = iget_locked(sb, id); 36 tree->inode = iget_locked(sb, id);
37 if (!tree->inode) 37 if (!tree->inode)
38 goto free_tree; 38 goto free_tree;
39 if (!(tree->inode->i_state & I_NEW)) 39 BUG_ON(!(tree->inode->i_state & I_NEW));
40 BUG();
41 { 40 {
42 struct hfs_mdb *mdb = HFS_SB(sb)->mdb; 41 struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
43 HFS_I(tree->inode)->flags = 0; 42 HFS_I(tree->inode)->flags = 0;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 534e5a7480ef..7cd8cc03aea7 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -313,7 +313,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
313 return res; 313 return res;
314} 314}
315 315
316struct file_operations hfs_dir_operations = { 316const struct file_operations hfs_dir_operations = {
317 .read = generic_read_dir, 317 .read = generic_read_dir,
318 .readdir = hfs_readdir, 318 .readdir = hfs_readdir,
319 .llseek = generic_file_llseek, 319 .llseek = generic_file_llseek,
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 18ce47ab1b71..3ed8663a8db1 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -169,7 +169,7 @@ extern int hfs_cat_move(u32, struct inode *, struct qstr *,
169extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *); 169extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *);
170 170
171/* dir.c */ 171/* dir.c */
172extern struct file_operations hfs_dir_operations; 172extern const struct file_operations hfs_dir_operations;
173extern struct inode_operations hfs_dir_inode_operations; 173extern struct inode_operations hfs_dir_inode_operations;
174 174
175/* extent.c */ 175/* extent.c */
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 39fd85b9b916..2d4ced22201b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -17,7 +17,7 @@
17#include "hfs_fs.h" 17#include "hfs_fs.h"
18#include "btree.h" 18#include "btree.h"
19 19
20static struct file_operations hfs_file_operations; 20static const struct file_operations hfs_file_operations;
21static struct inode_operations hfs_file_inode_operations; 21static struct inode_operations hfs_file_inode_operations;
22 22
23/*================ Variable-like macros ================*/ 23/*================ Variable-like macros ================*/
@@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
98 return res ? try_to_free_buffers(page) : 0; 98 return res ? try_to_free_buffers(page) : 0;
99} 99}
100 100
101static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
102 struct buffer_head *bh_result, int create)
103{
104 int ret;
105
106 ret = hfs_get_block(inode, iblock, bh_result, create);
107 if (!ret)
108 bh_result->b_size = (1 << inode->i_blkbits);
109 return ret;
110}
111
112static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, 101static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
113 const struct iovec *iov, loff_t offset, unsigned long nr_segs) 102 const struct iovec *iov, loff_t offset, unsigned long nr_segs)
114{ 103{
@@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
116 struct inode *inode = file->f_dentry->d_inode->i_mapping->host; 105 struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
117 106
118 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 107 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
119 offset, nr_segs, hfs_get_blocks, NULL); 108 offset, nr_segs, hfs_get_block, NULL);
120} 109}
121 110
122static int hfs_writepages(struct address_space *mapping, 111static int hfs_writepages(struct address_space *mapping,
@@ -612,7 +601,7 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
612} 601}
613 602
614 603
615static struct file_operations hfs_file_operations = { 604static const struct file_operations hfs_file_operations = {
616 .llseek = generic_file_llseek, 605 .llseek = generic_file_llseek,
617 .read = generic_file_read, 606 .read = generic_file_read,
618 .write = generic_file_write, 607 .write = generic_file_write,
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 01a6fe3a395c..1f9ece0de326 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -483,7 +483,7 @@ struct inode_operations hfsplus_dir_inode_operations = {
483 .rename = hfsplus_rename, 483 .rename = hfsplus_rename,
484}; 484};
485 485
486struct file_operations hfsplus_dir_operations = { 486const struct file_operations hfsplus_dir_operations = {
487 .read = generic_read_dir, 487 .read = generic_read_dir,
488 .readdir = hfsplus_readdir, 488 .readdir = hfsplus_readdir,
489 .ioctl = hfsplus_ioctl, 489 .ioctl = hfsplus_ioctl,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 12ed2b7d046b..acf66dba3e01 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
93 return res ? try_to_free_buffers(page) : 0; 93 return res ? try_to_free_buffers(page) : 0;
94} 94}
95 95
96static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
97 struct buffer_head *bh_result, int create)
98{
99 int ret;
100
101 ret = hfsplus_get_block(inode, iblock, bh_result, create);
102 if (!ret)
103 bh_result->b_size = (1 << inode->i_blkbits);
104 return ret;
105}
106
107static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, 96static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
108 const struct iovec *iov, loff_t offset, unsigned long nr_segs) 97 const struct iovec *iov, loff_t offset, unsigned long nr_segs)
109{ 98{
@@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
111 struct inode *inode = file->f_dentry->d_inode->i_mapping->host; 100 struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
112 101
113 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 102 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
114 offset, nr_segs, hfsplus_get_blocks, NULL); 103 offset, nr_segs, hfsplus_get_block, NULL);
115} 104}
116 105
117static int hfsplus_writepages(struct address_space *mapping, 106static int hfsplus_writepages(struct address_space *mapping,
@@ -291,7 +280,7 @@ static struct inode_operations hfsplus_file_inode_operations = {
291 .listxattr = hfsplus_listxattr, 280 .listxattr = hfsplus_listxattr,
292}; 281};
293 282
294static struct file_operations hfsplus_file_operations = { 283static const struct file_operations hfsplus_file_operations = {
295 .llseek = generic_file_llseek, 284 .llseek = generic_file_llseek,
296 .read = generic_file_read, 285 .read = generic_file_read,
297 .write = generic_file_write, 286 .write = generic_file_write,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index b3ad0bd0312f..bf0f8e16e433 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -384,7 +384,7 @@ int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync)
384 return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync); 384 return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync);
385} 385}
386 386
387static struct file_operations hostfs_file_fops = { 387static const struct file_operations hostfs_file_fops = {
388 .llseek = generic_file_llseek, 388 .llseek = generic_file_llseek,
389 .read = generic_file_read, 389 .read = generic_file_read,
390 .sendfile = generic_file_sendfile, 390 .sendfile = generic_file_sendfile,
@@ -399,7 +399,7 @@ static struct file_operations hostfs_file_fops = {
399 .fsync = hostfs_fsync, 399 .fsync = hostfs_fsync,
400}; 400};
401 401
402static struct file_operations hostfs_dir_fops = { 402static const struct file_operations hostfs_dir_fops = {
403 .llseek = generic_file_llseek, 403 .llseek = generic_file_llseek,
404 .readdir = hostfs_readdir, 404 .readdir = hostfs_readdir,
405 .read = generic_read_dir, 405 .read = generic_read_dir,
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index b97809deba66..23b7cee72123 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -360,7 +360,6 @@ int do_statfs(char *root, long *bsize_out, long long *blocks_out,
360 spare_out[2] = buf.f_spare[2]; 360 spare_out[2] = buf.f_spare[2];
361 spare_out[3] = buf.f_spare[3]; 361 spare_out[3] = buf.f_spare[3];
362 spare_out[4] = buf.f_spare[4]; 362 spare_out[4] = buf.f_spare[4];
363 spare_out[5] = buf.f_spare[5];
364 return(0); 363 return(0);
365} 364}
366 365
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 5591f9623aa2..ecc9180645ae 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -310,7 +310,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
310 return ERR_PTR(-ENOENT); 310 return ERR_PTR(-ENOENT);
311} 311}
312 312
313struct file_operations hpfs_dir_ops = 313const struct file_operations hpfs_dir_ops =
314{ 314{
315 .llseek = hpfs_dir_lseek, 315 .llseek = hpfs_dir_lseek,
316 .read = generic_read_dir, 316 .read = generic_read_dir,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 7c995ac4081b..d3b9fffe45a1 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -119,7 +119,7 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
119 return retval; 119 return retval;
120} 120}
121 121
122struct file_operations hpfs_file_ops = 122const struct file_operations hpfs_file_ops =
123{ 123{
124 .llseek = generic_file_llseek, 124 .llseek = generic_file_llseek,
125 .read = generic_file_read, 125 .read = generic_file_read,
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 4c6473ab3b34..29b7a3e55173 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -240,7 +240,7 @@ void hpfs_set_dentry_operations(struct dentry *);
240/* dir.c */ 240/* dir.c */
241 241
242struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *); 242struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *);
243extern struct file_operations hpfs_dir_ops; 243extern const struct file_operations hpfs_dir_ops;
244 244
245/* dnode.c */ 245/* dnode.c */
246 246
@@ -266,7 +266,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int);
266/* file.c */ 266/* file.c */
267 267
268int hpfs_file_fsync(struct file *, struct dentry *, int); 268int hpfs_file_fsync(struct file *, struct dentry *, int);
269extern struct file_operations hpfs_file_ops; 269extern const struct file_operations hpfs_file_ops;
270extern struct inode_operations hpfs_file_iops; 270extern struct inode_operations hpfs_file_iops;
271extern struct address_space_operations hpfs_aops; 271extern struct address_space_operations hpfs_aops;
272 272
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index a44dc5897399..2ba20cdb5baa 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -558,7 +558,7 @@ static loff_t hppfs_llseek(struct file *file, loff_t off, int where)
558 return(default_llseek(file, off, where)); 558 return(default_llseek(file, off, where));
559} 559}
560 560
561static struct file_operations hppfs_file_fops = { 561static const struct file_operations hppfs_file_fops = {
562 .owner = NULL, 562 .owner = NULL,
563 .llseek = hppfs_llseek, 563 .llseek = hppfs_llseek,
564 .read = hppfs_read, 564 .read = hppfs_read,
@@ -609,7 +609,7 @@ static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync)
609 return(0); 609 return(0);
610} 610}
611 611
612static struct file_operations hppfs_dir_fops = { 612static const struct file_operations hppfs_dir_fops = {
613 .owner = NULL, 613 .owner = NULL,
614 .readdir = hppfs_readdir, 614 .readdir = hppfs_readdir,
615 .open = hppfs_dir_open, 615 .open = hppfs_dir_open,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 25fa8bba8cb5..3a5b4e923455 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -35,7 +35,7 @@
35 35
36static struct super_operations hugetlbfs_ops; 36static struct super_operations hugetlbfs_ops;
37static struct address_space_operations hugetlbfs_aops; 37static struct address_space_operations hugetlbfs_aops;
38struct file_operations hugetlbfs_file_operations; 38const struct file_operations hugetlbfs_file_operations;
39static struct inode_operations hugetlbfs_dir_inode_operations; 39static struct inode_operations hugetlbfs_dir_inode_operations;
40static struct inode_operations hugetlbfs_inode_operations; 40static struct inode_operations hugetlbfs_inode_operations;
41 41
@@ -566,7 +566,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
566 inode_init_once(&ei->vfs_inode); 566 inode_init_once(&ei->vfs_inode);
567} 567}
568 568
569struct file_operations hugetlbfs_file_operations = { 569const struct file_operations hugetlbfs_file_operations = {
570 .mmap = hugetlbfs_file_mmap, 570 .mmap = hugetlbfs_file_mmap,
571 .fsync = simple_sync_file, 571 .fsync = simple_sync_file,
572 .get_unmapped_area = hugetlb_get_unmapped_area, 572 .get_unmapped_area = hugetlb_get_unmapped_area,
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044adc..32b7c3375021 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
56#define I_HASHBITS i_hash_shift 56#define I_HASHBITS i_hash_shift
57#define I_HASHMASK i_hash_mask 57#define I_HASHMASK i_hash_mask
58 58
59static unsigned int i_hash_mask; 59static unsigned int i_hash_mask __read_mostly;
60static unsigned int i_hash_shift; 60static unsigned int i_hash_shift __read_mostly;
61 61
62/* 62/*
63 * Each inode can be on two separate lists. One is 63 * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
73 73
74LIST_HEAD(inode_in_use); 74LIST_HEAD(inode_in_use);
75LIST_HEAD(inode_unused); 75LIST_HEAD(inode_unused);
76static struct hlist_head *inode_hashtable; 76static struct hlist_head *inode_hashtable __read_mostly;
77 77
78/* 78/*
79 * A simple spinlock to protect the list manipulations. 79 * A simple spinlock to protect the list manipulations.
@@ -98,13 +98,13 @@ static DEFINE_MUTEX(iprune_mutex);
98 */ 98 */
99struct inodes_stat_t inodes_stat; 99struct inodes_stat_t inodes_stat;
100 100
101static kmem_cache_t * inode_cachep; 101static kmem_cache_t * inode_cachep __read_mostly;
102 102
103static struct inode *alloc_inode(struct super_block *sb) 103static struct inode *alloc_inode(struct super_block *sb)
104{ 104{
105 static struct address_space_operations empty_aops; 105 static struct address_space_operations empty_aops;
106 static struct inode_operations empty_iops; 106 static struct inode_operations empty_iops;
107 static struct file_operations empty_fops; 107 static const struct file_operations empty_fops;
108 struct inode *inode; 108 struct inode *inode;
109 109
110 if (sb->s_op->alloc_inode) 110 if (sb->s_op->alloc_inode)
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e17853..367c487c014b 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
39 39
40static atomic_t inotify_cookie; 40static atomic_t inotify_cookie;
41 41
42static kmem_cache_t *watch_cachep; 42static kmem_cache_t *watch_cachep __read_mostly;
43static kmem_cache_t *event_cachep; 43static kmem_cache_t *event_cachep __read_mostly;
44 44
45static struct vfsmount *inotify_mnt; 45static struct vfsmount *inotify_mnt __read_mostly;
46 46
47/* these are configurable via /proc/sys/fs/inotify/ */ 47/* these are configurable via /proc/sys/fs/inotify/ */
48int inotify_max_user_instances; 48int inotify_max_user_instances __read_mostly;
49int inotify_max_user_watches; 49int inotify_max_user_watches __read_mostly;
50int inotify_max_queued_events; 50int inotify_max_queued_events __read_mostly;
51 51
52/* 52/*
53 * Lock ordering: 53 * Lock ordering:
@@ -920,7 +920,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
920 return ret; 920 return ret;
921} 921}
922 922
923static struct file_operations inotify_fops = { 923static const struct file_operations inotify_fops = {
924 .poll = inotify_poll, 924 .poll = inotify_poll,
925 .read = inotify_read, 925 .read = inotify_read,
926 .release = inotify_release, 926 .release = inotify_release,
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 7901ac9f97ab..5440ea292c69 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -16,7 +16,7 @@
16 16
17static int isofs_readdir(struct file *, void *, filldir_t); 17static int isofs_readdir(struct file *, void *, filldir_t);
18 18
19struct file_operations isofs_dir_operations = 19const struct file_operations isofs_dir_operations =
20{ 20{
21 .read = generic_read_dir, 21 .read = generic_read_dir,
22 .readdir = isofs_readdir, 22 .readdir = isofs_readdir,
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 439a19b1bf3e..b87ba066f5e7 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -175,6 +175,6 @@ isofs_normalize_block_and_offset(struct iso_directory_record* de,
175} 175}
176 176
177extern struct inode_operations isofs_dir_inode_operations; 177extern struct inode_operations isofs_dir_inode_operations;
178extern struct file_operations isofs_dir_operations; 178extern const struct file_operations isofs_dir_operations;
179extern struct address_space_operations isofs_symlink_aops; 179extern struct address_space_operations isofs_symlink_aops;
180extern struct export_operations isofs_export_ops; 180extern struct export_operations isofs_export_ops;
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ada31fa272e3..c609f5034fcd 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1873,16 +1873,15 @@ zap_buffer_unlocked:
1873} 1873}
1874 1874
1875/** 1875/**
1876 * int journal_invalidatepage() 1876 * void journal_invalidatepage()
1877 * @journal: journal to use for flush... 1877 * @journal: journal to use for flush...
1878 * @page: page to flush 1878 * @page: page to flush
1879 * @offset: length of page to invalidate. 1879 * @offset: length of page to invalidate.
1880 * 1880 *
1881 * Reap page buffers containing data after offset in page. 1881 * Reap page buffers containing data after offset in page.
1882 * 1882 *
1883 * Return non-zero if the page's buffers were successfully reaped.
1884 */ 1883 */
1885int journal_invalidatepage(journal_t *journal, 1884void journal_invalidatepage(journal_t *journal,
1886 struct page *page, 1885 struct page *page,
1887 unsigned long offset) 1886 unsigned long offset)
1888{ 1887{
@@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal,
1893 if (!PageLocked(page)) 1892 if (!PageLocked(page))
1894 BUG(); 1893 BUG();
1895 if (!page_has_buffers(page)) 1894 if (!page_has_buffers(page))
1896 return 1; 1895 return;
1897 1896
1898 /* We will potentially be playing with lists other than just the 1897 /* We will potentially be playing with lists other than just the
1899 * data lists (especially for journaled data mode), so be 1898 * data lists (especially for journaled data mode), so be
@@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal,
1916 } while (bh != head); 1915 } while (bh != head);
1917 1916
1918 if (!offset) { 1917 if (!offset) {
1919 if (!may_free || !try_to_free_buffers(page)) 1918 if (may_free && try_to_free_buffers(page))
1920 return 0; 1919 J_ASSERT(!page_has_buffers(page));
1921 J_ASSERT(!page_has_buffers(page));
1922 } 1920 }
1923 return 1;
1924} 1921}
1925 1922
1926/* 1923/*
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 5a4519e834da..020cc097c539 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -55,9 +55,9 @@
55static int jffs_remove(struct inode *dir, struct dentry *dentry, int type); 55static int jffs_remove(struct inode *dir, struct dentry *dentry, int type);
56 56
57static struct super_operations jffs_ops; 57static struct super_operations jffs_ops;
58static struct file_operations jffs_file_operations; 58static const struct file_operations jffs_file_operations;
59static struct inode_operations jffs_file_inode_operations; 59static struct inode_operations jffs_file_inode_operations;
60static struct file_operations jffs_dir_operations; 60static const struct file_operations jffs_dir_operations;
61static struct inode_operations jffs_dir_inode_operations; 61static struct inode_operations jffs_dir_inode_operations;
62static struct address_space_operations jffs_address_operations; 62static struct address_space_operations jffs_address_operations;
63 63
@@ -1629,7 +1629,7 @@ static int jffs_fsync(struct file *f, struct dentry *d, int datasync)
1629} 1629}
1630 1630
1631 1631
1632static struct file_operations jffs_file_operations = 1632static const struct file_operations jffs_file_operations =
1633{ 1633{
1634 .open = generic_file_open, 1634 .open = generic_file_open,
1635 .llseek = generic_file_llseek, 1635 .llseek = generic_file_llseek,
@@ -1649,7 +1649,7 @@ static struct inode_operations jffs_file_inode_operations =
1649}; 1649};
1650 1650
1651 1651
1652static struct file_operations jffs_dir_operations = 1652static const struct file_operations jffs_dir_operations =
1653{ 1653{
1654 .readdir = jffs_readdir, 1654 .readdir = jffs_readdir,
1655}; 1655};
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 4db8be8e90cc..5c63e0cdcf4c 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -33,13 +33,14 @@
33 */ 33 */
34#define STREAM_END_SPACE 12 34#define STREAM_END_SPACE 12
35 35
36static DECLARE_MUTEX(deflate_sem); 36static DEFINE_MUTEX(deflate_mutex);
37static DECLARE_MUTEX(inflate_sem); 37static DEFINE_MUTEX(inflate_mutex);
38static z_stream inf_strm, def_strm; 38static z_stream inf_strm, def_strm;
39 39
40#ifdef __KERNEL__ /* Linux-only */ 40#ifdef __KERNEL__ /* Linux-only */
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/mutex.h>
43 44
44static int __init alloc_workspaces(void) 45static int __init alloc_workspaces(void)
45{ 46{
@@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in,
79 if (*dstlen <= STREAM_END_SPACE) 80 if (*dstlen <= STREAM_END_SPACE)
80 return -1; 81 return -1;
81 82
82 down(&deflate_sem); 83 mutex_lock(&deflate_mutex);
83 84
84 if (Z_OK != zlib_deflateInit(&def_strm, 3)) { 85 if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
85 printk(KERN_WARNING "deflateInit failed\n"); 86 printk(KERN_WARNING "deflateInit failed\n");
86 up(&deflate_sem); 87 mutex_unlock(&deflate_mutex);
87 return -1; 88 return -1;
88 } 89 }
89 90
@@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
104 if (ret != Z_OK) { 105 if (ret != Z_OK) {
105 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); 106 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
106 zlib_deflateEnd(&def_strm); 107 zlib_deflateEnd(&def_strm);
107 up(&deflate_sem); 108 mutex_unlock(&deflate_mutex);
108 return -1; 109 return -1;
109 } 110 }
110 } 111 }
@@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
133 *sourcelen = def_strm.total_in; 134 *sourcelen = def_strm.total_in;
134 ret = 0; 135 ret = 0;
135 out: 136 out:
136 up(&deflate_sem); 137 mutex_unlock(&deflate_mutex);
137 return ret; 138 return ret;
138} 139}
139 140
@@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
145 int ret; 146 int ret;
146 int wbits = MAX_WBITS; 147 int wbits = MAX_WBITS;
147 148
148 down(&inflate_sem); 149 mutex_lock(&inflate_mutex);
149 150
150 inf_strm.next_in = data_in; 151 inf_strm.next_in = data_in;
151 inf_strm.avail_in = srclen; 152 inf_strm.avail_in = srclen;
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
173 174
174 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { 175 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
175 printk(KERN_WARNING "inflateInit failed\n"); 176 printk(KERN_WARNING "inflateInit failed\n");
176 up(&inflate_sem); 177 mutex_unlock(&inflate_mutex);
177 return 1; 178 return 1;
178 } 179 }
179 180
@@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
183 printk(KERN_NOTICE "inflate returned %d\n", ret); 184 printk(KERN_NOTICE "inflate returned %d\n", ret);
184 } 185 }
185 zlib_inflateEnd(&inf_strm); 186 zlib_inflateEnd(&inf_strm);
186 up(&inflate_sem); 187 mutex_unlock(&inflate_mutex);
187 return 0; 188 return 0;
188} 189}
189 190
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index a7bf9cb2567f..8bc7a5018e40 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -37,7 +37,7 @@ static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t);
37static int jffs2_rename (struct inode *, struct dentry *, 37static int jffs2_rename (struct inode *, struct dentry *,
38 struct inode *, struct dentry *); 38 struct inode *, struct dentry *);
39 39
40struct file_operations jffs2_dir_operations = 40const struct file_operations jffs2_dir_operations =
41{ 41{
42 .read = generic_read_dir, 42 .read = generic_read_dir,
43 .readdir = jffs2_readdir, 43 .readdir = jffs2_readdir,
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 935f273dc57b..9f4171213e58 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -38,7 +38,7 @@ int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync)
38 return 0; 38 return 0;
39} 39}
40 40
41struct file_operations jffs2_file_operations = 41const struct file_operations jffs2_file_operations =
42{ 42{
43 .llseek = generic_file_llseek, 43 .llseek = generic_file_llseek,
44 .open = generic_file_open, 44 .open = generic_file_open,
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 59e7a393200c..d307cf548625 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -159,11 +159,11 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
159void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c); 159void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
160 160
161/* dir.c */ 161/* dir.c */
162extern struct file_operations jffs2_dir_operations; 162extern const struct file_operations jffs2_dir_operations;
163extern struct inode_operations jffs2_dir_inode_operations; 163extern struct inode_operations jffs2_dir_inode_operations;
164 164
165/* file.c */ 165/* file.c */
166extern struct file_operations jffs2_file_operations; 166extern const struct file_operations jffs2_file_operations;
167extern struct inode_operations jffs2_file_inode_operations; 167extern struct inode_operations jffs2_file_inode_operations;
168extern struct address_space_operations jffs2_file_address_operations; 168extern struct address_space_operations jffs2_file_address_operations;
169int jffs2_fsync(struct file *, struct dentry *, int); 169int jffs2_fsync(struct file *, struct dentry *, int);
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index e1ac6e497e2b..1c9745be5ada 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -100,7 +100,7 @@ struct inode_operations jfs_file_inode_operations = {
100#endif 100#endif
101}; 101};
102 102
103struct file_operations jfs_file_operations = { 103const struct file_operations jfs_file_operations = {
104 .open = jfs_open, 104 .open = jfs_open,
105 .llseek = generic_file_llseek, 105 .llseek = generic_file_llseek,
106 .write = generic_file_write, 106 .write = generic_file_write,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 51a5fed90cca..04eb78f1252e 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
258static int jfs_get_block(struct inode *ip, sector_t lblock, 258static int jfs_get_block(struct inode *ip, sector_t lblock,
259 struct buffer_head *bh_result, int create) 259 struct buffer_head *bh_result, int create)
260{ 260{
261 return jfs_get_blocks(ip, lblock, 1, bh_result, create); 261 return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits,
262 bh_result, create);
262} 263}
263 264
264static int jfs_writepage(struct page *page, struct writeback_control *wbc) 265static int jfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
301 struct inode *inode = file->f_mapping->host; 302 struct inode *inode = file->f_mapping->host;
302 303
303 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 304 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
304 offset, nr_segs, jfs_get_blocks, NULL); 305 offset, nr_segs, jfs_get_block, NULL);
305} 306}
306 307
307struct address_space_operations jfs_aops = { 308struct address_space_operations jfs_aops = {
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 095d471b9f9a..c30072674464 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -35,9 +35,9 @@ extern void jfs_set_inode_flags(struct inode *);
35 35
36extern struct address_space_operations jfs_aops; 36extern struct address_space_operations jfs_aops;
37extern struct inode_operations jfs_dir_inode_operations; 37extern struct inode_operations jfs_dir_inode_operations;
38extern struct file_operations jfs_dir_operations; 38extern const struct file_operations jfs_dir_operations;
39extern struct inode_operations jfs_file_inode_operations; 39extern struct inode_operations jfs_file_inode_operations;
40extern struct file_operations jfs_file_operations; 40extern const struct file_operations jfs_file_operations;
41extern struct inode_operations jfs_symlink_inode_operations; 41extern struct inode_operations jfs_symlink_inode_operations;
42extern struct dentry_operations jfs_ci_dentry_operations; 42extern struct dentry_operations jfs_ci_dentry_operations;
43#endif /* _H_JFS_INODE */ 43#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0b348b13b551..3315f0b1fbc0 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -69,6 +69,7 @@
69#include <linux/bio.h> 69#include <linux/bio.h>
70#include <linux/suspend.h> 70#include <linux/suspend.h>
71#include <linux/delay.h> 71#include <linux/delay.h>
72#include <linux/mutex.h>
72#include "jfs_incore.h" 73#include "jfs_incore.h"
73#include "jfs_filsys.h" 74#include "jfs_filsys.h"
74#include "jfs_metapage.h" 75#include "jfs_metapage.h"
@@ -165,7 +166,7 @@ do { \
165 */ 166 */
166static LIST_HEAD(jfs_external_logs); 167static LIST_HEAD(jfs_external_logs);
167static struct jfs_log *dummy_log = NULL; 168static struct jfs_log *dummy_log = NULL;
168static DECLARE_MUTEX(jfs_log_sem); 169static DEFINE_MUTEX(jfs_log_mutex);
169 170
170/* 171/*
171 * forward references 172 * forward references
@@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb)
1085 if (sbi->mntflag & JFS_INLINELOG) 1086 if (sbi->mntflag & JFS_INLINELOG)
1086 return open_inline_log(sb); 1087 return open_inline_log(sb);
1087 1088
1088 down(&jfs_log_sem); 1089 mutex_lock(&jfs_log_mutex);
1089 list_for_each_entry(log, &jfs_external_logs, journal_list) { 1090 list_for_each_entry(log, &jfs_external_logs, journal_list) {
1090 if (log->bdev->bd_dev == sbi->logdev) { 1091 if (log->bdev->bd_dev == sbi->logdev) {
1091 if (memcmp(log->uuid, sbi->loguuid, 1092 if (memcmp(log->uuid, sbi->loguuid,
1092 sizeof(log->uuid))) { 1093 sizeof(log->uuid))) {
1093 jfs_warn("wrong uuid on JFS journal\n"); 1094 jfs_warn("wrong uuid on JFS journal\n");
1094 up(&jfs_log_sem); 1095 mutex_unlock(&jfs_log_mutex);
1095 return -EINVAL; 1096 return -EINVAL;
1096 } 1097 }
1097 /* 1098 /*
1098 * add file system to log active file system list 1099 * add file system to log active file system list
1099 */ 1100 */
1100 if ((rc = lmLogFileSystem(log, sbi, 1))) { 1101 if ((rc = lmLogFileSystem(log, sbi, 1))) {
1101 up(&jfs_log_sem); 1102 mutex_unlock(&jfs_log_mutex);
1102 return rc; 1103 return rc;
1103 } 1104 }
1104 goto journal_found; 1105 goto journal_found;
@@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb)
1106 } 1107 }
1107 1108
1108 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { 1109 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
1109 up(&jfs_log_sem); 1110 mutex_unlock(&jfs_log_mutex);
1110 return -ENOMEM; 1111 return -ENOMEM;
1111 } 1112 }
1112 INIT_LIST_HEAD(&log->sb_list); 1113 INIT_LIST_HEAD(&log->sb_list);
@@ -1151,7 +1152,7 @@ journal_found:
1151 sbi->log = log; 1152 sbi->log = log;
1152 LOG_UNLOCK(log); 1153 LOG_UNLOCK(log);
1153 1154
1154 up(&jfs_log_sem); 1155 mutex_unlock(&jfs_log_mutex);
1155 return 0; 1156 return 0;
1156 1157
1157 /* 1158 /*
@@ -1168,7 +1169,7 @@ journal_found:
1168 blkdev_put(bdev); 1169 blkdev_put(bdev);
1169 1170
1170 free: /* free log descriptor */ 1171 free: /* free log descriptor */
1171 up(&jfs_log_sem); 1172 mutex_unlock(&jfs_log_mutex);
1172 kfree(log); 1173 kfree(log);
1173 1174
1174 jfs_warn("lmLogOpen: exit(%d)", rc); 1175 jfs_warn("lmLogOpen: exit(%d)", rc);
@@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb)
1212{ 1213{
1213 int rc; 1214 int rc;
1214 1215
1215 down(&jfs_log_sem); 1216 mutex_lock(&jfs_log_mutex);
1216 if (!dummy_log) { 1217 if (!dummy_log) {
1217 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); 1218 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
1218 if (!dummy_log) { 1219 if (!dummy_log) {
1219 up(&jfs_log_sem); 1220 mutex_unlock(&jfs_log_mutex);
1220 return -ENOMEM; 1221 return -ENOMEM;
1221 } 1222 }
1222 INIT_LIST_HEAD(&dummy_log->sb_list); 1223 INIT_LIST_HEAD(&dummy_log->sb_list);
@@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb)
1229 if (rc) { 1230 if (rc) {
1230 kfree(dummy_log); 1231 kfree(dummy_log);
1231 dummy_log = NULL; 1232 dummy_log = NULL;
1232 up(&jfs_log_sem); 1233 mutex_unlock(&jfs_log_mutex);
1233 return rc; 1234 return rc;
1234 } 1235 }
1235 } 1236 }
@@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb)
1238 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); 1239 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
1239 JFS_SBI(sb)->log = dummy_log; 1240 JFS_SBI(sb)->log = dummy_log;
1240 LOG_UNLOCK(dummy_log); 1241 LOG_UNLOCK(dummy_log);
1241 up(&jfs_log_sem); 1242 mutex_unlock(&jfs_log_mutex);
1242 1243
1243 return 0; 1244 return 0;
1244} 1245}
@@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb)
1466 1467
1467 jfs_info("lmLogClose: log:0x%p", log); 1468 jfs_info("lmLogClose: log:0x%p", log);
1468 1469
1469 down(&jfs_log_sem); 1470 mutex_lock(&jfs_log_mutex);
1470 LOG_LOCK(log); 1471 LOG_LOCK(log);
1471 list_del(&sbi->log_list); 1472 list_del(&sbi->log_list);
1472 LOG_UNLOCK(log); 1473 LOG_UNLOCK(log);
@@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb)
1516 kfree(log); 1517 kfree(log);
1517 1518
1518 out: 1519 out:
1519 up(&jfs_log_sem); 1520 mutex_unlock(&jfs_log_mutex);
1520 jfs_info("lmLogClose: exit(%d)", rc); 1521 jfs_info("lmLogClose: exit(%d)", rc);
1521 return rc; 1522 return rc;
1522} 1523}
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 5fbaeaadccd3..f28696f235c4 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -220,8 +220,8 @@ int __init metapage_init(void)
220 if (metapage_cache == NULL) 220 if (metapage_cache == NULL)
221 return -ENOMEM; 221 return -ENOMEM;
222 222
223 metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab, 223 metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
224 mempool_free_slab, metapage_cache); 224 metapage_cache);
225 225
226 if (metapage_mempool == NULL) { 226 if (metapage_mempool == NULL) {
227 kmem_cache_destroy(metapage_cache); 227 kmem_cache_destroy(metapage_cache);
@@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
578 return 0; 578 return 0;
579} 579}
580 580
581static int metapage_invalidatepage(struct page *page, unsigned long offset) 581static void metapage_invalidatepage(struct page *page, unsigned long offset)
582{ 582{
583 BUG_ON(offset); 583 BUG_ON(offset);
584 584
585 if (PageWriteback(page)) 585 BUG_ON(PageWriteback(page));
586 return 0;
587 586
588 return metapage_releasepage(page, 0); 587 metapage_releasepage(page, 0);
589} 588}
590 589
591struct address_space_operations jfs_metapage_aops = { 590struct address_space_operations jfs_metapage_aops = {
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 309cee575f7d..09ea03f62277 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1519,7 +1519,7 @@ struct inode_operations jfs_dir_inode_operations = {
1519#endif 1519#endif
1520}; 1520};
1521 1521
1522struct file_operations jfs_dir_operations = { 1522const struct file_operations jfs_dir_operations = {
1523 .read = generic_read_dir, 1523 .read = generic_read_dir,
1524 .readdir = jfs_readdir, 1524 .readdir = jfs_readdir,
1525 .fsync = jfs_fsync, 1525 .fsync = jfs_fsync,
diff --git a/fs/libfs.c b/fs/libfs.c
index 4fdeaceb892c..7145ba7a48d0 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -179,7 +179,7 @@ ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t
179 return -EISDIR; 179 return -EISDIR;
180} 180}
181 181
182struct file_operations simple_dir_operations = { 182const struct file_operations simple_dir_operations = {
183 .open = dcache_dir_open, 183 .open = dcache_dir_open,
184 .release = dcache_dir_close, 184 .release = dcache_dir_close,
185 .llseek = dcache_dir_lseek, 185 .llseek = dcache_dir_lseek,
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 112ebf8b8dfe..729ac427d359 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -16,6 +16,7 @@
16#include <linux/sunrpc/svc.h> 16#include <linux/sunrpc/svc.h>
17#include <linux/lockd/lockd.h> 17#include <linux/lockd/lockd.h>
18#include <linux/lockd/sm_inter.h> 18#include <linux/lockd/sm_inter.h>
19#include <linux/mutex.h>
19 20
20 21
21#define NLMDBG_FACILITY NLMDBG_HOSTCACHE 22#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
@@ -30,7 +31,7 @@
30static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; 31static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
31static unsigned long next_gc; 32static unsigned long next_gc;
32static int nrhosts; 33static int nrhosts;
33static DECLARE_MUTEX(nlm_host_sema); 34static DEFINE_MUTEX(nlm_host_mutex);
34 35
35 36
36static void nlm_gc_hosts(void); 37static void nlm_gc_hosts(void);
@@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
71 hash = NLM_ADDRHASH(sin->sin_addr.s_addr); 72 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
72 73
73 /* Lock hash table */ 74 /* Lock hash table */
74 down(&nlm_host_sema); 75 mutex_lock(&nlm_host_mutex);
75 76
76 if (time_after_eq(jiffies, next_gc)) 77 if (time_after_eq(jiffies, next_gc))
77 nlm_gc_hosts(); 78 nlm_gc_hosts();
@@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
91 nlm_hosts[hash] = host; 92 nlm_hosts[hash] = host;
92 } 93 }
93 nlm_get_host(host); 94 nlm_get_host(host);
94 up(&nlm_host_sema); 95 mutex_unlock(&nlm_host_mutex);
95 return host; 96 return host;
96 } 97 }
97 } 98 }
@@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
130 next_gc = 0; 131 next_gc = 0;
131 132
132nohost: 133nohost:
133 up(&nlm_host_sema); 134 mutex_unlock(&nlm_host_mutex);
134 return host; 135 return host;
135} 136}
136 137
@@ -141,19 +142,19 @@ nlm_find_client(void)
141 * and return it 142 * and return it
142 */ 143 */
143 int hash; 144 int hash;
144 down(&nlm_host_sema); 145 mutex_lock(&nlm_host_mutex);
145 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { 146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
146 struct nlm_host *host, **hp; 147 struct nlm_host *host, **hp;
147 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { 148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
148 if (host->h_server && 149 if (host->h_server &&
149 host->h_killed == 0) { 150 host->h_killed == 0) {
150 nlm_get_host(host); 151 nlm_get_host(host);
151 up(&nlm_host_sema); 152 mutex_unlock(&nlm_host_mutex);
152 return host; 153 return host;
153 } 154 }
154 } 155 }
155 } 156 }
156 up(&nlm_host_sema); 157 mutex_unlock(&nlm_host_mutex);
157 return NULL; 158 return NULL;
158} 159}
159 160
@@ -265,7 +266,7 @@ nlm_shutdown_hosts(void)
265 int i; 266 int i;
266 267
267 dprintk("lockd: shutting down host module\n"); 268 dprintk("lockd: shutting down host module\n");
268 down(&nlm_host_sema); 269 mutex_lock(&nlm_host_mutex);
269 270
270 /* First, make all hosts eligible for gc */ 271 /* First, make all hosts eligible for gc */
271 dprintk("lockd: nuking all hosts...\n"); 272 dprintk("lockd: nuking all hosts...\n");
@@ -276,7 +277,7 @@ nlm_shutdown_hosts(void)
276 277
277 /* Then, perform a garbage collection pass */ 278 /* Then, perform a garbage collection pass */
278 nlm_gc_hosts(); 279 nlm_gc_hosts();
279 up(&nlm_host_sema); 280 mutex_unlock(&nlm_host_mutex);
280 281
281 /* complain if any hosts are left */ 282 /* complain if any hosts are left */
282 if (nrhosts) { 283 if (nrhosts) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5e85bde6c123..fd56c8872f34 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/smp.h> 26#include <linux/smp.h>
27#include <linux/smp_lock.h> 27#include <linux/smp_lock.h>
28#include <linux/mutex.h>
28 29
29#include <linux/sunrpc/types.h> 30#include <linux/sunrpc/types.h>
30#include <linux/sunrpc/stats.h> 31#include <linux/sunrpc/stats.h>
@@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program;
43struct nlmsvc_binding * nlmsvc_ops; 44struct nlmsvc_binding * nlmsvc_ops;
44EXPORT_SYMBOL(nlmsvc_ops); 45EXPORT_SYMBOL(nlmsvc_ops);
45 46
46static DECLARE_MUTEX(nlmsvc_sema); 47static DEFINE_MUTEX(nlmsvc_mutex);
47static unsigned int nlmsvc_users; 48static unsigned int nlmsvc_users;
48static pid_t nlmsvc_pid; 49static pid_t nlmsvc_pid;
49int nlmsvc_grace_period; 50int nlmsvc_grace_period;
50unsigned long nlmsvc_timeout; 51unsigned long nlmsvc_timeout;
51 52
52static DECLARE_MUTEX_LOCKED(lockd_start); 53static DECLARE_COMPLETION(lockd_start_done);
53static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); 54static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
54 55
55/* 56/*
@@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp)
112 * Let our maker know we're running. 113 * Let our maker know we're running.
113 */ 114 */
114 nlmsvc_pid = current->pid; 115 nlmsvc_pid = current->pid;
115 up(&lockd_start); 116 complete(&lockd_start_done);
116 117
117 daemonize("lockd"); 118 daemonize("lockd");
118 119
@@ -215,7 +216,7 @@ lockd_up(void)
215 struct svc_serv * serv; 216 struct svc_serv * serv;
216 int error = 0; 217 int error = 0;
217 218
218 down(&nlmsvc_sema); 219 mutex_lock(&nlmsvc_mutex);
219 /* 220 /*
220 * Unconditionally increment the user count ... this is 221 * Unconditionally increment the user count ... this is
221 * the number of clients who _want_ a lockd process. 222 * the number of clients who _want_ a lockd process.
@@ -263,7 +264,7 @@ lockd_up(void)
263 "lockd_up: create thread failed, error=%d\n", error); 264 "lockd_up: create thread failed, error=%d\n", error);
264 goto destroy_and_out; 265 goto destroy_and_out;
265 } 266 }
266 down(&lockd_start); 267 wait_for_completion(&lockd_start_done);
267 268
268 /* 269 /*
269 * Note: svc_serv structures have an initial use count of 1, 270 * Note: svc_serv structures have an initial use count of 1,
@@ -272,7 +273,7 @@ lockd_up(void)
272destroy_and_out: 273destroy_and_out:
273 svc_destroy(serv); 274 svc_destroy(serv);
274out: 275out:
275 up(&nlmsvc_sema); 276 mutex_unlock(&nlmsvc_mutex);
276 return error; 277 return error;
277} 278}
278EXPORT_SYMBOL(lockd_up); 279EXPORT_SYMBOL(lockd_up);
@@ -285,7 +286,7 @@ lockd_down(void)
285{ 286{
286 static int warned; 287 static int warned;
287 288
288 down(&nlmsvc_sema); 289 mutex_lock(&nlmsvc_mutex);
289 if (nlmsvc_users) { 290 if (nlmsvc_users) {
290 if (--nlmsvc_users) 291 if (--nlmsvc_users)
291 goto out; 292 goto out;
@@ -315,7 +316,7 @@ lockd_down(void)
315 recalc_sigpending(); 316 recalc_sigpending();
316 spin_unlock_irq(&current->sighand->siglock); 317 spin_unlock_irq(&current->sighand->siglock);
317out: 318out:
318 up(&nlmsvc_sema); 319 mutex_unlock(&nlmsvc_mutex);
319} 320}
320EXPORT_SYMBOL(lockd_down); 321EXPORT_SYMBOL(lockd_down);
321 322
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index c7a6e3ae44d6..a570e5c8a930 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -11,6 +11,7 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/in.h> 13#include <linux/in.h>
14#include <linux/mutex.h>
14#include <linux/sunrpc/svc.h> 15#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/clnt.h> 16#include <linux/sunrpc/clnt.h>
16#include <linux/nfsd/nfsfh.h> 17#include <linux/nfsd/nfsfh.h>
@@ -28,7 +29,7 @@
28#define FILE_HASH_BITS 5 29#define FILE_HASH_BITS 5
29#define FILE_NRHASH (1<<FILE_HASH_BITS) 30#define FILE_NRHASH (1<<FILE_HASH_BITS)
30static struct nlm_file * nlm_files[FILE_NRHASH]; 31static struct nlm_file * nlm_files[FILE_NRHASH];
31static DECLARE_MUTEX(nlm_file_sema); 32static DEFINE_MUTEX(nlm_file_mutex);
32 33
33#ifdef NFSD_DEBUG 34#ifdef NFSD_DEBUG
34static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) 35static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
@@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
91 hash = file_hash(f); 92 hash = file_hash(f);
92 93
93 /* Lock file table */ 94 /* Lock file table */
94 down(&nlm_file_sema); 95 mutex_lock(&nlm_file_mutex);
95 96
96 for (file = nlm_files[hash]; file; file = file->f_next) 97 for (file = nlm_files[hash]; file; file = file->f_next)
97 if (!nfs_compare_fh(&file->f_handle, f)) 98 if (!nfs_compare_fh(&file->f_handle, f))
@@ -130,7 +131,7 @@ found:
130 nfserr = 0; 131 nfserr = 0;
131 132
132out_unlock: 133out_unlock:
133 up(&nlm_file_sema); 134 mutex_unlock(&nlm_file_mutex);
134 return nfserr; 135 return nfserr;
135 136
136out_free: 137out_free:
@@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action)
239 struct nlm_file *file, **fp; 240 struct nlm_file *file, **fp;
240 int i; 241 int i;
241 242
242 down(&nlm_file_sema); 243 mutex_lock(&nlm_file_mutex);
243 for (i = 0; i < FILE_NRHASH; i++) { 244 for (i = 0; i < FILE_NRHASH; i++) {
244 fp = nlm_files + i; 245 fp = nlm_files + i;
245 while ((file = *fp) != NULL) { 246 while ((file = *fp) != NULL) {
246 /* Traverse locks, blocks and shares of this file 247 /* Traverse locks, blocks and shares of this file
247 * and update file->f_locks count */ 248 * and update file->f_locks count */
248 if (nlm_inspect_file(host, file, action)) { 249 if (nlm_inspect_file(host, file, action)) {
249 up(&nlm_file_sema); 250 mutex_unlock(&nlm_file_mutex);
250 return 1; 251 return 1;
251 } 252 }
252 253
@@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
261 } 262 }
262 } 263 }
263 } 264 }
264 up(&nlm_file_sema); 265 mutex_unlock(&nlm_file_mutex);
265 return 0; 266 return 0;
266} 267}
267 268
@@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file)
281 file, file->f_count); 282 file, file->f_count);
282 283
283 /* Lock file table */ 284 /* Lock file table */
284 down(&nlm_file_sema); 285 mutex_lock(&nlm_file_mutex);
285 286
286 /* If there are no more locks etc, delete the file */ 287 /* If there are no more locks etc, delete the file */
287 if(--file->f_count == 0) { 288 if(--file->f_count == 0) {
@@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file)
289 nlm_delete_file(file); 290 nlm_delete_file(file);
290 } 291 }
291 292
292 up(&nlm_file_sema); 293 mutex_unlock(&nlm_file_mutex);
293} 294}
294 295
295/* 296/*
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bbc..4d9e71d43e7e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
142static LIST_HEAD(file_lock_list); 142static LIST_HEAD(file_lock_list);
143static LIST_HEAD(blocked_list); 143static LIST_HEAD(blocked_list);
144 144
145static kmem_cache_t *filelock_cache; 145static kmem_cache_t *filelock_cache __read_mostly;
146 146
147/* Allocate an empty lock structure. */ 147/* Allocate an empty lock structure. */
148static struct file_lock *locks_alloc_lock(void) 148static struct file_lock *locks_alloc_lock(void)
@@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter)
533static void locks_insert_block(struct file_lock *blocker, 533static void locks_insert_block(struct file_lock *blocker,
534 struct file_lock *waiter) 534 struct file_lock *waiter)
535{ 535{
536 if (!list_empty(&waiter->fl_block)) { 536 BUG_ON(!list_empty(&waiter->fl_block));
537 printk(KERN_ERR "locks_insert_block: removing duplicated lock "
538 "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
539 waiter->fl_start, waiter->fl_end, waiter->fl_type);
540 __locks_delete_block(waiter);
541 }
542 list_add_tail(&waiter->fl_block, &blocker->fl_block); 537 list_add_tail(&waiter->fl_block, &blocker->fl_block);
543 waiter->fl_next = blocker; 538 waiter->fl_next = blocker;
544 if (IS_POSIX(blocker)) 539 if (IS_POSIX(blocker))
@@ -797,9 +792,7 @@ out:
797 return error; 792 return error;
798} 793}
799 794
800EXPORT_SYMBOL(posix_lock_file); 795static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
801
802static int __posix_lock_file(struct inode *inode, struct file_lock *request)
803{ 796{
804 struct file_lock *fl; 797 struct file_lock *fl;
805 struct file_lock *new_fl, *new_fl2; 798 struct file_lock *new_fl, *new_fl2;
@@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
823 continue; 816 continue;
824 if (!posix_locks_conflict(request, fl)) 817 if (!posix_locks_conflict(request, fl))
825 continue; 818 continue;
819 if (conflock)
820 locks_copy_lock(conflock, fl);
826 error = -EAGAIN; 821 error = -EAGAIN;
827 if (!(request->fl_flags & FL_SLEEP)) 822 if (!(request->fl_flags & FL_SLEEP))
828 goto out; 823 goto out;
@@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
992 */ 987 */
993int posix_lock_file(struct file *filp, struct file_lock *fl) 988int posix_lock_file(struct file *filp, struct file_lock *fl)
994{ 989{
995 return __posix_lock_file(filp->f_dentry->d_inode, fl); 990 return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
991}
992EXPORT_SYMBOL(posix_lock_file);
993
994/**
995 * posix_lock_file_conf - Apply a POSIX-style lock to a file
996 * @filp: The file to apply the lock to
997 * @fl: The lock to be applied
998 * @conflock: Place to return a copy of the conflicting lock, if found.
999 *
1000 * Except for the conflock parameter, acts just like posix_lock_file.
1001 */
1002int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
1003 struct file_lock *conflock)
1004{
1005 return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
996} 1006}
1007EXPORT_SYMBOL(posix_lock_file_conf);
997 1008
998/** 1009/**
999 * posix_lock_file_wait - Apply a POSIX-style lock to a file 1010 * posix_lock_file_wait - Apply a POSIX-style lock to a file
@@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1009 int error; 1020 int error;
1010 might_sleep (); 1021 might_sleep ();
1011 for (;;) { 1022 for (;;) {
1012 error = __posix_lock_file(filp->f_dentry->d_inode, fl); 1023 error = posix_lock_file(filp, fl);
1013 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1024 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
1014 break; 1025 break;
1015 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1026 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
1081 fl.fl_end = offset + count - 1; 1092 fl.fl_end = offset + count - 1;
1082 1093
1083 for (;;) { 1094 for (;;) {
1084 error = __posix_lock_file(inode, &fl); 1095 error = __posix_lock_file_conf(inode, &fl, NULL);
1085 if (error != -EAGAIN) 1096 if (error != -EAGAIN)
1086 break; 1097 break;
1087 if (!(fl.fl_flags & FL_SLEEP)) 1098 if (!(fl.fl_flags & FL_SLEEP))
@@ -1694,7 +1705,7 @@ again:
1694 error = filp->f_op->lock(filp, cmd, file_lock); 1705 error = filp->f_op->lock(filp, cmd, file_lock);
1695 else { 1706 else {
1696 for (;;) { 1707 for (;;) {
1697 error = __posix_lock_file(inode, file_lock); 1708 error = posix_lock_file(filp, file_lock);
1698 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1709 if ((error != -EAGAIN) || (cmd == F_SETLK))
1699 break; 1710 break;
1700 error = wait_event_interruptible(file_lock->fl_wait, 1711 error = wait_event_interruptible(file_lock->fl_wait,
@@ -1837,7 +1848,7 @@ again:
1837 error = filp->f_op->lock(filp, cmd, file_lock); 1848 error = filp->f_op->lock(filp, cmd, file_lock);
1838 else { 1849 else {
1839 for (;;) { 1850 for (;;) {
1840 error = __posix_lock_file(inode, file_lock); 1851 error = posix_lock_file(filp, file_lock);
1841 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1852 if ((error != -EAGAIN) || (cmd == F_SETLK64))
1842 break; 1853 break;
1843 error = wait_event_interruptible(file_lock->fl_wait, 1854 error = wait_event_interruptible(file_lock->fl_wait,
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 73e754fea2d8..e4fde1ab22cd 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -311,7 +311,7 @@ fail:
311/* 311/*
312 * mb_cache_shrink() 312 * mb_cache_shrink()
313 * 313 *
314 * Removes all cache entires of a device from the cache. All cache entries 314 * Removes all cache entries of a device from the cache. All cache entries
315 * currently in use cannot be freed, and thus remain in the cache. All others 315 * currently in use cannot be freed, and thus remain in the cache. All others
316 * are freed. 316 * are freed.
317 * 317 *
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 732502aabc05..69224d1fe043 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -14,7 +14,7 @@ typedef struct minix_dir_entry minix_dirent;
14 14
15static int minix_readdir(struct file *, void *, filldir_t); 15static int minix_readdir(struct file *, void *, filldir_t);
16 16
17struct file_operations minix_dir_operations = { 17const struct file_operations minix_dir_operations = {
18 .read = generic_read_dir, 18 .read = generic_read_dir,
19 .readdir = minix_readdir, 19 .readdir = minix_readdir,
20 .fsync = minix_sync_file, 20 .fsync = minix_sync_file,
diff --git a/fs/minix/file.c b/fs/minix/file.c
index f1d77acb3f01..420b32882a10 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -15,7 +15,7 @@
15 */ 15 */
16int minix_sync_file(struct file *, struct dentry *, int); 16int minix_sync_file(struct file *, struct dentry *, int);
17 17
18struct file_operations minix_file_operations = { 18const struct file_operations minix_file_operations = {
19 .llseek = generic_file_llseek, 19 .llseek = generic_file_llseek,
20 .read = generic_file_read, 20 .read = generic_file_read,
21 .write = generic_file_write, 21 .write = generic_file_write,
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index e42a8bb89001..c55b77cdcc8e 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -81,8 +81,8 @@ extern int minix_sync_file(struct file *, struct dentry *, int);
81 81
82extern struct inode_operations minix_file_inode_operations; 82extern struct inode_operations minix_file_inode_operations;
83extern struct inode_operations minix_dir_inode_operations; 83extern struct inode_operations minix_dir_inode_operations;
84extern struct file_operations minix_file_operations; 84extern const struct file_operations minix_file_operations;
85extern struct file_operations minix_dir_operations; 85extern const struct file_operations minix_dir_operations;
86extern struct dentry_operations minix_dentry_operations; 86extern struct dentry_operations minix_dentry_operations;
87 87
88static inline struct minix_sb_info *minix_sb(struct super_block *sb) 88static inline struct minix_sb_info *minix_sb(struct super_block *sb)
diff --git a/fs/mpage.c b/fs/mpage.c
index e431cb3878d6..9bf2eb30e6f4 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
163 } while (page_bh != head); 163 } while (page_bh != head);
164} 164}
165 165
166/*
167 * This is the worker routine which does all the work of mapping the disk
168 * blocks and constructs largest possible bios, submits them for IO if the
169 * blocks are not contiguous on the disk.
170 *
171 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
172 * represent the validity of its disk mapping and to decide when to do the next
173 * get_block() call.
174 */
166static struct bio * 175static struct bio *
167do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, 176do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
168 sector_t *last_block_in_bio, get_block_t get_block) 177 sector_t *last_block_in_bio, struct buffer_head *map_bh,
178 unsigned long *first_logical_block, get_block_t get_block)
169{ 179{
170 struct inode *inode = page->mapping->host; 180 struct inode *inode = page->mapping->host;
171 const unsigned blkbits = inode->i_blkbits; 181 const unsigned blkbits = inode->i_blkbits;
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
173 const unsigned blocksize = 1 << blkbits; 183 const unsigned blocksize = 1 << blkbits;
174 sector_t block_in_file; 184 sector_t block_in_file;
175 sector_t last_block; 185 sector_t last_block;
186 sector_t last_block_in_file;
176 sector_t blocks[MAX_BUF_PER_PAGE]; 187 sector_t blocks[MAX_BUF_PER_PAGE];
177 unsigned page_block; 188 unsigned page_block;
178 unsigned first_hole = blocks_per_page; 189 unsigned first_hole = blocks_per_page;
179 struct block_device *bdev = NULL; 190 struct block_device *bdev = NULL;
180 struct buffer_head bh;
181 int length; 191 int length;
182 int fully_mapped = 1; 192 int fully_mapped = 1;
193 unsigned nblocks;
194 unsigned relative_block;
183 195
184 if (page_has_buffers(page)) 196 if (page_has_buffers(page))
185 goto confused; 197 goto confused;
186 198
187 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 199 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
188 last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; 200 last_block = block_in_file + nr_pages * blocks_per_page;
201 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
202 if (last_block > last_block_in_file)
203 last_block = last_block_in_file;
204 page_block = 0;
205
206 /*
207 * Map blocks using the result from the previous get_blocks call first.
208 */
209 nblocks = map_bh->b_size >> blkbits;
210 if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
211 block_in_file < (*first_logical_block + nblocks)) {
212 unsigned map_offset = block_in_file - *first_logical_block;
213 unsigned last = nblocks - map_offset;
214
215 for (relative_block = 0; ; relative_block++) {
216 if (relative_block == last) {
217 clear_buffer_mapped(map_bh);
218 break;
219 }
220 if (page_block == blocks_per_page)
221 break;
222 blocks[page_block] = map_bh->b_blocknr + map_offset +
223 relative_block;
224 page_block++;
225 block_in_file++;
226 }
227 bdev = map_bh->b_bdev;
228 }
229
230 /*
231 * Then do more get_blocks calls until we are done with this page.
232 */
233 map_bh->b_page = page;
234 while (page_block < blocks_per_page) {
235 map_bh->b_state = 0;
236 map_bh->b_size = 0;
189 237
190 bh.b_page = page;
191 for (page_block = 0; page_block < blocks_per_page;
192 page_block++, block_in_file++) {
193 bh.b_state = 0;
194 if (block_in_file < last_block) { 238 if (block_in_file < last_block) {
195 if (get_block(inode, block_in_file, &bh, 0)) 239 map_bh->b_size = (last_block-block_in_file) << blkbits;
240 if (get_block(inode, block_in_file, map_bh, 0))
196 goto confused; 241 goto confused;
242 *first_logical_block = block_in_file;
197 } 243 }
198 244
199 if (!buffer_mapped(&bh)) { 245 if (!buffer_mapped(map_bh)) {
200 fully_mapped = 0; 246 fully_mapped = 0;
201 if (first_hole == blocks_per_page) 247 if (first_hole == blocks_per_page)
202 first_hole = page_block; 248 first_hole = page_block;
249 page_block++;
250 block_in_file++;
251 clear_buffer_mapped(map_bh);
203 continue; 252 continue;
204 } 253 }
205 254
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
209 * we just collected from get_block into the page's buffers 258 * we just collected from get_block into the page's buffers
210 * so readpage doesn't have to repeat the get_block call 259 * so readpage doesn't have to repeat the get_block call
211 */ 260 */
212 if (buffer_uptodate(&bh)) { 261 if (buffer_uptodate(map_bh)) {
213 map_buffer_to_page(page, &bh, page_block); 262 map_buffer_to_page(page, map_bh, page_block);
214 goto confused; 263 goto confused;
215 } 264 }
216 265
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
218 goto confused; /* hole -> non-hole */ 267 goto confused; /* hole -> non-hole */
219 268
220 /* Contiguous blocks? */ 269 /* Contiguous blocks? */
221 if (page_block && blocks[page_block-1] != bh.b_blocknr-1) 270 if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
222 goto confused; 271 goto confused;
223 blocks[page_block] = bh.b_blocknr; 272 nblocks = map_bh->b_size >> blkbits;
224 bdev = bh.b_bdev; 273 for (relative_block = 0; ; relative_block++) {
274 if (relative_block == nblocks) {
275 clear_buffer_mapped(map_bh);
276 break;
277 } else if (page_block == blocks_per_page)
278 break;
279 blocks[page_block] = map_bh->b_blocknr+relative_block;
280 page_block++;
281 block_in_file++;
282 }
283 bdev = map_bh->b_bdev;
225 } 284 }
226 285
227 if (first_hole != blocks_per_page) { 286 if (first_hole != blocks_per_page) {
@@ -260,7 +319,7 @@ alloc_new:
260 goto alloc_new; 319 goto alloc_new;
261 } 320 }
262 321
263 if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) 322 if (buffer_boundary(map_bh) || (first_hole != blocks_per_page))
264 bio = mpage_bio_submit(READ, bio); 323 bio = mpage_bio_submit(READ, bio);
265 else 324 else
266 *last_block_in_bio = blocks[blocks_per_page - 1]; 325 *last_block_in_bio = blocks[blocks_per_page - 1];
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
331 unsigned page_idx; 390 unsigned page_idx;
332 sector_t last_block_in_bio = 0; 391 sector_t last_block_in_bio = 0;
333 struct pagevec lru_pvec; 392 struct pagevec lru_pvec;
393 struct buffer_head map_bh;
394 unsigned long first_logical_block = 0;
334 395
396 clear_buffer_mapped(&map_bh);
335 pagevec_init(&lru_pvec, 0); 397 pagevec_init(&lru_pvec, 0);
336 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 398 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
337 struct page *page = list_entry(pages->prev, struct page, lru); 399 struct page *page = list_entry(pages->prev, struct page, lru);
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
342 page->index, GFP_KERNEL)) { 404 page->index, GFP_KERNEL)) {
343 bio = do_mpage_readpage(bio, page, 405 bio = do_mpage_readpage(bio, page,
344 nr_pages - page_idx, 406 nr_pages - page_idx,
345 &last_block_in_bio, get_block); 407 &last_block_in_bio, &map_bh,
408 &first_logical_block,
409 get_block);
346 if (!pagevec_add(&lru_pvec, page)) 410 if (!pagevec_add(&lru_pvec, page))
347 __pagevec_lru_add(&lru_pvec); 411 __pagevec_lru_add(&lru_pvec);
348 } else { 412 } else {
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
364{ 428{
365 struct bio *bio = NULL; 429 struct bio *bio = NULL;
366 sector_t last_block_in_bio = 0; 430 sector_t last_block_in_bio = 0;
431 struct buffer_head map_bh;
432 unsigned long first_logical_block = 0;
367 433
368 bio = do_mpage_readpage(bio, page, 1, 434 clear_buffer_mapped(&map_bh);
369 &last_block_in_bio, get_block); 435 bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
436 &map_bh, &first_logical_block, get_block);
370 if (bio) 437 if (bio)
371 mpage_bio_submit(READ, bio); 438 mpage_bio_submit(READ, bio);
372 return 0; 439 return 0;
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
472 for (page_block = 0; page_block < blocks_per_page; ) { 539 for (page_block = 0; page_block < blocks_per_page; ) {
473 540
474 map_bh.b_state = 0; 541 map_bh.b_state = 0;
542 map_bh.b_size = 1 << blkbits;
475 if (get_block(inode, block_in_file, &map_bh, 1)) 543 if (get_block(inode, block_in_file, &map_bh, 1))
476 goto confused; 544 goto confused;
477 if (buffer_new(&map_bh)) 545 if (buffer_new(&map_bh))
diff --git a/fs/namei.c b/fs/namei.c
index 98dc2e134362..22f6e8d16aa8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -546,6 +546,22 @@ struct path {
546 struct dentry *dentry; 546 struct dentry *dentry;
547}; 547};
548 548
549static inline void dput_path(struct path *path, struct nameidata *nd)
550{
551 dput(path->dentry);
552 if (path->mnt != nd->mnt)
553 mntput(path->mnt);
554}
555
556static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
557{
558 dput(nd->dentry);
559 if (nd->mnt != path->mnt)
560 mntput(nd->mnt);
561 nd->mnt = path->mnt;
562 nd->dentry = path->dentry;
563}
564
549static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) 565static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
550{ 566{
551 int error; 567 int error;
@@ -555,8 +571,11 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
555 touch_atime(path->mnt, dentry); 571 touch_atime(path->mnt, dentry);
556 nd_set_link(nd, NULL); 572 nd_set_link(nd, NULL);
557 573
558 if (path->mnt == nd->mnt) 574 if (path->mnt != nd->mnt) {
559 mntget(path->mnt); 575 path_to_nameidata(path, nd);
576 dget(dentry);
577 }
578 mntget(path->mnt);
560 cookie = dentry->d_inode->i_op->follow_link(dentry, nd); 579 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
561 error = PTR_ERR(cookie); 580 error = PTR_ERR(cookie);
562 if (!IS_ERR(cookie)) { 581 if (!IS_ERR(cookie)) {
@@ -573,22 +592,6 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
573 return error; 592 return error;
574} 593}
575 594
576static inline void dput_path(struct path *path, struct nameidata *nd)
577{
578 dput(path->dentry);
579 if (path->mnt != nd->mnt)
580 mntput(path->mnt);
581}
582
583static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
584{
585 dput(nd->dentry);
586 if (nd->mnt != path->mnt)
587 mntput(nd->mnt);
588 nd->mnt = path->mnt;
589 nd->dentry = path->dentry;
590}
591
592/* 595/*
593 * This limits recursive symlink follows to 8, while 596 * This limits recursive symlink follows to 8, while
594 * limiting consecutive symlinks to 40. 597 * limiting consecutive symlinks to 40.
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d28..bf478addb852 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
43 43
44static int event; 44static int event;
45 45
46static struct list_head *mount_hashtable; 46static struct list_head *mount_hashtable __read_mostly;
47static int hash_mask __read_mostly, hash_bits __read_mostly; 47static int hash_mask __read_mostly, hash_bits __read_mostly;
48static kmem_cache_t *mnt_cache; 48static kmem_cache_t *mnt_cache __read_mostly;
49static struct rw_semaphore namespace_sem; 49static struct rw_semaphore namespace_sem;
50 50
51/* /sys/fs */ 51/* /sys/fs */
@@ -459,9 +459,9 @@ int may_umount_tree(struct vfsmount *mnt)
459 spin_unlock(&vfsmount_lock); 459 spin_unlock(&vfsmount_lock);
460 460
461 if (actual_refs > minimum_refs) 461 if (actual_refs > minimum_refs)
462 return -EBUSY; 462 return 0;
463 463
464 return 0; 464 return 1;
465} 465}
466 466
467EXPORT_SYMBOL(may_umount_tree); 467EXPORT_SYMBOL(may_umount_tree);
@@ -481,10 +481,10 @@ EXPORT_SYMBOL(may_umount_tree);
481 */ 481 */
482int may_umount(struct vfsmount *mnt) 482int may_umount(struct vfsmount *mnt)
483{ 483{
484 int ret = 0; 484 int ret = 1;
485 spin_lock(&vfsmount_lock); 485 spin_lock(&vfsmount_lock);
486 if (propagate_mount_busy(mnt, 2)) 486 if (propagate_mount_busy(mnt, 2))
487 ret = -EBUSY; 487 ret = 0;
488 spin_unlock(&vfsmount_lock); 488 spin_unlock(&vfsmount_lock);
489 return ret; 489 return ret;
490} 490}
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index cfd76f431dc0..f0860c602d8b 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -49,7 +49,7 @@ extern int ncp_symlink(struct inode *, struct dentry *, const char *);
49#define ncp_symlink NULL 49#define ncp_symlink NULL
50#endif 50#endif
51 51
52struct file_operations ncp_dir_operations = 52const struct file_operations ncp_dir_operations =
53{ 53{
54 .read = generic_read_dir, 54 .read = generic_read_dir,
55 .readdir = ncp_readdir, 55 .readdir = ncp_readdir,
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index ebdad8f6398f..e6b7c67cf057 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -283,7 +283,7 @@ static int ncp_release(struct inode *inode, struct file *file) {
283 return 0; 283 return 0;
284} 284}
285 285
286struct file_operations ncp_file_operations = 286const struct file_operations ncp_file_operations =
287{ 287{
288 .llseek = remote_llseek, 288 .llseek = remote_llseek,
289 .read = ncp_file_read, 289 .read = ncp_file_read,
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 99d2cfbce863..90c95adc8c1b 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
14#include <linux/sunrpc/svc.h> 14#include <linux/sunrpc/svc.h>
15#include <linux/sunrpc/svcsock.h> 15#include <linux/sunrpc/svcsock.h>
16#include <linux/nfs_fs.h> 16#include <linux/nfs_fs.h>
17#include <linux/mutex.h>
17 18
18#include <net/inet_sock.h> 19#include <net/inet_sock.h>
19 20
@@ -31,7 +32,7 @@ struct nfs_callback_data {
31}; 32};
32 33
33static struct nfs_callback_data nfs_callback_info; 34static struct nfs_callback_data nfs_callback_info;
34static DECLARE_MUTEX(nfs_callback_sema); 35static DEFINE_MUTEX(nfs_callback_mutex);
35static struct svc_program nfs4_callback_program; 36static struct svc_program nfs4_callback_program;
36 37
37unsigned int nfs_callback_set_tcpport; 38unsigned int nfs_callback_set_tcpport;
@@ -95,7 +96,7 @@ int nfs_callback_up(void)
95 int ret = 0; 96 int ret = 0;
96 97
97 lock_kernel(); 98 lock_kernel();
98 down(&nfs_callback_sema); 99 mutex_lock(&nfs_callback_mutex);
99 if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) 100 if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
100 goto out; 101 goto out;
101 init_completion(&nfs_callback_info.started); 102 init_completion(&nfs_callback_info.started);
@@ -121,7 +122,7 @@ int nfs_callback_up(void)
121 nfs_callback_info.serv = serv; 122 nfs_callback_info.serv = serv;
122 wait_for_completion(&nfs_callback_info.started); 123 wait_for_completion(&nfs_callback_info.started);
123out: 124out:
124 up(&nfs_callback_sema); 125 mutex_unlock(&nfs_callback_mutex);
125 unlock_kernel(); 126 unlock_kernel();
126 return ret; 127 return ret;
127out_destroy: 128out_destroy:
@@ -139,7 +140,7 @@ int nfs_callback_down(void)
139 int ret = 0; 140 int ret = 0;
140 141
141 lock_kernel(); 142 lock_kernel();
142 down(&nfs_callback_sema); 143 mutex_lock(&nfs_callback_mutex);
143 nfs_callback_info.users--; 144 nfs_callback_info.users--;
144 do { 145 do {
145 if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0) 146 if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
@@ -147,7 +148,7 @@ int nfs_callback_down(void)
147 if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0) 148 if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
148 break; 149 break;
149 } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0); 150 } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
150 up(&nfs_callback_sema); 151 mutex_unlock(&nfs_callback_mutex);
151 unlock_kernel(); 152 unlock_kernel();
152 return ret; 153 return ret;
153} 154}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 06c48b385c94..a23f34894167 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -54,7 +54,7 @@ static int nfs_rename(struct inode *, struct dentry *,
54static int nfs_fsync_dir(struct file *, struct dentry *, int); 54static int nfs_fsync_dir(struct file *, struct dentry *, int);
55static loff_t nfs_llseek_dir(struct file *, loff_t, int); 55static loff_t nfs_llseek_dir(struct file *, loff_t, int);
56 56
57struct file_operations nfs_dir_operations = { 57const struct file_operations nfs_dir_operations = {
58 .llseek = nfs_llseek_dir, 58 .llseek = nfs_llseek_dir,
59 .read = generic_read_dir, 59 .read = generic_read_dir,
60 .readdir = nfs_readdir, 60 .readdir = nfs_readdir,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5263b2864a44..f1df2c8d9259 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -49,7 +49,7 @@ static int nfs_check_flags(int flags);
49static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); 49static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
50static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); 50static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
51 51
52struct file_operations nfs_file_operations = { 52const struct file_operations nfs_file_operations = {
53 .llseek = nfs_file_llseek, 53 .llseek = nfs_file_llseek,
54 .read = do_sync_read, 54 .read = do_sync_read,
55 .write = do_sync_write, 55 .write = do_sync_write,
@@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
318 return status; 318 return status;
319} 319}
320 320
321static int nfs_invalidate_page(struct page *page, unsigned long offset) 321static void nfs_invalidate_page(struct page *page, unsigned long offset)
322{ 322{
323 /* FIXME: we really should cancel any unstarted writes on this page */ 323 /* FIXME: we really should cancel any unstarted writes on this page */
324 return 1;
325} 324}
326 325
327static int nfs_release_page(struct page *page, gfp_t gfp) 326static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 3961524fd4ab..624ca7146b6b 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -663,10 +663,8 @@ int nfs_init_readpagecache(void)
663 if (nfs_rdata_cachep == NULL) 663 if (nfs_rdata_cachep == NULL)
664 return -ENOMEM; 664 return -ENOMEM;
665 665
666 nfs_rdata_mempool = mempool_create(MIN_POOL_READ, 666 nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
667 mempool_alloc_slab, 667 nfs_rdata_cachep);
668 mempool_free_slab,
669 nfs_rdata_cachep);
670 if (nfs_rdata_mempool == NULL) 668 if (nfs_rdata_mempool == NULL)
671 return -ENOMEM; 669 return -ENOMEM;
672 670
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3f5225404c97..4cfada2cc09f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void)
1521 if (nfs_wdata_cachep == NULL) 1521 if (nfs_wdata_cachep == NULL)
1522 return -ENOMEM; 1522 return -ENOMEM;
1523 1523
1524 nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE, 1524 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1525 mempool_alloc_slab, 1525 nfs_wdata_cachep);
1526 mempool_free_slab,
1527 nfs_wdata_cachep);
1528 if (nfs_wdata_mempool == NULL) 1526 if (nfs_wdata_mempool == NULL)
1529 return -ENOMEM; 1527 return -ENOMEM;
1530 1528
1531 nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT, 1529 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1532 mempool_alloc_slab, 1530 nfs_wdata_cachep);
1533 mempool_free_slab,
1534 nfs_wdata_cachep);
1535 if (nfs_commit_mempool == NULL) 1531 if (nfs_commit_mempool == NULL)
1536 return -ENOMEM; 1532 return -ENOMEM;
1537 1533
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 417ec02df44f..c340be0a3f59 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -57,27 +57,17 @@ static int exp_verify_string(char *cp, int max);
57#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) 57#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
58static struct cache_head *expkey_table[EXPKEY_HASHMAX]; 58static struct cache_head *expkey_table[EXPKEY_HASHMAX];
59 59
60static inline int svc_expkey_hash(struct svc_expkey *item) 60static void expkey_put(struct kref *ref)
61{ 61{
62 int hash = item->ek_fsidtype; 62 struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
63 char * cp = (char*)item->ek_fsid;
64 int len = key_len(item->ek_fsidtype);
65 63
66 hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); 64 if (test_bit(CACHE_VALID, &key->h.flags) &&
67 hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); 65 !test_bit(CACHE_NEGATIVE, &key->h.flags)) {
68 return hash & EXPKEY_HASHMASK; 66 dput(key->ek_dentry);
69} 67 mntput(key->ek_mnt);
70
71void expkey_put(struct cache_head *item, struct cache_detail *cd)
72{
73 if (cache_put(item, cd)) {
74 struct svc_expkey *key = container_of(item, struct svc_expkey, h);
75 if (test_bit(CACHE_VALID, &item->flags) &&
76 !test_bit(CACHE_NEGATIVE, &item->flags))
77 exp_put(key->ek_export);
78 auth_domain_put(key->ek_client);
79 kfree(key);
80 } 68 }
69 auth_domain_put(key->ek_client);
70 kfree(key);
81} 71}
82 72
83static void expkey_request(struct cache_detail *cd, 73static void expkey_request(struct cache_detail *cd,
@@ -95,7 +85,10 @@ static void expkey_request(struct cache_detail *cd,
95 (*bpp)[-1] = '\n'; 85 (*bpp)[-1] = '\n';
96} 86}
97 87
98static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *, int); 88static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
89static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
90static struct cache_detail svc_expkey_cache;
91
99static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) 92static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
100{ 93{
101 /* client fsidtype fsid [path] */ 94 /* client fsidtype fsid [path] */
@@ -106,6 +99,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
106 int fsidtype; 99 int fsidtype;
107 char *ep; 100 char *ep;
108 struct svc_expkey key; 101 struct svc_expkey key;
102 struct svc_expkey *ek;
109 103
110 if (mesg[mlen-1] != '\n') 104 if (mesg[mlen-1] != '\n')
111 return -EINVAL; 105 return -EINVAL;
@@ -150,40 +144,38 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
150 key.ek_fsidtype = fsidtype; 144 key.ek_fsidtype = fsidtype;
151 memcpy(key.ek_fsid, buf, len); 145 memcpy(key.ek_fsid, buf, len);
152 146
147 ek = svc_expkey_lookup(&key);
148 err = -ENOMEM;
149 if (!ek)
150 goto out;
151
153 /* now we want a pathname, or empty meaning NEGATIVE */ 152 /* now we want a pathname, or empty meaning NEGATIVE */
153 err = -EINVAL;
154 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0) 154 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0)
155 goto out; 155 goto out;
156 dprintk("Path seems to be <%s>\n", buf); 156 dprintk("Path seems to be <%s>\n", buf);
157 err = 0; 157 err = 0;
158 if (len == 0) { 158 if (len == 0) {
159 struct svc_expkey *ek;
160 set_bit(CACHE_NEGATIVE, &key.h.flags); 159 set_bit(CACHE_NEGATIVE, &key.h.flags);
161 ek = svc_expkey_lookup(&key, 1); 160 ek = svc_expkey_update(&key, ek);
162 if (ek) 161 if (ek)
163 expkey_put(&ek->h, &svc_expkey_cache); 162 cache_put(&ek->h, &svc_expkey_cache);
163 else err = -ENOMEM;
164 } else { 164 } else {
165 struct nameidata nd; 165 struct nameidata nd;
166 struct svc_expkey *ek;
167 struct svc_export *exp;
168 err = path_lookup(buf, 0, &nd); 166 err = path_lookup(buf, 0, &nd);
169 if (err) 167 if (err)
170 goto out; 168 goto out;
171 169
172 dprintk("Found the path %s\n", buf); 170 dprintk("Found the path %s\n", buf);
173 exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL); 171 key.ek_mnt = nd.mnt;
174 172 key.ek_dentry = nd.dentry;
175 err = -ENOENT;
176 if (!exp)
177 goto out_nd;
178 key.ek_export = exp;
179 dprintk("And found export\n");
180 173
181 ek = svc_expkey_lookup(&key, 1); 174 ek = svc_expkey_update(&key, ek);
182 if (ek) 175 if (ek)
183 expkey_put(&ek->h, &svc_expkey_cache); 176 cache_put(&ek->h, &svc_expkey_cache);
184 exp_put(exp); 177 else
185 err = 0; 178 err = -ENOMEM;
186 out_nd:
187 path_release(&nd); 179 path_release(&nd);
188 } 180 }
189 cache_flush(); 181 cache_flush();
@@ -214,35 +206,31 @@ static int expkey_show(struct seq_file *m,
214 if (test_bit(CACHE_VALID, &h->flags) && 206 if (test_bit(CACHE_VALID, &h->flags) &&
215 !test_bit(CACHE_NEGATIVE, &h->flags)) { 207 !test_bit(CACHE_NEGATIVE, &h->flags)) {
216 seq_printf(m, " "); 208 seq_printf(m, " ");
217 seq_path(m, ek->ek_export->ex_mnt, ek->ek_export->ex_dentry, "\\ \t\n"); 209 seq_path(m, ek->ek_mnt, ek->ek_dentry, "\\ \t\n");
218 } 210 }
219 seq_printf(m, "\n"); 211 seq_printf(m, "\n");
220 return 0; 212 return 0;
221} 213}
222
223struct cache_detail svc_expkey_cache = {
224 .owner = THIS_MODULE,
225 .hash_size = EXPKEY_HASHMAX,
226 .hash_table = expkey_table,
227 .name = "nfsd.fh",
228 .cache_put = expkey_put,
229 .cache_request = expkey_request,
230 .cache_parse = expkey_parse,
231 .cache_show = expkey_show,
232};
233 214
234static inline int svc_expkey_match (struct svc_expkey *a, struct svc_expkey *b) 215static inline int expkey_match (struct cache_head *a, struct cache_head *b)
235{ 216{
236 if (a->ek_fsidtype != b->ek_fsidtype || 217 struct svc_expkey *orig = container_of(a, struct svc_expkey, h);
237 a->ek_client != b->ek_client || 218 struct svc_expkey *new = container_of(b, struct svc_expkey, h);
238 memcmp(a->ek_fsid, b->ek_fsid, key_len(a->ek_fsidtype)) != 0) 219
220 if (orig->ek_fsidtype != new->ek_fsidtype ||
221 orig->ek_client != new->ek_client ||
222 memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0)
239 return 0; 223 return 0;
240 return 1; 224 return 1;
241} 225}
242 226
243static inline void svc_expkey_init(struct svc_expkey *new, struct svc_expkey *item) 227static inline void expkey_init(struct cache_head *cnew,
228 struct cache_head *citem)
244{ 229{
245 cache_get(&item->ek_client->h); 230 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
231 struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
232
233 kref_get(&item->ek_client->ref);
246 new->ek_client = item->ek_client; 234 new->ek_client = item->ek_client;
247 new->ek_fsidtype = item->ek_fsidtype; 235 new->ek_fsidtype = item->ek_fsidtype;
248 new->ek_fsid[0] = item->ek_fsid[0]; 236 new->ek_fsid[0] = item->ek_fsid[0];
@@ -250,39 +238,94 @@ static inline void svc_expkey_init(struct svc_expkey *new, struct svc_expkey *it
250 new->ek_fsid[2] = item->ek_fsid[2]; 238 new->ek_fsid[2] = item->ek_fsid[2];
251} 239}
252 240
253static inline void svc_expkey_update(struct svc_expkey *new, struct svc_expkey *item) 241static inline void expkey_update(struct cache_head *cnew,
242 struct cache_head *citem)
243{
244 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
245 struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
246
247 new->ek_mnt = mntget(item->ek_mnt);
248 new->ek_dentry = dget(item->ek_dentry);
249}
250
251static struct cache_head *expkey_alloc(void)
254{ 252{
255 cache_get(&item->ek_export->h); 253 struct svc_expkey *i = kmalloc(sizeof(*i), GFP_KERNEL);
256 new->ek_export = item->ek_export; 254 if (i)
255 return &i->h;
256 else
257 return NULL;
257} 258}
258 259
259static DefineSimpleCacheLookup(svc_expkey,0) /* no inplace updates */ 260static struct cache_detail svc_expkey_cache = {
261 .owner = THIS_MODULE,
262 .hash_size = EXPKEY_HASHMAX,
263 .hash_table = expkey_table,
264 .name = "nfsd.fh",
265 .cache_put = expkey_put,
266 .cache_request = expkey_request,
267 .cache_parse = expkey_parse,
268 .cache_show = expkey_show,
269 .match = expkey_match,
270 .init = expkey_init,
271 .update = expkey_update,
272 .alloc = expkey_alloc,
273};
260 274
261#define EXPORT_HASHBITS 8 275static struct svc_expkey *
262#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) 276svc_expkey_lookup(struct svc_expkey *item)
263#define EXPORT_HASHMASK (EXPORT_HASHMAX -1) 277{
278 struct cache_head *ch;
279 int hash = item->ek_fsidtype;
280 char * cp = (char*)item->ek_fsid;
281 int len = key_len(item->ek_fsidtype);
264 282
265static struct cache_head *export_table[EXPORT_HASHMAX]; 283 hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
284 hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
285 hash &= EXPKEY_HASHMASK;
266 286
267static inline int svc_export_hash(struct svc_export *item) 287 ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
288 hash);
289 if (ch)
290 return container_of(ch, struct svc_expkey, h);
291 else
292 return NULL;
293}
294
295static struct svc_expkey *
296svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
268{ 297{
269 int rv; 298 struct cache_head *ch;
299 int hash = new->ek_fsidtype;
300 char * cp = (char*)new->ek_fsid;
301 int len = key_len(new->ek_fsidtype);
270 302
271 rv = hash_ptr(item->ex_client, EXPORT_HASHBITS); 303 hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
272 rv ^= hash_ptr(item->ex_dentry, EXPORT_HASHBITS); 304 hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS);
273 rv ^= hash_ptr(item->ex_mnt, EXPORT_HASHBITS); 305 hash &= EXPKEY_HASHMASK;
274 return rv; 306
307 ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
308 &old->h, hash);
309 if (ch)
310 return container_of(ch, struct svc_expkey, h);
311 else
312 return NULL;
275} 313}
276 314
277void svc_export_put(struct cache_head *item, struct cache_detail *cd) 315
316#define EXPORT_HASHBITS 8
317#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
318#define EXPORT_HASHMASK (EXPORT_HASHMAX -1)
319
320static struct cache_head *export_table[EXPORT_HASHMAX];
321
322static void svc_export_put(struct kref *ref)
278{ 323{
279 if (cache_put(item, cd)) { 324 struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
280 struct svc_export *exp = container_of(item, struct svc_export, h); 325 dput(exp->ex_dentry);
281 dput(exp->ex_dentry); 326 mntput(exp->ex_mnt);
282 mntput(exp->ex_mnt); 327 auth_domain_put(exp->ex_client);
283 auth_domain_put(exp->ex_client); 328 kfree(exp);
284 kfree(exp);
285 }
286} 329}
287 330
288static void svc_export_request(struct cache_detail *cd, 331static void svc_export_request(struct cache_detail *cd,
@@ -304,7 +347,9 @@ static void svc_export_request(struct cache_detail *cd,
304 (*bpp)[-1] = '\n'; 347 (*bpp)[-1] = '\n';
305} 348}
306 349
307static struct svc_export *svc_export_lookup(struct svc_export *, int); 350static struct svc_export *svc_export_update(struct svc_export *new,
351 struct svc_export *old);
352static struct svc_export *svc_export_lookup(struct svc_export *);
308 353
309static int check_export(struct inode *inode, int flags) 354static int check_export(struct inode *inode, int flags)
310{ 355{
@@ -417,11 +462,16 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
417 if (err) goto out; 462 if (err) goto out;
418 } 463 }
419 464
420 expp = svc_export_lookup(&exp, 1); 465 expp = svc_export_lookup(&exp);
421 if (expp) 466 if (expp)
422 exp_put(expp); 467 expp = svc_export_update(&exp, expp);
423 err = 0; 468 else
469 err = -ENOMEM;
424 cache_flush(); 470 cache_flush();
471 if (expp == NULL)
472 err = -ENOMEM;
473 else
474 exp_put(expp);
425 out: 475 out:
426 if (nd.dentry) 476 if (nd.dentry)
427 path_release(&nd); 477 path_release(&nd);
@@ -455,6 +505,46 @@ static int svc_export_show(struct seq_file *m,
455 seq_puts(m, ")\n"); 505 seq_puts(m, ")\n");
456 return 0; 506 return 0;
457} 507}
508static int svc_export_match(struct cache_head *a, struct cache_head *b)
509{
510 struct svc_export *orig = container_of(a, struct svc_export, h);
511 struct svc_export *new = container_of(b, struct svc_export, h);
512 return orig->ex_client == new->ex_client &&
513 orig->ex_dentry == new->ex_dentry &&
514 orig->ex_mnt == new->ex_mnt;
515}
516
517static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
518{
519 struct svc_export *new = container_of(cnew, struct svc_export, h);
520 struct svc_export *item = container_of(citem, struct svc_export, h);
521
522 kref_get(&item->ex_client->ref);
523 new->ex_client = item->ex_client;
524 new->ex_dentry = dget(item->ex_dentry);
525 new->ex_mnt = mntget(item->ex_mnt);
526}
527
528static void export_update(struct cache_head *cnew, struct cache_head *citem)
529{
530 struct svc_export *new = container_of(cnew, struct svc_export, h);
531 struct svc_export *item = container_of(citem, struct svc_export, h);
532
533 new->ex_flags = item->ex_flags;
534 new->ex_anon_uid = item->ex_anon_uid;
535 new->ex_anon_gid = item->ex_anon_gid;
536 new->ex_fsid = item->ex_fsid;
537}
538
539static struct cache_head *svc_export_alloc(void)
540{
541 struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL);
542 if (i)
543 return &i->h;
544 else
545 return NULL;
546}
547
458struct cache_detail svc_export_cache = { 548struct cache_detail svc_export_cache = {
459 .owner = THIS_MODULE, 549 .owner = THIS_MODULE,
460 .hash_size = EXPORT_HASHMAX, 550 .hash_size = EXPORT_HASHMAX,
@@ -464,34 +554,49 @@ struct cache_detail svc_export_cache = {
464 .cache_request = svc_export_request, 554 .cache_request = svc_export_request,
465 .cache_parse = svc_export_parse, 555 .cache_parse = svc_export_parse,
466 .cache_show = svc_export_show, 556 .cache_show = svc_export_show,
557 .match = svc_export_match,
558 .init = svc_export_init,
559 .update = export_update,
560 .alloc = svc_export_alloc,
467}; 561};
468 562
469static inline int svc_export_match(struct svc_export *a, struct svc_export *b) 563static struct svc_export *
564svc_export_lookup(struct svc_export *exp)
470{ 565{
471 return a->ex_client == b->ex_client && 566 struct cache_head *ch;
472 a->ex_dentry == b->ex_dentry && 567 int hash;
473 a->ex_mnt == b->ex_mnt; 568 hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
474} 569 hash ^= hash_ptr(exp->ex_dentry, EXPORT_HASHBITS);
475static inline void svc_export_init(struct svc_export *new, struct svc_export *item) 570 hash ^= hash_ptr(exp->ex_mnt, EXPORT_HASHBITS);
476{ 571
477 cache_get(&item->ex_client->h); 572 ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
478 new->ex_client = item->ex_client; 573 hash);
479 new->ex_dentry = dget(item->ex_dentry); 574 if (ch)
480 new->ex_mnt = mntget(item->ex_mnt); 575 return container_of(ch, struct svc_export, h);
576 else
577 return NULL;
481} 578}
482 579
483static inline void svc_export_update(struct svc_export *new, struct svc_export *item) 580static struct svc_export *
581svc_export_update(struct svc_export *new, struct svc_export *old)
484{ 582{
485 new->ex_flags = item->ex_flags; 583 struct cache_head *ch;
486 new->ex_anon_uid = item->ex_anon_uid; 584 int hash;
487 new->ex_anon_gid = item->ex_anon_gid; 585 hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
488 new->ex_fsid = item->ex_fsid; 586 hash ^= hash_ptr(old->ex_dentry, EXPORT_HASHBITS);
587 hash ^= hash_ptr(old->ex_mnt, EXPORT_HASHBITS);
588
589 ch = sunrpc_cache_update(&svc_export_cache, &new->h,
590 &old->h,
591 hash);
592 if (ch)
593 return container_of(ch, struct svc_export, h);
594 else
595 return NULL;
489} 596}
490 597
491static DefineSimpleCacheLookup(svc_export,1) /* allow inplace updates */
492 598
493 599static struct svc_expkey *
494struct svc_expkey *
495exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) 600exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
496{ 601{
497 struct svc_expkey key, *ek; 602 struct svc_expkey key, *ek;
@@ -504,7 +609,7 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
504 key.ek_fsidtype = fsid_type; 609 key.ek_fsidtype = fsid_type;
505 memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); 610 memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
506 611
507 ek = svc_expkey_lookup(&key, 0); 612 ek = svc_expkey_lookup(&key);
508 if (ek != NULL) 613 if (ek != NULL)
509 if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp))) 614 if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp)))
510 ek = ERR_PTR(err); 615 ek = ERR_PTR(err);
@@ -519,13 +624,16 @@ static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
519 key.ek_client = clp; 624 key.ek_client = clp;
520 key.ek_fsidtype = fsid_type; 625 key.ek_fsidtype = fsid_type;
521 memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); 626 memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
522 key.ek_export = exp; 627 key.ek_mnt = exp->ex_mnt;
628 key.ek_dentry = exp->ex_dentry;
523 key.h.expiry_time = NEVER; 629 key.h.expiry_time = NEVER;
524 key.h.flags = 0; 630 key.h.flags = 0;
525 631
526 ek = svc_expkey_lookup(&key, 1); 632 ek = svc_expkey_lookup(&key);
633 if (ek)
634 ek = svc_expkey_update(&key,ek);
527 if (ek) { 635 if (ek) {
528 expkey_put(&ek->h, &svc_expkey_cache); 636 cache_put(&ek->h, &svc_expkey_cache);
529 return 0; 637 return 0;
530 } 638 }
531 return -ENOMEM; 639 return -ENOMEM;
@@ -573,7 +681,7 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
573 key.ex_mnt = mnt; 681 key.ex_mnt = mnt;
574 key.ex_dentry = dentry; 682 key.ex_dentry = dentry;
575 683
576 exp = svc_export_lookup(&key, 0); 684 exp = svc_export_lookup(&key);
577 if (exp != NULL) 685 if (exp != NULL)
578 switch (cache_check(&svc_export_cache, &exp->h, reqp)) { 686 switch (cache_check(&svc_export_cache, &exp->h, reqp)) {
579 case 0: break; 687 case 0: break;
@@ -654,7 +762,7 @@ static void exp_fsid_unhash(struct svc_export *exp)
654 ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid); 762 ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
655 if (ek && !IS_ERR(ek)) { 763 if (ek && !IS_ERR(ek)) {
656 ek->h.expiry_time = get_seconds()-1; 764 ek->h.expiry_time = get_seconds()-1;
657 expkey_put(&ek->h, &svc_expkey_cache); 765 cache_put(&ek->h, &svc_expkey_cache);
658 } 766 }
659 svc_expkey_cache.nextcheck = get_seconds(); 767 svc_expkey_cache.nextcheck = get_seconds();
660} 768}
@@ -692,7 +800,7 @@ static void exp_unhash(struct svc_export *exp)
692 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); 800 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
693 if (ek && !IS_ERR(ek)) { 801 if (ek && !IS_ERR(ek)) {
694 ek->h.expiry_time = get_seconds()-1; 802 ek->h.expiry_time = get_seconds()-1;
695 expkey_put(&ek->h, &svc_expkey_cache); 803 cache_put(&ek->h, &svc_expkey_cache);
696 } 804 }
697 svc_expkey_cache.nextcheck = get_seconds(); 805 svc_expkey_cache.nextcheck = get_seconds();
698} 806}
@@ -741,8 +849,8 @@ exp_export(struct nfsctl_export *nxp)
741 if ((nxp->ex_flags & NFSEXP_FSID) && 849 if ((nxp->ex_flags & NFSEXP_FSID) &&
742 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) && 850 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
743 !IS_ERR(fsid_key) && 851 !IS_ERR(fsid_key) &&
744 fsid_key->ek_export && 852 fsid_key->ek_mnt &&
745 fsid_key->ek_export != exp) 853 (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) )
746 goto finish; 854 goto finish;
747 855
748 if (exp) { 856 if (exp) {
@@ -775,13 +883,13 @@ exp_export(struct nfsctl_export *nxp)
775 new.ex_anon_gid = nxp->ex_anon_gid; 883 new.ex_anon_gid = nxp->ex_anon_gid;
776 new.ex_fsid = nxp->ex_dev; 884 new.ex_fsid = nxp->ex_dev;
777 885
778 exp = svc_export_lookup(&new, 1); 886 exp = svc_export_lookup(&new);
887 if (exp)
888 exp = svc_export_update(&new, exp);
779 889
780 if (exp == NULL) 890 if (!exp)
781 goto finish; 891 goto finish;
782 892
783 err = 0;
784
785 if (exp_hash(clp, exp) || 893 if (exp_hash(clp, exp) ||
786 exp_fsid_hash(clp, exp)) { 894 exp_fsid_hash(clp, exp)) {
787 /* failed to create at least one index */ 895 /* failed to create at least one index */
@@ -794,7 +902,7 @@ finish:
794 if (exp) 902 if (exp)
795 exp_put(exp); 903 exp_put(exp);
796 if (fsid_key && !IS_ERR(fsid_key)) 904 if (fsid_key && !IS_ERR(fsid_key))
797 expkey_put(&fsid_key->h, &svc_expkey_cache); 905 cache_put(&fsid_key->h, &svc_expkey_cache);
798 if (clp) 906 if (clp)
799 auth_domain_put(clp); 907 auth_domain_put(clp);
800 path_release(&nd); 908 path_release(&nd);
@@ -912,6 +1020,24 @@ out:
912 return err; 1020 return err;
913} 1021}
914 1022
1023struct svc_export *
1024exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
1025 struct cache_req *reqp)
1026{
1027 struct svc_export *exp;
1028 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
1029 if (!ek || IS_ERR(ek))
1030 return ERR_PTR(PTR_ERR(ek));
1031
1032 exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
1033 cache_put(&ek->h, &svc_expkey_cache);
1034
1035 if (!exp || IS_ERR(exp))
1036 return ERR_PTR(PTR_ERR(exp));
1037 return exp;
1038}
1039
1040
915/* 1041/*
916 * Called when we need the filehandle for the root of the pseudofs, 1042 * Called when we need the filehandle for the root of the pseudofs,
917 * for a given NFSv4 client. The root is defined to be the 1043 * for a given NFSv4 client. The root is defined to be the
@@ -922,6 +1048,7 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
922 struct cache_req *creq) 1048 struct cache_req *creq)
923{ 1049{
924 struct svc_expkey *fsid_key; 1050 struct svc_expkey *fsid_key;
1051 struct svc_export *exp;
925 int rv; 1052 int rv;
926 u32 fsidv[2]; 1053 u32 fsidv[2];
927 1054
@@ -933,9 +1060,15 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
933 if (!fsid_key || IS_ERR(fsid_key)) 1060 if (!fsid_key || IS_ERR(fsid_key))
934 return nfserr_perm; 1061 return nfserr_perm;
935 1062
936 rv = fh_compose(fhp, fsid_key->ek_export, 1063 exp = exp_get_by_name(clp, fsid_key->ek_mnt, fsid_key->ek_dentry, creq);
937 fsid_key->ek_export->ex_dentry, NULL); 1064 if (exp == NULL)
938 expkey_put(&fsid_key->h, &svc_expkey_cache); 1065 rv = nfserr_perm;
1066 else if (IS_ERR(exp))
1067 rv = nfserrno(PTR_ERR(exp));
1068 else
1069 rv = fh_compose(fhp, exp,
1070 fsid_key->ek_dentry, NULL);
1071 cache_put(&fsid_key->h, &svc_expkey_cache);
939 return rv; 1072 return rv;
940} 1073}
941 1074
@@ -1054,7 +1187,7 @@ static int e_show(struct seq_file *m, void *p)
1054 cache_get(&exp->h); 1187 cache_get(&exp->h);
1055 if (cache_check(&svc_export_cache, &exp->h, NULL)) 1188 if (cache_check(&svc_export_cache, &exp->h, NULL))
1056 return 0; 1189 return 0;
1057 if (cache_put(&exp->h, &svc_export_cache)) BUG(); 1190 cache_put(&exp->h, &svc_export_cache);
1058 return svc_export_show(m, &svc_export_cache, cp); 1191 return svc_export_show(m, &svc_export_cache, cp);
1059} 1192}
1060 1193
@@ -1129,7 +1262,6 @@ exp_delclient(struct nfsctl_client *ncp)
1129 */ 1262 */
1130 if (dom) { 1263 if (dom) {
1131 err = auth_unix_forget_old(dom); 1264 err = auth_unix_forget_old(dom);
1132 dom->h.expiry_time = get_seconds();
1133 auth_domain_put(dom); 1265 auth_domain_put(dom);
1134 } 1266 }
1135 1267
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 13369650cdf9..4b6aa60dfceb 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -76,21 +76,18 @@ struct ent {
76 char authname[IDMAP_NAMESZ]; 76 char authname[IDMAP_NAMESZ];
77}; 77};
78 78
79#define DefineSimpleCacheLookupMap(STRUCT, FUNC) \
80 DefineCacheLookup(struct STRUCT, h, FUNC##_lookup, \
81 (struct STRUCT *item, int set), /*no setup */, \
82 & FUNC##_cache, FUNC##_hash(item), FUNC##_match(item, tmp), \
83 STRUCT##_init(new, item), STRUCT##_update(tmp, item), 0)
84
85/* Common entry handling */ 79/* Common entry handling */
86 80
87#define ENT_HASHBITS 8 81#define ENT_HASHBITS 8
88#define ENT_HASHMAX (1 << ENT_HASHBITS) 82#define ENT_HASHMAX (1 << ENT_HASHBITS)
89#define ENT_HASHMASK (ENT_HASHMAX - 1) 83#define ENT_HASHMASK (ENT_HASHMAX - 1)
90 84
91static inline void 85static void
92ent_init(struct ent *new, struct ent *itm) 86ent_init(struct cache_head *cnew, struct cache_head *citm)
93{ 87{
88 struct ent *new = container_of(cnew, struct ent, h);
89 struct ent *itm = container_of(citm, struct ent, h);
90
94 new->id = itm->id; 91 new->id = itm->id;
95 new->type = itm->type; 92 new->type = itm->type;
96 93
@@ -98,19 +95,21 @@ ent_init(struct ent *new, struct ent *itm)
98 strlcpy(new->authname, itm->authname, sizeof(new->name)); 95 strlcpy(new->authname, itm->authname, sizeof(new->name));
99} 96}
100 97
101static inline void 98static void
102ent_update(struct ent *new, struct ent *itm) 99ent_put(struct kref *ref)
103{ 100{
104 ent_init(new, itm); 101 struct ent *map = container_of(ref, struct ent, h.ref);
102 kfree(map);
105} 103}
106 104
107static void 105static struct cache_head *
108ent_put(struct cache_head *ch, struct cache_detail *cd) 106ent_alloc(void)
109{ 107{
110 if (cache_put(ch, cd)) { 108 struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL);
111 struct ent *map = container_of(ch, struct ent, h); 109 if (e)
112 kfree(map); 110 return &e->h;
113 } 111 else
112 return NULL;
114} 113}
115 114
116/* 115/*
@@ -149,9 +148,12 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
149 (*bpp)[-1] = '\n'; 148 (*bpp)[-1] = '\n';
150} 149}
151 150
152static inline int 151static int
153idtoname_match(struct ent *a, struct ent *b) 152idtoname_match(struct cache_head *ca, struct cache_head *cb)
154{ 153{
154 struct ent *a = container_of(ca, struct ent, h);
155 struct ent *b = container_of(cb, struct ent, h);
156
155 return (a->id == b->id && a->type == b->type && 157 return (a->id == b->id && a->type == b->type &&
156 strcmp(a->authname, b->authname) == 0); 158 strcmp(a->authname, b->authname) == 0);
157} 159}
@@ -184,7 +186,8 @@ warn_no_idmapd(struct cache_detail *detail)
184 186
185 187
186static int idtoname_parse(struct cache_detail *, char *, int); 188static int idtoname_parse(struct cache_detail *, char *, int);
187static struct ent *idtoname_lookup(struct ent *, int); 189static struct ent *idtoname_lookup(struct ent *);
190static struct ent *idtoname_update(struct ent *, struct ent *);
188 191
189static struct cache_detail idtoname_cache = { 192static struct cache_detail idtoname_cache = {
190 .owner = THIS_MODULE, 193 .owner = THIS_MODULE,
@@ -196,6 +199,10 @@ static struct cache_detail idtoname_cache = {
196 .cache_parse = idtoname_parse, 199 .cache_parse = idtoname_parse,
197 .cache_show = idtoname_show, 200 .cache_show = idtoname_show,
198 .warn_no_listener = warn_no_idmapd, 201 .warn_no_listener = warn_no_idmapd,
202 .match = idtoname_match,
203 .init = ent_init,
204 .update = ent_init,
205 .alloc = ent_alloc,
199}; 206};
200 207
201int 208int
@@ -238,6 +245,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
238 if (ent.h.expiry_time == 0) 245 if (ent.h.expiry_time == 0)
239 goto out; 246 goto out;
240 247
248 error = -ENOMEM;
249 res = idtoname_lookup(&ent);
250 if (!res)
251 goto out;
252
241 /* Name */ 253 /* Name */
242 error = qword_get(&buf, buf1, PAGE_SIZE); 254 error = qword_get(&buf, buf1, PAGE_SIZE);
243 if (error == -EINVAL) 255 if (error == -EINVAL)
@@ -252,10 +264,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
252 memcpy(ent.name, buf1, sizeof(ent.name)); 264 memcpy(ent.name, buf1, sizeof(ent.name));
253 } 265 }
254 error = -ENOMEM; 266 error = -ENOMEM;
255 if ((res = idtoname_lookup(&ent, 1)) == NULL) 267 res = idtoname_update(&ent, res);
268 if (res == NULL)
256 goto out; 269 goto out;
257 270
258 ent_put(&res->h, &idtoname_cache); 271 cache_put(&res->h, &idtoname_cache);
259 272
260 error = 0; 273 error = 0;
261out: 274out:
@@ -264,7 +277,31 @@ out:
264 return error; 277 return error;
265} 278}
266 279
267static DefineSimpleCacheLookupMap(ent, idtoname); 280
281static struct ent *
282idtoname_lookup(struct ent *item)
283{
284 struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache,
285 &item->h,
286 idtoname_hash(item));
287 if (ch)
288 return container_of(ch, struct ent, h);
289 else
290 return NULL;
291}
292
293static struct ent *
294idtoname_update(struct ent *new, struct ent *old)
295{
296 struct cache_head *ch = sunrpc_cache_update(&idtoname_cache,
297 &new->h, &old->h,
298 idtoname_hash(new));
299 if (ch)
300 return container_of(ch, struct ent, h);
301 else
302 return NULL;
303}
304
268 305
269/* 306/*
270 * Name -> ID cache 307 * Name -> ID cache
@@ -291,9 +328,12 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
291 (*bpp)[-1] = '\n'; 328 (*bpp)[-1] = '\n';
292} 329}
293 330
294static inline int 331static int
295nametoid_match(struct ent *a, struct ent *b) 332nametoid_match(struct cache_head *ca, struct cache_head *cb)
296{ 333{
334 struct ent *a = container_of(ca, struct ent, h);
335 struct ent *b = container_of(cb, struct ent, h);
336
297 return (a->type == b->type && strcmp(a->name, b->name) == 0 && 337 return (a->type == b->type && strcmp(a->name, b->name) == 0 &&
298 strcmp(a->authname, b->authname) == 0); 338 strcmp(a->authname, b->authname) == 0);
299} 339}
@@ -317,7 +357,8 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
317 return 0; 357 return 0;
318} 358}
319 359
320static struct ent *nametoid_lookup(struct ent *, int); 360static struct ent *nametoid_lookup(struct ent *);
361static struct ent *nametoid_update(struct ent *, struct ent *);
321static int nametoid_parse(struct cache_detail *, char *, int); 362static int nametoid_parse(struct cache_detail *, char *, int);
322 363
323static struct cache_detail nametoid_cache = { 364static struct cache_detail nametoid_cache = {
@@ -330,6 +371,10 @@ static struct cache_detail nametoid_cache = {
330 .cache_parse = nametoid_parse, 371 .cache_parse = nametoid_parse,
331 .cache_show = nametoid_show, 372 .cache_show = nametoid_show,
332 .warn_no_listener = warn_no_idmapd, 373 .warn_no_listener = warn_no_idmapd,
374 .match = nametoid_match,
375 .init = ent_init,
376 .update = ent_init,
377 .alloc = ent_alloc,
333}; 378};
334 379
335static int 380static int
@@ -379,10 +424,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
379 set_bit(CACHE_NEGATIVE, &ent.h.flags); 424 set_bit(CACHE_NEGATIVE, &ent.h.flags);
380 425
381 error = -ENOMEM; 426 error = -ENOMEM;
382 if ((res = nametoid_lookup(&ent, 1)) == NULL) 427 res = nametoid_lookup(&ent);
428 if (res == NULL)
429 goto out;
430 res = nametoid_update(&ent, res);
431 if (res == NULL)
383 goto out; 432 goto out;
384 433
385 ent_put(&res->h, &nametoid_cache); 434 cache_put(&res->h, &nametoid_cache);
386 error = 0; 435 error = 0;
387out: 436out:
388 kfree(buf1); 437 kfree(buf1);
@@ -390,7 +439,30 @@ out:
390 return (error); 439 return (error);
391} 440}
392 441
393static DefineSimpleCacheLookupMap(ent, nametoid); 442
443static struct ent *
444nametoid_lookup(struct ent *item)
445{
446 struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache,
447 &item->h,
448 nametoid_hash(item));
449 if (ch)
450 return container_of(ch, struct ent, h);
451 else
452 return NULL;
453}
454
455static struct ent *
456nametoid_update(struct ent *new, struct ent *old)
457{
458 struct cache_head *ch = sunrpc_cache_update(&nametoid_cache,
459 &new->h, &old->h,
460 nametoid_hash(new));
461 if (ch)
462 return container_of(ch, struct ent, h);
463 else
464 return NULL;
465}
394 466
395/* 467/*
396 * Exported API 468 * Exported API
@@ -458,24 +530,24 @@ idmap_defer(struct cache_req *req)
458} 530}
459 531
460static inline int 532static inline int
461do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *, int), struct ent *key, 533do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
462 struct cache_detail *detail, struct ent **item, 534 struct cache_detail *detail, struct ent **item,
463 struct idmap_defer_req *mdr) 535 struct idmap_defer_req *mdr)
464{ 536{
465 *item = lookup_fn(key, 0); 537 *item = lookup_fn(key);
466 if (!*item) 538 if (!*item)
467 return -ENOMEM; 539 return -ENOMEM;
468 return cache_check(detail, &(*item)->h, &mdr->req); 540 return cache_check(detail, &(*item)->h, &mdr->req);
469} 541}
470 542
471static inline int 543static inline int
472do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *, int), 544do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
473 struct ent *key, struct cache_detail *detail, 545 struct ent *key, struct cache_detail *detail,
474 struct ent **item) 546 struct ent **item)
475{ 547{
476 int ret = -ENOMEM; 548 int ret = -ENOMEM;
477 549
478 *item = lookup_fn(key, 0); 550 *item = lookup_fn(key);
479 if (!*item) 551 if (!*item)
480 goto out_err; 552 goto out_err;
481 ret = -ETIMEDOUT; 553 ret = -ETIMEDOUT;
@@ -488,7 +560,7 @@ do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *, int),
488 goto out_put; 560 goto out_put;
489 return 0; 561 return 0;
490out_put: 562out_put:
491 ent_put(&(*item)->h, detail); 563 cache_put(&(*item)->h, detail);
492out_err: 564out_err:
493 *item = NULL; 565 *item = NULL;
494 return ret; 566 return ret;
@@ -496,7 +568,7 @@ out_err:
496 568
497static int 569static int
498idmap_lookup(struct svc_rqst *rqstp, 570idmap_lookup(struct svc_rqst *rqstp,
499 struct ent *(*lookup_fn)(struct ent *, int), struct ent *key, 571 struct ent *(*lookup_fn)(struct ent *), struct ent *key,
500 struct cache_detail *detail, struct ent **item) 572 struct cache_detail *detail, struct ent **item)
501{ 573{
502 struct idmap_defer_req *mdr; 574 struct idmap_defer_req *mdr;
@@ -539,7 +611,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
539 if (ret) 611 if (ret)
540 return ret; 612 return ret;
541 *id = item->id; 613 *id = item->id;
542 ent_put(&item->h, &nametoid_cache); 614 cache_put(&item->h, &nametoid_cache);
543 return 0; 615 return 0;
544} 616}
545 617
@@ -561,7 +633,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
561 ret = strlen(item->name); 633 ret = strlen(item->name);
562 BUG_ON(ret > IDMAP_NAMESZ); 634 BUG_ON(ret > IDMAP_NAMESZ);
563 memcpy(name, item->name, ret); 635 memcpy(name, item->name, ret);
564 ent_put(&item->h, &idtoname_cache); 636 cache_put(&item->h, &idtoname_cache);
565 return ret; 637 return ret;
566} 638}
567 639
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6ab762bea99..47ec112b266c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,6 +49,7 @@
49#include <linux/nfsd/state.h> 49#include <linux/nfsd/state.h>
50#include <linux/nfsd/xdr4.h> 50#include <linux/nfsd/xdr4.h>
51#include <linux/namei.h> 51#include <linux/namei.h>
52#include <linux/mutex.h>
52 53
53#define NFSDDBG_FACILITY NFSDDBG_PROC 54#define NFSDDBG_FACILITY NFSDDBG_PROC
54 55
@@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir);
77 78
78/* Locking: 79/* Locking:
79 * 80 *
80 * client_sema: 81 * client_mutex:
81 * protects clientid_hashtbl[], clientstr_hashtbl[], 82 * protects clientid_hashtbl[], clientstr_hashtbl[],
82 * unconfstr_hashtbl[], uncofid_hashtbl[]. 83 * unconfstr_hashtbl[], uncofid_hashtbl[].
83 */ 84 */
84static DECLARE_MUTEX(client_sema); 85static DEFINE_MUTEX(client_mutex);
85 86
86static kmem_cache_t *stateowner_slab = NULL; 87static kmem_cache_t *stateowner_slab = NULL;
87static kmem_cache_t *file_slab = NULL; 88static kmem_cache_t *file_slab = NULL;
@@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL;
91void 92void
92nfs4_lock_state(void) 93nfs4_lock_state(void)
93{ 94{
94 down(&client_sema); 95 mutex_lock(&client_mutex);
95} 96}
96 97
97void 98void
98nfs4_unlock_state(void) 99nfs4_unlock_state(void)
99{ 100{
100 up(&client_sema); 101 mutex_unlock(&client_mutex);
101} 102}
102 103
103static inline u32 104static inline u32
@@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2749 * Note: locks.c uses the BKL to protect the inode's lock list. 2750 * Note: locks.c uses the BKL to protect the inode's lock list.
2750 */ 2751 */
2751 2752
2752 status = posix_lock_file(filp, &file_lock); 2753 /* XXX?: Just to divert the locks_release_private at the start of
2753 dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status); 2754 * locks_copy_lock: */
2755 conflock.fl_ops = NULL;
2756 conflock.fl_lmops = NULL;
2757 status = posix_lock_file_conf(filp, &file_lock, &conflock);
2758 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
2754 switch (-status) { 2759 switch (-status) {
2755 case 0: /* success! */ 2760 case 0: /* success! */
2756 update_stateid(&lock_stp->st_stateid); 2761 update_stateid(&lock_stp->st_stateid);
2757 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, 2762 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
2758 sizeof(stateid_t)); 2763 sizeof(stateid_t));
2759 goto out; 2764 break;
2760 case (EAGAIN): 2765 case (EAGAIN): /* conflock holds conflicting lock */
2761 goto conflicting_lock; 2766 status = nfserr_denied;
2767 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
2768 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
2769 break;
2762 case (EDEADLK): 2770 case (EDEADLK):
2763 status = nfserr_deadlock; 2771 status = nfserr_deadlock;
2764 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); 2772 break;
2765 goto out;
2766 default: 2773 default:
2767 status = nfserrno(status); 2774 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! status %d\n",status);
2768 dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); 2775 status = nfserr_resource;
2769 goto out; 2776 break;
2770 }
2771
2772conflicting_lock:
2773 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
2774 status = nfserr_denied;
2775 /* XXX There is a race here. Future patch needed to provide
2776 * an atomic posix_lock_and_test_file
2777 */
2778 if (!posix_test_lock(filp, &file_lock, &conflock)) {
2779 status = nfserr_serverfault;
2780 goto out;
2781 } 2777 }
2782 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
2783out: 2778out:
2784 if (status && lock->lk_is_new && lock_sop) 2779 if (status && lock->lk_is_new && lock_sop)
2785 release_stateowner(lock_sop); 2780 release_stateowner(lock_sop);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c8960aff0968..3ef017b3b5bd 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -134,7 +134,7 @@ static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size
134 return simple_transaction_read(file, buf, size, pos); 134 return simple_transaction_read(file, buf, size, pos);
135} 135}
136 136
137static struct file_operations transaction_ops = { 137static const struct file_operations transaction_ops = {
138 .write = nfsctl_transaction_write, 138 .write = nfsctl_transaction_write,
139 .read = nfsctl_transaction_read, 139 .read = nfsctl_transaction_read,
140 .release = simple_transaction_release, 140 .release = simple_transaction_release,
@@ -146,7 +146,7 @@ static int exports_open(struct inode *inode, struct file *file)
146 return seq_open(file, &nfs_exports_op); 146 return seq_open(file, &nfs_exports_op);
147} 147}
148 148
149static struct file_operations exports_operations = { 149static const struct file_operations exports_operations = {
150 .open = exports_open, 150 .open = exports_open,
151 .read = seq_read, 151 .read = seq_read,
152 .llseek = seq_lseek, 152 .llseek = seq_lseek,
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 7a3e397b4ed3..3f2ec2e6d06c 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -506,7 +506,7 @@ fh_put(struct svc_fh *fhp)
506 nfsd_nr_put++; 506 nfsd_nr_put++;
507 } 507 }
508 if (exp) { 508 if (exp) {
509 svc_export_put(&exp->h, &svc_export_cache); 509 cache_put(&exp->h, &svc_export_cache);
510 fhp->fh_export = NULL; 510 fhp->fh_export = NULL;
511 } 511 }
512 return; 512 return;
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 1cf955bcc526..57265d563804 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -80,7 +80,7 @@ static int nfsd_proc_open(struct inode *inode, struct file *file)
80 return single_open(file, nfsd_proc_show, NULL); 80 return single_open(file, nfsd_proc_show, NULL);
81} 81}
82 82
83static struct file_operations nfsd_proc_fops = { 83static const struct file_operations nfsd_proc_fops = {
84 .owner = THIS_MODULE, 84 .owner = THIS_MODULE,
85 .open = nfsd_proc_open, 85 .open = nfsd_proc_open,
86 .read = seq_read, 86 .read = seq_read,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 5320e5afaddb..31018333dc38 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -706,7 +706,7 @@ nfsd_close(struct file *filp)
706 * after it. 706 * after it.
707 */ 707 */
708static inline int nfsd_dosync(struct file *filp, struct dentry *dp, 708static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
709 struct file_operations *fop) 709 const struct file_operations *fop)
710{ 710{
711 struct inode *inode = dp->d_inode; 711 struct inode *inode = dp->d_inode;
712 int (*fsync) (struct file *, struct dentry *, int); 712 int (*fsync) (struct file *, struct dentry *, int);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 9d9ed3fe371d..d1e2c6f9f05e 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1553,7 +1553,7 @@ static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry,
1553 1553
1554#endif /* NTFS_RW */ 1554#endif /* NTFS_RW */
1555 1555
1556struct file_operations ntfs_dir_ops = { 1556const struct file_operations ntfs_dir_ops = {
1557 .llseek = generic_file_llseek, /* Seek inside directory. */ 1557 .llseek = generic_file_llseek, /* Seek inside directory. */
1558 .read = generic_read_dir, /* Return -EISDIR. */ 1558 .read = generic_read_dir, /* Return -EISDIR. */
1559 .readdir = ntfs_readdir, /* Read directory contents. */ 1559 .readdir = ntfs_readdir, /* Read directory contents. */
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index f5d057e4acc2..c63a83e8da98 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2294,7 +2294,7 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
2294 2294
2295#endif /* NTFS_RW */ 2295#endif /* NTFS_RW */
2296 2296
2297struct file_operations ntfs_file_ops = { 2297const struct file_operations ntfs_file_ops = {
2298 .llseek = generic_file_llseek, /* Seek inside file. */ 2298 .llseek = generic_file_llseek, /* Seek inside file. */
2299 .read = generic_file_read, /* Read from file. */ 2299 .read = generic_file_read, /* Read from file. */
2300 .aio_read = generic_file_aio_read, /* Async read from file. */ 2300 .aio_read = generic_file_aio_read, /* Async read from file. */
@@ -2337,6 +2337,6 @@ struct inode_operations ntfs_file_inode_ops = {
2337#endif /* NTFS_RW */ 2337#endif /* NTFS_RW */
2338}; 2338};
2339 2339
2340struct file_operations ntfs_empty_file_ops = {}; 2340const struct file_operations ntfs_empty_file_ops = {};
2341 2341
2342struct inode_operations ntfs_empty_inode_ops = {}; 2342struct inode_operations ntfs_empty_inode_ops = {};
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 0fd70295cca6..4af2ad1193ec 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
515 log_page_size = PAGE_CACHE_SIZE; 515 log_page_size = PAGE_CACHE_SIZE;
516 log_page_mask = log_page_size - 1; 516 log_page_mask = log_page_size - 1;
517 /* 517 /*
518 * Use generic_ffs() instead of ffs() to enable the compiler to 518 * Use ntfs_ffs() instead of ffs() to enable the compiler to
519 * optimize log_page_size and log_page_bits into constants. 519 * optimize log_page_size and log_page_bits into constants.
520 */ 520 */
521 log_page_bits = generic_ffs(log_page_size) - 1; 521 log_page_bits = ntfs_ffs(log_page_size) - 1;
522 size &= ~(s64)(log_page_size - 1); 522 size &= ~(s64)(log_page_size - 1);
523 /* 523 /*
524 * Ensure the log file is big enough to store at least the two restart 524 * Ensure the log file is big enough to store at least the two restart
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 4e72bc7afdf9..2438c00ec0ce 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2670,7 +2670,7 @@ mft_rec_already_initialized:
2670 ni->name_len = 4; 2670 ni->name_len = 4;
2671 2671
2672 ni->itype.index.block_size = 4096; 2672 ni->itype.index.block_size = 4096;
2673 ni->itype.index.block_size_bits = generic_ffs(4096) - 1; 2673 ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
2674 ni->itype.index.collation_rule = COLLATION_FILE_NAME; 2674 ni->itype.index.collation_rule = COLLATION_FILE_NAME;
2675 if (vol->cluster_size <= ni->itype.index.block_size) { 2675 if (vol->cluster_size <= ni->itype.index.block_size) {
2676 ni->itype.index.vcn_size = vol->cluster_size; 2676 ni->itype.index.vcn_size = vol->cluster_size;
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index 0624c8ef4d9c..bf7b3d7c0930 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -60,13 +60,13 @@ extern struct kmem_cache *ntfs_index_ctx_cache;
60extern struct address_space_operations ntfs_aops; 60extern struct address_space_operations ntfs_aops;
61extern struct address_space_operations ntfs_mst_aops; 61extern struct address_space_operations ntfs_mst_aops;
62 62
63extern struct file_operations ntfs_file_ops; 63extern const struct file_operations ntfs_file_ops;
64extern struct inode_operations ntfs_file_inode_ops; 64extern struct inode_operations ntfs_file_inode_ops;
65 65
66extern struct file_operations ntfs_dir_ops; 66extern const struct file_operations ntfs_dir_ops;
67extern struct inode_operations ntfs_dir_inode_ops; 67extern struct inode_operations ntfs_dir_inode_ops;
68 68
69extern struct file_operations ntfs_empty_file_ops; 69extern const struct file_operations ntfs_empty_file_ops;
70extern struct inode_operations ntfs_empty_inode_ops; 70extern struct inode_operations ntfs_empty_inode_ops;
71 71
72extern struct export_operations ntfs_export_ops; 72extern struct export_operations ntfs_export_ops;
@@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
132/* From fs/ntfs/upcase.c */ 132/* From fs/ntfs/upcase.c */
133extern ntfschar *generate_default_upcase(void); 133extern ntfschar *generate_default_upcase(void);
134 134
135static inline int ntfs_ffs(int x)
136{
137 int r = 1;
138
139 if (!x)
140 return 0;
141 if (!(x & 0xffff)) {
142 x >>= 16;
143 r += 16;
144 }
145 if (!(x & 0xff)) {
146 x >>= 8;
147 r += 8;
148 }
149 if (!(x & 0xf)) {
150 x >>= 4;
151 r += 4;
152 }
153 if (!(x & 3)) {
154 x >>= 2;
155 r += 2;
156 }
157 if (!(x & 1)) {
158 x >>= 1;
159 r += 1;
160 }
161 return r;
162}
163
135#endif /* _LINUX_NTFS_H */ 164#endif /* _LINUX_NTFS_H */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bf931ba1d364..0d858d0b25be 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -540,7 +540,6 @@ bail:
540 * fs_count, map_bh, dio->rw == WRITE); 540 * fs_count, map_bh, dio->rw == WRITE);
541 */ 541 */
542static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, 542static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
543 unsigned long max_blocks,
544 struct buffer_head *bh_result, int create) 543 struct buffer_head *bh_result, int create)
545{ 544{
546 int ret; 545 int ret;
@@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
548 u64 p_blkno; 547 u64 p_blkno;
549 int contig_blocks; 548 int contig_blocks;
550 unsigned char blocksize_bits; 549 unsigned char blocksize_bits;
550 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
551 551
552 if (!inode || !bh_result) { 552 if (!inode || !bh_result) {
553 mlog(ML_ERROR, "inode or bh_result is null\n"); 553 mlog(ML_ERROR, "inode or bh_result is null\n");
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 84f153aca692..64cd52860c87 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2017,7 +2017,7 @@ out:
2017 return ret; 2017 return ret;
2018} 2018}
2019 2019
2020static struct file_operations ocfs2_dlm_debug_fops = { 2020static const struct file_operations ocfs2_dlm_debug_fops = {
2021 .open = ocfs2_dlm_debug_open, 2021 .open = ocfs2_dlm_debug_open,
2022 .release = ocfs2_dlm_debug_release, 2022 .release = ocfs2_dlm_debug_release,
2023 .read = seq_read, 2023 .read = seq_read,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 4b4cbadd5838..34e903a6a46b 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1176,7 +1176,7 @@ struct inode_operations ocfs2_special_file_iops = {
1176 .getattr = ocfs2_getattr, 1176 .getattr = ocfs2_getattr,
1177}; 1177};
1178 1178
1179struct file_operations ocfs2_fops = { 1179const struct file_operations ocfs2_fops = {
1180 .read = do_sync_read, 1180 .read = do_sync_read,
1181 .write = do_sync_write, 1181 .write = do_sync_write,
1182 .sendfile = generic_file_sendfile, 1182 .sendfile = generic_file_sendfile,
@@ -1188,7 +1188,7 @@ struct file_operations ocfs2_fops = {
1188 .aio_write = ocfs2_file_aio_write, 1188 .aio_write = ocfs2_file_aio_write,
1189}; 1189};
1190 1190
1191struct file_operations ocfs2_dops = { 1191const struct file_operations ocfs2_dops = {
1192 .read = generic_read_dir, 1192 .read = generic_read_dir,
1193 .readdir = ocfs2_readdir, 1193 .readdir = ocfs2_readdir,
1194 .fsync = ocfs2_sync_file, 1194 .fsync = ocfs2_sync_file,
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index a5ea33b24060..740c9e7ca599 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -26,8 +26,8 @@
26#ifndef OCFS2_FILE_H 26#ifndef OCFS2_FILE_H
27#define OCFS2_FILE_H 27#define OCFS2_FILE_H
28 28
29extern struct file_operations ocfs2_fops; 29extern const struct file_operations ocfs2_fops;
30extern struct file_operations ocfs2_dops; 30extern const struct file_operations ocfs2_dops;
31extern struct inode_operations ocfs2_file_iops; 31extern struct inode_operations ocfs2_file_iops;
32extern struct inode_operations ocfs2_special_file_iops; 32extern struct inode_operations ocfs2_special_file_iops;
33struct ocfs2_alloc_context; 33struct ocfs2_alloc_context;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index ae3440ca083c..6a610ae53583 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
377 BUG_ON(!bh); 377 BUG_ON(!bh);
378 BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); 378 BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
379 379
380 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n", 380 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
381 (unsigned long long)bh->b_blocknr, type, 381 (unsigned long long)bh->b_blocknr, type,
382 (type == OCFS2_JOURNAL_ACCESS_CREATE) ? 382 (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
383 "OCFS2_JOURNAL_ACCESS_CREATE" : 383 "OCFS2_JOURNAL_ACCESS_CREATE" :
@@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
582 } 582 }
583 583
584 mlog(0, "inode->i_size = %lld\n", inode->i_size); 584 mlog(0, "inode->i_size = %lld\n", inode->i_size);
585 mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks); 585 mlog(0, "inode->i_blocks = %llu\n",
586 (unsigned long long)inode->i_blocks);
586 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); 587 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
587 588
588 /* call the kernels journal init function now */ 589 /* call the kernels journal init function now */
@@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode)
850 851
851 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 852 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
852 853
853 mlog(0, "Force reading %lu blocks\n", 854 mlog(0, "Force reading %llu blocks\n",
854 (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))); 855 (unsigned long long)(inode->i_blocks >>
856 (inode->i_sb->s_blocksize_bits - 9)));
855 857
856 v_blkno = 0; 858 v_blkno = 0;
857 while (v_blkno < 859 while (v_blkno <
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 274f61d0cda9..0673862c8bdd 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
1444 * write i_size + 1 bytes. */ 1444 * write i_size + 1 bytes. */
1445 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 1445 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
1446 1446
1447 mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n", 1447 mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n",
1448 inode->i_blocks, i_size_read(inode), blocks); 1448 (unsigned long long)inode->i_blocks,
1449 i_size_read(inode), blocks);
1449 1450
1450 /* Sanity check -- make sure we're going to fit. */ 1451 /* Sanity check -- make sure we're going to fit. */
1451 if (bytes_left > 1452 if (bytes_left >
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index aeb0106890e4..0f14276a2e51 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -581,17 +581,17 @@ int property_release (struct inode *inode, struct file *filp)
581 return 0; 581 return 0;
582} 582}
583 583
584static struct file_operations openpromfs_prop_ops = { 584static const struct file_operations openpromfs_prop_ops = {
585 .read = property_read, 585 .read = property_read,
586 .write = property_write, 586 .write = property_write,
587 .release = property_release, 587 .release = property_release,
588}; 588};
589 589
590static struct file_operations openpromfs_nodenum_ops = { 590static const struct file_operations openpromfs_nodenum_ops = {
591 .read = nodenum_read, 591 .read = nodenum_read,
592}; 592};
593 593
594static struct file_operations openprom_operations = { 594static const struct file_operations openprom_operations = {
595 .read = generic_read_dir, 595 .read = generic_read_dir,
596 .readdir = openpromfs_readdir, 596 .readdir = openpromfs_readdir,
597}; 597};
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f924f459bdb8..af0cb4b9e784 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -297,6 +297,25 @@ struct kobj_type ktype_part = {
297 .sysfs_ops = &part_sysfs_ops, 297 .sysfs_ops = &part_sysfs_ops,
298}; 298};
299 299
300static inline void partition_sysfs_add_subdir(struct hd_struct *p)
301{
302 struct kobject *k;
303
304 k = kobject_get(&p->kobj);
305 p->holder_dir = kobject_add_dir(k, "holders");
306 kobject_put(k);
307}
308
309static inline void disk_sysfs_add_subdirs(struct gendisk *disk)
310{
311 struct kobject *k;
312
313 k = kobject_get(&disk->kobj);
314 disk->holder_dir = kobject_add_dir(k, "holders");
315 disk->slave_dir = kobject_add_dir(k, "slaves");
316 kobject_put(k);
317}
318
300void delete_partition(struct gendisk *disk, int part) 319void delete_partition(struct gendisk *disk, int part)
301{ 320{
302 struct hd_struct *p = disk->part[part-1]; 321 struct hd_struct *p = disk->part[part-1];
@@ -310,6 +329,8 @@ void delete_partition(struct gendisk *disk, int part)
310 p->ios[0] = p->ios[1] = 0; 329 p->ios[0] = p->ios[1] = 0;
311 p->sectors[0] = p->sectors[1] = 0; 330 p->sectors[0] = p->sectors[1] = 0;
312 devfs_remove("%s/part%d", disk->devfs_name, part); 331 devfs_remove("%s/part%d", disk->devfs_name, part);
332 if (p->holder_dir)
333 kobject_unregister(p->holder_dir);
313 kobject_unregister(&p->kobj); 334 kobject_unregister(&p->kobj);
314} 335}
315 336
@@ -337,6 +358,7 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len)
337 p->kobj.parent = &disk->kobj; 358 p->kobj.parent = &disk->kobj;
338 p->kobj.ktype = &ktype_part; 359 p->kobj.ktype = &ktype_part;
339 kobject_register(&p->kobj); 360 kobject_register(&p->kobj);
361 partition_sysfs_add_subdir(p);
340 disk->part[part-1] = p; 362 disk->part[part-1] = p;
341} 363}
342 364
@@ -383,6 +405,7 @@ void register_disk(struct gendisk *disk)
383 if ((err = kobject_add(&disk->kobj))) 405 if ((err = kobject_add(&disk->kobj)))
384 return; 406 return;
385 disk_sysfs_symlinks(disk); 407 disk_sysfs_symlinks(disk);
408 disk_sysfs_add_subdirs(disk);
386 kobject_uevent(&disk->kobj, KOBJ_ADD); 409 kobject_uevent(&disk->kobj, KOBJ_ADD);
387 410
388 /* No minors to use for partitions */ 411 /* No minors to use for partitions */
@@ -483,6 +506,10 @@ void del_gendisk(struct gendisk *disk)
483 506
484 devfs_remove_disk(disk); 507 devfs_remove_disk(disk);
485 508
509 if (disk->holder_dir)
510 kobject_unregister(disk->holder_dir);
511 if (disk->slave_dir)
512 kobject_unregister(disk->slave_dir);
486 if (disk->driverfs_dev) { 513 if (disk->driverfs_dev) {
487 char *disk_name = make_block_name(disk); 514 char *disk_name = make_block_name(disk);
488 sysfs_remove_link(&disk->kobj, "device"); 515 sysfs_remove_link(&disk->kobj, "device");
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c
index 87f50444fd39..3f0a780c9cec 100644
--- a/fs/partitions/devfs.c
+++ b/fs/partitions/devfs.c
@@ -6,7 +6,7 @@
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/genhd.h> 7#include <linux/genhd.h>
8#include <linux/bitops.h> 8#include <linux/bitops.h>
9#include <asm/semaphore.h> 9#include <linux/mutex.h>
10 10
11 11
12struct unique_numspace { 12struct unique_numspace {
@@ -16,7 +16,7 @@ struct unique_numspace {
16 struct semaphore mutex; 16 struct semaphore mutex;
17}; 17};
18 18
19static DECLARE_MUTEX(numspace_mutex); 19static DEFINE_MUTEX(numspace_mutex);
20 20
21static int expand_numspace(struct unique_numspace *s) 21static int expand_numspace(struct unique_numspace *s)
22{ 22{
@@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s)
48{ 48{
49 int rval = 0; 49 int rval = 0;
50 50
51 down(&numspace_mutex); 51 mutex_lock(&numspace_mutex);
52 if (s->num_free < 1) 52 if (s->num_free < 1)
53 rval = expand_numspace(s); 53 rval = expand_numspace(s);
54 if (!rval) { 54 if (!rval) {
@@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s)
56 --s->num_free; 56 --s->num_free;
57 __set_bit(rval, s->bits); 57 __set_bit(rval, s->bits);
58 } 58 }
59 up(&numspace_mutex); 59 mutex_unlock(&numspace_mutex);
60 60
61 return rval; 61 return rval;
62} 62}
@@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number)
66 int old_val; 66 int old_val;
67 67
68 if (number >= 0) { 68 if (number >= 0) {
69 down(&numspace_mutex); 69 mutex_lock(&numspace_mutex);
70 old_val = __test_and_clear_bit(number, s->bits); 70 old_val = __test_and_clear_bit(number, s->bits);
71 if (old_val) 71 if (old_val)
72 ++s->num_free; 72 ++s->num_free;
73 up(&numspace_mutex); 73 mutex_unlock(&numspace_mutex);
74 } 74 }
75} 75}
76 76
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115b..e2f4f1d9ffc2 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -568,7 +568,7 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
568 * The file_operations structs are not static because they 568 * The file_operations structs are not static because they
569 * are also used in linux/fs/fifo.c to do operations on FIFOs. 569 * are also used in linux/fs/fifo.c to do operations on FIFOs.
570 */ 570 */
571struct file_operations read_fifo_fops = { 571const struct file_operations read_fifo_fops = {
572 .llseek = no_llseek, 572 .llseek = no_llseek,
573 .read = pipe_read, 573 .read = pipe_read,
574 .readv = pipe_readv, 574 .readv = pipe_readv,
@@ -580,7 +580,7 @@ struct file_operations read_fifo_fops = {
580 .fasync = pipe_read_fasync, 580 .fasync = pipe_read_fasync,
581}; 581};
582 582
583struct file_operations write_fifo_fops = { 583const struct file_operations write_fifo_fops = {
584 .llseek = no_llseek, 584 .llseek = no_llseek,
585 .read = bad_pipe_r, 585 .read = bad_pipe_r,
586 .write = pipe_write, 586 .write = pipe_write,
@@ -592,7 +592,7 @@ struct file_operations write_fifo_fops = {
592 .fasync = pipe_write_fasync, 592 .fasync = pipe_write_fasync,
593}; 593};
594 594
595struct file_operations rdwr_fifo_fops = { 595const struct file_operations rdwr_fifo_fops = {
596 .llseek = no_llseek, 596 .llseek = no_llseek,
597 .read = pipe_read, 597 .read = pipe_read,
598 .readv = pipe_readv, 598 .readv = pipe_readv,
@@ -675,7 +675,7 @@ fail_page:
675 return NULL; 675 return NULL;
676} 676}
677 677
678static struct vfsmount *pipe_mnt; 678static struct vfsmount *pipe_mnt __read_mostly;
679static int pipefs_delete_dentry(struct dentry *dentry) 679static int pipefs_delete_dentry(struct dentry *dentry)
680{ 680{
681 return 1; 681 return 1;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7eb1bd7f800c..7a76ad570230 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
330 unsigned long min_flt = 0, maj_flt = 0; 330 unsigned long min_flt = 0, maj_flt = 0;
331 cputime_t cutime, cstime, utime, stime; 331 cputime_t cutime, cstime, utime, stime;
332 unsigned long rsslim = 0; 332 unsigned long rsslim = 0;
333 DEFINE_KTIME(it_real_value);
334 struct task_struct *t; 333 struct task_struct *t;
335 char tcomm[sizeof(task->comm)]; 334 char tcomm[sizeof(task->comm)];
336 335
@@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
386 utime = cputime_add(utime, task->signal->utime); 385 utime = cputime_add(utime, task->signal->utime);
387 stime = cputime_add(stime, task->signal->stime); 386 stime = cputime_add(stime, task->signal->stime);
388 } 387 }
389 it_real_value = task->signal->real_timer.expires;
390 } 388 }
391 ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0; 389 ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
392 read_unlock(&tasklist_lock); 390 read_unlock(&tasklist_lock);
@@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
413 start_time = nsec_to_clock_t(start_time); 411 start_time = nsec_to_clock_t(start_time);
414 412
415 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ 413 res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
416%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ 414%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
417%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", 415%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
418 task->pid, 416 task->pid,
419 tcomm, 417 tcomm,
@@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
435 priority, 433 priority,
436 nice, 434 nice,
437 num_threads, 435 num_threads,
438 (long) ktime_to_clock_t(it_real_value),
439 start_time, 436 start_time,
440 vsize, 437 vsize,
441 mm ? get_mm_rss(mm) : 0, 438 mm ? get_mm_rss(mm) : 0,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 20e5c4509a43..4ba03009cf72 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,6 +19,7 @@
19#include <linux/idr.h> 19#include <linux/idr.h>
20#include <linux/namei.h> 20#include <linux/namei.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/spinlock.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23 24
24#include "internal.h" 25#include "internal.h"
@@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer,
29 size_t count, loff_t *ppos); 30 size_t count, loff_t *ppos);
30static loff_t proc_file_lseek(struct file *, loff_t, int); 31static loff_t proc_file_lseek(struct file *, loff_t, int);
31 32
33DEFINE_SPINLOCK(proc_subdir_lock);
34
32int proc_match(int len, const char *name, struct proc_dir_entry *de) 35int proc_match(int len, const char *name, struct proc_dir_entry *de)
33{ 36{
34 if (de->namelen != len) 37 if (de->namelen != len)
@@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name,
277 const char *cp = name, *next; 280 const char *cp = name, *next;
278 struct proc_dir_entry *de; 281 struct proc_dir_entry *de;
279 int len; 282 int len;
283 int rtn = 0;
280 284
285 spin_lock(&proc_subdir_lock);
281 de = &proc_root; 286 de = &proc_root;
282 while (1) { 287 while (1) {
283 next = strchr(cp, '/'); 288 next = strchr(cp, '/');
@@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name,
289 if (proc_match(len, cp, de)) 294 if (proc_match(len, cp, de))
290 break; 295 break;
291 } 296 }
292 if (!de) 297 if (!de) {
293 return -ENOENT; 298 rtn = -ENOENT;
299 goto out;
300 }
294 cp += len + 1; 301 cp += len + 1;
295 } 302 }
296 *residual = cp; 303 *residual = cp;
297 *ret = de; 304 *ret = de;
298 return 0; 305out:
306 spin_unlock(&proc_subdir_lock);
307 return rtn;
299} 308}
300 309
301static DEFINE_IDR(proc_inum_idr); 310static DEFINE_IDR(proc_inum_idr);
@@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
380 int error = -ENOENT; 389 int error = -ENOENT;
381 390
382 lock_kernel(); 391 lock_kernel();
392 spin_lock(&proc_subdir_lock);
383 de = PDE(dir); 393 de = PDE(dir);
384 if (de) { 394 if (de) {
385 for (de = de->subdir; de ; de = de->next) { 395 for (de = de->subdir; de ; de = de->next) {
@@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
388 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { 398 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
389 unsigned int ino = de->low_ino; 399 unsigned int ino = de->low_ino;
390 400
401 spin_unlock(&proc_subdir_lock);
391 error = -EINVAL; 402 error = -EINVAL;
392 inode = proc_get_inode(dir->i_sb, ino, de); 403 inode = proc_get_inode(dir->i_sb, ino, de);
404 spin_lock(&proc_subdir_lock);
393 break; 405 break;
394 } 406 }
395 } 407 }
396 } 408 }
409 spin_unlock(&proc_subdir_lock);
397 unlock_kernel(); 410 unlock_kernel();
398 411
399 if (inode) { 412 if (inode) {
@@ -447,11 +460,13 @@ int proc_readdir(struct file * filp,
447 filp->f_pos++; 460 filp->f_pos++;
448 /* fall through */ 461 /* fall through */
449 default: 462 default:
463 spin_lock(&proc_subdir_lock);
450 de = de->subdir; 464 de = de->subdir;
451 i -= 2; 465 i -= 2;
452 for (;;) { 466 for (;;) {
453 if (!de) { 467 if (!de) {
454 ret = 1; 468 ret = 1;
469 spin_unlock(&proc_subdir_lock);
455 goto out; 470 goto out;
456 } 471 }
457 if (!i) 472 if (!i)
@@ -461,12 +476,16 @@ int proc_readdir(struct file * filp,
461 } 476 }
462 477
463 do { 478 do {
479 /* filldir passes info to user space */
480 spin_unlock(&proc_subdir_lock);
464 if (filldir(dirent, de->name, de->namelen, filp->f_pos, 481 if (filldir(dirent, de->name, de->namelen, filp->f_pos,
465 de->low_ino, de->mode >> 12) < 0) 482 de->low_ino, de->mode >> 12) < 0)
466 goto out; 483 goto out;
484 spin_lock(&proc_subdir_lock);
467 filp->f_pos++; 485 filp->f_pos++;
468 de = de->next; 486 de = de->next;
469 } while (de); 487 } while (de);
488 spin_unlock(&proc_subdir_lock);
470 } 489 }
471 ret = 1; 490 ret = 1;
472out: unlock_kernel(); 491out: unlock_kernel();
@@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
500 if (i == 0) 519 if (i == 0)
501 return -EAGAIN; 520 return -EAGAIN;
502 dp->low_ino = i; 521 dp->low_ino = i;
522
523 spin_lock(&proc_subdir_lock);
503 dp->next = dir->subdir; 524 dp->next = dir->subdir;
504 dp->parent = dir; 525 dp->parent = dir;
505 dir->subdir = dp; 526 dir->subdir = dp;
527 spin_unlock(&proc_subdir_lock);
528
506 if (S_ISDIR(dp->mode)) { 529 if (S_ISDIR(dp->mode)) {
507 if (dp->proc_iops == NULL) { 530 if (dp->proc_iops == NULL) {
508 dp->proc_fops = &proc_dir_operations; 531 dp->proc_fops = &proc_dir_operations;
@@ -537,7 +560,7 @@ static void proc_kill_inodes(struct proc_dir_entry *de)
537 struct file * filp = list_entry(p, struct file, f_u.fu_list); 560 struct file * filp = list_entry(p, struct file, f_u.fu_list);
538 struct dentry * dentry = filp->f_dentry; 561 struct dentry * dentry = filp->f_dentry;
539 struct inode * inode; 562 struct inode * inode;
540 struct file_operations *fops; 563 const struct file_operations *fops;
541 564
542 if (dentry->d_op != &proc_dentry_operations) 565 if (dentry->d_op != &proc_dentry_operations)
543 continue; 566 continue;
@@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
694 if (!parent && xlate_proc_name(name, &parent, &fn) != 0) 717 if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
695 goto out; 718 goto out;
696 len = strlen(fn); 719 len = strlen(fn);
720
721 spin_lock(&proc_subdir_lock);
697 for (p = &parent->subdir; *p; p=&(*p)->next ) { 722 for (p = &parent->subdir; *p; p=&(*p)->next ) {
698 if (!proc_match(len, fn, *p)) 723 if (!proc_match(len, fn, *p))
699 continue; 724 continue;
@@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
714 } 739 }
715 break; 740 break;
716 } 741 }
742 spin_unlock(&proc_subdir_lock);
717out: 743out:
718 return; 744 return;
719} 745}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 95a1cf32b838..0502f17b860d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -30,7 +30,7 @@ do { \
30 30
31#endif 31#endif
32 32
33extern void create_seq_entry(char *name, mode_t mode, struct file_operations *f); 33extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
34extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **); 34extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **);
35extern int proc_tid_stat(struct task_struct *, char *); 35extern int proc_tid_stat(struct task_struct *, char *);
36extern int proc_tgid_stat(struct task_struct *, char *); 36extern int proc_tgid_stat(struct task_struct *, char *);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index adc2cd95169a..17f6e8fa1397 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -31,7 +31,7 @@ static int open_kcore(struct inode * inode, struct file * filp)
31 31
32static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *); 32static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
33 33
34struct file_operations proc_kcore_operations = { 34const struct file_operations proc_kcore_operations = {
35 .read = read_kcore, 35 .read = read_kcore,
36 .open = open_kcore, 36 .open = open_kcore,
37}; 37};
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 10d37bf25206..ff3b90b56e9d 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -47,7 +47,7 @@ static unsigned int kmsg_poll(struct file *file, poll_table *wait)
47} 47}
48 48
49 49
50struct file_operations proc_kmsg_operations = { 50const struct file_operations proc_kmsg_operations = {
51 .read = kmsg_read, 51 .read = kmsg_read,
52 .poll = kmsg_poll, 52 .poll = kmsg_poll,
53 .open = kmsg_open, 53 .open = kmsg_open,
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 9bdd077d6f55..596b4b4f1cc8 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np,
136 * properties are quite unimportant for us though, thus we 136 * properties are quite unimportant for us though, thus we
137 * simply "skip" them here, but we do have to check. 137 * simply "skip" them here, but we do have to check.
138 */ 138 */
139 spin_lock(&proc_subdir_lock);
139 for (ent = de->subdir; ent != NULL; ent = ent->next) 140 for (ent = de->subdir; ent != NULL; ent = ent->next)
140 if (!strcmp(ent->name, pp->name)) 141 if (!strcmp(ent->name, pp->name))
141 break; 142 break;
143 spin_unlock(&proc_subdir_lock);
142 if (ent != NULL) { 144 if (ent != NULL) {
143 printk(KERN_WARNING "device-tree: property \"%s\" name" 145 printk(KERN_WARNING "device-tree: property \"%s\" name"
144 " conflicts with node in %s\n", pp->name, 146 " conflicts with node in %s\n", pp->name,
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1e9ea37d457e..ef5a3323f4b5 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -534,7 +534,7 @@ static int show_stat(struct seq_file *p, void *v)
534 if (wall_to_monotonic.tv_nsec) 534 if (wall_to_monotonic.tv_nsec)
535 --jif; 535 --jif;
536 536
537 for_each_cpu(i) { 537 for_each_possible_cpu(i) {
538 int j; 538 int j;
539 539
540 user = cputime64_add(user, kstat_cpu(i).cpustat.user); 540 user = cputime64_add(user, kstat_cpu(i).cpustat.user);
@@ -731,7 +731,7 @@ static struct file_operations proc_sysrq_trigger_operations = {
731 731
732struct proc_dir_entry *proc_root_kcore; 732struct proc_dir_entry *proc_root_kcore;
733 733
734void create_seq_entry(char *name, mode_t mode, struct file_operations *f) 734void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
735{ 735{
736 struct proc_dir_entry *entry; 736 struct proc_dir_entry *entry;
737 entry = create_proc_entry(name, mode, NULL); 737 entry = create_proc_entry(name, mode, NULL);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 4063fb32f78c..7efa73d44c9a 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -172,7 +172,7 @@ static int open_vmcore(struct inode *inode, struct file *filp)
172 return 0; 172 return 0;
173} 173}
174 174
175struct file_operations proc_vmcore_operations = { 175const struct file_operations proc_vmcore_operations = {
176 .read = read_vmcore, 176 .read = read_vmcore,
177 .open = open_vmcore, 177 .open = open_vmcore,
178}; 178};
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 7a8f5595c26f..9031948fefd0 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -81,7 +81,7 @@ out:
81 return 0; 81 return 0;
82} 82}
83 83
84struct file_operations qnx4_dir_operations = 84const struct file_operations qnx4_dir_operations =
85{ 85{
86 .read = generic_read_dir, 86 .read = generic_read_dir,
87 .readdir = qnx4_readdir, 87 .readdir = qnx4_readdir,
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c
index c33963fded9e..62af4b1348bd 100644
--- a/fs/qnx4/file.c
+++ b/fs/qnx4/file.c
@@ -19,7 +19,7 @@
19 * We have mostly NULL's here: the current defaults are ok for 19 * We have mostly NULL's here: the current defaults are ok for
20 * the qnx4 filesystem. 20 * the qnx4 filesystem.
21 */ 21 */
22struct file_operations qnx4_file_operations = 22const struct file_operations qnx4_file_operations =
23{ 23{
24 .llseek = generic_file_llseek, 24 .llseek = generic_file_llseek,
25 .read = generic_file_read, 25 .read = generic_file_read,
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 6ada2095b9ac..00a933eb820c 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -32,7 +32,7 @@ struct address_space_operations ramfs_aops = {
32 .commit_write = simple_commit_write 32 .commit_write = simple_commit_write
33}; 33};
34 34
35struct file_operations ramfs_file_operations = { 35const struct file_operations ramfs_file_operations = {
36 .read = generic_file_read, 36 .read = generic_file_read,
37 .write = generic_file_write, 37 .write = generic_file_write,
38 .mmap = generic_file_mmap, 38 .mmap = generic_file_mmap,
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index b1ca234068f6..f443a84b98a5 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -33,7 +33,7 @@ struct address_space_operations ramfs_aops = {
33 .commit_write = simple_commit_write 33 .commit_write = simple_commit_write
34}; 34};
35 35
36struct file_operations ramfs_file_operations = { 36const struct file_operations ramfs_file_operations = {
37 .mmap = ramfs_nommu_mmap, 37 .mmap = ramfs_nommu_mmap,
38 .get_unmapped_area = ramfs_nommu_get_unmapped_area, 38 .get_unmapped_area = ramfs_nommu_get_unmapped_area,
39 .read = generic_file_read, 39 .read = generic_file_read,
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h
index 272c8a7120b0..313237631b49 100644
--- a/fs/ramfs/internal.h
+++ b/fs/ramfs/internal.h
@@ -11,5 +11,5 @@
11 11
12 12
13extern struct address_space_operations ramfs_aops; 13extern struct address_space_operations ramfs_aops;
14extern struct file_operations ramfs_file_operations; 14extern const struct file_operations ramfs_file_operations;
15extern struct inode_operations ramfs_file_inode_operations; 15extern struct inode_operations ramfs_file_inode_operations;
diff --git a/fs/read_write.c b/fs/read_write.c
index 34b1bf259efd..6256ca81a718 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -19,7 +19,7 @@
19#include <asm/uaccess.h> 19#include <asm/uaccess.h>
20#include <asm/unistd.h> 20#include <asm/unistd.h>
21 21
22struct file_operations generic_ro_fops = { 22const struct file_operations generic_ro_fops = {
23 .llseek = generic_file_llseek, 23 .llseek = generic_file_llseek,
24 .read = generic_file_read, 24 .read = generic_file_read,
25 .mmap = generic_file_readonly_mmap, 25 .mmap = generic_file_readonly_mmap,
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index d71ac6579289..973c819f8033 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -18,7 +18,7 @@ static int reiserfs_readdir(struct file *, void *, filldir_t);
18static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, 18static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry,
19 int datasync); 19 int datasync);
20 20
21struct file_operations reiserfs_dir_operations = { 21const struct file_operations reiserfs_dir_operations = {
22 .read = generic_read_dir, 22 .read = generic_read_dir,
23 .readdir = reiserfs_readdir, 23 .readdir = reiserfs_readdir,
24 .fsync = reiserfs_dir_fsync, 24 .fsync = reiserfs_dir_fsync,
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index d0c1e865963e..010094d14da6 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -1566,7 +1566,7 @@ static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user * buf,
1566 return generic_file_aio_write(iocb, buf, count, pos); 1566 return generic_file_aio_write(iocb, buf, count, pos);
1567} 1567}
1568 1568
1569struct file_operations reiserfs_file_operations = { 1569const struct file_operations reiserfs_file_operations = {
1570 .read = generic_file_read, 1570 .read = generic_file_read,
1571 .write = reiserfs_file_write, 1571 .write = reiserfs_file_write,
1572 .ioctl = reiserfs_ioctl, 1572 .ioctl = reiserfs_ioctl,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d60f6238c66a..9857e50f85e7 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
466 direct_IO request. */ 466 direct_IO request. */
467static int reiserfs_get_blocks_direct_io(struct inode *inode, 467static int reiserfs_get_blocks_direct_io(struct inode *inode,
468 sector_t iblock, 468 sector_t iblock,
469 unsigned long max_blocks,
470 struct buffer_head *bh_result, 469 struct buffer_head *bh_result,
471 int create) 470 int create)
472{ 471{
@@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
2793} 2792}
2794 2793
2795/* clm -- taken from fs/buffer.c:block_invalidate_page */ 2794/* clm -- taken from fs/buffer.c:block_invalidate_page */
2796static int reiserfs_invalidatepage(struct page *page, unsigned long offset) 2795static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
2797{ 2796{
2798 struct buffer_head *head, *bh, *next; 2797 struct buffer_head *head, *bh, *next;
2799 struct inode *inode = page->mapping->host; 2798 struct inode *inode = page->mapping->host;
@@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset)
2832 * The get_block cached value has been unconditionally invalidated, 2831 * The get_block cached value has been unconditionally invalidated,
2833 * so real IO is not possible anymore. 2832 * so real IO is not possible anymore.
2834 */ 2833 */
2835 if (!offset && ret) 2834 if (!offset && ret) {
2836 ret = try_to_release_page(page, 0); 2835 ret = try_to_release_page(page, 0);
2836 /* maybe should BUG_ON(!ret); - neilb */
2837 }
2837 out: 2838 out:
2838 return ret; 2839 return;
2839} 2840}
2840 2841
2841static int reiserfs_set_page_dirty(struct page *page) 2842static int reiserfs_set_page_dirty(struct page *page)
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 78b40621b88b..27bd3a1df2ad 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
143 char b[BDEVNAME_SIZE]; 143 char b[BDEVNAME_SIZE];
144 144
145 sprintf(buf, 145 sprintf(buf,
146 "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", 146 "dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
147 bdevname(bh->b_bdev, b), bh->b_size, 147 bdevname(bh->b_bdev, b), bh->b_size,
148 (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), 148 (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
149 bh->b_state, bh->b_page, 149 bh->b_state, bh->b_page,
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index ef6caed9336b..731688e1cfe3 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -470,7 +470,7 @@ static int r_open(struct inode *inode, struct file *file)
470 return ret; 470 return ret;
471} 471}
472 472
473static struct file_operations r_file_operations = { 473static const struct file_operations r_file_operations = {
474 .open = r_open, 474 .open = r_open,
475 .read = seq_read, 475 .read = seq_read,
476 .llseek = seq_lseek, 476 .llseek = seq_lseek,
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index c2fc424d7d5c..9b9eda7b335c 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -463,7 +463,7 @@ static struct address_space_operations romfs_aops = {
463 .readpage = romfs_readpage 463 .readpage = romfs_readpage
464}; 464};
465 465
466static struct file_operations romfs_dir_operations = { 466static const struct file_operations romfs_dir_operations = {
467 .read = generic_read_dir, 467 .read = generic_read_dir,
468 .readdir = romfs_readdir, 468 .readdir = romfs_readdir,
469}; 469};
diff --git a/fs/select.c b/fs/select.c
index 1815a57d2255..b3a3a1326af6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -29,12 +29,6 @@
29#define ROUND_UP(x,y) (((x)+(y)-1)/(y)) 29#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
30#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM) 30#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
31 31
32struct poll_table_entry {
33 struct file * filp;
34 wait_queue_t wait;
35 wait_queue_head_t * wait_address;
36};
37
38struct poll_table_page { 32struct poll_table_page {
39 struct poll_table_page * next; 33 struct poll_table_page * next;
40 struct poll_table_entry * entry; 34 struct poll_table_entry * entry;
@@ -64,13 +58,23 @@ void poll_initwait(struct poll_wqueues *pwq)
64 init_poll_funcptr(&pwq->pt, __pollwait); 58 init_poll_funcptr(&pwq->pt, __pollwait);
65 pwq->error = 0; 59 pwq->error = 0;
66 pwq->table = NULL; 60 pwq->table = NULL;
61 pwq->inline_index = 0;
67} 62}
68 63
69EXPORT_SYMBOL(poll_initwait); 64EXPORT_SYMBOL(poll_initwait);
70 65
66static void free_poll_entry(struct poll_table_entry *entry)
67{
68 remove_wait_queue(entry->wait_address,&entry->wait);
69 fput(entry->filp);
70}
71
71void poll_freewait(struct poll_wqueues *pwq) 72void poll_freewait(struct poll_wqueues *pwq)
72{ 73{
73 struct poll_table_page * p = pwq->table; 74 struct poll_table_page * p = pwq->table;
75 int i;
76 for (i = 0; i < pwq->inline_index; i++)
77 free_poll_entry(pwq->inline_entries + i);
74 while (p) { 78 while (p) {
75 struct poll_table_entry * entry; 79 struct poll_table_entry * entry;
76 struct poll_table_page *old; 80 struct poll_table_page *old;
@@ -78,8 +82,7 @@ void poll_freewait(struct poll_wqueues *pwq)
78 entry = p->entry; 82 entry = p->entry;
79 do { 83 do {
80 entry--; 84 entry--;
81 remove_wait_queue(entry->wait_address,&entry->wait); 85 free_poll_entry(entry);
82 fput(entry->filp);
83 } while (entry > p->entries); 86 } while (entry > p->entries);
84 old = p; 87 old = p;
85 p = p->next; 88 p = p->next;
@@ -89,12 +92,14 @@ void poll_freewait(struct poll_wqueues *pwq)
89 92
90EXPORT_SYMBOL(poll_freewait); 93EXPORT_SYMBOL(poll_freewait);
91 94
92static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, 95static struct poll_table_entry *poll_get_entry(poll_table *_p)
93 poll_table *_p)
94{ 96{
95 struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt); 97 struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
96 struct poll_table_page *table = p->table; 98 struct poll_table_page *table = p->table;
97 99
100 if (p->inline_index < N_INLINE_POLL_ENTRIES)
101 return p->inline_entries + p->inline_index++;
102
98 if (!table || POLL_TABLE_FULL(table)) { 103 if (!table || POLL_TABLE_FULL(table)) {
99 struct poll_table_page *new_table; 104 struct poll_table_page *new_table;
100 105
@@ -102,7 +107,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
102 if (!new_table) { 107 if (!new_table) {
103 p->error = -ENOMEM; 108 p->error = -ENOMEM;
104 __set_current_state(TASK_RUNNING); 109 __set_current_state(TASK_RUNNING);
105 return; 110 return NULL;
106 } 111 }
107 new_table->entry = new_table->entries; 112 new_table->entry = new_table->entries;
108 new_table->next = table; 113 new_table->next = table;
@@ -110,16 +115,21 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
110 table = new_table; 115 table = new_table;
111 } 116 }
112 117
113 /* Add a new entry */ 118 return table->entry++;
114 { 119}
115 struct poll_table_entry * entry = table->entry; 120
116 table->entry = entry+1; 121/* Add a new entry */
117 get_file(filp); 122static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
118 entry->filp = filp; 123 poll_table *p)
119 entry->wait_address = wait_address; 124{
120 init_waitqueue_entry(&entry->wait, current); 125 struct poll_table_entry *entry = poll_get_entry(p);
121 add_wait_queue(wait_address,&entry->wait); 126 if (!entry)
122 } 127 return;
128 get_file(filp);
129 entry->filp = filp;
130 entry->wait_address = wait_address;
131 init_waitqueue_entry(&entry->wait, current);
132 add_wait_queue(wait_address,&entry->wait);
123} 133}
124 134
125#define FDS_IN(fds, n) (fds->in + n) 135#define FDS_IN(fds, n) (fds->in + n)
@@ -210,7 +220,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
210 for (i = 0; i < n; ++rinp, ++routp, ++rexp) { 220 for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
211 unsigned long in, out, ex, all_bits, bit = 1, mask, j; 221 unsigned long in, out, ex, all_bits, bit = 1, mask, j;
212 unsigned long res_in = 0, res_out = 0, res_ex = 0; 222 unsigned long res_in = 0, res_out = 0, res_ex = 0;
213 struct file_operations *f_op = NULL; 223 const struct file_operations *f_op = NULL;
214 struct file *file = NULL; 224 struct file *file = NULL;
215 225
216 in = *inp++; out = *outp++; ex = *exp++; 226 in = *inp++; out = *outp++; ex = *exp++;
@@ -221,17 +231,18 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
221 } 231 }
222 232
223 for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) { 233 for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
234 int fput_needed;
224 if (i >= n) 235 if (i >= n)
225 break; 236 break;
226 if (!(bit & all_bits)) 237 if (!(bit & all_bits))
227 continue; 238 continue;
228 file = fget(i); 239 file = fget_light(i, &fput_needed);
229 if (file) { 240 if (file) {
230 f_op = file->f_op; 241 f_op = file->f_op;
231 mask = DEFAULT_POLLMASK; 242 mask = DEFAULT_POLLMASK;
232 if (f_op && f_op->poll) 243 if (f_op && f_op->poll)
233 mask = (*f_op->poll)(file, retval ? NULL : wait); 244 mask = (*f_op->poll)(file, retval ? NULL : wait);
234 fput(file); 245 fput_light(file, fput_needed);
235 if ((mask & POLLIN_SET) && (in & bit)) { 246 if ((mask & POLLIN_SET) && (in & bit)) {
236 res_in |= bit; 247 res_in |= bit;
237 retval++; 248 retval++;
@@ -284,16 +295,6 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
284 return retval; 295 return retval;
285} 296}
286 297
287static void *select_bits_alloc(int size)
288{
289 return kmalloc(6 * size, GFP_KERNEL);
290}
291
292static void select_bits_free(void *bits, int size)
293{
294 kfree(bits);
295}
296
297/* 298/*
298 * We can actually return ERESTARTSYS instead of EINTR, but I'd 299 * We can actually return ERESTARTSYS instead of EINTR, but I'd
299 * like to be certain this leads to no problems. So I return 300 * like to be certain this leads to no problems. So I return
@@ -312,6 +313,8 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
312 char *bits; 313 char *bits;
313 int ret, size, max_fdset; 314 int ret, size, max_fdset;
314 struct fdtable *fdt; 315 struct fdtable *fdt;
316 /* Allocate small arguments on the stack to save memory and be faster */
317 char stack_fds[SELECT_STACK_ALLOC];
315 318
316 ret = -EINVAL; 319 ret = -EINVAL;
317 if (n < 0) 320 if (n < 0)
@@ -332,7 +335,10 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
332 */ 335 */
333 ret = -ENOMEM; 336 ret = -ENOMEM;
334 size = FDS_BYTES(n); 337 size = FDS_BYTES(n);
335 bits = select_bits_alloc(size); 338 if (6*size < SELECT_STACK_ALLOC)
339 bits = stack_fds;
340 else
341 bits = kmalloc(6 * size, GFP_KERNEL);
336 if (!bits) 342 if (!bits)
337 goto out_nofds; 343 goto out_nofds;
338 fds.in = (unsigned long *) bits; 344 fds.in = (unsigned long *) bits;
@@ -367,7 +373,8 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
367 ret = -EFAULT; 373 ret = -EFAULT;
368 374
369out: 375out:
370 select_bits_free(bits, size); 376 if (bits != stack_fds)
377 kfree(bits);
371out_nofds: 378out_nofds:
372 return ret; 379 return ret;
373} 380}
@@ -551,14 +558,15 @@ static void do_pollfd(unsigned int num, struct pollfd * fdpage,
551 fdp = fdpage+i; 558 fdp = fdpage+i;
552 fd = fdp->fd; 559 fd = fdp->fd;
553 if (fd >= 0) { 560 if (fd >= 0) {
554 struct file * file = fget(fd); 561 int fput_needed;
562 struct file * file = fget_light(fd, &fput_needed);
555 mask = POLLNVAL; 563 mask = POLLNVAL;
556 if (file != NULL) { 564 if (file != NULL) {
557 mask = DEFAULT_POLLMASK; 565 mask = DEFAULT_POLLMASK;
558 if (file->f_op && file->f_op->poll) 566 if (file->f_op && file->f_op->poll)
559 mask = file->f_op->poll(file, *pwait); 567 mask = file->f_op->poll(file, *pwait);
560 mask &= fdp->events | POLLERR | POLLHUP; 568 mask &= fdp->events | POLLERR | POLLHUP;
561 fput(file); 569 fput_light(file, fput_needed);
562 } 570 }
563 if (mask) { 571 if (mask) {
564 *pwait = NULL; 572 *pwait = NULL;
@@ -619,6 +627,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
619 return count; 627 return count;
620} 628}
621 629
630#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
631 sizeof(struct pollfd))
632
622int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) 633int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
623{ 634{
624 struct poll_wqueues table; 635 struct poll_wqueues table;
@@ -628,6 +639,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
628 struct poll_list *walk; 639 struct poll_list *walk;
629 struct fdtable *fdt; 640 struct fdtable *fdt;
630 int max_fdset; 641 int max_fdset;
642 /* Allocate small arguments on the stack to save memory and be faster */
643 char stack_pps[POLL_STACK_ALLOC];
644 struct poll_list *stack_pp = NULL;
631 645
632 /* Do a sanity check on nfds ... */ 646 /* Do a sanity check on nfds ... */
633 rcu_read_lock(); 647 rcu_read_lock();
@@ -645,14 +659,23 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
645 err = -ENOMEM; 659 err = -ENOMEM;
646 while(i!=0) { 660 while(i!=0) {
647 struct poll_list *pp; 661 struct poll_list *pp;
648 pp = kmalloc(sizeof(struct poll_list)+ 662 int num, size;
649 sizeof(struct pollfd)* 663 if (stack_pp == NULL)
650 (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i), 664 num = N_STACK_PPS;
651 GFP_KERNEL); 665 else
652 if(pp==NULL) 666 num = POLLFD_PER_PAGE;
653 goto out_fds; 667 if (num > i)
668 num = i;
669 size = sizeof(struct poll_list) + sizeof(struct pollfd)*num;
670 if (!stack_pp)
671 stack_pp = pp = (struct poll_list *)stack_pps;
672 else {
673 pp = kmalloc(size, GFP_KERNEL);
674 if (!pp)
675 goto out_fds;
676 }
654 pp->next=NULL; 677 pp->next=NULL;
655 pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i); 678 pp->len = num;
656 if (head == NULL) 679 if (head == NULL)
657 head = pp; 680 head = pp;
658 else 681 else
@@ -660,7 +683,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
660 683
661 walk = pp; 684 walk = pp;
662 if (copy_from_user(pp->entries, ufds + nfds-i, 685 if (copy_from_user(pp->entries, ufds + nfds-i,
663 sizeof(struct pollfd)*pp->len)) { 686 sizeof(struct pollfd)*num)) {
664 err = -EFAULT; 687 err = -EFAULT;
665 goto out_fds; 688 goto out_fds;
666 } 689 }
@@ -689,7 +712,8 @@ out_fds:
689 walk = head; 712 walk = head;
690 while(walk!=NULL) { 713 while(walk!=NULL) {
691 struct poll_list *pp = walk->next; 714 struct poll_list *pp = walk->next;
692 kfree(walk); 715 if (walk != stack_pp)
716 kfree(walk);
693 walk = pp; 717 walk = pp;
694 } 718 }
695 poll_freewait(&table); 719 poll_freewait(&table);
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 0424d06b147e..34c7a11d91f0 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -34,7 +34,7 @@ static int smb_rename(struct inode *, struct dentry *,
34static int smb_make_node(struct inode *,struct dentry *,int,dev_t); 34static int smb_make_node(struct inode *,struct dentry *,int,dev_t);
35static int smb_link(struct dentry *, struct inode *, struct dentry *); 35static int smb_link(struct dentry *, struct inode *, struct dentry *);
36 36
37struct file_operations smb_dir_operations = 37const struct file_operations smb_dir_operations =
38{ 38{
39 .read = generic_read_dir, 39 .read = generic_read_dir,
40 .readdir = smb_readdir, 40 .readdir = smb_readdir,
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index 7042e62726a4..c56bd99a9701 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -401,7 +401,7 @@ smb_file_permission(struct inode *inode, int mask, struct nameidata *nd)
401 return error; 401 return error;
402} 402}
403 403
404struct file_operations smb_file_operations = 404const struct file_operations smb_file_operations =
405{ 405{
406 .llseek = remote_llseek, 406 .llseek = remote_llseek,
407 .read = smb_file_read, 407 .read = smb_file_read,
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
index e866ec8660d0..47664597e6b1 100644
--- a/fs/smbfs/proto.h
+++ b/fs/smbfs/proto.h
@@ -35,7 +35,7 @@ extern int smb_proc_symlink(struct smb_sb_info *server, struct dentry *d, const
35extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry); 35extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry);
36extern void smb_install_null_ops(struct smb_ops *ops); 36extern void smb_install_null_ops(struct smb_ops *ops);
37/* dir.c */ 37/* dir.c */
38extern struct file_operations smb_dir_operations; 38extern const struct file_operations smb_dir_operations;
39extern struct inode_operations smb_dir_inode_operations; 39extern struct inode_operations smb_dir_inode_operations;
40extern struct inode_operations smb_dir_inode_operations_unix; 40extern struct inode_operations smb_dir_inode_operations_unix;
41extern void smb_new_dentry(struct dentry *dentry); 41extern void smb_new_dentry(struct dentry *dentry);
@@ -64,7 +64,7 @@ extern int smb_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
64extern int smb_notify_change(struct dentry *dentry, struct iattr *attr); 64extern int smb_notify_change(struct dentry *dentry, struct iattr *attr);
65/* file.c */ 65/* file.c */
66extern struct address_space_operations smb_file_aops; 66extern struct address_space_operations smb_file_aops;
67extern struct file_operations smb_file_operations; 67extern const struct file_operations smb_file_operations;
68extern struct inode_operations smb_file_inode_operations; 68extern struct inode_operations smb_file_inode_operations;
69/* ioctl.c */ 69/* ioctl.c */
70extern int smb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); 70extern int smb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/fs/super.c b/fs/super.c
index 8743e9bbb297..a66f66bb8049 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -37,6 +37,7 @@
37#include <linux/writeback.h> /* for the emergency remount stuff */ 37#include <linux/writeback.h> /* for the emergency remount stuff */
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/kobject.h> 39#include <linux/kobject.h>
40#include <linux/mutex.h>
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41 42
42 43
@@ -380,9 +381,9 @@ restart:
380void sync_filesystems(int wait) 381void sync_filesystems(int wait)
381{ 382{
382 struct super_block *sb; 383 struct super_block *sb;
383 static DECLARE_MUTEX(mutex); 384 static DEFINE_MUTEX(mutex);
384 385
385 down(&mutex); /* Could be down_interruptible */ 386 mutex_lock(&mutex); /* Could be down_interruptible */
386 spin_lock(&sb_lock); 387 spin_lock(&sb_lock);
387 list_for_each_entry(sb, &super_blocks, s_list) { 388 list_for_each_entry(sb, &super_blocks, s_list) {
388 if (!sb->s_op->sync_fs) 389 if (!sb->s_op->sync_fs)
@@ -411,7 +412,7 @@ restart:
411 goto restart; 412 goto restart;
412 } 413 }
413 spin_unlock(&sb_lock); 414 spin_unlock(&sb_lock);
414 up(&mutex); 415 mutex_unlock(&mutex);
415} 416}
416 417
417/** 418/**
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 78899eeab974..c16a93c353c0 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -163,7 +163,7 @@ static int release(struct inode * inode, struct file * file)
163 return 0; 163 return 0;
164} 164}
165 165
166struct file_operations bin_fops = { 166const struct file_operations bin_fops = {
167 .read = read, 167 .read = read,
168 .write = write, 168 .write = write,
169 .mmap = mmap, 169 .mmap = mmap,
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 9ee956864445..f26880a4785e 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -503,7 +503,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
503 return offset; 503 return offset;
504} 504}
505 505
506struct file_operations sysfs_dir_operations = { 506const struct file_operations sysfs_dir_operations = {
507 .open = sysfs_dir_open, 507 .open = sysfs_dir_open,
508 .release = sysfs_dir_close, 508 .release = sysfs_dir_close,
509 .llseek = sysfs_dir_lseek, 509 .llseek = sysfs_dir_lseek,
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 5e83e7246788..830f76fa098c 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -348,7 +348,7 @@ static int sysfs_release(struct inode * inode, struct file * filp)
348 return 0; 348 return 0;
349} 349}
350 350
351struct file_operations sysfs_file_operations = { 351const struct file_operations sysfs_file_operations = {
352 .read = sysfs_read_file, 352 .read = sysfs_read_file,
353 .write = sysfs_write_file, 353 .write = sysfs_write_file,
354 .llseek = generic_file_llseek, 354 .llseek = generic_file_llseek,
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index cf11d5b789d9..32958a7c50e9 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -21,9 +21,9 @@ extern int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
21 21
22extern struct rw_semaphore sysfs_rename_sem; 22extern struct rw_semaphore sysfs_rename_sem;
23extern struct super_block * sysfs_sb; 23extern struct super_block * sysfs_sb;
24extern struct file_operations sysfs_dir_operations; 24extern const struct file_operations sysfs_dir_operations;
25extern struct file_operations sysfs_file_operations; 25extern const struct file_operations sysfs_file_operations;
26extern struct file_operations bin_fops; 26extern const struct file_operations bin_fops;
27extern struct inode_operations sysfs_dir_inode_operations; 27extern struct inode_operations sysfs_dir_inode_operations;
28extern struct inode_operations sysfs_symlink_inode_operations; 28extern struct inode_operations sysfs_symlink_inode_operations;
29 29
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index cce8b05cba5a..8c66e9270dd6 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -20,7 +20,7 @@
20 20
21static int sysv_readdir(struct file *, void *, filldir_t); 21static int sysv_readdir(struct file *, void *, filldir_t);
22 22
23struct file_operations sysv_dir_operations = { 23const struct file_operations sysv_dir_operations = {
24 .read = generic_read_dir, 24 .read = generic_read_dir,
25 .readdir = sysv_readdir, 25 .readdir = sysv_readdir,
26 .fsync = sysv_sync_file, 26 .fsync = sysv_sync_file,
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index da69abc06240..a59e303135fa 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -19,7 +19,7 @@
19 * We have mostly NULLs here: the current defaults are OK for 19 * We have mostly NULLs here: the current defaults are OK for
20 * the coh filesystem. 20 * the coh filesystem.
21 */ 21 */
22struct file_operations sysv_file_operations = { 22const struct file_operations sysv_file_operations = {
23 .llseek = generic_file_llseek, 23 .llseek = generic_file_llseek,
24 .read = generic_file_read, 24 .read = generic_file_read,
25 .write = generic_file_write, 25 .write = generic_file_write,
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index b7f9b4a42aab..393a480e4deb 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -159,8 +159,8 @@ extern ino_t sysv_inode_by_name(struct dentry *);
159extern struct inode_operations sysv_file_inode_operations; 159extern struct inode_operations sysv_file_inode_operations;
160extern struct inode_operations sysv_dir_inode_operations; 160extern struct inode_operations sysv_dir_inode_operations;
161extern struct inode_operations sysv_fast_symlink_inode_operations; 161extern struct inode_operations sysv_fast_symlink_inode_operations;
162extern struct file_operations sysv_file_operations; 162extern const struct file_operations sysv_file_operations;
163extern struct file_operations sysv_dir_operations; 163extern const struct file_operations sysv_dir_operations;
164extern struct address_space_operations sysv_aops; 164extern struct address_space_operations sysv_aops;
165extern struct super_operations sysv_sops; 165extern struct super_operations sysv_sops;
166extern struct dentry_operations sysv_dentry_operations; 166extern struct dentry_operations sysv_dentry_operations;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index f5222527fe39..8c28efa3b8ff 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -42,7 +42,7 @@ static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *);
42 42
43/* readdir and lookup functions */ 43/* readdir and lookup functions */
44 44
45struct file_operations udf_dir_operations = { 45const struct file_operations udf_dir_operations = {
46 .read = generic_read_dir, 46 .read = generic_read_dir,
47 .readdir = udf_readdir, 47 .readdir = udf_readdir,
48 .ioctl = udf_ioctl, 48 .ioctl = udf_ioctl,
diff --git a/fs/udf/file.c b/fs/udf/file.c
index a6f2acc1f15c..e34b00e303f1 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -248,7 +248,7 @@ static int udf_release_file(struct inode * inode, struct file * filp)
248 return 0; 248 return 0;
249} 249}
250 250
251struct file_operations udf_file_operations = { 251const struct file_operations udf_file_operations = {
252 .read = generic_file_read, 252 .read = generic_file_read,
253 .ioctl = udf_ioctl, 253 .ioctl = udf_ioctl,
254 .open = generic_file_open, 254 .open = generic_file_open,
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 1d5800e0cbe7..023e19ba5a2e 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -44,9 +44,9 @@ struct buffer_head;
44struct super_block; 44struct super_block;
45 45
46extern struct inode_operations udf_dir_inode_operations; 46extern struct inode_operations udf_dir_inode_operations;
47extern struct file_operations udf_dir_operations; 47extern const struct file_operations udf_dir_operations;
48extern struct inode_operations udf_file_inode_operations; 48extern struct inode_operations udf_file_inode_operations;
49extern struct file_operations udf_file_operations; 49extern const struct file_operations udf_file_operations;
50extern struct address_space_operations udf_aops; 50extern struct address_space_operations udf_aops;
51extern struct address_space_operations udf_adinicb_aops; 51extern struct address_space_operations udf_adinicb_aops;
52extern struct address_space_operations udf_symlink_aops; 52extern struct address_space_operations udf_symlink_aops;
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 7c10c68902ae..1a561202d3f4 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -620,7 +620,7 @@ int ufs_empty_dir (struct inode * inode)
620 return 1; 620 return 1;
621} 621}
622 622
623struct file_operations ufs_dir_operations = { 623const struct file_operations ufs_dir_operations = {
624 .read = generic_read_dir, 624 .read = generic_read_dir,
625 .readdir = ufs_readdir, 625 .readdir = ufs_readdir,
626 .fsync = file_fsync, 626 .fsync = file_fsync,
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index 62ad481810ef..312fd3f86313 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -31,7 +31,7 @@
31 * the ufs filesystem. 31 * the ufs filesystem.
32 */ 32 */
33 33
34struct file_operations ufs_file_operations = { 34const struct file_operations ufs_file_operations = {
35 .llseek = generic_file_llseek, 35 .llseek = generic_file_llseek,
36 .read = generic_file_read, 36 .read = generic_file_read,
37 .write = generic_file_write, 37 .write = generic_file_write,
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 97fc056130eb..c02f7c5b7462 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1310,20 +1310,21 @@ xfs_get_block(
1310 struct buffer_head *bh_result, 1310 struct buffer_head *bh_result,
1311 int create) 1311 int create)
1312{ 1312{
1313 return __xfs_get_block(inode, iblock, 0, bh_result, 1313 return __xfs_get_block(inode, iblock,
1314 create, 0, BMAPI_WRITE); 1314 bh_result->b_size >> inode->i_blkbits,
1315 bh_result, create, 0, BMAPI_WRITE);
1315} 1316}
1316 1317
1317STATIC int 1318STATIC int
1318xfs_get_blocks_direct( 1319xfs_get_blocks_direct(
1319 struct inode *inode, 1320 struct inode *inode,
1320 sector_t iblock, 1321 sector_t iblock,
1321 unsigned long max_blocks,
1322 struct buffer_head *bh_result, 1322 struct buffer_head *bh_result,
1323 int create) 1323 int create)
1324{ 1324{
1325 return __xfs_get_block(inode, iblock, max_blocks, bh_result, 1325 return __xfs_get_block(inode, iblock,
1326 create, 1, BMAPI_WRITE|BMAPI_DIRECT); 1326 bh_result->b_size >> inode->i_blkbits,
1327 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1327} 1328}
1328 1329
1329STATIC void 1330STATIC void
@@ -1442,14 +1443,14 @@ xfs_vm_readpages(
1442 return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); 1443 return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
1443} 1444}
1444 1445
1445STATIC int 1446STATIC void
1446xfs_vm_invalidatepage( 1447xfs_vm_invalidatepage(
1447 struct page *page, 1448 struct page *page,
1448 unsigned long offset) 1449 unsigned long offset)
1449{ 1450{
1450 xfs_page_trace(XFS_INVALIDPAGE_ENTER, 1451 xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1451 page->mapping->host, page, offset); 1452 page->mapping->host, page, offset);
1452 return block_invalidatepage(page, offset); 1453 block_invalidatepage(page, offset);
1453} 1454}
1454 1455
1455struct address_space_operations xfs_address_space_operations = { 1456struct address_space_operations xfs_address_space_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 185567a6a561..85997b1205f5 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -528,7 +528,7 @@ open_exec_out:
528} 528}
529#endif /* HAVE_FOP_OPEN_EXEC */ 529#endif /* HAVE_FOP_OPEN_EXEC */
530 530
531struct file_operations xfs_file_operations = { 531const struct file_operations xfs_file_operations = {
532 .llseek = generic_file_llseek, 532 .llseek = generic_file_llseek,
533 .read = do_sync_read, 533 .read = do_sync_read,
534 .write = do_sync_write, 534 .write = do_sync_write,
@@ -550,7 +550,7 @@ struct file_operations xfs_file_operations = {
550#endif 550#endif
551}; 551};
552 552
553struct file_operations xfs_invis_file_operations = { 553const struct file_operations xfs_invis_file_operations = {
554 .llseek = generic_file_llseek, 554 .llseek = generic_file_llseek,
555 .read = do_sync_read, 555 .read = do_sync_read,
556 .write = do_sync_write, 556 .write = do_sync_write,
@@ -570,7 +570,7 @@ struct file_operations xfs_invis_file_operations = {
570}; 570};
571 571
572 572
573struct file_operations xfs_dir_file_operations = { 573const struct file_operations xfs_dir_file_operations = {
574 .read = generic_read_dir, 574 .read = generic_read_dir,
575 .readdir = xfs_file_readdir, 575 .readdir = xfs_file_readdir,
576 .unlocked_ioctl = xfs_file_ioctl, 576 .unlocked_ioctl = xfs_file_ioctl,
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index a8417d7af5f9..ad6173da5678 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -22,9 +22,9 @@ extern struct inode_operations xfs_inode_operations;
22extern struct inode_operations xfs_dir_inode_operations; 22extern struct inode_operations xfs_dir_inode_operations;
23extern struct inode_operations xfs_symlink_inode_operations; 23extern struct inode_operations xfs_symlink_inode_operations;
24 24
25extern struct file_operations xfs_file_operations; 25extern const struct file_operations xfs_file_operations;
26extern struct file_operations xfs_dir_file_operations; 26extern const struct file_operations xfs_dir_file_operations;
27extern struct file_operations xfs_invis_file_operations; 27extern const struct file_operations xfs_invis_file_operations;
28 28
29extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, 29extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
30 int, unsigned int, void __user *); 30 int, unsigned int, void __user *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 8355faf8ffde..1884300417e3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -375,9 +375,8 @@ xfs_init_zones(void)
375 if (!xfs_ioend_zone) 375 if (!xfs_ioend_zone)
376 goto out_destroy_vnode_zone; 376 goto out_destroy_vnode_zone;
377 377
378 xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, 378 xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
379 mempool_alloc_slab, mempool_free_slab, 379 xfs_ioend_zone);
380 xfs_ioend_zone);
381 if (!xfs_ioend_pool) 380 if (!xfs_ioend_pool)
382 goto out_free_ioend_zone; 381 goto out_free_ioend_zone;
383 return 0; 382 return 0;
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 302201f1a097..3f88715e811e 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x)
261 261
262static inline unsigned long ffz(unsigned long word) 262static inline unsigned long ffz(unsigned long word)
263{ 263{
264#if defined(__alpha_cix__) && defined(__alpha_fix__) 264#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
265 /* Whee. EV67 can calculate it directly. */ 265 /* Whee. EV67 can calculate it directly. */
266 return __kernel_cttz(~word); 266 return __kernel_cttz(~word);
267#else 267#else
@@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word)
281 */ 281 */
282static inline unsigned long __ffs(unsigned long word) 282static inline unsigned long __ffs(unsigned long word)
283{ 283{
284#if defined(__alpha_cix__) && defined(__alpha_fix__) 284#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
285 /* Whee. EV67 can calculate it directly. */ 285 /* Whee. EV67 can calculate it directly. */
286 return __kernel_cttz(word); 286 return __kernel_cttz(word);
287#else 287#else
@@ -313,20 +313,20 @@ static inline int ffs(int word)
313/* 313/*
314 * fls: find last bit set. 314 * fls: find last bit set.
315 */ 315 */
316#if defined(__alpha_cix__) && defined(__alpha_fix__) 316#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
317static inline int fls(int word) 317static inline int fls(int word)
318{ 318{
319 return 64 - __kernel_ctlz(word & 0xffffffff); 319 return 64 - __kernel_ctlz(word & 0xffffffff);
320} 320}
321#else 321#else
322#define fls generic_fls 322#include <asm-generic/bitops/fls.h>
323#endif 323#endif
324#define fls64 generic_fls64 324#include <asm-generic/bitops/fls64.h>
325 325
326/* Compute powers of two for the given integer. */ 326/* Compute powers of two for the given integer. */
327static inline long floor_log2(unsigned long word) 327static inline long floor_log2(unsigned long word)
328{ 328{
329#if defined(__alpha_cix__) && defined(__alpha_fix__) 329#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
330 return 63 - __kernel_ctlz(word); 330 return 63 - __kernel_ctlz(word);
331#else 331#else
332 long bit; 332 long bit;
@@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word)
347 * of bits set) of a N-bit word 347 * of bits set) of a N-bit word
348 */ 348 */
349 349
350#if defined(__alpha_cix__) && defined(__alpha_fix__) 350#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
351/* Whee. EV67 can calculate it directly. */ 351/* Whee. EV67 can calculate it directly. */
352static inline unsigned long hweight64(unsigned long w) 352static inline unsigned long hweight64(unsigned long w)
353{ 353{
@@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w)
358#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) 358#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
359#define hweight8(x) (unsigned int) hweight64((x) & 0xfful) 359#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
360#else 360#else
361static inline unsigned long hweight64(unsigned long w) 361#include <asm-generic/bitops/hweight.h>
362{
363 unsigned long result;
364 for (result = 0; w ; w >>= 1)
365 result += (w & 1);
366 return result;
367}
368
369#define hweight32(x) generic_hweight32(x)
370#define hweight16(x) generic_hweight16(x)
371#define hweight8(x) generic_hweight8(x)
372#endif 362#endif
373 363
374#endif /* __KERNEL__ */ 364#endif /* __KERNEL__ */
375 365
376/* 366#include <asm-generic/bitops/find.h>
377 * Find next zero bit in a bitmap reasonably efficiently..
378 */
379static inline unsigned long
380find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
381{
382 const unsigned long *p = addr;
383 unsigned long result = offset & ~63UL;
384 unsigned long tmp;
385
386 p += offset >> 6;
387 if (offset >= size)
388 return size;
389 size -= result;
390 offset &= 63UL;
391 if (offset) {
392 tmp = *(p++);
393 tmp |= ~0UL >> (64-offset);
394 if (size < 64)
395 goto found_first;
396 if (~tmp)
397 goto found_middle;
398 size -= 64;
399 result += 64;
400 }
401 while (size & ~63UL) {
402 if (~(tmp = *(p++)))
403 goto found_middle;
404 result += 64;
405 size -= 64;
406 }
407 if (!size)
408 return result;
409 tmp = *p;
410 found_first:
411 tmp |= ~0UL << size;
412 if (tmp == ~0UL) /* Are any bits zero? */
413 return result + size; /* Nope. */
414 found_middle:
415 return result + ffz(tmp);
416}
417
418/*
419 * Find next one bit in a bitmap reasonably efficiently.
420 */
421static inline unsigned long
422find_next_bit(const void * addr, unsigned long size, unsigned long offset)
423{
424 const unsigned long *p = addr;
425 unsigned long result = offset & ~63UL;
426 unsigned long tmp;
427
428 p += offset >> 6;
429 if (offset >= size)
430 return size;
431 size -= result;
432 offset &= 63UL;
433 if (offset) {
434 tmp = *(p++);
435 tmp &= ~0UL << offset;
436 if (size < 64)
437 goto found_first;
438 if (tmp)
439 goto found_middle;
440 size -= 64;
441 result += 64;
442 }
443 while (size & ~63UL) {
444 if ((tmp = *(p++)))
445 goto found_middle;
446 result += 64;
447 size -= 64;
448 }
449 if (!size)
450 return result;
451 tmp = *p;
452 found_first:
453 tmp &= ~0UL >> (64 - size);
454 if (!tmp)
455 return result + size;
456 found_middle:
457 return result + __ffs(tmp);
458}
459
460/*
461 * The optimizer actually does good code for this case.
462 */
463#define find_first_zero_bit(addr, size) \
464 find_next_zero_bit((addr), (size), 0)
465#define find_first_bit(addr, size) \
466 find_next_bit((addr), (size), 0)
467 367
468#ifdef __KERNEL__ 368#ifdef __KERNEL__
469 369
@@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3])
487 return __ffs(b0) + ofs; 387 return __ffs(b0) + ofs;
488} 388}
489 389
390#include <asm-generic/bitops/ext2-non-atomic.h>
490 391
491#define ext2_set_bit __test_and_set_bit
492#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) 392#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
493#define ext2_clear_bit __test_and_clear_bit
494#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) 393#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
495#define ext2_test_bit test_bit 394
496#define ext2_find_first_zero_bit find_first_zero_bit 395#include <asm-generic/bitops/minix.h>
497#define ext2_find_next_zero_bit find_next_zero_bit
498
499/* Bitmap functions for the minix filesystem. */
500#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
501#define minix_set_bit(nr,addr) __set_bit(nr,addr)
502#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
503#define minix_test_bit(nr,addr) test_bit(nr,addr)
504#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
505 396
506#endif /* __KERNEL__ */ 397#endif /* __KERNEL__ */
507 398
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index c203fc2fa5cd..ecb17a72acc3 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -130,7 +130,7 @@ rdfpcr(void)
130{ 130{
131 unsigned long tmp, ret; 131 unsigned long tmp, ret;
132 132
133#if defined(__alpha_cix__) || defined(__alpha_fix__) 133#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
134 __asm__ __volatile__ ( 134 __asm__ __volatile__ (
135 "ftoit $f0,%0\n\t" 135 "ftoit $f0,%0\n\t"
136 "mf_fpcr $f0\n\t" 136 "mf_fpcr $f0\n\t"
@@ -154,7 +154,7 @@ wrfpcr(unsigned long val)
154{ 154{
155 unsigned long tmp; 155 unsigned long tmp;
156 156
157#if defined(__alpha_cix__) || defined(__alpha_fix__) 157#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
158 __asm__ __volatile__ ( 158 __asm__ __volatile__ (
159 "ftoit $f0,%0\n\t" 159 "ftoit $f0,%0\n\t"
160 "itoft %1,$f0\n\t" 160 "itoft %1,$f0\n\t"
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
index a011ef4cf3d3..192d80c875b0 100644
--- a/include/asm-alpha/mmzone.h
+++ b/include/asm-alpha/mmzone.h
@@ -59,9 +59,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
59#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) 59#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
60#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) 60#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
61 61
62#define local_mapnr(kvaddr) \
63 ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
64
65/* 62/*
66 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory 63 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
67 * and returns the kaddr corresponding to first physical page in the 64 * and returns the kaddr corresponding to first physical page in the
@@ -86,8 +83,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
86 pte_t pte; \ 83 pte_t pte; \
87 unsigned long pfn; \ 84 unsigned long pfn; \
88 \ 85 \
89 pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \ 86 pfn = page_to_pfn(page) << 32; \
90 pfn += page_zone(page)->zone_start_pfn << 32; \
91 pte_val(pte) = pfn | pgprot_val(pgprot); \ 87 pte_val(pte) = pfn | pgprot_val(pgprot); \
92 \ 88 \
93 pte; \ 89 pte; \
@@ -104,19 +100,8 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
104 __xx; \ 100 __xx; \
105}) 101})
106 102
107#define pfn_to_page(pfn) \
108({ \
109 unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \
110 (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \
111})
112
113#define page_to_pfn(page) \
114 ((page) - page_zone(page)->zone_mem_map + \
115 (page_zone(page)->zone_start_pfn))
116
117#define page_to_pa(page) \ 103#define page_to_pa(page) \
118 ((( (page) - page_zone(page)->zone_mem_map ) \ 104 (page_to_pfn(page) << PAGE_SHIFT)
119 + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
120 105
121#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) 106#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
122#define pfn_valid(pfn) \ 107#define pfn_valid(pfn) \
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index fa0b41b164a7..61bcf70b5eac 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -85,8 +85,6 @@ typedef unsigned long pgprot_t;
85#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 85#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
86#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 86#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
87#ifndef CONFIG_DISCONTIGMEM 87#ifndef CONFIG_DISCONTIGMEM
88#define pfn_to_page(pfn) (mem_map + (pfn))
89#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
90#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 88#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
91 89
92#define pfn_valid(pfn) ((pfn) < max_mapnr) 90#define pfn_valid(pfn) ((pfn) < max_mapnr)
@@ -95,9 +93,9 @@ typedef unsigned long pgprot_t;
95 93
96#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 94#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
97 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 95 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
98
99#endif /* __KERNEL__ */ 96#endif /* __KERNEL__ */
100 97
98#include <asm-generic/memory_model.h>
101#include <asm-generic/page.h> 99#include <asm-generic/page.h>
102 100
103#endif /* _ALPHA_PAGE_H */ 101#endif /* _ALPHA_PAGE_H */
diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h
index 95707182b3ed..76f89356b6a7 100644
--- a/include/asm-alpha/poll.h
+++ b/include/asm-alpha/poll.h
@@ -12,8 +12,8 @@
12#define POLLWRNORM (1 << 8) 12#define POLLWRNORM (1 << 8)
13#define POLLWRBAND (1 << 9) 13#define POLLWRBAND (1 << 9)
14#define POLLMSG (1 << 10) 14#define POLLMSG (1 << 10)
15#define POLLREMOVE (1 << 11) 15#define POLLREMOVE (1 << 12)
16#define POLLRDHUP (1 << 12) 16#define POLLRDHUP (1 << 13)
17 17
18 18
19struct pollfd { 19struct pollfd {
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index d02de721ecc1..0ac54b1a8bad 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
117 return res & mask; 117 return res & mask;
118} 118}
119 119
120/* 120#include <asm-generic/bitops/non-atomic.h>
121 * Now the non-atomic variants. We let the compiler handle all
122 * optimisations for these. These are all _native_ endian.
123 */
124static inline void __set_bit(int nr, volatile unsigned long *p)
125{
126 p[nr >> 5] |= (1UL << (nr & 31));
127}
128
129static inline void __clear_bit(int nr, volatile unsigned long *p)
130{
131 p[nr >> 5] &= ~(1UL << (nr & 31));
132}
133
134static inline void __change_bit(int nr, volatile unsigned long *p)
135{
136 p[nr >> 5] ^= (1UL << (nr & 31));
137}
138
139static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
140{
141 unsigned long oldval, mask = 1UL << (nr & 31);
142
143 p += nr >> 5;
144
145 oldval = *p;
146 *p = oldval | mask;
147 return oldval & mask;
148}
149
150static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
151{
152 unsigned long oldval, mask = 1UL << (nr & 31);
153
154 p += nr >> 5;
155
156 oldval = *p;
157 *p = oldval & ~mask;
158 return oldval & mask;
159}
160
161static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
162{
163 unsigned long oldval, mask = 1UL << (nr & 31);
164
165 p += nr >> 5;
166
167 oldval = *p;
168 *p = oldval ^ mask;
169 return oldval & mask;
170}
171
172/*
173 * This routine doesn't need to be atomic.
174 */
175static inline int __test_bit(int nr, const volatile unsigned long * p)
176{
177 return (p[nr >> 5] >> (nr & 31)) & 1UL;
178}
179 121
180/* 122/*
181 * A note about Endian-ness. 123 * A note about Endian-ness.
@@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
261#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) 203#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
262#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) 204#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
263#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) 205#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
264#define test_bit(nr,p) __test_bit(nr,p)
265#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) 206#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
266#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) 207#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
267#define find_first_bit(p,sz) _find_first_bit_le(p,sz) 208#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
280#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) 221#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
281#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) 222#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
282#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) 223#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
283#define test_bit(nr,p) __test_bit(nr,p)
284#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) 224#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
285#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) 225#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
286#define find_first_bit(p,sz) _find_first_bit_be(p,sz) 226#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
@@ -292,57 +232,41 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
292 232
293#if __LINUX_ARM_ARCH__ < 5 233#if __LINUX_ARM_ARCH__ < 5
294 234
295/* 235#include <asm-generic/bitops/ffz.h>
296 * ffz = Find First Zero in word. Undefined if no zero exists, 236#include <asm-generic/bitops/__ffs.h>
297 * so code should check against ~0UL first.. 237#include <asm-generic/bitops/fls.h>
298 */ 238#include <asm-generic/bitops/ffs.h>
299static inline unsigned long ffz(unsigned long word)
300{
301 int k;
302
303 word = ~word;
304 k = 31;
305 if (word & 0x0000ffff) { k -= 16; word <<= 16; }
306 if (word & 0x00ff0000) { k -= 8; word <<= 8; }
307 if (word & 0x0f000000) { k -= 4; word <<= 4; }
308 if (word & 0x30000000) { k -= 2; word <<= 2; }
309 if (word & 0x40000000) { k -= 1; }
310 return k;
311}
312
313/*
314 * ffz = Find First Zero in word. Undefined if no zero exists,
315 * so code should check against ~0UL first..
316 */
317static inline unsigned long __ffs(unsigned long word)
318{
319 int k;
320
321 k = 31;
322 if (word & 0x0000ffff) { k -= 16; word <<= 16; }
323 if (word & 0x00ff0000) { k -= 8; word <<= 8; }
324 if (word & 0x0f000000) { k -= 4; word <<= 4; }
325 if (word & 0x30000000) { k -= 2; word <<= 2; }
326 if (word & 0x40000000) { k -= 1; }
327 return k;
328}
329
330/*
331 * fls: find last bit set.
332 */
333 239
334#define fls(x) generic_fls(x) 240#else
335#define fls64(x) generic_fls64(x)
336
337/*
338 * ffs: find first bit set. This is defined the same way as
339 * the libc and compiler builtin ffs routines, therefore
340 * differs in spirit from the above ffz (man ffs).
341 */
342 241
343#define ffs(x) generic_ffs(x) 242static inline int constant_fls(int x)
243{
244 int r = 32;
344 245
345#else 246 if (!x)
247 return 0;
248 if (!(x & 0xffff0000u)) {
249 x <<= 16;
250 r -= 16;
251 }
252 if (!(x & 0xff000000u)) {
253 x <<= 8;
254 r -= 8;
255 }
256 if (!(x & 0xf0000000u)) {
257 x <<= 4;
258 r -= 4;
259 }
260 if (!(x & 0xc0000000u)) {
261 x <<= 2;
262 r -= 2;
263 }
264 if (!(x & 0x80000000u)) {
265 x <<= 1;
266 r -= 1;
267 }
268 return r;
269}
346 270
347/* 271/*
348 * On ARMv5 and above those functions can be implemented around 272 * On ARMv5 and above those functions can be implemented around
@@ -350,39 +274,18 @@ static inline unsigned long __ffs(unsigned long word)
350 */ 274 */
351 275
352#define fls(x) \ 276#define fls(x) \
353 ( __builtin_constant_p(x) ? generic_fls(x) : \ 277 ( __builtin_constant_p(x) ? constant_fls(x) : \
354 ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) 278 ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
355#define fls64(x) generic_fls64(x)
356#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) 279#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
357#define __ffs(x) (ffs(x) - 1) 280#define __ffs(x) (ffs(x) - 1)
358#define ffz(x) __ffs( ~(x) ) 281#define ffz(x) __ffs( ~(x) )
359 282
360#endif 283#endif
361 284
362/* 285#include <asm-generic/bitops/fls64.h>
363 * Find first bit set in a 168-bit bitmap, where the first
364 * 128 bits are unlikely to be set.
365 */
366static inline int sched_find_first_bit(const unsigned long *b)
367{
368 unsigned long v;
369 unsigned int off;
370
371 for (off = 0; v = b[off], off < 4; off++) {
372 if (unlikely(v))
373 break;
374 }
375 return __ffs(v) + off * 32;
376}
377
378/*
379 * hweightN: returns the hamming weight (i.e. the number
380 * of bits set) of a N-bit word
381 */
382 286
383#define hweight32(x) generic_hweight32(x) 287#include <asm-generic/bitops/sched.h>
384#define hweight16(x) generic_hweight16(x) 288#include <asm-generic/bitops/hweight.h>
385#define hweight8(x) generic_hweight8(x)
386 289
387/* 290/*
388 * Ext2 is defined to use little-endian byte ordering. 291 * Ext2 is defined to use little-endian byte ordering.
@@ -397,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
397#define ext2_clear_bit_atomic(lock,nr,p) \ 300#define ext2_clear_bit_atomic(lock,nr,p) \
398 test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 301 test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
399#define ext2_test_bit(nr,p) \ 302#define ext2_test_bit(nr,p) \
400 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 303 test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
401#define ext2_find_first_zero_bit(p,sz) \ 304#define ext2_find_first_zero_bit(p,sz) \
402 _find_first_zero_bit_le(p,sz) 305 _find_first_zero_bit_le(p,sz)
403#define ext2_find_next_zero_bit(p,sz,off) \ 306#define ext2_find_next_zero_bit(p,sz,off) \
@@ -410,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
410#define minix_set_bit(nr,p) \ 313#define minix_set_bit(nr,p) \
411 __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 314 __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
412#define minix_test_bit(nr,p) \ 315#define minix_test_bit(nr,p) \
413 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 316 test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
414#define minix_test_and_set_bit(nr,p) \ 317#define minix_test_and_set_bit(nr,p) \
415 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 318 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
416#define minix_test_and_clear_bit(nr,p) \ 319#define minix_test_and_clear_bit(nr,p) \
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index b4e1146ab682..afa5c3ea077c 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -172,9 +172,7 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
172 * virt_addr_valid(k) indicates whether a virtual address is valid 172 * virt_addr_valid(k) indicates whether a virtual address is valid
173 */ 173 */
174#ifndef CONFIG_DISCONTIGMEM 174#ifndef CONFIG_DISCONTIGMEM
175 175#define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET)
176#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET)
177#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET)
178#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) 176#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
179 177
180#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) 178#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
@@ -189,13 +187,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
189 * around in memory. 187 * around in memory.
190 */ 188 */
191#include <linux/numa.h> 189#include <linux/numa.h>
192 190#define arch_pfn_to_nid(pfn) (PFN_TO_NID(pfn))
193#define page_to_pfn(page) \ 191#define arch_local_page_offset(pfn, nid) (LOCAL_MAP_NR((pfn) << PAGE_OFFSET))
194 (( (page) - page_zone(page)->zone_mem_map) \
195 + page_zone(page)->zone_start_pfn)
196
197#define pfn_to_page(pfn) \
198 (PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
199 192
200#define pfn_valid(pfn) \ 193#define pfn_valid(pfn) \
201 ({ \ 194 ({ \
@@ -243,4 +236,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
243 236
244#endif 237#endif
245 238
239#include <asm-generic/memory_model.h>
240
246#endif 241#endif
diff --git a/include/asm-arm/rtc.h b/include/asm-arm/rtc.h
index 370dfe77589d..1a5c9232a91e 100644
--- a/include/asm-arm/rtc.h
+++ b/include/asm-arm/rtc.h
@@ -25,9 +25,6 @@ struct rtc_ops {
25 int (*proc)(char *buf); 25 int (*proc)(char *buf);
26}; 26};
27 27
28void rtc_time_to_tm(unsigned long, struct rtc_time *);
29int rtc_tm_to_time(struct rtc_time *, unsigned long *);
30int rtc_valid_tm(struct rtc_time *);
31void rtc_next_alarm_time(struct rtc_time *, struct rtc_time *, struct rtc_time *); 28void rtc_next_alarm_time(struct rtc_time *, struct rtc_time *, struct rtc_time *);
32void rtc_update(unsigned long, unsigned long); 29void rtc_update(unsigned long, unsigned long);
33int register_rtc(struct rtc_ops *); 30int register_rtc(struct rtc_ops *);
diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h
index d87f8634e625..19a69573a654 100644
--- a/include/asm-arm26/bitops.h
+++ b/include/asm-arm26/bitops.h
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
117 return res & mask; 117 return res & mask;
118} 118}
119 119
120/* 120#include <asm-generic/bitops/non-atomic.h>
121 * Now the non-atomic variants. We let the compiler handle all
122 * optimisations for these. These are all _native_ endian.
123 */
124static inline void __set_bit(int nr, volatile unsigned long *p)
125{
126 p[nr >> 5] |= (1UL << (nr & 31));
127}
128
129static inline void __clear_bit(int nr, volatile unsigned long *p)
130{
131 p[nr >> 5] &= ~(1UL << (nr & 31));
132}
133
134static inline void __change_bit(int nr, volatile unsigned long *p)
135{
136 p[nr >> 5] ^= (1UL << (nr & 31));
137}
138
139static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
140{
141 unsigned long oldval, mask = 1UL << (nr & 31);
142
143 p += nr >> 5;
144
145 oldval = *p;
146 *p = oldval | mask;
147 return oldval & mask;
148}
149
150static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
151{
152 unsigned long oldval, mask = 1UL << (nr & 31);
153
154 p += nr >> 5;
155
156 oldval = *p;
157 *p = oldval & ~mask;
158 return oldval & mask;
159}
160
161static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
162{
163 unsigned long oldval, mask = 1UL << (nr & 31);
164
165 p += nr >> 5;
166
167 oldval = *p;
168 *p = oldval ^ mask;
169 return oldval & mask;
170}
171
172/*
173 * This routine doesn't need to be atomic.
174 */
175static inline int __test_bit(int nr, const volatile unsigned long * p)
176{
177 return (p[nr >> 5] >> (nr & 31)) & 1UL;
178}
179 121
180/* 122/*
181 * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 123 * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
@@ -211,7 +153,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
211#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) 153#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
212#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) 154#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
213#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) 155#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
214#define test_bit(nr,p) __test_bit(nr,p)
215#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) 156#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
216#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) 157#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
217#define find_first_bit(p,sz) _find_first_bit_le(p,sz) 158#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -219,80 +160,13 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
219 160
220#define WORD_BITOFF_TO_LE(x) ((x)) 161#define WORD_BITOFF_TO_LE(x) ((x))
221 162
222/* 163#include <asm-generic/bitops/ffz.h>
223 * ffz = Find First Zero in word. Undefined if no zero exists, 164#include <asm-generic/bitops/__ffs.h>
224 * so code should check against ~0UL first.. 165#include <asm-generic/bitops/fls.h>
225 */ 166#include <asm-generic/bitops/fls64.h>
226static inline unsigned long ffz(unsigned long word) 167#include <asm-generic/bitops/ffs.h>
227{ 168#include <asm-generic/bitops/sched.h>
228 int k; 169#include <asm-generic/bitops/hweight.h>
229
230 word = ~word;
231 k = 31;
232 if (word & 0x0000ffff) { k -= 16; word <<= 16; }
233 if (word & 0x00ff0000) { k -= 8; word <<= 8; }
234 if (word & 0x0f000000) { k -= 4; word <<= 4; }
235 if (word & 0x30000000) { k -= 2; word <<= 2; }
236 if (word & 0x40000000) { k -= 1; }
237 return k;
238}
239
240/*
241 * ffz = Find First Zero in word. Undefined if no zero exists,
242 * so code should check against ~0UL first..
243 */
244static inline unsigned long __ffs(unsigned long word)
245{
246 int k;
247
248 k = 31;
249 if (word & 0x0000ffff) { k -= 16; word <<= 16; }
250 if (word & 0x00ff0000) { k -= 8; word <<= 8; }
251 if (word & 0x0f000000) { k -= 4; word <<= 4; }
252 if (word & 0x30000000) { k -= 2; word <<= 2; }
253 if (word & 0x40000000) { k -= 1; }
254 return k;
255}
256
257/*
258 * fls: find last bit set.
259 */
260
261#define fls(x) generic_fls(x)
262#define fls64(x) generic_fls64(x)
263
264/*
265 * ffs: find first bit set. This is defined the same way as
266 * the libc and compiler builtin ffs routines, therefore
267 * differs in spirit from the above ffz (man ffs).
268 */
269
270#define ffs(x) generic_ffs(x)
271
272/*
273 * Find first bit set in a 168-bit bitmap, where the first
274 * 128 bits are unlikely to be set.
275 */
276static inline int sched_find_first_bit(unsigned long *b)
277{
278 unsigned long v;
279 unsigned int off;
280
281 for (off = 0; v = b[off], off < 4; off++) {
282 if (unlikely(v))
283 break;
284 }
285 return __ffs(v) + off * 32;
286}
287
288/*
289 * hweightN: returns the hamming weight (i.e. the number
290 * of bits set) of a N-bit word
291 */
292
293#define hweight32(x) generic_hweight32(x)
294#define hweight16(x) generic_hweight16(x)
295#define hweight8(x) generic_hweight8(x)
296 170
297/* 171/*
298 * Ext2 is defined to use little-endian byte ordering. 172 * Ext2 is defined to use little-endian byte ordering.
@@ -307,7 +181,7 @@ static inline int sched_find_first_bit(unsigned long *b)
307#define ext2_clear_bit_atomic(lock,nr,p) \ 181#define ext2_clear_bit_atomic(lock,nr,p) \
308 test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 182 test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
309#define ext2_test_bit(nr,p) \ 183#define ext2_test_bit(nr,p) \
310 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 184 test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
311#define ext2_find_first_zero_bit(p,sz) \ 185#define ext2_find_first_zero_bit(p,sz) \
312 _find_first_zero_bit_le(p,sz) 186 _find_first_zero_bit_le(p,sz)
313#define ext2_find_next_zero_bit(p,sz,off) \ 187#define ext2_find_next_zero_bit(p,sz,off) \
@@ -320,7 +194,7 @@ static inline int sched_find_first_bit(unsigned long *b)
320#define minix_set_bit(nr,p) \ 194#define minix_set_bit(nr,p) \
321 __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 195 __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
322#define minix_test_bit(nr,p) \ 196#define minix_test_bit(nr,p) \
323 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 197 test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
324#define minix_test_and_set_bit(nr,p) \ 198#define minix_test_and_set_bit(nr,p) \
325 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 199 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
326#define minix_test_and_clear_bit(nr,p) \ 200#define minix_test_and_clear_bit(nr,p) \
diff --git a/include/asm-arm26/memory.h b/include/asm-arm26/memory.h
index 20d78616f650..a65f10b80dfb 100644
--- a/include/asm-arm26/memory.h
+++ b/include/asm-arm26/memory.h
@@ -81,8 +81,7 @@ static inline void *phys_to_virt(unsigned long x)
81 * virt_to_page(k) convert a _valid_ virtual address to struct page * 81 * virt_to_page(k) convert a _valid_ virtual address to struct page *
82 * virt_addr_valid(k) indicates whether a virtual address is valid 82 * virt_addr_valid(k) indicates whether a virtual address is valid
83 */ 83 */
84#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) 84#define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET)
85#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET)
86#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) 85#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
87 86
88#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) 87#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
@@ -98,4 +97,5 @@ static inline void *phys_to_virt(unsigned long x)
98 */ 97 */
99#define page_to_bus(page) (page_address(page)) 98#define page_to_bus(page) (page_address(page))
100 99
100#include <asm-generic/memory_model.h>
101#endif 101#endif
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index b7fef1572dc0..a569065113d9 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -39,8 +39,6 @@ struct __dummy { unsigned long a[100]; };
39 39
40#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) 40#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
41 41
42#define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr)
43
44/* 42/*
45 * clear_bit - Clears a bit in memory 43 * clear_bit - Clears a bit in memory
46 * @nr: Bit to clear 44 * @nr: Bit to clear
@@ -54,8 +52,6 @@ struct __dummy { unsigned long a[100]; };
54 52
55#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) 53#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
56 54
57#define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr)
58
59/* 55/*
60 * change_bit - Toggle a bit in memory 56 * change_bit - Toggle a bit in memory
61 * @nr: Bit to change 57 * @nr: Bit to change
@@ -68,18 +64,6 @@ struct __dummy { unsigned long a[100]; };
68 64
69#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) 65#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
70 66
71/*
72 * __change_bit - Toggle a bit in memory
73 * @nr: the bit to change
74 * @addr: the address to start counting from
75 *
76 * Unlike change_bit(), this function is non-atomic and may be reordered.
77 * If it's called on the same region of memory simultaneously, the effect
78 * may be that only one operation succeeds.
79 */
80
81#define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr)
82
83/** 67/**
84 * test_and_set_bit - Set a bit and return its old value 68 * test_and_set_bit - Set a bit and return its old value
85 * @nr: Bit to set 69 * @nr: Bit to set
@@ -101,19 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
101 retval = (mask & *adr) != 0; 85 retval = (mask & *adr) != 0;
102 *adr |= mask; 86 *adr |= mask;
103 cris_atomic_restore(addr, flags); 87 cris_atomic_restore(addr, flags);
104 local_irq_restore(flags);
105 return retval;
106}
107
108static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
109{
110 unsigned int mask, retval;
111 unsigned int *adr = (unsigned int *)addr;
112
113 adr += nr >> 5;
114 mask = 1 << (nr & 0x1f);
115 retval = (mask & *adr) != 0;
116 *adr |= mask;
117 return retval; 88 return retval;
118} 89}
119 90
@@ -148,27 +119,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
148} 119}
149 120
150/** 121/**
151 * __test_and_clear_bit - Clear a bit and return its old value
152 * @nr: Bit to clear
153 * @addr: Address to count from
154 *
155 * This operation is non-atomic and can be reordered.
156 * If two examples of this operation race, one can appear to succeed
157 * but actually fail. You must protect multiple accesses with a lock.
158 */
159
160static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
161{
162 unsigned int mask, retval;
163 unsigned int *adr = (unsigned int *)addr;
164
165 adr += nr >> 5;
166 mask = 1 << (nr & 0x1f);
167 retval = (mask & *adr) != 0;
168 *adr &= ~mask;
169 return retval;
170}
171/**
172 * test_and_change_bit - Change a bit and return its old value 122 * test_and_change_bit - Change a bit and return its old value
173 * @nr: Bit to change 123 * @nr: Bit to change
174 * @addr: Address to count from 124 * @addr: Address to count from
@@ -191,42 +141,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
191 return retval; 141 return retval;
192} 142}
193 143
194/* WARNING: non atomic and it can be reordered! */ 144#include <asm-generic/bitops/non-atomic.h>
195
196static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
197{
198 unsigned int mask, retval;
199 unsigned int *adr = (unsigned int *)addr;
200
201 adr += nr >> 5;
202 mask = 1 << (nr & 0x1f);
203 retval = (mask & *adr) != 0;
204 *adr ^= mask;
205
206 return retval;
207}
208
209/**
210 * test_bit - Determine whether a bit is set
211 * @nr: bit number to test
212 * @addr: Address to start counting from
213 *
214 * This routine doesn't need to be atomic.
215 */
216
217static inline int test_bit(int nr, const volatile unsigned long *addr)
218{
219 unsigned int mask;
220 unsigned int *adr = (unsigned int *)addr;
221
222 adr += nr >> 5;
223 mask = 1 << (nr & 0x1f);
224 return ((mask & *adr) != 0);
225}
226
227/*
228 * Find-bit routines..
229 */
230 145
231/* 146/*
232 * Since we define it "external", it collides with the built-in 147 * Since we define it "external", it collides with the built-in
@@ -235,152 +150,18 @@ static inline int test_bit(int nr, const volatile unsigned long *addr)
235 */ 150 */
236#define ffs kernel_ffs 151#define ffs kernel_ffs
237 152
238/* 153#include <asm-generic/bitops/fls.h>
239 * fls: find last bit set. 154#include <asm-generic/bitops/fls64.h>
240 */ 155#include <asm-generic/bitops/hweight.h>
241 156#include <asm-generic/bitops/find.h>
242#define fls(x) generic_fls(x)
243#define fls64(x) generic_fls64(x)
244
245/*
246 * hweightN - returns the hamming weight of a N-bit word
247 * @x: the word to weigh
248 *
249 * The Hamming Weight of a number is the total number of bits set in it.
250 */
251 157
252#define hweight32(x) generic_hweight32(x) 158#include <asm-generic/bitops/ext2-non-atomic.h>
253#define hweight16(x) generic_hweight16(x)
254#define hweight8(x) generic_hweight8(x)
255 159
256/**
257 * find_next_zero_bit - find the first zero bit in a memory region
258 * @addr: The address to base the search on
259 * @offset: The bitnumber to start searching at
260 * @size: The maximum size to search
261 */
262static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset)
263{
264 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
265 unsigned long result = offset & ~31UL;
266 unsigned long tmp;
267
268 if (offset >= size)
269 return size;
270 size -= result;
271 offset &= 31UL;
272 if (offset) {
273 tmp = *(p++);
274 tmp |= ~0UL >> (32-offset);
275 if (size < 32)
276 goto found_first;
277 if (~tmp)
278 goto found_middle;
279 size -= 32;
280 result += 32;
281 }
282 while (size & ~31UL) {
283 if (~(tmp = *(p++)))
284 goto found_middle;
285 result += 32;
286 size -= 32;
287 }
288 if (!size)
289 return result;
290 tmp = *p;
291
292 found_first:
293 tmp |= ~0UL << size;
294 found_middle:
295 return result + ffz(tmp);
296}
297
298/**
299 * find_next_bit - find the first set bit in a memory region
300 * @addr: The address to base the search on
301 * @offset: The bitnumber to start searching at
302 * @size: The maximum size to search
303 */
304static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset)
305{
306 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
307 unsigned long result = offset & ~31UL;
308 unsigned long tmp;
309
310 if (offset >= size)
311 return size;
312 size -= result;
313 offset &= 31UL;
314 if (offset) {
315 tmp = *(p++);
316 tmp &= (~0UL << offset);
317 if (size < 32)
318 goto found_first;
319 if (tmp)
320 goto found_middle;
321 size -= 32;
322 result += 32;
323 }
324 while (size & ~31UL) {
325 if ((tmp = *(p++)))
326 goto found_middle;
327 result += 32;
328 size -= 32;
329 }
330 if (!size)
331 return result;
332 tmp = *p;
333
334found_first:
335 tmp &= (~0UL >> (32 - size));
336 if (tmp == 0UL) /* Are any bits set? */
337 return result + size; /* Nope. */
338found_middle:
339 return result + __ffs(tmp);
340}
341
342/**
343 * find_first_zero_bit - find the first zero bit in a memory region
344 * @addr: The address to start the search at
345 * @size: The maximum size to search
346 *
347 * Returns the bit-number of the first zero bit, not the number of the byte
348 * containing a bit.
349 */
350
351#define find_first_zero_bit(addr, size) \
352 find_next_zero_bit((addr), (size), 0)
353#define find_first_bit(addr, size) \
354 find_next_bit((addr), (size), 0)
355
356#define ext2_set_bit test_and_set_bit
357#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) 160#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
358#define ext2_clear_bit test_and_clear_bit
359#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) 161#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
360#define ext2_test_bit test_bit
361#define ext2_find_first_zero_bit find_first_zero_bit
362#define ext2_find_next_zero_bit find_next_zero_bit
363
364/* Bitmap functions for the minix filesystem. */
365#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
366#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
367#define minix_test_bit(nr,addr) test_bit(nr,addr)
368#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
369 162
370static inline int sched_find_first_bit(const unsigned long *b) 163#include <asm-generic/bitops/minix.h>
371{ 164#include <asm-generic/bitops/sched.h>
372 if (unlikely(b[0]))
373 return __ffs(b[0]);
374 if (unlikely(b[1]))
375 return __ffs(b[1]) + 32;
376 if (unlikely(b[2]))
377 return __ffs(b[2]) + 64;
378 if (unlikely(b[3]))
379 return __ffs(b[3]) + 96;
380 if (b[4])
381 return __ffs(b[4]) + 128;
382 return __ffs(b[5]) + 32 + 128;
383}
384 165
385#endif /* __KERNEL__ */ 166#endif /* __KERNEL__ */
386 167
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index c99c478c482f..3787633e6209 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -43,8 +43,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
43 43
44/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */ 44/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */
45/* for that before indexing into the page table starting at mem_map */ 45/* for that before indexing into the page table starting at mem_map */
46#define pfn_to_page(pfn) (mem_map + ((pfn) - (PAGE_OFFSET >> PAGE_SHIFT))) 46#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
47#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + (PAGE_OFFSET >> PAGE_SHIFT))
48#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) 47#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)
49 48
50/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so 49/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so
@@ -77,6 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
77 76
78#endif /* __KERNEL__ */ 77#endif /* __KERNEL__ */
79 78
79#include <asm-generic/memory_model.h>
80#include <asm-generic/page.h> 80#include <asm-generic/page.h>
81 81
82#endif /* _CRIS_PAGE_H */ 82#endif /* _CRIS_PAGE_H */
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index f686b519878e..6344d06390b9 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -22,20 +22,7 @@
22 22
23#ifdef __KERNEL__ 23#ifdef __KERNEL__
24 24
25/* 25#include <asm-generic/bitops/ffz.h>
26 * ffz = Find First Zero in word. Undefined if no zero exists,
27 * so code should check against ~0UL first..
28 */
29static inline unsigned long ffz(unsigned long word)
30{
31 unsigned long result = 0;
32
33 while (word & 1) {
34 result++;
35 word >>= 1;
36 }
37 return result;
38}
39 26
40/* 27/*
41 * clear_bit() doesn't provide any barrier for the compiler. 28 * clear_bit() doesn't provide any barrier for the compiler.
@@ -171,51 +158,9 @@ static inline int __test_bit(int nr, const volatile void * addr)
171 __constant_test_bit((nr),(addr)) : \ 158 __constant_test_bit((nr),(addr)) : \
172 __test_bit((nr),(addr))) 159 __test_bit((nr),(addr)))
173 160
174extern int find_next_bit(const unsigned long *addr, int size, int offset); 161#include <asm-generic/bitops/ffs.h>
175 162#include <asm-generic/bitops/__ffs.h>
176#define find_first_bit(addr, size) find_next_bit(addr, size, 0) 163#include <asm-generic/bitops/find.h>
177
178#define find_first_zero_bit(addr, size) \
179 find_next_zero_bit((addr), (size), 0)
180
181static inline int find_next_zero_bit(const void *addr, int size, int offset)
182{
183 const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
184 unsigned long result = offset & ~31UL;
185 unsigned long tmp;
186
187 if (offset >= size)
188 return size;
189 size -= result;
190 offset &= 31UL;
191 if (offset) {
192 tmp = *(p++);
193 tmp |= ~0UL >> (32-offset);
194 if (size < 32)
195 goto found_first;
196 if (~tmp)
197 goto found_middle;
198 size -= 32;
199 result += 32;
200 }
201 while (size & ~31UL) {
202 if (~(tmp = *(p++)))
203 goto found_middle;
204 result += 32;
205 size -= 32;
206 }
207 if (!size)
208 return result;
209 tmp = *p;
210
211found_first:
212 tmp |= ~0UL << size;
213found_middle:
214 return result + ffz(tmp);
215}
216
217#define ffs(x) generic_ffs(x)
218#define __ffs(x) (ffs(x) - 1)
219 164
220/* 165/*
221 * fls: find last bit set. 166 * fls: find last bit set.
@@ -228,114 +173,17 @@ found_middle:
228 \ 173 \
229 bit ? 33 - bit : bit; \ 174 bit ? 33 - bit : bit; \
230}) 175})
231#define fls64(x) generic_fls64(x)
232 176
233/* 177#include <asm-generic/bitops/fls64.h>
234 * Every architecture must define this function. It's the fastest 178#include <asm-generic/bitops/sched.h>
235 * way of searching a 140-bit bitmap where the first 100 bits are 179#include <asm-generic/bitops/hweight.h>
236 * unlikely to be set. It's guaranteed that at least one of the 140
237 * bits is cleared.
238 */
239static inline int sched_find_first_bit(const unsigned long *b)
240{
241 if (unlikely(b[0]))
242 return __ffs(b[0]);
243 if (unlikely(b[1]))
244 return __ffs(b[1]) + 32;
245 if (unlikely(b[2]))
246 return __ffs(b[2]) + 64;
247 if (b[3])
248 return __ffs(b[3]) + 96;
249 return __ffs(b[4]) + 128;
250}
251 180
181#include <asm-generic/bitops/ext2-non-atomic.h>
252 182
253/* 183#define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr))
254 * hweightN: returns the hamming weight (i.e. the number 184#define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr))
255 * of bits set) of a N-bit word
256 */
257
258#define hweight32(x) generic_hweight32(x)
259#define hweight16(x) generic_hweight16(x)
260#define hweight8(x) generic_hweight8(x)
261
262#define ext2_set_bit(nr, addr) test_and_set_bit ((nr) ^ 0x18, (addr))
263#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (addr))
264
265#define ext2_set_bit_atomic(lock,nr,addr) ext2_set_bit((nr), addr)
266#define ext2_clear_bit_atomic(lock,nr,addr) ext2_clear_bit((nr), addr)
267
268static inline int ext2_test_bit(int nr, const volatile void * addr)
269{
270 const volatile unsigned char *ADDR = (const unsigned char *) addr;
271 int mask;
272
273 ADDR += nr >> 3;
274 mask = 1 << (nr & 0x07);
275 return ((mask & *ADDR) != 0);
276}
277
278#define ext2_find_first_zero_bit(addr, size) \
279 ext2_find_next_zero_bit((addr), (size), 0)
280
281static inline unsigned long ext2_find_next_zero_bit(const void *addr,
282 unsigned long size,
283 unsigned long offset)
284{
285 const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
286 unsigned long result = offset & ~31UL;
287 unsigned long tmp;
288
289 if (offset >= size)
290 return size;
291 size -= result;
292 offset &= 31UL;
293 if(offset) {
294 /* We hold the little endian value in tmp, but then the
295 * shift is illegal. So we could keep a big endian value
296 * in tmp, like this:
297 *
298 * tmp = __swab32(*(p++));
299 * tmp |= ~0UL >> (32-offset);
300 *
301 * but this would decrease preformance, so we change the
302 * shift:
303 */
304 tmp = *(p++);
305 tmp |= __swab32(~0UL >> (32-offset));
306 if(size < 32)
307 goto found_first;
308 if(~tmp)
309 goto found_middle;
310 size -= 32;
311 result += 32;
312 }
313 while(size & ~31UL) {
314 if(~(tmp = *(p++)))
315 goto found_middle;
316 result += 32;
317 size -= 32;
318 }
319 if(!size)
320 return result;
321 tmp = *p;
322
323found_first:
324 /* tmp is little endian, so we would have to swab the shift,
325 * see above. But then we have to swab tmp below for ffz, so
326 * we might as well do this here.
327 */
328 return result + ffz(__swab32(tmp) | (~0UL << size));
329found_middle:
330 return result + ffz(__swab32(tmp));
331}
332 185
333/* Bitmap functions for the minix filesystem. */ 186#include <asm-generic/bitops/minix-le.h>
334#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
335#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
336#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
337#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
338#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
339 187
340#endif /* __KERNEL__ */ 188#endif /* __KERNEL__ */
341 189
diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h
index fca9d90e32c9..08b3d1da3583 100644
--- a/include/asm-frv/futex.h
+++ b/include/asm-frv/futex.h
@@ -9,5 +9,11 @@
9 9
10extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); 10extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
11 11
12static inline int
13futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
14{
15 return -ENOSYS;
16}
17
12#endif 18#endif
13#endif 19#endif
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h
index b8221b611b5c..dc0f7e08a4c2 100644
--- a/include/asm-frv/page.h
+++ b/include/asm-frv/page.h
@@ -57,13 +57,9 @@ extern unsigned long min_low_pfn;
57extern unsigned long max_pfn; 57extern unsigned long max_pfn;
58 58
59#ifdef CONFIG_MMU 59#ifdef CONFIG_MMU
60#define pfn_to_page(pfn) (mem_map + (pfn))
61#define page_to_pfn(page) ((unsigned long) ((page) - mem_map))
62#define pfn_valid(pfn) ((pfn) < max_mapnr) 60#define pfn_valid(pfn) ((pfn) < max_mapnr)
63
64#else 61#else
65#define pfn_to_page(pfn) (&mem_map[(pfn) - (PAGE_OFFSET >> PAGE_SHIFT)]) 62#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
66#define page_to_pfn(page) ((PAGE_OFFSET >> PAGE_SHIFT) + (unsigned long) ((page) - mem_map))
67#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn) 63#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
68 64
69#endif 65#endif
@@ -87,6 +83,7 @@ extern unsigned long max_pfn;
87#define WANT_PAGE_VIRTUAL 1 83#define WANT_PAGE_VIRTUAL 1
88#endif 84#endif
89 85
86#include <asm-generic/memory_model.h>
90#include <asm-generic/page.h> 87#include <asm-generic/page.h>
91 88
92#endif /* _ASM_PAGE_H */ 89#endif /* _ASM_PAGE_H */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 0e6d9852008c..1f9d99193df8 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -5,77 +5,27 @@
5 * For the benefit of those who are trying to port Linux to another 5 * For the benefit of those who are trying to port Linux to another
6 * architecture, here are some C-language equivalents. You should 6 * architecture, here are some C-language equivalents. You should
7 * recode these in the native assembly language, if at all possible. 7 * recode these in the native assembly language, if at all possible.
8 * To guarantee atomicity, these routines call cli() and sti() to
9 * disable interrupts while they operate. (You have to provide inline
10 * routines to cli() and sti().)
11 *
12 * Also note, these routines assume that you have 32 bit longs.
13 * You will have to change this if you are trying to port Linux to the
14 * Alpha architecture or to a Cray. :-)
15 * 8 *
16 * C language equivalents written by Theodore Ts'o, 9/26/92 9 * C language equivalents written by Theodore Ts'o, 9/26/92
17 */ 10 */
18 11
19extern __inline__ int set_bit(int nr,long * addr) 12#include <asm-generic/bitops/atomic.h>
20{ 13#include <asm-generic/bitops/non-atomic.h>
21 int mask, retval; 14#include <asm-generic/bitops/__ffs.h>
22 15#include <asm-generic/bitops/ffz.h>
23 addr += nr >> 5; 16#include <asm-generic/bitops/fls.h>
24 mask = 1 << (nr & 0x1f); 17#include <asm-generic/bitops/fls64.h>
25 cli(); 18#include <asm-generic/bitops/find.h>
26 retval = (mask & *addr) != 0;
27 *addr |= mask;
28 sti();
29 return retval;
30}
31
32extern __inline__ int clear_bit(int nr, long * addr)
33{
34 int mask, retval;
35
36 addr += nr >> 5;
37 mask = 1 << (nr & 0x1f);
38 cli();
39 retval = (mask & *addr) != 0;
40 *addr &= ~mask;
41 sti();
42 return retval;
43}
44
45extern __inline__ int test_bit(int nr, const unsigned long * addr)
46{
47 int mask;
48
49 addr += nr >> 5;
50 mask = 1 << (nr & 0x1f);
51 return ((mask & *addr) != 0);
52}
53
54/*
55 * fls: find last bit set.
56 */
57
58#define fls(x) generic_fls(x)
59#define fls64(x) generic_fls64(x)
60 19
61#ifdef __KERNEL__ 20#ifdef __KERNEL__
62 21
63/* 22#include <asm-generic/bitops/sched.h>
64 * ffs: find first bit set. This is defined the same way as 23#include <asm-generic/bitops/ffs.h>
65 * the libc and compiler builtin ffs routines, therefore 24#include <asm-generic/bitops/hweight.h>
66 * differs in spirit from the above ffz (man ffs).
67 */
68
69#define ffs(x) generic_ffs(x)
70
71/*
72 * hweightN: returns the hamming weight (i.e. the number
73 * of bits set) of a N-bit word
74 */
75 25
76#define hweight32(x) generic_hweight32(x) 26#include <asm-generic/bitops/ext2-non-atomic.h>
77#define hweight16(x) generic_hweight16(x) 27#include <asm-generic/bitops/ext2-atomic.h>
78#define hweight8(x) generic_hweight8(x) 28#include <asm-generic/bitops/minix.h>
79 29
80#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
81 31
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 000000000000..9a3274aecf83
--- /dev/null
+++ b/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS___FFS_H_
#define _ASM_GENERIC_BITOPS___FFS_H_

#include <asm/types.h>

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns the 0-based index of the least-significant set bit.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* unsigned long, not int: matches the return type so no
	 * implicit narrowing/widening occurs on the way out. */
	unsigned long num = 0;

	/* Binary search: each step halves the remaining bit range. */
#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}

#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
new file mode 100644
index 000000000000..78339319ba02
--- /dev/null
+++ b/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,191 @@
1#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_ATOMIC_H_
3
4#include <asm/types.h>
5
6#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8
9#ifdef CONFIG_SMP
10#include <asm/spinlock.h>
11#include <asm/cache.h> /* we use L1_CACHE_BYTES */
12
13/* Use an array of spinlocks for our atomic_ts.
14 * Hash function to index into a different SPINLOCK.
15 * Since "a" is usually an address, use one spinlock per cacheline.
16 */
17# define ATOMIC_HASH_SIZE 4
18# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
19
20extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
21
22/* Can't use raw_spin_lock_irq because of #include problems, so
23 * this is the substitute */
24#define _atomic_spin_lock_irqsave(l,f) do { \
25 raw_spinlock_t *s = ATOMIC_HASH(l); \
26 local_irq_save(f); \
27 __raw_spin_lock(s); \
28} while(0)
29
30#define _atomic_spin_unlock_irqrestore(l,f) do { \
31 raw_spinlock_t *s = ATOMIC_HASH(l); \
32 __raw_spin_unlock(s); \
33 local_irq_restore(f); \
34} while(0)
35
36
37#else
38# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
39# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
40#endif
41
42/*
43 * NMI events can occur at any time, including when interrupts have been
44 * disabled by *_irqsave(). So you can get NMI events occurring while a
45 * *_bit function is holding a spin lock. If the NMI handler also wants
46 * to do bit manipulation (and they do) then you can get a deadlock
47 * between the original caller of *_bit() and the NMI handler.
48 *
49 * by Keith Owens
50 */
51
52/**
53 * set_bit - Atomically set a bit in memory
54 * @nr: the bit to set
55 * @addr: the address to start counting from
56 *
57 * This function is atomic and may not be reordered. See __set_bit()
58 * if you do not require the atomic guarantees.
59 *
60 * Note: there are no guarantees that this function will not be reordered
61 * on non x86 architectures, so if you are writting portable code,
62 * make sure not to rely on its reordering guarantees.
63 *
64 * Note that @nr may be almost arbitrarily large; this function is not
65 * restricted to acting on a single-word quantity.
66 */
67static inline void set_bit(int nr, volatile unsigned long *addr)
68{
69 unsigned long mask = BITOP_MASK(nr);
70 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
71 unsigned long flags;
72
73 _atomic_spin_lock_irqsave(p, flags);
74 *p |= mask;
75 _atomic_spin_unlock_irqrestore(p, flags);
76}
77
78/**
79 * clear_bit - Clears a bit in memory
80 * @nr: Bit to clear
81 * @addr: Address to start counting from
82 *
83 * clear_bit() is atomic and may not be reordered. However, it does
84 * not contain a memory barrier, so if it is used for locking purposes,
85 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
86 * in order to ensure changes are visible on other processors.
87 */
88static inline void clear_bit(int nr, volatile unsigned long *addr)
89{
90 unsigned long mask = BITOP_MASK(nr);
91 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
92 unsigned long flags;
93
94 _atomic_spin_lock_irqsave(p, flags);
95 *p &= ~mask;
96 _atomic_spin_unlock_irqrestore(p, flags);
97}
98
99/**
100 * change_bit - Toggle a bit in memory
101 * @nr: Bit to change
102 * @addr: Address to start counting from
103 *
104 * change_bit() is atomic and may not be reordered. It may be
105 * reordered on other architectures than x86.
106 * Note that @nr may be almost arbitrarily large; this function is not
107 * restricted to acting on a single-word quantity.
108 */
109static inline void change_bit(int nr, volatile unsigned long *addr)
110{
111 unsigned long mask = BITOP_MASK(nr);
112 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
113 unsigned long flags;
114
115 _atomic_spin_lock_irqsave(p, flags);
116 *p ^= mask;
117 _atomic_spin_unlock_irqrestore(p, flags);
118}
119
120/**
121 * test_and_set_bit - Set a bit and return its old value
122 * @nr: Bit to set
123 * @addr: Address to count from
124 *
125 * This operation is atomic and cannot be reordered.
126 * It may be reordered on other architectures than x86.
127 * It also implies a memory barrier.
128 */
129static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
130{
131 unsigned long mask = BITOP_MASK(nr);
132 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
133 unsigned long old;
134 unsigned long flags;
135
136 _atomic_spin_lock_irqsave(p, flags);
137 old = *p;
138 *p = old | mask;
139 _atomic_spin_unlock_irqrestore(p, flags);
140
141 return (old & mask) != 0;
142}
143
144/**
145 * test_and_clear_bit - Clear a bit and return its old value
146 * @nr: Bit to clear
147 * @addr: Address to count from
148 *
149 * This operation is atomic and cannot be reordered.
150 * It can be reorderdered on other architectures other than x86.
151 * It also implies a memory barrier.
152 */
153static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
154{
155 unsigned long mask = BITOP_MASK(nr);
156 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
157 unsigned long old;
158 unsigned long flags;
159
160 _atomic_spin_lock_irqsave(p, flags);
161 old = *p;
162 *p = old & ~mask;
163 _atomic_spin_unlock_irqrestore(p, flags);
164
165 return (old & mask) != 0;
166}
167
168/**
169 * test_and_change_bit - Change a bit and return its old value
170 * @nr: Bit to change
171 * @addr: Address to count from
172 *
173 * This operation is atomic and cannot be reordered.
174 * It also implies a memory barrier.
175 */
176static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
177{
178 unsigned long mask = BITOP_MASK(nr);
179 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
180 unsigned long old;
181 unsigned long flags;
182
183 _atomic_spin_lock_irqsave(p, flags);
184 old = *p;
185 *p = old ^ mask;
186 _atomic_spin_unlock_irqrestore(p, flags);
187
188 return (old & mask) != 0;
189}
190
191#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 000000000000..ab1c875efb74
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,22 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_

/*
 * Atomic ext2 bitmap operations: the non-atomic ext2 bitops wrapped
 * in the caller-supplied spinlock.  The statement expression yields
 * the old value of the bit, as returned by the wrapped operation.
 */
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int result;				\
		spin_lock(lock);			\
		result = ext2_set_bit((nr), (unsigned long *)(addr));	\
		spin_unlock(lock);			\
		result;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int result;				\
		spin_lock(lock);			\
		result = ext2_clear_bit((nr), (unsigned long *)(addr));	\
		spin_unlock(lock);			\
		result;					\
	})

#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
new file mode 100644
index 000000000000..1697404afa05
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-non-atomic.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
3
4#include <asm-generic/bitops/le.h>
5
6#define ext2_set_bit(nr,addr) \
7 generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
8#define ext2_clear_bit(nr,addr) \
9 generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
10
11#define ext2_test_bit(nr,addr) \
12 generic_test_le_bit((nr),(unsigned long *)(addr))
13#define ext2_find_first_zero_bit(addr, size) \
14 generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
15#define ext2_find_next_zero_bit(addr, size, off) \
16 generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
17
18#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
new file mode 100644
index 000000000000..fbbb43af7dc0
--- /dev/null
+++ b/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
#define _ASM_GENERIC_BITOPS_FFS_H_

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * Defined the same way as the libc and compiler builtin ffs routines:
 * bit positions are 1-based and ffs(0) == 0, so it differs in spirit
 * from ffz()/__ffs() (man ffs).
 */
static inline int ffs(int x)
{
	/* Work on an unsigned copy so the shifts are well defined. */
	unsigned int v = (unsigned int)x;
	int pos = 1;

	if (v == 0)
		return 0;

	/* Binary search for the lowest set bit, 16/8/4/2/1 at a time. */
	if (!(v & 0xffffu)) {
		v >>= 16;
		pos += 16;
	}
	if (!(v & 0xffu)) {
		v >>= 8;
		pos += 8;
	}
	if (!(v & 0xfu)) {
		v >>= 4;
		pos += 4;
	}
	if (!(v & 0x3u)) {
		v >>= 2;
		pos += 2;
	}
	if (!(v & 0x1u))
		pos += 1;
	return pos;
}

#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h
new file mode 100644
index 000000000000..6744bd4cdf46
--- /dev/null
+++ b/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,12 @@
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_

/*
 * ffz - find first zero in word.
 * @x: The word to search
 *
 * Implemented by inverting the word and locating the first set bit.
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
#define ffz(x)  __ffs(~(x))

#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
new file mode 100644
index 000000000000..72a51e5a12ef
--- /dev/null
+++ b/include/asm-generic/bitops/find.h
@@ -0,0 +1,13 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_

/* Bitmap searches, defined out of line elsewhere. */
extern unsigned long find_next_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

/* A whole-bitmap search is just a "next" search starting at bit 0. */
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
new file mode 100644
index 000000000000..850859bc5069
--- /dev/null
+++ b/include/asm-generic/bitops/fls.h
@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_

/**
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	/* Shift an unsigned copy: left-shifting a signed int whose
	 * result overflows (sign bit reached) is undefined behavior. */
	unsigned int v = (unsigned int)x;
	int r = 32;

	if (!v)
		return 0;
	/* Binary search for the highest set bit, 16/8/4/2/1 at a time. */
	if (!(v & 0xffff0000u)) {
		v <<= 16;
		r -= 16;
	}
	if (!(v & 0xff000000u)) {
		v <<= 8;
		r -= 8;
	}
	if (!(v & 0xf0000000u)) {
		v <<= 4;
		r -= 4;
	}
	if (!(v & 0xc0000000u)) {
		v <<= 2;
		r -= 2;
	}
	if (!(v & 0x80000000u))
		r -= 1;
	return r;
}

#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
new file mode 100644
index 000000000000..1b6b17ce2428
--- /dev/null
+++ b/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,14 @@
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_

#include <asm/types.h>

/*
 * fls64 - find last set bit in a 64-bit word, built on the 32-bit
 * fls().  If any bit of the high word is set the answer lies there
 * (offset by 32); otherwise search the low word.
 */
static inline int fls64(__u64 x)
{
	__u32 hi = x >> 32;

	return hi ? fls(hi) + 32 : fls((__u32)x);
}

#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000000..fbbc383771da
--- /dev/null
+++ b/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,11 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_

#include <asm/types.h>

/*
 * hweightN - return the Hamming weight (number of set bits) of an
 * N-bit word.  Implemented out of line.
 */
extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
extern unsigned long hweight64(__u64 w);

#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
new file mode 100644
index 000000000000..b9c7e5d2d2ad
--- /dev/null
+++ b/include/asm-generic/bitops/le.h
@@ -0,0 +1,53 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_

#include <asm/types.h>
#include <asm/byteorder.h>

#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)

/* XOR-mask that converts a little-endian bit number into a native one
 * on big-endian machines: flips which byte of the word is addressed
 * while leaving the bit-within-byte position alone. */
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

#if defined(__LITTLE_ENDIAN)

/* Native order already matches the little-endian layout: pass through. */
#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)

#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)

#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)

#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)

#elif defined(__BIG_ENDIAN)

/* Big-endian: swizzle the bit number before using the native bitops. */
#define generic_test_le_bit(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___set_le_bit(nr, addr) \
	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___clear_le_bit(nr, addr) \
	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define generic_test_and_set_le_bit(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic_test_and_clear_le_bit(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define generic___test_and_set_le_bit(nr, addr) \
	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define generic___test_and_clear_le_bit(nr, addr) \
	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

/* Searching cannot be expressed as a simple swizzle; out of line. */
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

#else
#error "Please fix <asm/byteorder.h>"
#endif

#define generic_find_first_zero_le_bit(addr, size) \
	generic_find_next_zero_le_bit((addr), (size), 0)

#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h
new file mode 100644
index 000000000000..4a981c1bb1ae
--- /dev/null
+++ b/include/asm-generic/bitops/minix-le.h
@@ -0,0 +1,17 @@
1#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
2#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
3
4#include <asm-generic/bitops/le.h>
5
6#define minix_test_and_set_bit(nr,addr) \
7 generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
8#define minix_set_bit(nr,addr) \
9 generic___set_le_bit((nr),(unsigned long *)(addr))
10#define minix_test_and_clear_bit(nr,addr) \
11 generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
12#define minix_test_bit(nr,addr) \
13 generic_test_le_bit((nr),(unsigned long *)(addr))
14#define minix_find_first_zero_bit(addr,size) \
15 generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
16
17#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h
new file mode 100644
index 000000000000..91f42e87aa51
--- /dev/null
+++ b/include/asm-generic/bitops/minix.h
@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
#define _ASM_GENERIC_BITOPS_MINIX_H_

/*
 * Minix filesystem bitops in native bit order: thin wrappers around
 * the non-atomic generic bitops, with the cast the callers expect.
 */
#define minix_test_and_set_bit(nr,addr)	\
	__test_and_set_bit((nr),(unsigned long *)(addr))
#define minix_set_bit(nr,addr)		\
	__set_bit((nr),(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr,addr) \
	__test_and_clear_bit((nr),(unsigned long *)(addr))
#define minix_test_bit(nr,addr)		\
	test_bit((nr),(unsigned long *)(addr))
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((unsigned long *)(addr),(size))

#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 000000000000..46a825cf2ae1
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,111 @@
1#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
3
4#include <asm/types.h>
5
6#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8
9/**
10 * __set_bit - Set a bit in memory
11 * @nr: the bit to set
12 * @addr: the address to start counting from
13 *
14 * Unlike set_bit(), this function is non-atomic and may be reordered.
15 * If it's called on the same region of memory simultaneously, the effect
16 * may be that only one operation succeeds.
17 */
18static inline void __set_bit(int nr, volatile unsigned long *addr)
19{
20 unsigned long mask = BITOP_MASK(nr);
21 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
22
23 *p |= mask;
24}
25
26static inline void __clear_bit(int nr, volatile unsigned long *addr)
27{
28 unsigned long mask = BITOP_MASK(nr);
29 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
30
31 *p &= ~mask;
32}
33
34/**
35 * __change_bit - Toggle a bit in memory
36 * @nr: the bit to change
37 * @addr: the address to start counting from
38 *
39 * Unlike change_bit(), this function is non-atomic and may be reordered.
40 * If it's called on the same region of memory simultaneously, the effect
41 * may be that only one operation succeeds.
42 */
43static inline void __change_bit(int nr, volatile unsigned long *addr)
44{
45 unsigned long mask = BITOP_MASK(nr);
46 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
47
48 *p ^= mask;
49}
50
51/**
52 * __test_and_set_bit - Set a bit and return its old value
53 * @nr: Bit to set
54 * @addr: Address to count from
55 *
56 * This operation is non-atomic and can be reordered.
57 * If two examples of this operation race, one can appear to succeed
58 * but actually fail. You must protect multiple accesses with a lock.
59 */
60static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
61{
62 unsigned long mask = BITOP_MASK(nr);
63 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
64 unsigned long old = *p;
65
66 *p = old | mask;
67 return (old & mask) != 0;
68}
69
70/**
71 * __test_and_clear_bit - Clear a bit and return its old value
72 * @nr: Bit to clear
73 * @addr: Address to count from
74 *
75 * This operation is non-atomic and can be reordered.
76 * If two examples of this operation race, one can appear to succeed
77 * but actually fail. You must protect multiple accesses with a lock.
78 */
79static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
80{
81 unsigned long mask = BITOP_MASK(nr);
82 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
83 unsigned long old = *p;
84
85 *p = old & ~mask;
86 return (old & mask) != 0;
87}
88
89/* WARNING: non atomic and it can be reordered! */
90static inline int __test_and_change_bit(int nr,
91 volatile unsigned long *addr)
92{
93 unsigned long mask = BITOP_MASK(nr);
94 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
95 unsigned long old = *p;
96
97 *p = old ^ mask;
98 return (old & mask) != 0;
99}
100
101/**
102 * test_bit - Determine whether a bit is set
103 * @nr: bit number to test
104 * @addr: Address to start counting from
105 */
106static inline int test_bit(int nr, const volatile unsigned long *addr)
107{
108 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
109}
110
111#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 000000000000..5ef93a4d009f
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,36 @@
1#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
2#define _ASM_GENERIC_BITOPS_SCHED_H_
3
4#include <linux/compiler.h> /* unlikely() */
5#include <asm/types.h>
6
7/*
8 * Every architecture must define this function. It's the fastest
9 * way of searching a 140-bit bitmap where the first 100 bits are
10 * unlikely to be set. It's guaranteed that at least one of the 140
11 * bits is cleared.
12 */
13static inline int sched_find_first_bit(const unsigned long *b)
14{
15#if BITS_PER_LONG == 64
16 if (unlikely(b[0]))
17 return __ffs(b[0]);
18 if (unlikely(b[1]))
19 return __ffs(b[1]) + 64;
20 return __ffs(b[2]) + 128;
21#elif BITS_PER_LONG == 32
22 if (unlikely(b[0]))
23 return __ffs(b[0]);
24 if (unlikely(b[1]))
25 return __ffs(b[1]) + 32;
26 if (unlikely(b[2]))
27 return __ffs(b[2]) + 64;
28 if (b[3])
29 return __ffs(b[3]) + 96;
30 return __ffs(b[4]) + 128;
31#else
32#error BITS_PER_LONG not defined
33#endif
34}
35
36#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 3ae2c7347549..df893c160318 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -49,5 +49,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
49 return ret; 49 return ret;
50} 50}
51 51
52static inline int
53futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54{
55 return -ENOSYS;
56}
57
52#endif 58#endif
53#endif 59#endif
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index 16fc00360f75..de4614840c2c 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -4,28 +4,28 @@
4#include <linux/config.h> 4#include <linux/config.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/hardirq.h> 6#include <linux/hardirq.h>
7#include <asm/atomic.h>
7#include <asm/types.h> 8#include <asm/types.h>
8 9
9/* An unsigned long type for operations which are atomic for a single 10/* An unsigned long type for operations which are atomic for a single
10 * CPU. Usually used in combination with per-cpu variables. */ 11 * CPU. Usually used in combination with per-cpu variables. */
11 12
12#if BITS_PER_LONG == 32
13/* Implement in terms of atomics. */ 13/* Implement in terms of atomics. */
14 14
15/* Don't use typedef: don't want them to be mixed with atomic_t's. */ 15/* Don't use typedef: don't want them to be mixed with atomic_t's. */
16typedef struct 16typedef struct
17{ 17{
18 atomic_t a; 18 atomic_long_t a;
19} local_t; 19} local_t;
20 20
21#define LOCAL_INIT(i) { ATOMIC_INIT(i) } 21#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
22 22
23#define local_read(l) ((unsigned long)atomic_read(&(l)->a)) 23#define local_read(l) ((unsigned long)atomic_long_read(&(l)->a))
24#define local_set(l,i) atomic_set((&(l)->a),(i)) 24#define local_set(l,i) atomic_long_set((&(l)->a),(i))
25#define local_inc(l) atomic_inc(&(l)->a) 25#define local_inc(l) atomic_long_inc(&(l)->a)
26#define local_dec(l) atomic_dec(&(l)->a) 26#define local_dec(l) atomic_long_dec(&(l)->a)
27#define local_add(i,l) atomic_add((i),(&(l)->a)) 27#define local_add(i,l) atomic_long_add((i),(&(l)->a))
28#define local_sub(i,l) atomic_sub((i),(&(l)->a)) 28#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
29 29
30/* Non-atomic variants, ie. preemption disabled and won't be touched 30/* Non-atomic variants, ie. preemption disabled and won't be touched
31 * in interrupt, etc. Some archs can optimize this case well. */ 31 * in interrupt, etc. Some archs can optimize this case well. */
@@ -34,68 +34,6 @@ typedef struct
34#define __local_add(i,l) local_set((l), local_read(l) + (i)) 34#define __local_add(i,l) local_set((l), local_read(l) + (i))
35#define __local_sub(i,l) local_set((l), local_read(l) - (i)) 35#define __local_sub(i,l) local_set((l), local_read(l) - (i))
36 36
37#else /* ... can't use atomics. */
38/* Implement in terms of three variables.
39 Another option would be to use local_irq_save/restore. */
40
41typedef struct
42{
43 /* 0 = in hardirq, 1 = in softirq, 2 = usermode. */
44 unsigned long v[3];
45} local_t;
46
47#define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()])
48
49#define LOCAL_INIT(i) { { (i), 0, 0 } }
50
51static inline unsigned long local_read(local_t *l)
52{
53 return l->v[0] + l->v[1] + l->v[2];
54}
55
56static inline void local_set(local_t *l, unsigned long v)
57{
58 l->v[0] = v;
59 l->v[1] = l->v[2] = 0;
60}
61
62static inline void local_inc(local_t *l)
63{
64 preempt_disable();
65 _LOCAL_VAR(l)++;
66 preempt_enable();
67}
68
69static inline void local_dec(local_t *l)
70{
71 preempt_disable();
72 _LOCAL_VAR(l)--;
73 preempt_enable();
74}
75
76static inline void local_add(unsigned long v, local_t *l)
77{
78 preempt_disable();
79 _LOCAL_VAR(l) += v;
80 preempt_enable();
81}
82
83static inline void local_sub(unsigned long v, local_t *l)
84{
85 preempt_disable();
86 _LOCAL_VAR(l) -= v;
87 preempt_enable();
88}
89
90/* Non-atomic variants, ie. preemption disabled and won't be touched
91 * in interrupt, etc. Some archs can optimize this case well. */
92#define __local_inc(l) ((l)->v[0]++)
93#define __local_dec(l) ((l)->v[0]--)
94#define __local_add(i,l) ((l)->v[0] += (i))
95#define __local_sub(i,l) ((l)->v[0] -= (i))
96
97#endif /* Non-atomic implementation */
98
99/* Use these for per-cpu local_t variables: on some archs they are 37/* Use these for per-cpu local_t variables: on some archs they are
100 * much more efficient than these naive implementations. Note they take 38 * much more efficient than these naive implementations. Note they take
101 * a variable (eg. mystruct.foo), not an address. 39 * a variable (eg. mystruct.foo), not an address.
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
new file mode 100644
index 000000000000..0cfb086dd373
--- /dev/null
+++ b/include/asm-generic/memory_model.h
@@ -0,0 +1,77 @@
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

/* Architectures may start their physical memory at a nonzero pfn. */
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

/* Default pfn -> node mapping; architectures can override. */
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

/* Offset of a pfn within its node's mem_map. */
#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page;
/* Out-of-line versions, useful when the inlined pfn_to_page is too big. */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *page);
#else
/*
 * Inline pfn <-> page conversion for the three supported memory models.
 */
#if defined(CONFIG_FLATMEM)

/* One flat mem_map array, indexed by pfn (minus the arch offset). */
#define pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)

/* Per-node mem_map arrays: locate the node, then index within it. */
#define pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define page_to_pfn(pg)							\
({	struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: a section's mem_map is encoded to reflect its start_pfn:
 * section[i].section_mem_map == mem_map's address - start_pfn,
 * so plain pointer arithmetic recovers the pfn / page.
 */
#define page_to_pfn(pg)							\
({	struct page *__pg = (pg);					\
	int __sec = page_to_section(__pg);			\
	__pg - __section_mem_map_addr(__nr_to_section(__sec));	\
})

#define pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 78cf45547e31..c0caf433a7d7 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,7 +19,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
19#define percpu_modcopy(pcpudst, src, size) \ 19#define percpu_modcopy(pcpudst, src, size) \
20do { \ 20do { \
21 unsigned int __i; \ 21 unsigned int __i; \
22 for_each_cpu(__i) \ 22 for_each_possible_cpu(__i) \
23 memcpy((pcpudst)+__per_cpu_offset[__i], \ 23 memcpy((pcpudst)+__per_cpu_offset[__i], \
24 (src), (size)); \ 24 (src), (size)); \
25} while (0) 25} while (0)
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index ff7c2b721594..574f57b6c4d1 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -8,7 +8,6 @@
8 8
9#include <linux/config.h> 9#include <linux/config.h>
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <asm/byteorder.h> /* swab32 */
12#include <asm/system.h> 11#include <asm/system.h>
13 12
14#ifdef __KERNEL__ 13#ifdef __KERNEL__
@@ -177,10 +176,7 @@ H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
177#undef H8300_GEN_TEST_BITOP_CONST_INT 176#undef H8300_GEN_TEST_BITOP_CONST_INT
178#undef H8300_GEN_TEST_BITOP 177#undef H8300_GEN_TEST_BITOP
179 178
180#define find_first_zero_bit(addr, size) \ 179#include <asm-generic/bitops/ffs.h>
181 find_next_zero_bit((addr), (size), 0)
182
183#define ffs(x) generic_ffs(x)
184 180
185static __inline__ unsigned long __ffs(unsigned long word) 181static __inline__ unsigned long __ffs(unsigned long word)
186{ 182{
@@ -196,216 +192,16 @@ static __inline__ unsigned long __ffs(unsigned long word)
196 return result; 192 return result;
197} 193}
198 194
199static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset) 195#include <asm-generic/bitops/find.h>
200{ 196#include <asm-generic/bitops/sched.h>
201 unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); 197#include <asm-generic/bitops/hweight.h>
202 unsigned long result = offset & ~31UL; 198#include <asm-generic/bitops/ext2-non-atomic.h>
203 unsigned long tmp; 199#include <asm-generic/bitops/ext2-atomic.h>
204 200#include <asm-generic/bitops/minix.h>
205 if (offset >= size)
206 return size;
207 size -= result;
208 offset &= 31UL;
209 if (offset) {
210 tmp = *(p++);
211 tmp |= ~0UL >> (32-offset);
212 if (size < 32)
213 goto found_first;
214 if (~tmp)
215 goto found_middle;
216 size -= 32;
217 result += 32;
218 }
219 while (size & ~31UL) {
220 if (~(tmp = *(p++)))
221 goto found_middle;
222 result += 32;
223 size -= 32;
224 }
225 if (!size)
226 return result;
227 tmp = *p;
228
229found_first:
230 tmp |= ~0UL << size;
231found_middle:
232 return result + ffz(tmp);
233}
234
235static __inline__ unsigned long find_next_bit(const unsigned long *addr,
236 unsigned long size, unsigned long offset)
237{
238 unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
239 unsigned int result = offset & ~31UL;
240 unsigned int tmp;
241
242 if (offset >= size)
243 return size;
244 size -= result;
245 offset &= 31UL;
246 if (offset) {
247 tmp = *(p++);
248 tmp &= ~0UL << offset;
249 if (size < 32)
250 goto found_first;
251 if (tmp)
252 goto found_middle;
253 size -= 32;
254 result += 32;
255 }
256 while (size >= 32) {
257 if ((tmp = *p++) != 0)
258 goto found_middle;
259 result += 32;
260 size -= 32;
261 }
262 if (!size)
263 return result;
264 tmp = *p;
265
266found_first:
267 tmp &= ~0UL >> (32 - size);
268 if (tmp == 0UL)
269 return result + size;
270found_middle:
271 return result + __ffs(tmp);
272}
273
274#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
275
276/*
277 * Every architecture must define this function. It's the fastest
278 * way of searching a 140-bit bitmap where the first 100 bits are
279 * unlikely to be set. It's guaranteed that at least one of the 140
280 * bits is cleared.
281 */
282static inline int sched_find_first_bit(unsigned long *b)
283{
284 if (unlikely(b[0]))
285 return __ffs(b[0]);
286 if (unlikely(b[1]))
287 return __ffs(b[1]) + 32;
288 if (unlikely(b[2]))
289 return __ffs(b[2]) + 64;
290 if (b[3])
291 return __ffs(b[3]) + 96;
292 return __ffs(b[4]) + 128;
293}
294
295/*
296 * hweightN: returns the hamming weight (i.e. the number
297 * of bits set) of a N-bit word
298 */
299
300#define hweight32(x) generic_hweight32(x)
301#define hweight16(x) generic_hweight16(x)
302#define hweight8(x) generic_hweight8(x)
303
304static __inline__ int ext2_set_bit(int nr, volatile void * addr)
305{
306 int mask, retval;
307 unsigned long flags;
308 volatile unsigned char *ADDR = (unsigned char *) addr;
309
310 ADDR += nr >> 3;
311 mask = 1 << (nr & 0x07);
312 local_irq_save(flags);
313 retval = (mask & *ADDR) != 0;
314 *ADDR |= mask;
315 local_irq_restore(flags);
316 return retval;
317}
318#define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr)
319
320static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
321{
322 int mask, retval;
323 unsigned long flags;
324 volatile unsigned char *ADDR = (unsigned char *) addr;
325
326 ADDR += nr >> 3;
327 mask = 1 << (nr & 0x07);
328 local_irq_save(flags);
329 retval = (mask & *ADDR) != 0;
330 *ADDR &= ~mask;
331 local_irq_restore(flags);
332 return retval;
333}
334#define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr)
335
336static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
337{
338 int mask;
339 const volatile unsigned char *ADDR = (const unsigned char *) addr;
340
341 ADDR += nr >> 3;
342 mask = 1 << (nr & 0x07);
343 return ((mask & *ADDR) != 0);
344}
345
346#define ext2_find_first_zero_bit(addr, size) \
347 ext2_find_next_zero_bit((addr), (size), 0)
348
349static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
350{
351 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
352 unsigned long result = offset & ~31UL;
353 unsigned long tmp;
354
355 if (offset >= size)
356 return size;
357 size -= result;
358 offset &= 31UL;
359 if(offset) {
360 /* We hold the little endian value in tmp, but then the
361 * shift is illegal. So we could keep a big endian value
362 * in tmp, like this:
363 *
364 * tmp = __swab32(*(p++));
365 * tmp |= ~0UL >> (32-offset);
366 *
367 * but this would decrease performance, so we change the
368 * shift:
369 */
370 tmp = *(p++);
371 tmp |= __swab32(~0UL >> (32-offset));
372 if(size < 32)
373 goto found_first;
374 if(~tmp)
375 goto found_middle;
376 size -= 32;
377 result += 32;
378 }
379 while(size & ~31UL) {
380 if(~(tmp = *(p++)))
381 goto found_middle;
382 result += 32;
383 size -= 32;
384 }
385 if(!size)
386 return result;
387 tmp = *p;
388
389found_first:
390 /* tmp is little endian, so we would have to swab the shift,
391 * see above. But then we have to swab tmp below for ffz, so
392 * we might as well do this here.
393 */
394 return result + ffz(__swab32(tmp) | (~0UL << size));
395found_middle:
396 return result + ffz(__swab32(tmp));
397}
398
399/* Bitmap functions for the minix filesystem. */
400#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
401#define minix_set_bit(nr,addr) set_bit(nr,addr)
402#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
403#define minix_test_bit(nr,addr) test_bit(nr,addr)
404#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
405 201
406#endif /* __KERNEL__ */ 202#endif /* __KERNEL__ */
407 203
408#define fls(x) generic_fls(x) 204#include <asm-generic/bitops/fls.h>
409#define fls64(x) generic_fls64(x) 205#include <asm-generic/bitops/fls64.h>
410 206
411#endif /* _H8300_BITOPS_H */ 207#endif /* _H8300_BITOPS_H */
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index cd35b1cc6cde..6472c9f88227 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -71,8 +71,7 @@ extern unsigned long memory_end;
71#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) 71#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
72#define pfn_valid(page) (page < max_mapnr) 72#define pfn_valid(page) (page < max_mapnr)
73 73
74#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) 74#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
75#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
76 75
77#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ 76#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
78 ((void *)(kaddr) < (void *)memory_end)) 77 ((void *)(kaddr) < (void *)memory_end))
@@ -81,6 +80,7 @@ extern unsigned long memory_end;
81 80
82#endif /* __KERNEL__ */ 81#endif /* __KERNEL__ */
83 82
83#include <asm-generic/memory_model.h>
84#include <asm-generic/page.h> 84#include <asm-generic/page.h>
85 85
86#endif /* _H8300_PAGE_H */ 86#endif /* _H8300_PAGE_H */
diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h
index bf91e0d4dde7..da2402b86540 100644
--- a/include/asm-h8300/types.h
+++ b/include/asm-h8300/types.h
@@ -58,6 +58,9 @@ typedef u32 dma_addr_t;
58#define HAVE_SECTOR_T 58#define HAVE_SECTOR_T
59typedef u64 sector_t; 59typedef u64 sector_t;
60 60
61#define HAVE_BLKCNT_T
62typedef u64 blkcnt_t;
63
61#endif /* __KERNEL__ */ 64#endif /* __KERNEL__ */
62 65
63#endif /* __ASSEMBLY__ */ 66#endif /* __ASSEMBLY__ */
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 7d20b95edb3b..08deaeee6be9 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -362,28 +362,9 @@ static inline unsigned long ffz(unsigned long word)
362 return word; 362 return word;
363} 363}
364 364
365#define fls64(x) generic_fls64(x)
366
367#ifdef __KERNEL__ 365#ifdef __KERNEL__
368 366
369/* 367#include <asm-generic/bitops/sched.h>
370 * Every architecture must define this function. It's the fastest
371 * way of searching a 140-bit bitmap where the first 100 bits are
372 * unlikely to be set. It's guaranteed that at least one of the 140
373 * bits is cleared.
374 */
375static inline int sched_find_first_bit(const unsigned long *b)
376{
377 if (unlikely(b[0]))
378 return __ffs(b[0]);
379 if (unlikely(b[1]))
380 return __ffs(b[1]) + 32;
381 if (unlikely(b[2]))
382 return __ffs(b[2]) + 64;
383 if (b[3])
384 return __ffs(b[3]) + 96;
385 return __ffs(b[4]) + 128;
386}
387 368
388/** 369/**
389 * ffs - find first bit set 370 * ffs - find first bit set
@@ -421,42 +402,22 @@ static inline int fls(int x)
421 return r+1; 402 return r+1;
422} 403}
423 404
424/** 405#include <asm-generic/bitops/hweight.h>
425 * hweightN - returns the hamming weight of a N-bit word
426 * @x: the word to weigh
427 *
428 * The Hamming Weight of a number is the total number of bits set in it.
429 */
430
431#define hweight32(x) generic_hweight32(x)
432#define hweight16(x) generic_hweight16(x)
433#define hweight8(x) generic_hweight8(x)
434 406
435#endif /* __KERNEL__ */ 407#endif /* __KERNEL__ */
436 408
409#include <asm-generic/bitops/fls64.h>
410
437#ifdef __KERNEL__ 411#ifdef __KERNEL__
438 412
439#define ext2_set_bit(nr,addr) \ 413#include <asm-generic/bitops/ext2-non-atomic.h>
440 __test_and_set_bit((nr),(unsigned long*)addr) 414
441#define ext2_set_bit_atomic(lock,nr,addr) \ 415#define ext2_set_bit_atomic(lock,nr,addr) \
442 test_and_set_bit((nr),(unsigned long*)addr) 416 test_and_set_bit((nr),(unsigned long*)addr)
443#define ext2_clear_bit(nr, addr) \
444 __test_and_clear_bit((nr),(unsigned long*)addr)
445#define ext2_clear_bit_atomic(lock,nr, addr) \ 417#define ext2_clear_bit_atomic(lock,nr, addr) \
446 test_and_clear_bit((nr),(unsigned long*)addr) 418 test_and_clear_bit((nr),(unsigned long*)addr)
447#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) 419
448#define ext2_find_first_zero_bit(addr, size) \ 420#include <asm-generic/bitops/minix.h>
449 find_first_zero_bit((unsigned long*)addr, size)
450#define ext2_find_next_zero_bit(addr, size, off) \
451 find_next_zero_bit((unsigned long*)addr, size, off)
452
453/* Bitmap functions for the minix filesystem. */
454#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
455#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
456#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
457#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
458#define minix_find_first_zero_bit(addr,size) \
459 find_first_zero_bit((void*)addr,size)
460 421
461#endif /* __KERNEL__ */ 422#endif /* __KERNEL__ */
462 423
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 44b9db806474..7b8ceefd010f 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -104,5 +104,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
104 return ret; 104 return ret;
105} 105}
106 106
107static inline int
108futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
109{
110 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
111 return -EFAULT;
112
113 __asm__ __volatile__(
114 "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
115
116 "2: .section .fixup, \"ax\" \n"
117 "3: mov %2, %0 \n"
118 " jmp 2b \n"
119 " .previous \n"
120
121 " .section __ex_table, \"a\" \n"
122 " .align 8 \n"
123 " .long 1b,3b \n"
124 " .previous \n"
125
126 : "=a" (oldval), "=m" (*uaddr)
127 : "i" (-EFAULT), "r" (newval), "0" (oldval)
128 : "memory"
129 );
130
131 return oldval;
132}
133
107#endif 134#endif
108#endif 135#endif
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 316138e89910..96d0828ce096 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -17,11 +17,9 @@ struct die_args {
17 int signr; 17 int signr;
18}; 18};
19 19
20/* Note - you should never unregister because that can race with NMIs. 20extern int register_die_notifier(struct notifier_block *);
21 If you really want to do it first unregister - then synchronize_sched - then free. 21extern int unregister_die_notifier(struct notifier_block *);
22 */ 22extern struct atomic_notifier_head i386die_chain;
23int register_die_notifier(struct notifier_block *nb);
24extern struct notifier_block *i386die_chain;
25 23
26 24
27/* Grossly misnamed. */ 25/* Grossly misnamed. */
@@ -51,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
51 .trapnr = trap, 49 .trapnr = trap,
52 .signr = sig 50 .signr = sig
53 }; 51 };
54 return notifier_call_chain(&i386die_chain, val, &args); 52 return atomic_notifier_call_chain(&i386die_chain, val, &args);
55} 53}
56 54
57#endif 55#endif
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index a0d2d74a7dda..57d157c5cf89 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -34,6 +34,7 @@ struct pt_regs;
34 34
35typedef u8 kprobe_opcode_t; 35typedef u8 kprobe_opcode_t;
36#define BREAKPOINT_INSTRUCTION 0xcc 36#define BREAKPOINT_INSTRUCTION 0xcc
37#define RELATIVEJUMP_INSTRUCTION 0xe9
37#define MAX_INSN_SIZE 16 38#define MAX_INSN_SIZE 16
38#define MAX_STACK_SIZE 64 39#define MAX_STACK_SIZE 64
39#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ 40#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
@@ -51,6 +52,11 @@ void kretprobe_trampoline(void);
51struct arch_specific_insn { 52struct arch_specific_insn {
52 /* copy of the original instruction */ 53 /* copy of the original instruction */
53 kprobe_opcode_t *insn; 54 kprobe_opcode_t *insn;
55 /*
56 * If this flag is not 0, this kprobe can be boost when its
57 * post_handler and break_handler is not set.
58 */
59 int boostable;
54}; 60};
55 61
56struct prev_kprobe { 62struct prev_kprobe {
diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h
index b749aa44a86f..31eb5de6f3dc 100644
--- a/include/asm-i386/mach-default/mach_time.h
+++ b/include/asm-i386/mach-default/mach_time.h
@@ -82,21 +82,8 @@ static inline int mach_set_rtc_mmss(unsigned long nowtime)
82static inline unsigned long mach_get_cmos_time(void) 82static inline unsigned long mach_get_cmos_time(void)
83{ 83{
84 unsigned int year, mon, day, hour, min, sec; 84 unsigned int year, mon, day, hour, min, sec;
85 int i;
86 85
87 /* The Linux interpretation of the CMOS clock register contents: 86 do {
88 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
89 * RTC registers show the second which has precisely just started.
90 * Let's hope other operating systems interpret the RTC the same way.
91 */
92 /* read RTC exactly on falling edge of update flag */
93 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
94 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
95 break;
96 for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
97 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
98 break;
99 do { /* Isn't this overkill ? UIP above should guarantee consistency */
100 sec = CMOS_READ(RTC_SECONDS); 87 sec = CMOS_READ(RTC_SECONDS);
101 min = CMOS_READ(RTC_MINUTES); 88 min = CMOS_READ(RTC_MINUTES);
102 hour = CMOS_READ(RTC_HOURS); 89 hour = CMOS_READ(RTC_HOURS);
@@ -104,16 +91,18 @@ static inline unsigned long mach_get_cmos_time(void)
104 mon = CMOS_READ(RTC_MONTH); 91 mon = CMOS_READ(RTC_MONTH);
105 year = CMOS_READ(RTC_YEAR); 92 year = CMOS_READ(RTC_YEAR);
106 } while (sec != CMOS_READ(RTC_SECONDS)); 93 } while (sec != CMOS_READ(RTC_SECONDS));
107 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 94
108 { 95 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
109 BCD_TO_BIN(sec); 96 BCD_TO_BIN(sec);
110 BCD_TO_BIN(min); 97 BCD_TO_BIN(min);
111 BCD_TO_BIN(hour); 98 BCD_TO_BIN(hour);
112 BCD_TO_BIN(day); 99 BCD_TO_BIN(day);
113 BCD_TO_BIN(mon); 100 BCD_TO_BIN(mon);
114 BCD_TO_BIN(year); 101 BCD_TO_BIN(year);
115 } 102 }
116 if ((year += 1900) < 1970) 103
104 year += 1900;
105 if (year < 1970)
117 year += 100; 106 year += 100;
118 107
119 return mktime(year, mon, day, hour, min, sec); 108 return mktime(year, mon, day, hour, min, sec);
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 74f595d80579..e33e9f9e4c66 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -70,8 +70,6 @@ static inline int pfn_to_nid(unsigned long pfn)
70#endif 70#endif
71} 71}
72 72
73#define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn)
74
75/* 73/*
76 * Following are macros that each numa implmentation must define. 74 * Following are macros that each numa implmentation must define.
77 */ 75 */
@@ -86,21 +84,6 @@ static inline int pfn_to_nid(unsigned long pfn)
86/* XXX: FIXME -- wli */ 84/* XXX: FIXME -- wli */
87#define kern_addr_valid(kaddr) (0) 85#define kern_addr_valid(kaddr) (0)
88 86
89#define pfn_to_page(pfn) \
90({ \
91 unsigned long __pfn = pfn; \
92 int __node = pfn_to_nid(__pfn); \
93 &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \
94})
95
96#define page_to_pfn(pg) \
97({ \
98 struct page *__page = pg; \
99 struct zone *__zone = page_zone(__page); \
100 (unsigned long)(__page - __zone->zone_mem_map) \
101 + __zone->zone_start_pfn; \
102})
103
104#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ 87#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */
105#define pfn_valid(pfn) ((pfn) < num_physpages) 88#define pfn_valid(pfn) ((pfn) < num_physpages)
106#else 89#else
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 997ca5d17876..30f52a2263ba 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -126,8 +126,6 @@ extern int page_is_ram(unsigned long pagenr);
126#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 126#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
127#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 127#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
128#ifdef CONFIG_FLATMEM 128#ifdef CONFIG_FLATMEM
129#define pfn_to_page(pfn) (mem_map + (pfn))
130#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
131#define pfn_valid(pfn) ((pfn) < max_mapnr) 129#define pfn_valid(pfn) ((pfn) < max_mapnr)
132#endif /* CONFIG_FLATMEM */ 130#endif /* CONFIG_FLATMEM */
133#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 131#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
@@ -141,6 +139,7 @@ extern int page_is_ram(unsigned long pagenr);
141 139
142#endif /* __KERNEL__ */ 140#endif /* __KERNEL__ */
143 141
142#include <asm-generic/memory_model.h>
144#include <asm-generic/page.h> 143#include <asm-generic/page.h>
145 144
146#endif /* _I386_PAGE_H */ 145#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index feca5d961e2b..805f0dcda468 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/threads.h> 21#include <linux/threads.h>
22#include <asm/percpu.h> 22#include <asm/percpu.h>
23#include <linux/cpumask.h>
23 24
24/* flag for disabling the tsc */ 25/* flag for disabling the tsc */
25extern int tsc_disable; 26extern int tsc_disable;
@@ -67,6 +68,9 @@ struct cpuinfo_x86 {
67 char pad0; 68 char pad0;
68 int x86_power; 69 int x86_power;
69 unsigned long loops_per_jiffy; 70 unsigned long loops_per_jiffy;
71#ifdef CONFIG_SMP
72 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
73#endif
70 unsigned char x86_max_cores; /* cpuid returned max cores value */ 74 unsigned char x86_max_cores; /* cpuid returned max cores value */
71 unsigned char booted_cores; /* number of cores as seen by OS */ 75 unsigned char booted_cores; /* number of cores as seen by OS */
72 unsigned char apicid; 76 unsigned char apicid;
@@ -103,6 +107,7 @@ extern struct cpuinfo_x86 cpu_data[];
103 107
104extern int phys_proc_id[NR_CPUS]; 108extern int phys_proc_id[NR_CPUS];
105extern int cpu_core_id[NR_CPUS]; 109extern int cpu_core_id[NR_CPUS];
110extern int cpu_llc_id[NR_CPUS];
106extern char ignore_fpu_irq; 111extern char ignore_fpu_irq;
107 112
108extern void identify_cpu(struct cpuinfo_x86 *); 113extern void identify_cpu(struct cpuinfo_x86 *);
@@ -616,8 +621,6 @@ struct extended_sigtable {
616 unsigned int reserved[3]; 621 unsigned int reserved[3];
617 struct extended_signature sigs[0]; 622 struct extended_signature sigs[0];
618}; 623};
619/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
620#define MICROCODE_IOCFREE _IO('6',0)
621 624
622/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ 625/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
623static inline void rep_nop(void) 626static inline void rep_nop(void)
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 826a8ca50ac8..ee941457b55d 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,9 +6,7 @@
6#ifndef _i386_SETUP_H 6#ifndef _i386_SETUP_H
7#define _i386_SETUP_H 7#define _i386_SETUP_H
8 8
9#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) 9#include <linux/pfn.h>
10#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
11#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
12 10
13/* 11/*
14 * Reserved space for vmalloc and iomap - defined in asm/page.h 12 * Reserved space for vmalloc and iomap - defined in asm/page.h
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h
index b464f8020ec4..67eae78323ba 100644
--- a/include/asm-i386/stat.h
+++ b/include/asm-i386/stat.h
@@ -58,8 +58,7 @@ struct stat64 {
58 long long st_size; 58 long long st_size;
59 unsigned long st_blksize; 59 unsigned long st_blksize;
60 60
61 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 61 unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
62 unsigned long __pad4; /* future possible st_blocks high bits */
63 62
64 unsigned long st_atime; 63 unsigned long st_atime;
65 unsigned long st_atime_nsec; 64 unsigned long st_atime_nsec;
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index aa958c6ee83e..b94e5eeef917 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -112,4 +112,6 @@ extern unsigned long node_remap_size[];
112 112
113#endif /* CONFIG_NUMA */ 113#endif /* CONFIG_NUMA */
114 114
115extern cpumask_t cpu_coregroup_map(int cpu);
116
115#endif /* _ASM_I386_TOPOLOGY_H */ 117#endif /* _ASM_I386_TOPOLOGY_H */
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
index ced00fe8fe61..e50a08bd7ced 100644
--- a/include/asm-i386/types.h
+++ b/include/asm-i386/types.h
@@ -63,6 +63,11 @@ typedef u64 sector_t;
63#define HAVE_SECTOR_T 63#define HAVE_SECTOR_T
64#endif 64#endif
65 65
66#ifdef CONFIG_LSF
67typedef u64 blkcnt_t;
68#define HAVE_BLKCNT_T
69#endif
70
66#endif /* __ASSEMBLY__ */ 71#endif /* __ASSEMBLY__ */
67 72
68#endif /* __KERNEL__ */ 73#endif /* __KERNEL__ */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index d8afd0e3b81a..014e3562895b 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -316,8 +316,10 @@
316#define __NR_pselect6 308 316#define __NR_pselect6 308
317#define __NR_ppoll 309 317#define __NR_ppoll 309
318#define __NR_unshare 310 318#define __NR_unshare 310
319#define __NR_set_robust_list 311
320#define __NR_get_robust_list 312
319 321
320#define NR_syscalls 311 322#define NR_syscalls 313
321 323
322/* 324/*
323 * user-visible error numbers are in the range -1 - -128: see 325 * user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 36d0fb95ea89..90921e162793 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -5,8 +5,8 @@
5 * Copyright (C) 1998-2003 Hewlett-Packard Co 5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com> 6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * 7 *
8 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1) 8 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
9 * scheduler patch 9 * O(1) scheduler patch
10 */ 10 */
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
@@ -25,9 +25,9 @@
25 * restricted to acting on a single-word quantity. 25 * restricted to acting on a single-word quantity.
26 * 26 *
27 * The address must be (at least) "long" aligned. 27 * The address must be (at least) "long" aligned.
28 * Note that there are driver (e.g., eepro100) which use these operations to operate on 28 * Note that there are driver (e.g., eepro100) which use these operations to
29 * hw-defined data-structures, so we can't easily change these operations to force a 29 * operate on hw-defined data-structures, so we can't easily change these
30 * bigger alignment. 30 * operations to force a bigger alignment.
31 * 31 *
32 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 32 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
33 */ 33 */
@@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr)
284 * ffz - find the first zero bit in a long word 284 * ffz - find the first zero bit in a long word
285 * @x: The long word to find the bit in 285 * @x: The long word to find the bit in
286 * 286 *
287 * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if 287 * Returns the bit-number (0..63) of the first (least significant) zero bit.
288 * no zero exists, so code should check against ~0UL first... 288 * Undefined if no zero exists, so code should check against ~0UL first...
289 */ 289 */
290static inline unsigned long 290static inline unsigned long
291ffz (unsigned long x) 291ffz (unsigned long x)
@@ -345,13 +345,14 @@ fls (int t)
345 x |= x >> 16; 345 x |= x >> 16;
346 return ia64_popcnt(x); 346 return ia64_popcnt(x);
347} 347}
348#define fls64(x) generic_fls64(x) 348
349#include <asm-generic/bitops/fls64.h>
349 350
350/* 351/*
351 * ffs: find first bit set. This is defined the same way as the libc and compiler builtin 352 * ffs: find first bit set. This is defined the same way as the libc and
352 * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on 353 * compiler builtin ffs routines, therefore differs in spirit from the above
353 * "int" values only and the result value is the bit number + 1. ffs(0) is defined to 354 * ffz (man ffs): it operates on "int" values only and the result value is the
354 * return zero. 355 * bit number + 1. ffs(0) is defined to return zero.
355 */ 356 */
356#define ffs(x) __builtin_ffs(x) 357#define ffs(x) __builtin_ffs(x)
357 358
@@ -373,51 +374,17 @@ hweight64 (unsigned long x)
373 374
374#endif /* __KERNEL__ */ 375#endif /* __KERNEL__ */
375 376
376extern int __find_next_zero_bit (const void *addr, unsigned long size, 377#include <asm-generic/bitops/find.h>
377 unsigned long offset);
378extern int __find_next_bit(const void *addr, unsigned long size,
379 unsigned long offset);
380
381#define find_next_zero_bit(addr, size, offset) \
382 __find_next_zero_bit((addr), (size), (offset))
383#define find_next_bit(addr, size, offset) \
384 __find_next_bit((addr), (size), (offset))
385
386/*
387 * The optimizer actually does good code for this case..
388 */
389#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
390
391#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
392 378
393#ifdef __KERNEL__ 379#ifdef __KERNEL__
394 380
395#define __clear_bit(nr, addr) clear_bit(nr, addr) 381#include <asm-generic/bitops/ext2-non-atomic.h>
396 382
397#define ext2_set_bit test_and_set_bit
398#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) 383#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
399#define ext2_clear_bit test_and_clear_bit
400#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) 384#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
401#define ext2_test_bit test_bit
402#define ext2_find_first_zero_bit find_first_zero_bit
403#define ext2_find_next_zero_bit find_next_zero_bit
404
405/* Bitmap functions for the minix filesystem. */
406#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
407#define minix_set_bit(nr,addr) set_bit(nr,addr)
408#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
409#define minix_test_bit(nr,addr) test_bit(nr,addr)
410#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
411 385
412static inline int 386#include <asm-generic/bitops/minix.h>
413sched_find_first_bit (unsigned long *b) 387#include <asm-generic/bitops/sched.h>
414{
415 if (unlikely(b[0]))
416 return __ffs(b[0]);
417 if (unlikely(b[1]))
418 return 64 + __ffs(b[1]);
419 return __ffs(b[2]) + 128;
420}
421 388
422#endif /* __KERNEL__ */ 389#endif /* __KERNEL__ */
423 390
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index c0b19106665c..40d01d80610d 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -189,6 +189,12 @@ compat_ptr (compat_uptr_t uptr)
189 return (void __user *) (unsigned long) uptr; 189 return (void __user *) (unsigned long) uptr;
190} 190}
191 191
192static inline compat_uptr_t
193ptr_to_compat(void __user *uptr)
194{
195 return (u32)(unsigned long)uptr;
196}
197
192static __inline__ void __user * 198static __inline__ void __user *
193compat_alloc_user_space (long len) 199compat_alloc_user_space (long len)
194{ 200{
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h
new file mode 100644
index 000000000000..f3efaa229525
--- /dev/null
+++ b/include/asm-ia64/dmi.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_DMI_H
2#define _ASM_DMI_H 1
3
4#include <asm/io.h>
5
6#endif
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index b64fdb985494..c2e3742108bb 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -88,8 +88,8 @@ phys_to_virt (unsigned long address)
88} 88}
89 89
90#define ARCH_HAS_VALID_PHYS_ADDR_RANGE 90#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
91extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ 91extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
92extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); 92extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
93 93
94/* 94/*
95 * The following two macros are deprecated and scheduled for removal. 95 * The following two macros are deprecated and scheduled for removal.
@@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr)
416# define outl_p outl 416# define outl_p outl
417#endif 417#endif
418 418
419/* 419extern void __iomem * ioremap(unsigned long offset, unsigned long size);
420 * An "address" in IO memory space is not clearly either an integer or a pointer. We will 420extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
421 * accept both, thus the casts.
422 *
423 * On ia-64, we access the physical I/O memory space through the uncached kernel region.
424 */
425static inline void __iomem *
426ioremap (unsigned long offset, unsigned long size)
427{
428 return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
429}
430 421
431static inline void 422static inline void
432iounmap (volatile void __iomem *addr) 423iounmap (volatile void __iomem *addr)
433{ 424{
434} 425}
435 426
436#define ioremap_nocache(o,s) ioremap(o,s) 427/* Use normal IO mappings for DMI */
428#define dmi_ioremap ioremap
429#define dmi_iounmap(x,l) iounmap(x)
430#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
437 431
438# ifdef __KERNEL__ 432# ifdef __KERNEL__
439 433
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 8b01a083dde6..218c458ab60c 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,7 +40,7 @@ struct die_args {
40 40
41extern int register_die_notifier(struct notifier_block *); 41extern int register_die_notifier(struct notifier_block *);
42extern int unregister_die_notifier(struct notifier_block *); 42extern int unregister_die_notifier(struct notifier_block *);
43extern struct notifier_block *ia64die_chain; 43extern struct atomic_notifier_head ia64die_chain;
44 44
45enum die_val { 45enum die_val {
46 DIE_BREAK = 1, 46 DIE_BREAK = 1,
@@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
81 .signr = sig 81 .signr = sig
82 }; 82 };
83 83
84 return notifier_call_chain(&ia64die_chain, val, &args); 84 return atomic_notifier_call_chain(&ia64die_chain, val, &args);
85} 85}
86 86
87#endif 87#endif
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 6e9aa23250c4..2087825eefa4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -106,17 +106,25 @@ extern int ia64_pfn_valid (unsigned long pfn);
106# define ia64_pfn_valid(pfn) 1 106# define ia64_pfn_valid(pfn) 1
107#endif 107#endif
108 108
109#ifdef CONFIG_VIRTUAL_MEM_MAP
110extern struct page *vmem_map;
111#ifdef CONFIG_DISCONTIGMEM
112# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
113# define pfn_to_page(pfn) (vmem_map + (pfn))
114#endif
115#endif
116
117#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM)
118/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */
119#include <asm-generic/memory_model.h>
120#endif
121
109#ifdef CONFIG_FLATMEM 122#ifdef CONFIG_FLATMEM
110# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) 123# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
111# define page_to_pfn(page) ((unsigned long) (page - mem_map))
112# define pfn_to_page(pfn) (mem_map + (pfn))
113#elif defined(CONFIG_DISCONTIGMEM) 124#elif defined(CONFIG_DISCONTIGMEM)
114extern struct page *vmem_map;
115extern unsigned long min_low_pfn; 125extern unsigned long min_low_pfn;
116extern unsigned long max_low_pfn; 126extern unsigned long max_low_pfn;
117# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) 127# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
118# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
119# define pfn_to_page(pfn) (vmem_map + (pfn))
120#endif 128#endif
121 129
122#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 130#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 244449df7411..bf4cc867a698 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -159,7 +159,7 @@
159static inline u32 159static inline u32
160sn_sal_rev(void) 160sn_sal_rev(void)
161{ 161{
162 struct ia64_sal_systab *systab = efi.sal_systab; 162 struct ia64_sal_systab *systab = __va(efi.sal_systab);
163 163
164 return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); 164 return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
165} 165}
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index abea2fdd8689..902a366101a5 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
63} 63}
64 64
65/** 65/**
66 * __set_bit - Set a bit in memory
67 * @nr: the bit to set
68 * @addr: the address to start counting from
69 *
70 * Unlike set_bit(), this function is non-atomic and may be reordered.
71 * If it's called on the same region of memory simultaneously, the effect
72 * may be that only one operation succeeds.
73 */
74static __inline__ void __set_bit(int nr, volatile void * addr)
75{
76 __u32 mask;
77 volatile __u32 *a = addr;
78
79 a += (nr >> 5);
80 mask = (1 << (nr & 0x1F));
81 *a |= mask;
82}
83
84/**
85 * clear_bit - Clears a bit in memory 66 * clear_bit - Clears a bit in memory
86 * @nr: Bit to clear 67 * @nr: Bit to clear
87 * @addr: Address to start counting from 68 * @addr: Address to start counting from
@@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
118 local_irq_restore(flags); 99 local_irq_restore(flags);
119} 100}
120 101
121static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
122{
123 unsigned long mask;
124 volatile unsigned long *a = addr;
125
126 a += (nr >> 5);
127 mask = (1 << (nr & 0x1F));
128 *a &= ~mask;
129}
130
131#define smp_mb__before_clear_bit() barrier() 102#define smp_mb__before_clear_bit() barrier()
132#define smp_mb__after_clear_bit() barrier() 103#define smp_mb__after_clear_bit() barrier()
133 104
134/** 105/**
135 * __change_bit - Toggle a bit in memory
136 * @nr: the bit to set
137 * @addr: the address to start counting from
138 *
139 * Unlike change_bit(), this function is non-atomic and may be reordered.
140 * If it's called on the same region of memory simultaneously, the effect
141 * may be that only one operation succeeds.
142 */
143static __inline__ void __change_bit(int nr, volatile void * addr)
144{
145 __u32 mask;
146 volatile __u32 *a = addr;
147
148 a += (nr >> 5);
149 mask = (1 << (nr & 0x1F));
150 *a ^= mask;
151}
152
153/**
154 * change_bit - Toggle a bit in memory 106 * change_bit - Toggle a bit in memory
155 * @nr: Bit to clear 107 * @nr: Bit to clear
156 * @addr: Address to start counting from 108 * @addr: Address to start counting from
@@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
221} 173}
222 174
223/** 175/**
224 * __test_and_set_bit - Set a bit and return its old value
225 * @nr: Bit to set
226 * @addr: Address to count from
227 *
228 * This operation is non-atomic and can be reordered.
229 * If two examples of this operation race, one can appear to succeed
230 * but actually fail. You must protect multiple accesses with a lock.
231 */
232static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
233{
234 __u32 mask, oldbit;
235 volatile __u32 *a = addr;
236
237 a += (nr >> 5);
238 mask = (1 << (nr & 0x1F));
239 oldbit = (*a & mask);
240 *a |= mask;
241
242 return (oldbit != 0);
243}
244
245/**
246 * test_and_clear_bit - Clear a bit and return its old value 176 * test_and_clear_bit - Clear a bit and return its old value
247 * @nr: Bit to set 177 * @nr: Bit to set
248 * @addr: Address to count from 178 * @addr: Address to count from
@@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
280} 210}
281 211
282/** 212/**
283 * __test_and_clear_bit - Clear a bit and return its old value
284 * @nr: Bit to set
285 * @addr: Address to count from
286 *
287 * This operation is non-atomic and can be reordered.
288 * If two examples of this operation race, one can appear to succeed
289 * but actually fail. You must protect multiple accesses with a lock.
290 */
291static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
292{
293 __u32 mask, oldbit;
294 volatile __u32 *a = addr;
295
296 a += (nr >> 5);
297 mask = (1 << (nr & 0x1F));
298 oldbit = (*a & mask);
299 *a &= ~mask;
300
301 return (oldbit != 0);
302}
303
304/* WARNING: non atomic and it can be reordered! */
305static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
306{
307 __u32 mask, oldbit;
308 volatile __u32 *a = addr;
309
310 a += (nr >> 5);
311 mask = (1 << (nr & 0x1F));
312 oldbit = (*a & mask);
313 *a ^= mask;
314
315 return (oldbit != 0);
316}
317
318/**
319 * test_and_change_bit - Change a bit and return its old value 213 * test_and_change_bit - Change a bit and return its old value
320 * @nr: Bit to set 214 * @nr: Bit to set
321 * @addr: Address to count from 215 * @addr: Address to count from
@@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
350 return (oldbit != 0); 244 return (oldbit != 0);
351} 245}
352 246
353/** 247#include <asm-generic/bitops/non-atomic.h>
354 * test_bit - Determine whether a bit is set 248#include <asm-generic/bitops/ffz.h>
355 * @nr: bit number to test 249#include <asm-generic/bitops/__ffs.h>
356 * @addr: Address to start counting from 250#include <asm-generic/bitops/fls.h>
357 */ 251#include <asm-generic/bitops/fls64.h>
358static __inline__ int test_bit(int nr, const volatile void * addr)
359{
360 __u32 mask;
361 const volatile __u32 *a = addr;
362
363 a += (nr >> 5);
364 mask = (1 << (nr & 0x1F));
365
366 return ((*a & mask) != 0);
367}
368
369/**
370 * ffz - find first zero in word.
371 * @word: The word to search
372 *
373 * Undefined if no zero exists, so code should check against ~0UL first.
374 */
375static __inline__ unsigned long ffz(unsigned long word)
376{
377 int k;
378
379 word = ~word;
380 k = 0;
381 if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
382 if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
383 if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
384 if (!(word & 0x00000003)) { k += 2; word >>= 2; }
385 if (!(word & 0x00000001)) { k += 1; }
386
387 return k;
388}
389
390/**
391 * find_first_zero_bit - find the first zero bit in a memory region
392 * @addr: The address to start the search at
393 * @size: The maximum size to search
394 *
395 * Returns the bit-number of the first zero bit, not the number of the byte
396 * containing a bit.
397 */
398
399#define find_first_zero_bit(addr, size) \
400 find_next_zero_bit((addr), (size), 0)
401
402/**
403 * find_next_zero_bit - find the first zero bit in a memory region
404 * @addr: The address to base the search on
405 * @offset: The bitnumber to start searching at
406 * @size: The maximum size to search
407 */
408static __inline__ int find_next_zero_bit(const unsigned long *addr,
409 int size, int offset)
410{
411 const unsigned long *p = addr + (offset >> 5);
412 unsigned long result = offset & ~31UL;
413 unsigned long tmp;
414
415 if (offset >= size)
416 return size;
417 size -= result;
418 offset &= 31UL;
419 if (offset) {
420 tmp = *(p++);
421 tmp |= ~0UL >> (32-offset);
422 if (size < 32)
423 goto found_first;
424 if (~tmp)
425 goto found_middle;
426 size -= 32;
427 result += 32;
428 }
429 while (size & ~31UL) {
430 if (~(tmp = *(p++)))
431 goto found_middle;
432 result += 32;
433 size -= 32;
434 }
435 if (!size)
436 return result;
437 tmp = *p;
438
439found_first:
440 tmp |= ~0UL << size;
441found_middle:
442 return result + ffz(tmp);
443}
444
445/**
446 * __ffs - find first bit in word.
447 * @word: The word to search
448 *
449 * Undefined if no bit exists, so code should check against 0 first.
450 */
451static __inline__ unsigned long __ffs(unsigned long word)
452{
453 int k = 0;
454
455 if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
456 if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
457 if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
458 if (!(word & 0x00000003)) { k += 2; word >>= 2; }
459 if (!(word & 0x00000001)) { k += 1;}
460
461 return k;
462}
463
464/*
465 * fls: find last bit set.
466 */
467#define fls(x) generic_fls(x)
468#define fls64(x) generic_fls64(x)
469 252
470#ifdef __KERNEL__ 253#ifdef __KERNEL__
471 254
472/* 255#include <asm-generic/bitops/sched.h>
473 * Every architecture must define this function. It's the fastest 256#include <asm-generic/bitops/find.h>
474 * way of searching a 140-bit bitmap where the first 100 bits are 257#include <asm-generic/bitops/ffs.h>
475 * unlikely to be set. It's guaranteed that at least one of the 140 258#include <asm-generic/bitops/hweight.h>
476 * bits is cleared.
477 */
478static inline int sched_find_first_bit(unsigned long *b)
479{
480 if (unlikely(b[0]))
481 return __ffs(b[0]);
482 if (unlikely(b[1]))
483 return __ffs(b[1]) + 32;
484 if (unlikely(b[2]))
485 return __ffs(b[2]) + 64;
486 if (b[3])
487 return __ffs(b[3]) + 96;
488 return __ffs(b[4]) + 128;
489}
490
491/**
492 * find_next_bit - find the first set bit in a memory region
493 * @addr: The address to base the search on
494 * @offset: The bitnumber to start searching at
495 * @size: The maximum size to search
496 */
497static inline unsigned long find_next_bit(const unsigned long *addr,
498 unsigned long size, unsigned long offset)
499{
500 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
501 unsigned int result = offset & ~31UL;
502 unsigned int tmp;
503
504 if (offset >= size)
505 return size;
506 size -= result;
507 offset &= 31UL;
508 if (offset) {
509 tmp = *p++;
510 tmp &= ~0UL << offset;
511 if (size < 32)
512 goto found_first;
513 if (tmp)
514 goto found_middle;
515 size -= 32;
516 result += 32;
517 }
518 while (size >= 32) {
519 if ((tmp = *p++) != 0)
520 goto found_middle;
521 result += 32;
522 size -= 32;
523 }
524 if (!size)
525 return result;
526 tmp = *p;
527
528found_first:
529 tmp &= ~0UL >> (32 - size);
530 if (tmp == 0UL) /* Are any bits set? */
531 return result + size; /* Nope. */
532found_middle:
533 return result + __ffs(tmp);
534}
535
536/**
537 * find_first_bit - find the first set bit in a memory region
538 * @addr: The address to start the search at
539 * @size: The maximum size to search
540 *
541 * Returns the bit-number of the first set bit, not the number of the byte
542 * containing a bit.
543 */
544#define find_first_bit(addr, size) \
545 find_next_bit((addr), (size), 0)
546
547/**
548 * ffs - find first bit set
549 * @x: the word to search
550 *
551 * This is defined the same way as
552 * the libc and compiler builtin ffs routines, therefore
553 * differs in spirit from the above ffz (man ffs).
554 */
555#define ffs(x) generic_ffs(x)
556
557/**
558 * hweightN - returns the hamming weight of a N-bit word
559 * @x: the word to weigh
560 *
561 * The Hamming Weight of a number is the total number of bits set in it.
562 */
563
564#define hweight32(x) generic_hweight32(x)
565#define hweight16(x) generic_hweight16(x)
566#define hweight8(x) generic_hweight8(x)
567 259
568#endif /* __KERNEL__ */ 260#endif /* __KERNEL__ */
569 261
570#ifdef __KERNEL__ 262#ifdef __KERNEL__
571 263
572/* 264#include <asm-generic/bitops/ext2-non-atomic.h>
573 * ext2_XXXX function 265#include <asm-generic/bitops/ext2-atomic.h>
574 * orig: include/asm-sh/bitops.h 266#include <asm-generic/bitops/minix.h>
575 */
576
577#ifdef __LITTLE_ENDIAN__
578#define ext2_set_bit test_and_set_bit
579#define ext2_clear_bit __test_and_clear_bit
580#define ext2_test_bit test_bit
581#define ext2_find_first_zero_bit find_first_zero_bit
582#define ext2_find_next_zero_bit find_next_zero_bit
583#else
584static inline int ext2_set_bit(int nr, volatile void * addr)
585{
586 __u8 mask, oldbit;
587 volatile __u8 *a = addr;
588
589 a += (nr >> 3);
590 mask = (1 << (nr & 0x07));
591 oldbit = (*a & mask);
592 *a |= mask;
593
594 return (oldbit != 0);
595}
596
597static inline int ext2_clear_bit(int nr, volatile void * addr)
598{
599 __u8 mask, oldbit;
600 volatile __u8 *a = addr;
601
602 a += (nr >> 3);
603 mask = (1 << (nr & 0x07));
604 oldbit = (*a & mask);
605 *a &= ~mask;
606
607 return (oldbit != 0);
608}
609
610static inline int ext2_test_bit(int nr, const volatile void * addr)
611{
612 __u32 mask;
613 const volatile __u8 *a = addr;
614
615 a += (nr >> 3);
616 mask = (1 << (nr & 0x07));
617
618 return ((mask & *a) != 0);
619}
620
621#define ext2_find_first_zero_bit(addr, size) \
622 ext2_find_next_zero_bit((addr), (size), 0)
623
624static inline unsigned long ext2_find_next_zero_bit(void *addr,
625 unsigned long size, unsigned long offset)
626{
627 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
628 unsigned long result = offset & ~31UL;
629 unsigned long tmp;
630
631 if (offset >= size)
632 return size;
633 size -= result;
634 offset &= 31UL;
635 if(offset) {
636 /* We hold the little endian value in tmp, but then the
637 * shift is illegal. So we could keep a big endian value
638 * in tmp, like this:
639 *
640 * tmp = __swab32(*(p++));
641 * tmp |= ~0UL >> (32-offset);
642 *
643 * but this would decrease preformance, so we change the
644 * shift:
645 */
646 tmp = *(p++);
647 tmp |= __swab32(~0UL >> (32-offset));
648 if(size < 32)
649 goto found_first;
650 if(~tmp)
651 goto found_middle;
652 size -= 32;
653 result += 32;
654 }
655 while(size & ~31UL) {
656 if(~(tmp = *(p++)))
657 goto found_middle;
658 result += 32;
659 size -= 32;
660 }
661 if(!size)
662 return result;
663 tmp = *p;
664
665found_first:
666 /* tmp is little endian, so we would have to swab the shift,
667 * see above. But then we have to swab tmp below for ffz, so
668 * we might as well do this here.
669 */
670 return result + ffz(__swab32(tmp) | (~0UL << size));
671found_middle:
672 return result + ffz(__swab32(tmp));
673}
674#endif
675
676#define ext2_set_bit_atomic(lock, nr, addr) \
677 ({ \
678 int ret; \
679 spin_lock(lock); \
680 ret = ext2_set_bit((nr), (addr)); \
681 spin_unlock(lock); \
682 ret; \
683 })
684
685#define ext2_clear_bit_atomic(lock, nr, addr) \
686 ({ \
687 int ret; \
688 spin_lock(lock); \
689 ret = ext2_clear_bit((nr), (addr)); \
690 spin_unlock(lock); \
691 ret; \
692 })
693
694/* Bitmap functions for the minix filesystem. */
695#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
696#define minix_set_bit(nr,addr) __set_bit(nr,addr)
697#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
698#define minix_test_bit(nr,addr) test_bit(nr,addr)
699#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
700 267
701#endif /* __KERNEL__ */ 268#endif /* __KERNEL__ */
702 269
diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h
index adc7970a77ec..9f3b5accda88 100644
--- a/include/asm-m32r/mmzone.h
+++ b/include/asm-m32r/mmzone.h
@@ -21,20 +21,6 @@ extern struct pglist_data *node_data[];
21 __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \ 21 __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
22}) 22})
23 23
24#define pfn_to_page(pfn) \
25({ \
26 unsigned long __pfn = pfn; \
27 int __node = pfn_to_nid(__pfn); \
28 &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \
29})
30
31#define page_to_pfn(pg) \
32({ \
33 struct page *__page = pg; \
34 struct zone *__zone = page_zone(__page); \
35 (unsigned long)(__page - __zone->zone_mem_map) \
36 + __zone->zone_start_pfn; \
37})
38#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 24#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
39/* 25/*
40 * pfn_valid should be made as fast as possible, and the current definition 26 * pfn_valid should be made as fast as possible, and the current definition
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 4ab578876361..9ddbc087dbc5 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -76,9 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
76 76
77#ifndef CONFIG_DISCONTIGMEM 77#ifndef CONFIG_DISCONTIGMEM
78#define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT) 78#define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT)
79#define pfn_to_page(pfn) (mem_map + ((pfn) - PFN_BASE)) 79#define ARCH_PFN_OFFSET PFN_BASE
80#define page_to_pfn(page) \
81 ((unsigned long)((page) - mem_map) + PFN_BASE)
82#define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr) 80#define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr)
83#endif /* !CONFIG_DISCONTIGMEM */ 81#endif /* !CONFIG_DISCONTIGMEM */
84 82
@@ -92,6 +90,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
92 90
93#endif /* __KERNEL__ */ 91#endif /* __KERNEL__ */
94 92
93#include <asm-generic/memory_model.h>
95#include <asm-generic/page.h> 94#include <asm-generic/page.h>
96 95
97#endif /* _ASM_M32R_PAGE_H */ 96#endif /* _ASM_M32R_PAGE_H */
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
index 5f028dc26a9b..52f4fa29abfc 100644
--- a/include/asm-m32r/setup.h
+++ b/include/asm-m32r/setup.h
@@ -24,10 +24,6 @@
24#define RAMDISK_PROMPT_FLAG (0x8000) 24#define RAMDISK_PROMPT_FLAG (0x8000)
25#define RAMDISK_LOAD_FLAG (0x4000) 25#define RAMDISK_LOAD_FLAG (0x4000)
26 26
27#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
28#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
29#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
30
31extern unsigned long memory_start; 27extern unsigned long memory_start;
32extern unsigned long memory_end; 28extern unsigned long memory_end;
33 29
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h
index 13f4c0048463..1a61fdb56aaf 100644
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -310,36 +310,10 @@ static inline int fls(int x)
310 310
311 return 32 - cnt; 311 return 32 - cnt;
312} 312}
313#define fls64(x) generic_fls64(x)
314 313
315/* 314#include <asm-generic/bitops/fls64.h>
316 * Every architecture must define this function. It's the fastest 315#include <asm-generic/bitops/sched.h>
317 * way of searching a 140-bit bitmap where the first 100 bits are 316#include <asm-generic/bitops/hweight.h>
318 * unlikely to be set. It's guaranteed that at least one of the 140
319 * bits is cleared.
320 */
321static inline int sched_find_first_bit(const unsigned long *b)
322{
323 if (unlikely(b[0]))
324 return __ffs(b[0]);
325 if (unlikely(b[1]))
326 return __ffs(b[1]) + 32;
327 if (unlikely(b[2]))
328 return __ffs(b[2]) + 64;
329 if (b[3])
330 return __ffs(b[3]) + 96;
331 return __ffs(b[4]) + 128;
332}
333
334
335/*
336 * hweightN: returns the hamming weight (i.e. the number
337 * of bits set) of a N-bit word
338 */
339
340#define hweight32(x) generic_hweight32(x)
341#define hweight16(x) generic_hweight16(x)
342#define hweight8(x) generic_hweight8(x)
343 317
344/* Bitmap functions for the minix filesystem */ 318/* Bitmap functions for the minix filesystem */
345 319
@@ -365,9 +339,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
365 return ((p - addr) << 4) + (res ^ 31); 339 return ((p - addr) << 4) + (res ^ 31);
366} 340}
367 341
368#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) 342#define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
369#define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr)) 343#define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr))
370#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) 344#define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr))
371 345
372static inline int minix_test_bit(int nr, const void *vaddr) 346static inline int minix_test_bit(int nr, const void *vaddr)
373{ 347{
@@ -377,9 +351,9 @@ static inline int minix_test_bit(int nr, const void *vaddr)
377 351
378/* Bitmap functions for the ext2 filesystem. */ 352/* Bitmap functions for the ext2 filesystem. */
379 353
380#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) 354#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
381#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) 355#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
382#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) 356#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
383#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) 357#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
384 358
385static inline int ext2_test_bit(int nr, const void *vaddr) 359static inline int ext2_test_bit(int nr, const void *vaddr)
diff --git a/include/asm-m68k/stat.h b/include/asm-m68k/stat.h
index c4c402a45e21..dd38bc2e9f98 100644
--- a/include/asm-m68k/stat.h
+++ b/include/asm-m68k/stat.h
@@ -60,8 +60,7 @@ struct stat64 {
60 long long st_size; 60 long long st_size;
61 unsigned long st_blksize; 61 unsigned long st_blksize;
62 62
63 unsigned long __pad4; /* future possible st_blocks high bits */ 63 unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
64 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
65 64
66 unsigned long st_atime; 65 unsigned long st_atime;
67 unsigned long st_atime_nsec; 66 unsigned long st_atime_nsec;
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index 25d8a3cfef90..0b68ccd327f7 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -12,104 +12,10 @@
12 12
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14 14
15/* 15#include <asm-generic/bitops/ffs.h>
16 * Generic ffs(). 16#include <asm-generic/bitops/__ffs.h>
17 */ 17#include <asm-generic/bitops/sched.h>
18static inline int ffs(int x) 18#include <asm-generic/bitops/ffz.h>
19{
20 int r = 1;
21
22 if (!x)
23 return 0;
24 if (!(x & 0xffff)) {
25 x >>= 16;
26 r += 16;
27 }
28 if (!(x & 0xff)) {
29 x >>= 8;
30 r += 8;
31 }
32 if (!(x & 0xf)) {
33 x >>= 4;
34 r += 4;
35 }
36 if (!(x & 3)) {
37 x >>= 2;
38 r += 2;
39 }
40 if (!(x & 1)) {
41 x >>= 1;
42 r += 1;
43 }
44 return r;
45}
46
47/*
48 * Generic __ffs().
49 */
50static inline int __ffs(int x)
51{
52 int r = 0;
53
54 if (!x)
55 return 0;
56 if (!(x & 0xffff)) {
57 x >>= 16;
58 r += 16;
59 }
60 if (!(x & 0xff)) {
61 x >>= 8;
62 r += 8;
63 }
64 if (!(x & 0xf)) {
65 x >>= 4;
66 r += 4;
67 }
68 if (!(x & 3)) {
69 x >>= 2;
70 r += 2;
71 }
72 if (!(x & 1)) {
73 x >>= 1;
74 r += 1;
75 }
76 return r;
77}
78
79/*
80 * Every architecture must define this function. It's the fastest
81 * way of searching a 140-bit bitmap where the first 100 bits are
82 * unlikely to be set. It's guaranteed that at least one of the 140
83 * bits is cleared.
84 */
85static inline int sched_find_first_bit(unsigned long *b)
86{
87 if (unlikely(b[0]))
88 return __ffs(b[0]);
89 if (unlikely(b[1]))
90 return __ffs(b[1]) + 32;
91 if (unlikely(b[2]))
92 return __ffs(b[2]) + 64;
93 if (b[3])
94 return __ffs(b[3]) + 96;
95 return __ffs(b[4]) + 128;
96}
97
98/*
99 * ffz = Find First Zero in word. Undefined if no zero exists,
100 * so code should check against ~0UL first..
101 */
102static __inline__ unsigned long ffz(unsigned long word)
103{
104 unsigned long result = 0;
105
106 while(word & 1) {
107 result++;
108 word >>= 1;
109 }
110 return result;
111}
112
113 19
114static __inline__ void set_bit(int nr, volatile unsigned long * addr) 20static __inline__ void set_bit(int nr, volatile unsigned long * addr)
115{ 21{
@@ -254,98 +160,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
254 __constant_test_bit((nr),(addr)) : \ 160 __constant_test_bit((nr),(addr)) : \
255 __test_bit((nr),(addr))) 161 __test_bit((nr),(addr)))
256 162
257#define find_first_zero_bit(addr, size) \ 163#include <asm-generic/bitops/find.h>
258 find_next_zero_bit((addr), (size), 0) 164#include <asm-generic/bitops/hweight.h>
259#define find_first_bit(addr, size) \
260 find_next_bit((addr), (size), 0)
261
262static __inline__ int find_next_zero_bit (const void * addr, int size, int offset)
263{
264 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
265 unsigned long result = offset & ~31UL;
266 unsigned long tmp;
267
268 if (offset >= size)
269 return size;
270 size -= result;
271 offset &= 31UL;
272 if (offset) {
273 tmp = *(p++);
274 tmp |= ~0UL >> (32-offset);
275 if (size < 32)
276 goto found_first;
277 if (~tmp)
278 goto found_middle;
279 size -= 32;
280 result += 32;
281 }
282 while (size & ~31UL) {
283 if (~(tmp = *(p++)))
284 goto found_middle;
285 result += 32;
286 size -= 32;
287 }
288 if (!size)
289 return result;
290 tmp = *p;
291
292found_first:
293 tmp |= ~0UL << size;
294found_middle:
295 return result + ffz(tmp);
296}
297
298/*
299 * Find next one bit in a bitmap reasonably efficiently.
300 */
301static __inline__ unsigned long find_next_bit(const unsigned long *addr,
302 unsigned long size, unsigned long offset)
303{
304 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
305 unsigned int result = offset & ~31UL;
306 unsigned int tmp;
307
308 if (offset >= size)
309 return size;
310 size -= result;
311 offset &= 31UL;
312 if (offset) {
313 tmp = *p++;
314 tmp &= ~0UL << offset;
315 if (size < 32)
316 goto found_first;
317 if (tmp)
318 goto found_middle;
319 size -= 32;
320 result += 32;
321 }
322 while (size >= 32) {
323 if ((tmp = *p++) != 0)
324 goto found_middle;
325 result += 32;
326 size -= 32;
327 }
328 if (!size)
329 return result;
330 tmp = *p;
331
332found_first:
333 tmp &= ~0UL >> (32 - size);
334 if (tmp == 0UL) /* Are any bits set? */
335 return result + size; /* Nope. */
336found_middle:
337 return result + __ffs(tmp);
338}
339
340/*
341 * hweightN: returns the hamming weight (i.e. the number
342 * of bits set) of a N-bit word
343 */
344
345#define hweight32(x) generic_hweight32(x)
346#define hweight16(x) generic_hweight16(x)
347#define hweight8(x) generic_hweight8(x)
348
349 165
350static __inline__ int ext2_set_bit(int nr, volatile void * addr) 166static __inline__ int ext2_set_bit(int nr, volatile void * addr)
351{ 167{
@@ -475,30 +291,11 @@ found_middle:
475 return result + ffz(__swab32(tmp)); 291 return result + ffz(__swab32(tmp));
476} 292}
477 293
478/* Bitmap functions for the minix filesystem. */ 294#include <asm-generic/bitops/minix.h>
479#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
480#define minix_set_bit(nr,addr) set_bit(nr,addr)
481#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
482#define minix_test_bit(nr,addr) test_bit(nr,addr)
483#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
484
485/**
486 * hweightN - returns the hamming weight of a N-bit word
487 * @x: the word to weigh
488 *
489 * The Hamming Weight of a number is the total number of bits set in it.
490 */
491
492#define hweight32(x) generic_hweight32(x)
493#define hweight16(x) generic_hweight16(x)
494#define hweight8(x) generic_hweight8(x)
495 295
496#endif /* __KERNEL__ */ 296#endif /* __KERNEL__ */
497 297
498/* 298#include <asm-generic/bitops/fls.h>
499 * fls: find last bit set. 299#include <asm-generic/bitops/fls64.h>
500 */
501#define fls(x) generic_fls(x)
502#define fls64(x) generic_fls64(x)
503 300
504#endif /* _M68KNOMMU_BITOPS_H */ 301#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 8e802059fe67..a1728f8c0705 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
105} 105}
106 106
107/* 107/*
108 * __set_bit - Set a bit in memory
109 * @nr: the bit to set
110 * @addr: the address to start counting from
111 *
112 * Unlike set_bit(), this function is non-atomic and may be reordered.
113 * If it's called on the same region of memory simultaneously, the effect
114 * may be that only one operation succeeds.
115 */
116static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
117{
118 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
119
120 *m |= 1UL << (nr & SZLONG_MASK);
121}
122
123/*
124 * clear_bit - Clears a bit in memory 108 * clear_bit - Clears a bit in memory
125 * @nr: Bit to clear 109 * @nr: Bit to clear
126 * @addr: Address to start counting from 110 * @addr: Address to start counting from
@@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
169} 153}
170 154
171/* 155/*
172 * __clear_bit - Clears a bit in memory
173 * @nr: Bit to clear
174 * @addr: Address to start counting from
175 *
176 * Unlike clear_bit(), this function is non-atomic and may be reordered.
177 * If it's called on the same region of memory simultaneously, the effect
178 * may be that only one operation succeeds.
179 */
180static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
181{
182 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
183
184 *m &= ~(1UL << (nr & SZLONG_MASK));
185}
186
187/*
188 * change_bit - Toggle a bit in memory 156 * change_bit - Toggle a bit in memory
189 * @nr: Bit to change 157 * @nr: Bit to change
190 * @addr: Address to start counting from 158 * @addr: Address to start counting from
@@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
235} 203}
236 204
237/* 205/*
238 * __change_bit - Toggle a bit in memory
239 * @nr: the bit to change
240 * @addr: the address to start counting from
241 *
242 * Unlike change_bit(), this function is non-atomic and may be reordered.
243 * If it's called on the same region of memory simultaneously, the effect
244 * may be that only one operation succeeds.
245 */
246static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
247{
248 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
249
250 *m ^= 1UL << (nr & SZLONG_MASK);
251}
252
253/*
254 * test_and_set_bit - Set a bit and return its old value 206 * test_and_set_bit - Set a bit and return its old value
255 * @nr: Bit to set 207 * @nr: Bit to set
256 * @addr: Address to count from 208 * @addr: Address to count from
@@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr,
321} 273}
322 274
323/* 275/*
324 * __test_and_set_bit - Set a bit and return its old value
325 * @nr: Bit to set
326 * @addr: Address to count from
327 *
328 * This operation is non-atomic and can be reordered.
329 * If two examples of this operation race, one can appear to succeed
330 * but actually fail. You must protect multiple accesses with a lock.
331 */
332static inline int __test_and_set_bit(unsigned long nr,
333 volatile unsigned long *addr)
334{
335 volatile unsigned long *a = addr;
336 unsigned long mask;
337 int retval;
338
339 a += nr >> SZLONG_LOG;
340 mask = 1UL << (nr & SZLONG_MASK);
341 retval = (mask & *a) != 0;
342 *a |= mask;
343
344 return retval;
345}
346
347/*
348 * test_and_clear_bit - Clear a bit and return its old value 276 * test_and_clear_bit - Clear a bit and return its old value
349 * @nr: Bit to clear 277 * @nr: Bit to clear
350 * @addr: Address to count from 278 * @addr: Address to count from
@@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr,
417} 345}
418 346
419/* 347/*
420 * __test_and_clear_bit - Clear a bit and return its old value
421 * @nr: Bit to clear
422 * @addr: Address to count from
423 *
424 * This operation is non-atomic and can be reordered.
425 * If two examples of this operation race, one can appear to succeed
426 * but actually fail. You must protect multiple accesses with a lock.
427 */
428static inline int __test_and_clear_bit(unsigned long nr,
429 volatile unsigned long * addr)
430{
431 volatile unsigned long *a = addr;
432 unsigned long mask;
433 int retval;
434
435 a += (nr >> SZLONG_LOG);
436 mask = 1UL << (nr & SZLONG_MASK);
437 retval = ((mask & *a) != 0);
438 *a &= ~mask;
439
440 return retval;
441}
442
443/*
444 * test_and_change_bit - Change a bit and return its old value 348 * test_and_change_bit - Change a bit and return its old value
445 * @nr: Bit to change 349 * @nr: Bit to change
446 * @addr: Address to count from 350 * @addr: Address to count from
@@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr,
509 } 413 }
510} 414}
511 415
512/*
513 * __test_and_change_bit - Change a bit and return its old value
514 * @nr: Bit to change
515 * @addr: Address to count from
516 *
517 * This operation is non-atomic and can be reordered.
518 * If two examples of this operation race, one can appear to succeed
519 * but actually fail. You must protect multiple accesses with a lock.
520 */
521static inline int __test_and_change_bit(unsigned long nr,
522 volatile unsigned long *addr)
523{
524 volatile unsigned long *a = addr;
525 unsigned long mask;
526 int retval;
527
528 a += (nr >> SZLONG_LOG);
529 mask = 1UL << (nr & SZLONG_MASK);
530 retval = ((mask & *a) != 0);
531 *a ^= mask;
532
533 return retval;
534}
535
536#undef __bi_flags 416#undef __bi_flags
537#undef __bi_local_irq_save 417#undef __bi_local_irq_save
538#undef __bi_local_irq_restore 418#undef __bi_local_irq_restore
539 419
540/* 420#include <asm-generic/bitops/non-atomic.h>
541 * test_bit - Determine whether a bit is set
542 * @nr: bit number to test
543 * @addr: Address to start counting from
544 */
545static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
546{
547 return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
548}
549 421
550/* 422/*
551 * Return the bit position (0..63) of the most significant 1 bit in a word 423 * Return the bit position (0..63) of the most significant 1 bit in a word
@@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x)
580 return 63 - lz; 452 return 63 - lz;
581} 453}
582 454
455#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
456
583/* 457/*
584 * __ffs - find first bit in word. 458 * __ffs - find first bit in word.
585 * @word: The word to search 459 * @word: The word to search
@@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x)
589 */ 463 */
590static inline unsigned long __ffs(unsigned long word) 464static inline unsigned long __ffs(unsigned long word)
591{ 465{
592#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
593 return __ilog2(word & -word); 466 return __ilog2(word & -word);
594#else
595 int b = 0, s;
596
597#ifdef CONFIG_32BIT
598 s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
599 s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
600 s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
601 s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
602 s = 1; if (word << 31 != 0) s = 0; b += s;
603
604 return b;
605#endif
606#ifdef CONFIG_64BIT
607 s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
608 s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
609 s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
610 s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
611 s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
612 s = 1; if (word << 63 != 0) s = 0; b += s;
613
614 return b;
615#endif
616#endif
617} 467}
618 468
619/* 469/*
@@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word)
652 */ 502 */
653static inline unsigned long fls(unsigned long word) 503static inline unsigned long fls(unsigned long word)
654{ 504{
655#ifdef CONFIG_32BIT
656#ifdef CONFIG_CPU_MIPS32 505#ifdef CONFIG_CPU_MIPS32
657 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); 506 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
658 507
659 return 32 - word; 508 return 32 - word;
660#else
661 {
662 int r = 32, s;
663
664 if (word == 0)
665 return 0;
666
667 s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
668 s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
669 s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
670 s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
671 s = 1; if ((word & 0x80000000)) s = 0; r -= s;
672
673 return r;
674 }
675#endif 509#endif
676#endif /* CONFIG_32BIT */
677 510
678#ifdef CONFIG_64BIT
679#ifdef CONFIG_CPU_MIPS64 511#ifdef CONFIG_CPU_MIPS64
680
681 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); 512 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
682 513
683 return 64 - word; 514 return 64 - word;
684#else
685 {
686 int r = 64, s;
687
688 if (word == 0)
689 return 0;
690
691 s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
692 s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
693 s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
694 s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
695 s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
696 s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
697
698 return r;
699 }
700#endif 515#endif
701#endif /* CONFIG_64BIT */
702} 516}
703 517
704#define fls64(x) generic_fls64(x) 518#else
705
706/*
707 * find_next_zero_bit - find the first zero bit in a memory region
708 * @addr: The address to base the search on
709 * @offset: The bitnumber to start searching at
710 * @size: The maximum size to search
711 */
712static inline unsigned long find_next_zero_bit(const unsigned long *addr,
713 unsigned long size, unsigned long offset)
714{
715 const unsigned long *p = addr + (offset >> SZLONG_LOG);
716 unsigned long result = offset & ~SZLONG_MASK;
717 unsigned long tmp;
718
719 if (offset >= size)
720 return size;
721 size -= result;
722 offset &= SZLONG_MASK;
723 if (offset) {
724 tmp = *(p++);
725 tmp |= ~0UL >> (_MIPS_SZLONG-offset);
726 if (size < _MIPS_SZLONG)
727 goto found_first;
728 if (~tmp)
729 goto found_middle;
730 size -= _MIPS_SZLONG;
731 result += _MIPS_SZLONG;
732 }
733 while (size & ~SZLONG_MASK) {
734 if (~(tmp = *(p++)))
735 goto found_middle;
736 result += _MIPS_SZLONG;
737 size -= _MIPS_SZLONG;
738 }
739 if (!size)
740 return result;
741 tmp = *p;
742
743found_first:
744 tmp |= ~0UL << size;
745 if (tmp == ~0UL) /* Are any bits zero? */
746 return result + size; /* Nope. */
747found_middle:
748 return result + ffz(tmp);
749}
750 519
751#define find_first_zero_bit(addr, size) \ 520#include <asm-generic/bitops/__ffs.h>
752 find_next_zero_bit((addr), (size), 0) 521#include <asm-generic/bitops/ffs.h>
522#include <asm-generic/bitops/ffz.h>
523#include <asm-generic/bitops/fls.h>
753 524
754/* 525#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
755 * find_next_bit - find the next set bit in a memory region
756 * @addr: The address to base the search on
757 * @offset: The bitnumber to start searching at
758 * @size: The maximum size to search
759 */
760static inline unsigned long find_next_bit(const unsigned long *addr,
761 unsigned long size, unsigned long offset)
762{
763 const unsigned long *p = addr + (offset >> SZLONG_LOG);
764 unsigned long result = offset & ~SZLONG_MASK;
765 unsigned long tmp;
766
767 if (offset >= size)
768 return size;
769 size -= result;
770 offset &= SZLONG_MASK;
771 if (offset) {
772 tmp = *(p++);
773 tmp &= ~0UL << offset;
774 if (size < _MIPS_SZLONG)
775 goto found_first;
776 if (tmp)
777 goto found_middle;
778 size -= _MIPS_SZLONG;
779 result += _MIPS_SZLONG;
780 }
781 while (size & ~SZLONG_MASK) {
782 if ((tmp = *(p++)))
783 goto found_middle;
784 result += _MIPS_SZLONG;
785 size -= _MIPS_SZLONG;
786 }
787 if (!size)
788 return result;
789 tmp = *p;
790
791found_first:
792 tmp &= ~0UL >> (_MIPS_SZLONG - size);
793 if (tmp == 0UL) /* Are any bits set? */
794 return result + size; /* Nope. */
795found_middle:
796 return result + __ffs(tmp);
797}
798 526
799/* 527#include <asm-generic/bitops/fls64.h>
800 * find_first_bit - find the first set bit in a memory region 528#include <asm-generic/bitops/find.h>
801 * @addr: The address to start the search at
802 * @size: The maximum size to search
803 *
804 * Returns the bit-number of the first set bit, not the number of the byte
805 * containing a bit.
806 */
807#define find_first_bit(addr, size) \
808 find_next_bit((addr), (size), 0)
809 529
810#ifdef __KERNEL__ 530#ifdef __KERNEL__
811 531
812/* 532#include <asm-generic/bitops/sched.h>
813 * Every architecture must define this function. It's the fastest 533#include <asm-generic/bitops/hweight.h>
814 * way of searching a 140-bit bitmap where the first 100 bits are 534#include <asm-generic/bitops/ext2-non-atomic.h>
815 * unlikely to be set. It's guaranteed that at least one of the 140 535#include <asm-generic/bitops/ext2-atomic.h>
816 * bits is cleared. 536#include <asm-generic/bitops/minix.h>
817 */
818static inline int sched_find_first_bit(const unsigned long *b)
819{
820#ifdef CONFIG_32BIT
821 if (unlikely(b[0]))
822 return __ffs(b[0]);
823 if (unlikely(b[1]))
824 return __ffs(b[1]) + 32;
825 if (unlikely(b[2]))
826 return __ffs(b[2]) + 64;
827 if (b[3])
828 return __ffs(b[3]) + 96;
829 return __ffs(b[4]) + 128;
830#endif
831#ifdef CONFIG_64BIT
832 if (unlikely(b[0]))
833 return __ffs(b[0]);
834 if (unlikely(b[1]))
835 return __ffs(b[1]) + 64;
836 return __ffs(b[2]) + 128;
837#endif
838}
839
840/*
841 * hweightN - returns the hamming weight of a N-bit word
842 * @x: the word to weigh
843 *
844 * The Hamming Weight of a number is the total number of bits set in it.
845 */
846
847#define hweight64(x) generic_hweight64(x)
848#define hweight32(x) generic_hweight32(x)
849#define hweight16(x) generic_hweight16(x)
850#define hweight8(x) generic_hweight8(x)
851
852static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
853{
854 unsigned char *ADDR = (unsigned char *) addr;
855 int mask, retval;
856
857 ADDR += nr >> 3;
858 mask = 1 << (nr & 0x07);
859 retval = (mask & *ADDR) != 0;
860 *ADDR |= mask;
861
862 return retval;
863}
864
865static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
866{
867 unsigned char *ADDR = (unsigned char *) addr;
868 int mask, retval;
869
870 ADDR += nr >> 3;
871 mask = 1 << (nr & 0x07);
872 retval = (mask & *ADDR) != 0;
873 *ADDR &= ~mask;
874
875 return retval;
876}
877
878static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
879{
880 const unsigned char *ADDR = (const unsigned char *) addr;
881 int mask;
882
883 ADDR += nr >> 3;
884 mask = 1 << (nr & 0x07);
885
886 return ((mask & *ADDR) != 0);
887}
888
889static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
890 unsigned long size, unsigned long offset)
891{
892 unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
893 unsigned long result = offset & ~SZLONG_MASK;
894 unsigned long tmp;
895
896 if (offset >= size)
897 return size;
898 size -= result;
899 offset &= SZLONG_MASK;
900 if (offset) {
901 tmp = cpu_to_lelongp(p++);
902 tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
903 if (size < _MIPS_SZLONG)
904 goto found_first;
905 if (~tmp)
906 goto found_middle;
907 size -= _MIPS_SZLONG;
908 result += _MIPS_SZLONG;
909 }
910 while (size & ~SZLONG_MASK) {
911 if (~(tmp = cpu_to_lelongp(p++)))
912 goto found_middle;
913 result += _MIPS_SZLONG;
914 size -= _MIPS_SZLONG;
915 }
916 if (!size)
917 return result;
918 tmp = cpu_to_lelongp(p);
919
920found_first:
921 tmp |= ~0UL << size;
922 if (tmp == ~0UL) /* Are any bits zero? */
923 return result + size; /* Nope. */
924
925found_middle:
926 return result + ffz(tmp);
927}
928
929#define find_first_zero_le_bit(addr, size) \
930 find_next_zero_le_bit((addr), (size), 0)
931
932#define ext2_set_bit(nr,addr) \
933 __test_and_set_le_bit((nr),(unsigned long*)addr)
934#define ext2_clear_bit(nr, addr) \
935 __test_and_clear_le_bit((nr),(unsigned long*)addr)
936 #define ext2_set_bit_atomic(lock, nr, addr) \
937({ \
938 int ret; \
939 spin_lock(lock); \
940 ret = ext2_set_bit((nr), (addr)); \
941 spin_unlock(lock); \
942 ret; \
943})
944
945#define ext2_clear_bit_atomic(lock, nr, addr) \
946({ \
947 int ret; \
948 spin_lock(lock); \
949 ret = ext2_clear_bit((nr), (addr)); \
950 spin_unlock(lock); \
951 ret; \
952})
953#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
954#define ext2_find_first_zero_bit(addr, size) \
955 find_first_zero_le_bit((unsigned long*)addr, size)
956#define ext2_find_next_zero_bit(addr, size, off) \
957 find_next_zero_le_bit((unsigned long*)addr, size, off)
958
959/*
960 * Bitmap functions for the minix filesystem.
961 *
962 * FIXME: These assume that Minix uses the native byte/bitorder.
963 * This limits the Minix filesystem's value for data exchange very much.
964 */
965#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
966#define minix_set_bit(nr,addr) set_bit(nr,addr)
967#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
968#define minix_test_bit(nr,addr) test_bit(nr,addr)
969#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
970 537
971#endif /* __KERNEL__ */ 538#endif /* __KERNEL__ */
972 539
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 0012bd804d2d..986511db54a6 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -133,6 +133,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr)
133 return (void __user *)(long)uptr; 133 return (void __user *)(long)uptr;
134} 134}
135 135
136static inline compat_uptr_t ptr_to_compat(void __user *uptr)
137{
138 return (u32)(unsigned long)uptr;
139}
140
136static inline void __user *compat_alloc_user_space(long len) 141static inline void __user *compat_alloc_user_space(long len)
137{ 142{
138 struct pt_regs *regs = (struct pt_regs *) 143 struct pt_regs *regs = (struct pt_regs *)
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 2454c44a8f54..a554089991f2 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -99,5 +99,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
99 return ret; 99 return ret;
100} 100}
101 101
102static inline int
103futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
104{
105 return -ENOSYS;
106}
107
102#endif 108#endif
103#endif 109#endif
diff --git a/include/asm-mips/mc146818-time.h b/include/asm-mips/mc146818-time.h
index 47214861093b..41ac8d363c67 100644
--- a/include/asm-mips/mc146818-time.h
+++ b/include/asm-mips/mc146818-time.h
@@ -86,43 +86,14 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
86 return retval; 86 return retval;
87} 87}
88 88
89/*
90 * Returns true if a clock update is in progress
91 */
92static inline unsigned char rtc_is_updating(void)
93{
94 unsigned char uip;
95 unsigned long flags;
96
97 spin_lock_irqsave(&rtc_lock, flags);
98 uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
99 spin_unlock_irqrestore(&rtc_lock, flags);
100 return uip;
101}
102
103static inline unsigned long mc146818_get_cmos_time(void) 89static inline unsigned long mc146818_get_cmos_time(void)
104{ 90{
105 unsigned int year, mon, day, hour, min, sec; 91 unsigned int year, mon, day, hour, min, sec;
106 int i;
107 unsigned long flags; 92 unsigned long flags;
108 93
109 /*
110 * The Linux interpretation of the CMOS clock register contents:
111 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
112 * RTC registers show the second which has precisely just started.
113 * Let's hope other operating systems interpret the RTC the same way.
114 */
115
116 /* read RTC exactly on falling edge of update flag */
117 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
118 if (rtc_is_updating())
119 break;
120 for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
121 if (!rtc_is_updating())
122 break;
123
124 spin_lock_irqsave(&rtc_lock, flags); 94 spin_lock_irqsave(&rtc_lock, flags);
125 do { /* Isn't this overkill ? UIP above should guarantee consistency */ 95
96 do {
126 sec = CMOS_READ(RTC_SECONDS); 97 sec = CMOS_READ(RTC_SECONDS);
127 min = CMOS_READ(RTC_MINUTES); 98 min = CMOS_READ(RTC_MINUTES);
128 hour = CMOS_READ(RTC_HOURS); 99 hour = CMOS_READ(RTC_HOURS);
diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h
index 011caebac369..7bde4432092b 100644
--- a/include/asm-mips/mmzone.h
+++ b/include/asm-mips/mmzone.h
@@ -22,20 +22,6 @@
22 NODE_DATA(__n)->node_spanned_pages) : 0);\ 22 NODE_DATA(__n)->node_spanned_pages) : 0);\
23}) 23})
24 24
25#define pfn_to_page(pfn) \
26({ \
27 unsigned long __pfn = (pfn); \
28 pg_data_t *__pg = NODE_DATA(pfn_to_nid(__pfn)); \
29 __pg->node_mem_map + (__pfn - __pg->node_start_pfn); \
30})
31
32#define page_to_pfn(p) \
33({ \
34 struct page *__p = (p); \
35 struct zone *__z = page_zone(__p); \
36 ((__p - __z->zone_mem_map) + __z->zone_start_pfn); \
37})
38
39/* XXX: FIXME -- wli */ 25/* XXX: FIXME -- wli */
40#define kern_addr_valid(addr) (0) 26#define kern_addr_valid(addr) (0)
41 27
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index ee25a779bf49..a1eab136ff6c 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -140,8 +140,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
140#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 140#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
141 141
142#ifndef CONFIG_NEED_MULTIPLE_NODES 142#ifndef CONFIG_NEED_MULTIPLE_NODES
143#define pfn_to_page(pfn) (mem_map + (pfn))
144#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
145#define pfn_valid(pfn) ((pfn) < max_mapnr) 143#define pfn_valid(pfn) ((pfn) < max_mapnr)
146#endif 144#endif
147 145
@@ -160,6 +158,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
160#define WANT_PAGE_VIRTUAL 158#define WANT_PAGE_VIRTUAL
161#endif 159#endif
162 160
161#include <asm-generic/memory_model.h>
163#include <asm-generic/page.h> 162#include <asm-generic/page.h>
164 163
165#endif /* _ASM_PAGE_H */ 164#endif /* _ASM_PAGE_H */
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
index 9cc3564cc2c9..d897c8bb554d 100644
--- a/include/asm-mips/time.h
+++ b/include/asm-mips/time.h
@@ -26,14 +26,14 @@ extern spinlock_t rtc_lock;
26 26
27/* 27/*
28 * RTC ops. By default, they point to no-RTC functions. 28 * RTC ops. By default, they point to no-RTC functions.
29 * rtc_get_time - mktime(year, mon, day, hour, min, sec) in seconds. 29 * rtc_mips_get_time - mktime(year, mon, day, hour, min, sec) in seconds.
30 * rtc_set_time - reverse the above translation and set time to RTC. 30 * rtc_mips_set_time - reverse the above translation and set time to RTC.
31 * rtc_set_mmss - similar to rtc_set_time, but only min and sec need 31 * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need
32 * to be set. Used by RTC sync-up. 32 * to be set. Used by RTC sync-up.
33 */ 33 */
34extern unsigned long (*rtc_get_time)(void); 34extern unsigned long (*rtc_mips_get_time)(void);
35extern int (*rtc_set_time)(unsigned long); 35extern int (*rtc_mips_set_time)(unsigned long);
36extern int (*rtc_set_mmss)(unsigned long); 36extern int (*rtc_mips_set_mmss)(unsigned long);
37 37
38/* 38/*
39 * Timer interrupt functions. 39 * Timer interrupt functions.
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h
index 421b3aea14cc..cd2813d8e136 100644
--- a/include/asm-mips/types.h
+++ b/include/asm-mips/types.h
@@ -99,6 +99,11 @@ typedef u64 sector_t;
99#define HAVE_SECTOR_T 99#define HAVE_SECTOR_T
100#endif 100#endif
101 101
102#ifdef CONFIG_LSF
103typedef u64 blkcnt_t;
104#define HAVE_BLKCNT_T
105#endif
106
102#endif /* __ASSEMBLY__ */ 107#endif /* __ASSEMBLY__ */
103 108
104#endif /* __KERNEL__ */ 109#endif /* __KERNEL__ */
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index 15d8c2b51584..900561922c4c 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
35 _atomic_spin_unlock_irqrestore(addr, flags); 35 _atomic_spin_unlock_irqrestore(addr, flags);
36} 36}
37 37
38static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
39{
40 unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
41
42 *m |= 1UL << CHOP_SHIFTCOUNT(nr);
43}
44
45static __inline__ void clear_bit(int nr, volatile unsigned long * addr) 38static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
46{ 39{
47 unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); 40 unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
@@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
53 _atomic_spin_unlock_irqrestore(addr, flags); 46 _atomic_spin_unlock_irqrestore(addr, flags);
54} 47}
55 48
56static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
57{
58 unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
59
60 *m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
61}
62
63static __inline__ void change_bit(int nr, volatile unsigned long * addr) 49static __inline__ void change_bit(int nr, volatile unsigned long * addr)
64{ 50{
65 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 51 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
71 _atomic_spin_unlock_irqrestore(addr, flags); 57 _atomic_spin_unlock_irqrestore(addr, flags);
72} 58}
73 59
74static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
75{
76 unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
77
78 *m ^= 1UL << CHOP_SHIFTCOUNT(nr);
79}
80
81static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) 60static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
82{ 61{
83 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 62 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
93 return (oldbit & mask) ? 1 : 0; 72 return (oldbit & mask) ? 1 : 0;
94} 73}
95 74
96static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
97{
98 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
99 unsigned long oldbit;
100 unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
101
102 oldbit = *addr;
103 *addr = oldbit | mask;
104
105 return (oldbit & mask) ? 1 : 0;
106}
107
108static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) 75static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
109{ 76{
110 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 77 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
120 return (oldbit & mask) ? 1 : 0; 87 return (oldbit & mask) ? 1 : 0;
121} 88}
122 89
123static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
124{
125 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
126 unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
127 unsigned long oldbit;
128
129 oldbit = *addr;
130 *addr = oldbit & ~mask;
131
132 return (oldbit & mask) ? 1 : 0;
133}
134
135static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) 90static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
136{ 91{
137 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); 92 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
147 return (oldbit & mask) ? 1 : 0; 102 return (oldbit & mask) ? 1 : 0;
148} 103}
149 104
150static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address) 105#include <asm-generic/bitops/non-atomic.h>
151{
152 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
153 unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
154 unsigned long oldbit;
155
156 oldbit = *addr;
157 *addr = oldbit ^ mask;
158
159 return (oldbit & mask) ? 1 : 0;
160}
161
162static __inline__ int test_bit(int nr, const volatile unsigned long *address)
163{
164 unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
165 const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
166
167 return !!(*addr & mask);
168}
169 106
170#ifdef __KERNEL__ 107#ifdef __KERNEL__
171 108
@@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
219 return ret; 156 return ret;
220} 157}
221 158
222/* Undefined if no bit is zero. */ 159#include <asm-generic/bitops/ffz.h>
223#define ffz(x) __ffs(~x)
224 160
225/* 161/*
226 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) 162 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
@@ -263,155 +199,22 @@ static __inline__ int fls(int x)
263 199
264 return ret; 200 return ret;
265} 201}
266#define fls64(x) generic_fls64(x)
267 202
268/* 203#include <asm-generic/bitops/fls64.h>
269 * hweightN: returns the hamming weight (i.e. the number 204#include <asm-generic/bitops/hweight.h>
270 * of bits set) of a N-bit word 205#include <asm-generic/bitops/sched.h>
271 */
272#define hweight64(x) generic_hweight64(x)
273#define hweight32(x) generic_hweight32(x)
274#define hweight16(x) generic_hweight16(x)
275#define hweight8(x) generic_hweight8(x)
276
277/*
278 * Every architecture must define this function. It's the fastest
279 * way of searching a 140-bit bitmap where the first 100 bits are
280 * unlikely to be set. It's guaranteed that at least one of the 140
281 * bits is cleared.
282 */
283static inline int sched_find_first_bit(const unsigned long *b)
284{
285#ifdef __LP64__
286 if (unlikely(b[0]))
287 return __ffs(b[0]);
288 if (unlikely(b[1]))
289 return __ffs(b[1]) + 64;
290 return __ffs(b[2]) + 128;
291#else
292 if (unlikely(b[0]))
293 return __ffs(b[0]);
294 if (unlikely(b[1]))
295 return __ffs(b[1]) + 32;
296 if (unlikely(b[2]))
297 return __ffs(b[2]) + 64;
298 if (b[3])
299 return __ffs(b[3]) + 96;
300 return __ffs(b[4]) + 128;
301#endif
302}
303 206
304#endif /* __KERNEL__ */ 207#endif /* __KERNEL__ */
305 208
306/* 209#include <asm-generic/bitops/find.h>
307 * This implementation of find_{first,next}_zero_bit was stolen from
308 * Linus' asm-alpha/bitops.h.
309 */
310#define find_first_zero_bit(addr, size) \
311 find_next_zero_bit((addr), (size), 0)
312
313static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
314{
315 const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
316 unsigned long result = offset & ~(BITS_PER_LONG-1);
317 unsigned long tmp;
318
319 if (offset >= size)
320 return size;
321 size -= result;
322 offset &= (BITS_PER_LONG-1);
323 if (offset) {
324 tmp = *(p++);
325 tmp |= ~0UL >> (BITS_PER_LONG-offset);
326 if (size < BITS_PER_LONG)
327 goto found_first;
328 if (~tmp)
329 goto found_middle;
330 size -= BITS_PER_LONG;
331 result += BITS_PER_LONG;
332 }
333 while (size & ~(BITS_PER_LONG -1)) {
334 if (~(tmp = *(p++)))
335 goto found_middle;
336 result += BITS_PER_LONG;
337 size -= BITS_PER_LONG;
338 }
339 if (!size)
340 return result;
341 tmp = *p;
342found_first:
343 tmp |= ~0UL << size;
344found_middle:
345 return result + ffz(tmp);
346}
347
348static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
349{
350 const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
351 unsigned long result = offset & ~(BITS_PER_LONG-1);
352 unsigned long tmp;
353
354 if (offset >= size)
355 return size;
356 size -= result;
357 offset &= (BITS_PER_LONG-1);
358 if (offset) {
359 tmp = *(p++);
360 tmp &= (~0UL << offset);
361 if (size < BITS_PER_LONG)
362 goto found_first;
363 if (tmp)
364 goto found_middle;
365 size -= BITS_PER_LONG;
366 result += BITS_PER_LONG;
367 }
368 while (size & ~(BITS_PER_LONG-1)) {
369 if ((tmp = *(p++)))
370 goto found_middle;
371 result += BITS_PER_LONG;
372 size -= BITS_PER_LONG;
373 }
374 if (!size)
375 return result;
376 tmp = *p;
377
378found_first:
379 tmp &= (~0UL >> (BITS_PER_LONG - size));
380 if (tmp == 0UL) /* Are any bits set? */
381 return result + size; /* Nope. */
382found_middle:
383 return result + __ffs(tmp);
384}
385
386/**
387 * find_first_bit - find the first set bit in a memory region
388 * @addr: The address to start the search at
389 * @size: The maximum size to search
390 *
391 * Returns the bit-number of the first set bit, not the number of the byte
392 * containing a bit.
393 */
394#define find_first_bit(addr, size) \
395 find_next_bit((addr), (size), 0)
396
397#define _EXT2_HAVE_ASM_BITOPS_
398 210
399#ifdef __KERNEL__ 211#ifdef __KERNEL__
400/* 212
401 * test_and_{set,clear}_bit guarantee atomicity without 213#include <asm-generic/bitops/ext2-non-atomic.h>
402 * disabling interrupts.
403 */
404 214
405/* '3' is bits per byte */ 215/* '3' is bits per byte */
406#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) 216#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
407 217
408#define ext2_test_bit(nr, addr) \
409 test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
410#define ext2_set_bit(nr, addr) \
411 __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
412#define ext2_clear_bit(nr, addr) \
413 __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
414
415#define ext2_set_bit_atomic(l,nr,addr) \ 218#define ext2_set_bit_atomic(l,nr,addr) \
416 test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) 219 test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
417#define ext2_clear_bit_atomic(l,nr,addr) \ 220#define ext2_clear_bit_atomic(l,nr,addr) \
@@ -419,77 +222,6 @@ found_middle:
419 222
420#endif /* __KERNEL__ */ 223#endif /* __KERNEL__ */
421 224
422 225#include <asm-generic/bitops/minix-le.h>
423#define ext2_find_first_zero_bit(addr, size) \
424 ext2_find_next_zero_bit((addr), (size), 0)
425
426/* include/linux/byteorder does not support "unsigned long" type */
427static inline unsigned long ext2_swabp(unsigned long * x)
428{
429#ifdef __LP64__
430 return (unsigned long) __swab64p((u64 *) x);
431#else
432 return (unsigned long) __swab32p((u32 *) x);
433#endif
434}
435
436/* include/linux/byteorder doesn't support "unsigned long" type */
437static inline unsigned long ext2_swab(unsigned long y)
438{
439#ifdef __LP64__
440 return (unsigned long) __swab64((u64) y);
441#else
442 return (unsigned long) __swab32((u32) y);
443#endif
444}
445
446static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
447{
448 unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
449 unsigned long result = offset & ~(BITS_PER_LONG - 1);
450 unsigned long tmp;
451
452 if (offset >= size)
453 return size;
454 size -= result;
455 offset &= (BITS_PER_LONG - 1UL);
456 if (offset) {
457 tmp = ext2_swabp(p++);
458 tmp |= (~0UL >> (BITS_PER_LONG - offset));
459 if (size < BITS_PER_LONG)
460 goto found_first;
461 if (~tmp)
462 goto found_middle;
463 size -= BITS_PER_LONG;
464 result += BITS_PER_LONG;
465 }
466
467 while (size & ~(BITS_PER_LONG - 1)) {
468 if (~(tmp = *(p++)))
469 goto found_middle_swap;
470 result += BITS_PER_LONG;
471 size -= BITS_PER_LONG;
472 }
473 if (!size)
474 return result;
475 tmp = ext2_swabp(p);
476found_first:
477 tmp |= ~0UL << size;
478 if (tmp == ~0UL) /* Are any bits zero? */
479 return result + size; /* Nope. Skip ffz */
480found_middle:
481 return result + ffz(tmp);
482
483found_middle_swap:
484 return result + ffz(ext2_swab(tmp));
485}
486
487
488/* Bitmap functions for the minix filesystem. */
489#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
490#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
491#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
492#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
493#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
494 226
495#endif /* _PARISC_BITOPS_H */ 227#endif /* _PARISC_BITOPS_H */
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index 38b918feead9..289624d8b2d4 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -138,6 +138,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr)
138 return (void __user *)(unsigned long)uptr; 138 return (void __user *)(unsigned long)uptr;
139} 139}
140 140
141static inline compat_uptr_t ptr_to_compat(void __user *uptr)
142{
143 return (u32)(unsigned long)uptr;
144}
145
141static __inline__ void __user *compat_alloc_user_space(long len) 146static __inline__ void __user *compat_alloc_user_space(long len)
142{ 147{
143 struct pt_regs *regs = &current->thread.regs; 148 struct pt_regs *regs = &current->thread.regs;
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index ae039f4fd711..ceb9b73199d1 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -25,23 +25,6 @@ extern struct node_map_data node_data[];
25 pg_data_t *__pgdat = NODE_DATA(nid); \ 25 pg_data_t *__pgdat = NODE_DATA(nid); \
26 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ 26 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
27}) 27})
28#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
29
30#define pfn_to_page(pfn) \
31({ \
32 unsigned long __pfn = (pfn); \
33 int __node = pfn_to_nid(__pfn); \
34 &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \
35})
36
37#define page_to_pfn(pg) \
38({ \
39 struct page *__page = pg; \
40 struct zone *__zone = page_zone(__page); \
41 BUG_ON(__zone == NULL); \
42 (unsigned long)(__page - __zone->zone_mem_map) \
43 + __zone->zone_start_pfn; \
44})
45 28
46/* We have these possible memory map layouts: 29/* We have these possible memory map layouts:
47 * Astro: 0-3.75, 67.75-68, 4-64 30 * Astro: 0-3.75, 67.75-68, 4-64
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 4a6752b0afed..9f303c0c3cd7 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -130,8 +130,6 @@ extern int npmem_ranges;
130#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 130#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
131 131
132#ifndef CONFIG_DISCONTIGMEM 132#ifndef CONFIG_DISCONTIGMEM
133#define pfn_to_page(pfn) (mem_map + (pfn))
134#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
135#define pfn_valid(pfn) ((pfn) < max_mapnr) 133#define pfn_valid(pfn) ((pfn) < max_mapnr)
136#endif /* CONFIG_DISCONTIGMEM */ 134#endif /* CONFIG_DISCONTIGMEM */
137 135
@@ -152,6 +150,7 @@ extern int npmem_ranges;
152 150
153#endif /* __KERNEL__ */ 151#endif /* __KERNEL__ */
154 152
153#include <asm-generic/memory_model.h>
155#include <asm-generic/page.h> 154#include <asm-generic/page.h>
156 155
157#endif /* _PARISC_PAGE_H */ 156#endif /* _PARISC_PAGE_H */
diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h
index 8e23e4c674f6..0a3face6c480 100644
--- a/include/asm-parisc/pdc.h
+++ b/include/asm-parisc/pdc.h
@@ -333,7 +333,7 @@ struct pdc_model { /* for PDC_MODEL */
333 unsigned long curr_key; 333 unsigned long curr_key;
334}; 334};
335 335
336/* Values for PDC_MODEL_CAPABILITES non-equivalent virtual aliasing support */ 336/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
337 337
338#define PDC_MODEL_IOPDIR_FDC (1 << 2) /* see sba_iommu.c */ 338#define PDC_MODEL_IOPDIR_FDC (1 << 2) /* see sba_iommu.c */
339#define PDC_MODEL_NVA_MASK (3 << 4) 339#define PDC_MODEL_NVA_MASK (3 << 4)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index bf6941a810b8..d1c2a4405660 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
184 : "cc"); 184 : "cc");
185} 185}
186 186
187/* Non-atomic versions */ 187#include <asm-generic/bitops/non-atomic.h>
188static __inline__ int test_bit(unsigned long nr,
189 __const__ volatile unsigned long *addr)
190{
191 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
192}
193
194static __inline__ void __set_bit(unsigned long nr,
195 volatile unsigned long *addr)
196{
197 unsigned long mask = BITOP_MASK(nr);
198 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
199
200 *p |= mask;
201}
202
203static __inline__ void __clear_bit(unsigned long nr,
204 volatile unsigned long *addr)
205{
206 unsigned long mask = BITOP_MASK(nr);
207 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
208
209 *p &= ~mask;
210}
211
212static __inline__ void __change_bit(unsigned long nr,
213 volatile unsigned long *addr)
214{
215 unsigned long mask = BITOP_MASK(nr);
216 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
217
218 *p ^= mask;
219}
220
221static __inline__ int __test_and_set_bit(unsigned long nr,
222 volatile unsigned long *addr)
223{
224 unsigned long mask = BITOP_MASK(nr);
225 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
226 unsigned long old = *p;
227
228 *p = old | mask;
229 return (old & mask) != 0;
230}
231
232static __inline__ int __test_and_clear_bit(unsigned long nr,
233 volatile unsigned long *addr)
234{
235 unsigned long mask = BITOP_MASK(nr);
236 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
237 unsigned long old = *p;
238
239 *p = old & ~mask;
240 return (old & mask) != 0;
241}
242
243static __inline__ int __test_and_change_bit(unsigned long nr,
244 volatile unsigned long *addr)
245{
246 unsigned long mask = BITOP_MASK(nr);
247 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
248 unsigned long old = *p;
249
250 *p = old ^ mask;
251 return (old & mask) != 0;
252}
253 188
254/* 189/*
255 * Return the zero-based bit position (LE, not IBM bit numbering) of 190 * Return the zero-based bit position (LE, not IBM bit numbering) of
@@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x)
310 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); 245 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
311 return 32 - lz; 246 return 32 - lz;
312} 247}
313#define fls64(x) generic_fls64(x) 248#include <asm-generic/bitops/fls64.h>
314 249
315/* 250#include <asm-generic/bitops/hweight.h>
316 * hweightN: returns the hamming weight (i.e. the number
317 * of bits set) of a N-bit word
318 */
319#define hweight64(x) generic_hweight64(x)
320#define hweight32(x) generic_hweight32(x)
321#define hweight16(x) generic_hweight16(x)
322#define hweight8(x) generic_hweight8(x)
323 251
324#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) 252#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
325unsigned long find_next_zero_bit(const unsigned long *addr, 253unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr,
397#define minix_find_first_zero_bit(addr,size) \ 325#define minix_find_first_zero_bit(addr,size) \
398 find_first_zero_le_bit((unsigned long *)addr, size) 326 find_first_zero_le_bit((unsigned long *)addr, size)
399 327
400/* 328#include <asm-generic/bitops/sched.h>
401 * Every architecture must define this function. It's the fastest
402 * way of searching a 140-bit bitmap where the first 100 bits are
403 * unlikely to be set. It's guaranteed that at least one of the 140
404 * bits is cleared.
405 */
406static inline int sched_find_first_bit(const unsigned long *b)
407{
408#ifdef CONFIG_PPC64
409 if (unlikely(b[0]))
410 return __ffs(b[0]);
411 if (unlikely(b[1]))
412 return __ffs(b[1]) + 64;
413 return __ffs(b[2]) + 128;
414#else
415 if (unlikely(b[0]))
416 return __ffs(b[0]);
417 if (unlikely(b[1]))
418 return __ffs(b[1]) + 32;
419 if (unlikely(b[2]))
420 return __ffs(b[2]) + 64;
421 if (b[3])
422 return __ffs(b[3]) + 96;
423 return __ffs(b[4]) + 128;
424#endif
425}
426 329
427#endif /* __KERNEL__ */ 330#endif /* __KERNEL__ */
428 331
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 39e85f320a76..f1b3c00bc1ce 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -81,5 +81,11 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
81 return ret; 81 return ret;
82} 82}
83 83
84static inline int
85futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
86{
87 return -ENOSYS;
88}
89
84#endif /* __KERNEL__ */ 90#endif /* __KERNEL__ */
85#endif /* _ASM_POWERPC_FUTEX_H */ 91#endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index 7c16265568e0..c01786ab5fa6 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -16,13 +16,9 @@ struct die_args {
16 int signr; 16 int signr;
17}; 17};
18 18
19/* 19extern int register_die_notifier(struct notifier_block *);
20 Note - you should never unregister because that can race with NMIs. 20extern int unregister_die_notifier(struct notifier_block *);
21 If you really want to do it first unregister - then synchronize_sched - 21extern struct atomic_notifier_head powerpc_die_chain;
22 then free.
23 */
24int register_die_notifier(struct notifier_block *nb);
25extern struct notifier_block *powerpc_die_chain;
26 22
27/* Grossly misnamed. */ 23/* Grossly misnamed. */
28enum die_val { 24enum die_val {
@@ -37,7 +33,7 @@ enum die_val {
37static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) 33static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
38{ 34{
39 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; 35 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
40 return notifier_call_chain(&powerpc_die_chain, val, &args); 36 return atomic_notifier_call_chain(&powerpc_die_chain, val, &args);
41} 37}
42 38
43#endif /* __KERNEL__ */ 39#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 0b82df483f7f..2fbecebe1c92 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -69,8 +69,6 @@
69#endif 69#endif
70 70
71#ifdef CONFIG_FLATMEM 71#ifdef CONFIG_FLATMEM
72#define pfn_to_page(pfn) (mem_map + (pfn))
73#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
74#define pfn_valid(pfn) ((pfn) < max_mapnr) 72#define pfn_valid(pfn) ((pfn) < max_mapnr)
75#endif 73#endif
76 74
@@ -200,6 +198,7 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
200 struct page *p); 198 struct page *p);
201extern int page_is_ram(unsigned long pfn); 199extern int page_is_ram(unsigned long pfn);
202 200
201#include <asm-generic/memory_model.h>
203#endif /* __ASSEMBLY__ */ 202#endif /* __ASSEMBLY__ */
204 203
205#endif /* __KERNEL__ */ 204#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h
index ec3c2ee8bf86..baabba96e313 100644
--- a/include/asm-powerpc/types.h
+++ b/include/asm-powerpc/types.h
@@ -103,6 +103,11 @@ typedef u64 sector_t;
103#define HAVE_SECTOR_T 103#define HAVE_SECTOR_T
104#endif 104#endif
105 105
106#ifdef CONFIG_LSF
107typedef u64 blkcnt_t;
108#define HAVE_BLKCNT_T
109#endif
110
106#endif /* __ASSEMBLY__ */ 111#endif /* __ASSEMBLY__ */
107 112
108#endif /* __KERNEL__ */ 113#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
index 538e0c8ab243..a70ba2ee552d 100644
--- a/include/asm-ppc/page.h
+++ b/include/asm-ppc/page.h
@@ -149,8 +149,7 @@ extern int page_is_ram(unsigned long pfn);
149#define __pa(x) ___pa((unsigned long)(x)) 149#define __pa(x) ___pa((unsigned long)(x))
150#define __va(x) ((void *)(___va((unsigned long)(x)))) 150#define __va(x) ((void *)(___va((unsigned long)(x))))
151 151
152#define pfn_to_page(pfn) (mem_map + ((pfn) - PPC_PGSTART)) 152#define ARCH_PFN_OFFSET (PPC_PGSTART)
153#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PPC_PGSTART)
154#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 153#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
155#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 154#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
156 155
@@ -175,5 +174,6 @@ extern __inline__ int get_order(unsigned long size)
175/* We do define AT_SYSINFO_EHDR but don't use the gate mecanism */ 174/* We do define AT_SYSINFO_EHDR but don't use the gate mecanism */
176#define __HAVE_ARCH_GATE_AREA 1 175#define __HAVE_ARCH_GATE_AREA 1
177 176
177#include <asm-generic/memory_model.h>
178#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
179#endif /* _PPC_PAGE_H */ 179#endif /* _PPC_PAGE_H */
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 3628899f48bb..ca092ffb7a95 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -828,35 +828,12 @@ static inline int sched_find_first_bit(unsigned long *b)
828 return find_first_bit(b, 140); 828 return find_first_bit(b, 140);
829} 829}
830 830
831/* 831#include <asm-generic/bitops/ffs.h>
832 * ffs: find first bit set. This is defined the same way as
833 * the libc and compiler builtin ffs routines, therefore
834 * differs in spirit from the above ffz (man ffs).
835 */
836#define ffs(x) generic_ffs(x)
837 832
838/* 833#include <asm-generic/bitops/fls.h>
839 * fls: find last bit set. 834#include <asm-generic/bitops/fls64.h>
840 */
841#define fls(x) generic_fls(x)
842#define fls64(x) generic_fls64(x)
843
844/*
845 * hweightN: returns the hamming weight (i.e. the number
846 * of bits set) of a N-bit word
847 */
848#define hweight64(x) \
849({ \
850 unsigned long __x = (x); \
851 unsigned int __w; \
852 __w = generic_hweight32((unsigned int) __x); \
853 __w += generic_hweight32((unsigned int) (__x>>32)); \
854 __w; \
855})
856#define hweight32(x) generic_hweight32(x)
857#define hweight16(x) generic_hweight16(x)
858#define hweight8(x) generic_hweight8(x)
859 835
836#include <asm-generic/bitops/hweight.h>
860 837
861#ifdef __KERNEL__ 838#ifdef __KERNEL__
862 839
@@ -871,11 +848,11 @@ static inline int sched_find_first_bit(unsigned long *b)
871 */ 848 */
872 849
873#define ext2_set_bit(nr, addr) \ 850#define ext2_set_bit(nr, addr) \
874 test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 851 __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
875#define ext2_set_bit_atomic(lock, nr, addr) \ 852#define ext2_set_bit_atomic(lock, nr, addr) \
876 test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 853 test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
877#define ext2_clear_bit(nr, addr) \ 854#define ext2_clear_bit(nr, addr) \
878 test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 855 __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
879#define ext2_clear_bit_atomic(lock, nr, addr) \ 856#define ext2_clear_bit_atomic(lock, nr, addr) \
880 test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) 857 test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
881#define ext2_test_bit(nr, addr) \ 858#define ext2_test_bit(nr, addr) \
@@ -1011,18 +988,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
1011 return offset + ext2_find_first_zero_bit(p, size); 988 return offset + ext2_find_first_zero_bit(p, size);
1012} 989}
1013 990
1014/* Bitmap functions for the minix filesystem. */ 991#include <asm-generic/bitops/minix.h>
1015/* FIXME !!! */
1016#define minix_test_and_set_bit(nr,addr) \
1017 test_and_set_bit(nr,(unsigned long *)addr)
1018#define minix_set_bit(nr,addr) \
1019 set_bit(nr,(unsigned long *)addr)
1020#define minix_test_and_clear_bit(nr,addr) \
1021 test_and_clear_bit(nr,(unsigned long *)addr)
1022#define minix_test_bit(nr,addr) \
1023 test_bit(nr,(unsigned long *)addr)
1024#define minix_find_first_zero_bit(addr,size) \
1025 find_first_zero_bit(addr,size)
1026 992
1027#endif /* __KERNEL__ */ 993#endif /* __KERNEL__ */
1028 994
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index a007715f4aea..356a0b183539 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -128,6 +128,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr)
128 return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); 128 return (void __user *)(unsigned long)(uptr & 0x7fffffffUL);
129} 129}
130 130
131static inline compat_uptr_t ptr_to_compat(void __user *uptr)
132{
133 return (u32)(unsigned long)uptr;
134}
135
131static inline void __user *compat_alloc_user_space(long len) 136static inline void __user *compat_alloc_user_space(long len)
132{ 137{
133 unsigned long stack; 138 unsigned long stack;
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 2430c561e021..3b1138ac7e79 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -181,8 +181,6 @@ page_get_storage_key(unsigned long addr)
181#define PAGE_OFFSET 0x0UL 181#define PAGE_OFFSET 0x0UL
182#define __pa(x) (unsigned long)(x) 182#define __pa(x) (unsigned long)(x)
183#define __va(x) (void *)(unsigned long)(x) 183#define __va(x) (void *)(unsigned long)(x)
184#define pfn_to_page(pfn) (mem_map + (pfn))
185#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
186#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 184#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
187 185
188#define pfn_valid(pfn) ((pfn) < max_mapnr) 186#define pfn_valid(pfn) ((pfn) < max_mapnr)
@@ -193,6 +191,7 @@ page_get_storage_key(unsigned long addr)
193 191
194#endif /* __KERNEL__ */ 192#endif /* __KERNEL__ */
195 193
194#include <asm-generic/memory_model.h>
196#include <asm-generic/page.h> 195#include <asm-generic/page.h>
197 196
198#endif /* _S390_PAGE_H */ 197#endif /* _S390_PAGE_H */
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
index d0be3e477013..5738ad63537c 100644
--- a/include/asm-s390/types.h
+++ b/include/asm-s390/types.h
@@ -93,6 +93,11 @@ typedef u64 sector_t;
93#define HAVE_SECTOR_T 93#define HAVE_SECTOR_T
94#endif 94#endif
95 95
96#ifdef CONFIG_LSF
97typedef u64 blkcnt_t;
98#define HAVE_BLKCNT_T
99#endif
100
96#endif /* ! __s390x__ */ 101#endif /* ! __s390x__ */
97#endif /* __ASSEMBLY__ */ 102#endif /* __ASSEMBLY__ */
98#endif /* __KERNEL__ */ 103#endif /* __KERNEL__ */
diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h
index dbb05d1a26d1..720afc11c2ca 100644
--- a/include/asm-sh/addrspace.h
+++ b/include/asm-sh/addrspace.h
@@ -13,7 +13,7 @@
13 13
14#include <asm/cpu/addrspace.h> 14#include <asm/cpu/addrspace.h>
15 15
16/* Memory segments (32bit Priviledged mode addresses) */ 16/* Memory segments (32bit Privileged mode addresses) */
17#define P0SEG 0x00000000 17#define P0SEG 0x00000000
18#define P1SEG 0x80000000 18#define P1SEG 0x80000000
19#define P2SEG 0xa0000000 19#define P2SEG 0xa0000000
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index 1c5260860045..e34f82508568 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -19,16 +19,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
19 local_irq_restore(flags); 19 local_irq_restore(flags);
20} 20}
21 21
22static __inline__ void __set_bit(int nr, volatile void * addr)
23{
24 int mask;
25 volatile unsigned int *a = addr;
26
27 a += nr >> 5;
28 mask = 1 << (nr & 0x1f);
29 *a |= mask;
30}
31
32/* 22/*
33 * clear_bit() doesn't provide any barrier for the compiler. 23 * clear_bit() doesn't provide any barrier for the compiler.
34 */ 24 */
@@ -47,16 +37,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
47 local_irq_restore(flags); 37 local_irq_restore(flags);
48} 38}
49 39
50static __inline__ void __clear_bit(int nr, volatile void * addr)
51{
52 int mask;
53 volatile unsigned int *a = addr;
54
55 a += nr >> 5;
56 mask = 1 << (nr & 0x1f);
57 *a &= ~mask;
58}
59
60static __inline__ void change_bit(int nr, volatile void * addr) 40static __inline__ void change_bit(int nr, volatile void * addr)
61{ 41{
62 int mask; 42 int mask;
@@ -70,16 +50,6 @@ static __inline__ void change_bit(int nr, volatile void * addr)
70 local_irq_restore(flags); 50 local_irq_restore(flags);
71} 51}
72 52
73static __inline__ void __change_bit(int nr, volatile void * addr)
74{
75 int mask;
76 volatile unsigned int *a = addr;
77
78 a += nr >> 5;
79 mask = 1 << (nr & 0x1f);
80 *a ^= mask;
81}
82
83static __inline__ int test_and_set_bit(int nr, volatile void * addr) 53static __inline__ int test_and_set_bit(int nr, volatile void * addr)
84{ 54{
85 int mask, retval; 55 int mask, retval;
@@ -96,19 +66,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
96 return retval; 66 return retval;
97} 67}
98 68
99static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
100{
101 int mask, retval;
102 volatile unsigned int *a = addr;
103
104 a += nr >> 5;
105 mask = 1 << (nr & 0x1f);
106 retval = (mask & *a) != 0;
107 *a |= mask;
108
109 return retval;
110}
111
112static __inline__ int test_and_clear_bit(int nr, volatile void * addr) 69static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
113{ 70{
114 int mask, retval; 71 int mask, retval;
@@ -125,19 +82,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
125 return retval; 82 return retval;
126} 83}
127 84
128static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
129{
130 int mask, retval;
131 volatile unsigned int *a = addr;
132
133 a += nr >> 5;
134 mask = 1 << (nr & 0x1f);
135 retval = (mask & *a) != 0;
136 *a &= ~mask;
137
138 return retval;
139}
140
141static __inline__ int test_and_change_bit(int nr, volatile void * addr) 85static __inline__ int test_and_change_bit(int nr, volatile void * addr)
142{ 86{
143 int mask, retval; 87 int mask, retval;
@@ -154,23 +98,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
154 return retval; 98 return retval;
155} 99}
156 100
157static __inline__ int __test_and_change_bit(int nr, volatile void * addr) 101#include <asm-generic/bitops/non-atomic.h>
158{
159 int mask, retval;
160 volatile unsigned int *a = addr;
161
162 a += nr >> 5;
163 mask = 1 << (nr & 0x1f);
164 retval = (mask & *a) != 0;
165 *a ^= mask;
166
167 return retval;
168}
169
170static __inline__ int test_bit(int nr, const volatile void *addr)
171{
172 return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
173}
174 102
175static __inline__ unsigned long ffz(unsigned long word) 103static __inline__ unsigned long ffz(unsigned long word)
176{ 104{
@@ -206,271 +134,15 @@ static __inline__ unsigned long __ffs(unsigned long word)
206 return result; 134 return result;
207} 135}
208 136
209/** 137#include <asm-generic/bitops/find.h>
210 * find_next_bit - find the next set bit in a memory region 138#include <asm-generic/bitops/ffs.h>
211 * @addr: The address to base the search on 139#include <asm-generic/bitops/hweight.h>
212 * @offset: The bitnumber to start searching at 140#include <asm-generic/bitops/sched.h>
213 * @size: The maximum size to search 141#include <asm-generic/bitops/ext2-non-atomic.h>
214 */ 142#include <asm-generic/bitops/ext2-atomic.h>
215static __inline__ unsigned long find_next_bit(const unsigned long *addr, 143#include <asm-generic/bitops/minix.h>
216 unsigned long size, unsigned long offset) 144#include <asm-generic/bitops/fls.h>
217{ 145#include <asm-generic/bitops/fls64.h>
218 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
219 unsigned int result = offset & ~31UL;
220 unsigned int tmp;
221
222 if (offset >= size)
223 return size;
224 size -= result;
225 offset &= 31UL;
226 if (offset) {
227 tmp = *p++;
228 tmp &= ~0UL << offset;
229 if (size < 32)
230 goto found_first;
231 if (tmp)
232 goto found_middle;
233 size -= 32;
234 result += 32;
235 }
236 while (size >= 32) {
237 if ((tmp = *p++) != 0)
238 goto found_middle;
239 result += 32;
240 size -= 32;
241 }
242 if (!size)
243 return result;
244 tmp = *p;
245
246found_first:
247 tmp &= ~0UL >> (32 - size);
248 if (tmp == 0UL) /* Are any bits set? */
249 return result + size; /* Nope. */
250found_middle:
251 return result + __ffs(tmp);
252}
253
254/**
255 * find_first_bit - find the first set bit in a memory region
256 * @addr: The address to start the search at
257 * @size: The maximum size to search
258 *
259 * Returns the bit-number of the first set bit, not the number of the byte
260 * containing a bit.
261 */
262#define find_first_bit(addr, size) \
263 find_next_bit((addr), (size), 0)
264
265static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
266{
267 const unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
268 unsigned long result = offset & ~31UL;
269 unsigned long tmp;
270
271 if (offset >= size)
272 return size;
273 size -= result;
274 offset &= 31UL;
275 if (offset) {
276 tmp = *(p++);
277 tmp |= ~0UL >> (32-offset);
278 if (size < 32)
279 goto found_first;
280 if (~tmp)
281 goto found_middle;
282 size -= 32;
283 result += 32;
284 }
285 while (size & ~31UL) {
286 if (~(tmp = *(p++)))
287 goto found_middle;
288 result += 32;
289 size -= 32;
290 }
291 if (!size)
292 return result;
293 tmp = *p;
294
295found_first:
296 tmp |= ~0UL << size;
297found_middle:
298 return result + ffz(tmp);
299}
300
301#define find_first_zero_bit(addr, size) \
302 find_next_zero_bit((addr), (size), 0)
303
304/*
305 * ffs: find first bit set. This is defined the same way as
306 * the libc and compiler builtin ffs routines, therefore
307 * differs in spirit from the above ffz (man ffs).
308 */
309
310#define ffs(x) generic_ffs(x)
311
312/*
313 * hweightN: returns the hamming weight (i.e. the number
314 * of bits set) of a N-bit word
315 */
316
317#define hweight32(x) generic_hweight32(x)
318#define hweight16(x) generic_hweight16(x)
319#define hweight8(x) generic_hweight8(x)
320
321/*
322 * Every architecture must define this function. It's the fastest
323 * way of searching a 140-bit bitmap where the first 100 bits are
324 * unlikely to be set. It's guaranteed that at least one of the 140
325 * bits is cleared.
326 */
327
328static inline int sched_find_first_bit(const unsigned long *b)
329{
330 if (unlikely(b[0]))
331 return __ffs(b[0]);
332 if (unlikely(b[1]))
333 return __ffs(b[1]) + 32;
334 if (unlikely(b[2]))
335 return __ffs(b[2]) + 64;
336 if (b[3])
337 return __ffs(b[3]) + 96;
338 return __ffs(b[4]) + 128;
339}
340
341#ifdef __LITTLE_ENDIAN__
342#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
343#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
344#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
345#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
346#define ext2_find_next_zero_bit(addr, size, offset) \
347 find_next_zero_bit((unsigned long *)(addr), (size), (offset))
348#else
349static __inline__ int ext2_set_bit(int nr, volatile void * addr)
350{
351 int mask, retval;
352 unsigned long flags;
353 volatile unsigned char *ADDR = (unsigned char *) addr;
354
355 ADDR += nr >> 3;
356 mask = 1 << (nr & 0x07);
357 local_irq_save(flags);
358 retval = (mask & *ADDR) != 0;
359 *ADDR |= mask;
360 local_irq_restore(flags);
361 return retval;
362}
363
364static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
365{
366 int mask, retval;
367 unsigned long flags;
368 volatile unsigned char *ADDR = (unsigned char *) addr;
369
370 ADDR += nr >> 3;
371 mask = 1 << (nr & 0x07);
372 local_irq_save(flags);
373 retval = (mask & *ADDR) != 0;
374 *ADDR &= ~mask;
375 local_irq_restore(flags);
376 return retval;
377}
378
379static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
380{
381 int mask;
382 const volatile unsigned char *ADDR = (const unsigned char *) addr;
383
384 ADDR += nr >> 3;
385 mask = 1 << (nr & 0x07);
386 return ((mask & *ADDR) != 0);
387}
388
389#define ext2_find_first_zero_bit(addr, size) \
390 ext2_find_next_zero_bit((addr), (size), 0)
391
392static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
393{
394 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
395 unsigned long result = offset & ~31UL;
396 unsigned long tmp;
397
398 if (offset >= size)
399 return size;
400 size -= result;
401 offset &= 31UL;
402 if(offset) {
403 /* We hold the little endian value in tmp, but then the
404 * shift is illegal. So we could keep a big endian value
405 * in tmp, like this:
406 *
407 * tmp = __swab32(*(p++));
408 * tmp |= ~0UL >> (32-offset);
409 *
410 * but this would decrease preformance, so we change the
411 * shift:
412 */
413 tmp = *(p++);
414 tmp |= __swab32(~0UL >> (32-offset));
415 if(size < 32)
416 goto found_first;
417 if(~tmp)
418 goto found_middle;
419 size -= 32;
420 result += 32;
421 }
422 while(size & ~31UL) {
423 if(~(tmp = *(p++)))
424 goto found_middle;
425 result += 32;
426 size -= 32;
427 }
428 if(!size)
429 return result;
430 tmp = *p;
431
432found_first:
433 /* tmp is little endian, so we would have to swab the shift,
434 * see above. But then we have to swab tmp below for ffz, so
435 * we might as well do this here.
436 */
437 return result + ffz(__swab32(tmp) | (~0UL << size));
438found_middle:
439 return result + ffz(__swab32(tmp));
440}
441#endif
442
443#define ext2_set_bit_atomic(lock, nr, addr) \
444 ({ \
445 int ret; \
446 spin_lock(lock); \
447 ret = ext2_set_bit((nr), (addr)); \
448 spin_unlock(lock); \
449 ret; \
450 })
451
452#define ext2_clear_bit_atomic(lock, nr, addr) \
453 ({ \
454 int ret; \
455 spin_lock(lock); \
456 ret = ext2_clear_bit((nr), (addr)); \
457 spin_unlock(lock); \
458 ret; \
459 })
460
461/* Bitmap functions for the minix filesystem. */
462#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
463#define minix_set_bit(nr,addr) set_bit(nr,addr)
464#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
465#define minix_test_bit(nr,addr) test_bit(nr,addr)
466#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
467
468/*
469 * fls: find last bit set.
470 */
471
472#define fls(x) generic_fls(x)
473#define fls64(x) generic_fls64(x)
474 146
475#endif /* __KERNEL__ */ 147#endif /* __KERNEL__ */
476 148
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 972c3f655b2a..9c89287c3e56 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
105 105
106/* PFN start number, because of __MEMORY_START */ 106/* PFN start number, because of __MEMORY_START */
107#define PFN_START (__MEMORY_START >> PAGE_SHIFT) 107#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
108 108#define ARCH_PFN_OFFSET (PFN_START)
109#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START)
110#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START)
111#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 109#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
112#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) 110#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr)
113#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 111#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
@@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
117 115
118#endif /* __KERNEL__ */ 116#endif /* __KERNEL__ */
119 117
118#include <asm-generic/memory_model.h>
120#include <asm-generic/page.h> 119#include <asm-generic/page.h>
121 120
122#endif /* __ASM_SH_PAGE_H */ 121#endif /* __ASM_SH_PAGE_H */
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h
index 914e3fcbbd37..6c41a60657f1 100644
--- a/include/asm-sh/stat.h
+++ b/include/asm-sh/stat.h
@@ -60,13 +60,7 @@ struct stat64 {
60 long long st_size; 60 long long st_size;
61 unsigned long st_blksize; 61 unsigned long st_blksize;
62 62
63#if defined(__BIG_ENDIAN__) 63 unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
64 unsigned long __pad4; /* Future possible st_blocks hi bits */
65 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
66#else /* Must be little */
67 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
68 unsigned long __pad4; /* Future possible st_blocks hi bits */
69#endif
70 64
71 unsigned long st_atime; 65 unsigned long st_atime;
72 unsigned long st_atime_nsec; 66 unsigned long st_atime_nsec;
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 85f0c11b4319..7345350d98c0 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -18,7 +18,7 @@
18struct thread_info { 18struct thread_info {
19 struct task_struct *task; /* main task structure */ 19 struct task_struct *task; /* main task structure */
20 struct exec_domain *exec_domain; /* execution domain */ 20 struct exec_domain *exec_domain; /* execution domain */
21 __u32 flags; /* low level flags */ 21 unsigned long flags; /* low level flags */
22 __u32 cpu; 22 __u32 cpu;
23 int preempt_count; /* 0 => preemptable, <0 => BUG */ 23 int preempt_count; /* 0 => preemptable, <0 => BUG */
24 struct restart_block restart_block; 24 struct restart_block restart_block;
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
index cb7e183a0a6b..488552f43b2a 100644
--- a/include/asm-sh/types.h
+++ b/include/asm-sh/types.h
@@ -58,6 +58,11 @@ typedef u64 sector_t;
58#define HAVE_SECTOR_T 58#define HAVE_SECTOR_T
59#endif 59#endif
60 60
61#ifdef CONFIG_LSF
62typedef u64 blkcnt_t;
63#define HAVE_BLKCNT_T
64#endif
65
61#endif /* __ASSEMBLY__ */ 66#endif /* __ASSEMBLY__ */
62 67
63#endif /* __KERNEL__ */ 68#endif /* __KERNEL__ */
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
index ce9c3ad45fe0..f3bdcdb5d046 100644
--- a/include/asm-sh64/bitops.h
+++ b/include/asm-sh64/bitops.h
@@ -31,16 +31,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
31 local_irq_restore(flags); 31 local_irq_restore(flags);
32} 32}
33 33
34static inline void __set_bit(int nr, void *addr)
35{
36 int mask;
37 unsigned int *a = addr;
38
39 a += nr >> 5;
40 mask = 1 << (nr & 0x1f);
41 *a |= mask;
42}
43
44/* 34/*
45 * clear_bit() doesn't provide any barrier for the compiler. 35 * clear_bit() doesn't provide any barrier for the compiler.
46 */ 36 */
@@ -58,15 +48,6 @@ static inline void clear_bit(int nr, volatile unsigned long *a)
58 local_irq_restore(flags); 48 local_irq_restore(flags);
59} 49}
60 50
61static inline void __clear_bit(int nr, volatile unsigned long *a)
62{
63 int mask;
64
65 a += nr >> 5;
66 mask = 1 << (nr & 0x1f);
67 *a &= ~mask;
68}
69
70static __inline__ void change_bit(int nr, volatile void * addr) 51static __inline__ void change_bit(int nr, volatile void * addr)
71{ 52{
72 int mask; 53 int mask;
@@ -80,16 +61,6 @@ static __inline__ void change_bit(int nr, volatile void * addr)
80 local_irq_restore(flags); 61 local_irq_restore(flags);
81} 62}
82 63
83static __inline__ void __change_bit(int nr, volatile void * addr)
84{
85 int mask;
86 volatile unsigned int *a = addr;
87
88 a += nr >> 5;
89 mask = 1 << (nr & 0x1f);
90 *a ^= mask;
91}
92
93static __inline__ int test_and_set_bit(int nr, volatile void * addr) 64static __inline__ int test_and_set_bit(int nr, volatile void * addr)
94{ 65{
95 int mask, retval; 66 int mask, retval;
@@ -106,19 +77,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
106 return retval; 77 return retval;
107} 78}
108 79
109static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
110{
111 int mask, retval;
112 volatile unsigned int *a = addr;
113
114 a += nr >> 5;
115 mask = 1 << (nr & 0x1f);
116 retval = (mask & *a) != 0;
117 *a |= mask;
118
119 return retval;
120}
121
122static __inline__ int test_and_clear_bit(int nr, volatile void * addr) 80static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
123{ 81{
124 int mask, retval; 82 int mask, retval;
@@ -135,19 +93,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
135 return retval; 93 return retval;
136} 94}
137 95
138static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
139{
140 int mask, retval;
141 volatile unsigned int *a = addr;
142
143 a += nr >> 5;
144 mask = 1 << (nr & 0x1f);
145 retval = (mask & *a) != 0;
146 *a &= ~mask;
147
148 return retval;
149}
150
151static __inline__ int test_and_change_bit(int nr, volatile void * addr) 96static __inline__ int test_and_change_bit(int nr, volatile void * addr)
152{ 97{
153 int mask, retval; 98 int mask, retval;
@@ -164,23 +109,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
164 return retval; 109 return retval;
165} 110}
166 111
167static __inline__ int __test_and_change_bit(int nr, volatile void * addr) 112#include <asm-generic/bitops/non-atomic.h>
168{
169 int mask, retval;
170 volatile unsigned int *a = addr;
171
172 a += nr >> 5;
173 mask = 1 << (nr & 0x1f);
174 retval = (mask & *a) != 0;
175 *a ^= mask;
176
177 return retval;
178}
179
180static __inline__ int test_bit(int nr, const volatile void *addr)
181{
182 return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
183}
184 113
185static __inline__ unsigned long ffz(unsigned long word) 114static __inline__ unsigned long ffz(unsigned long word)
186{ 115{
@@ -204,313 +133,16 @@ static __inline__ unsigned long ffz(unsigned long word)
204 return result; 133 return result;
205} 134}
206 135
207/** 136#include <asm-generic/bitops/__ffs.h>
208 * __ffs - find first bit in word 137#include <asm-generic/bitops/find.h>
209 * @word: The word to search 138#include <asm-generic/bitops/hweight.h>
210 * 139#include <asm-generic/bitops/sched.h>
211 * Undefined if no bit exists, so code should check against 0 first. 140#include <asm-generic/bitops/ffs.h>
212 */ 141#include <asm-generic/bitops/ext2-non-atomic.h>
213static inline unsigned long __ffs(unsigned long word) 142#include <asm-generic/bitops/ext2-atomic.h>
214{ 143#include <asm-generic/bitops/minix.h>
215 int r = 0; 144#include <asm-generic/bitops/fls.h>
216 145#include <asm-generic/bitops/fls64.h>
217 if (!word)
218 return 0;
219 if (!(word & 0xffff)) {
220 word >>= 16;
221 r += 16;
222 }
223 if (!(word & 0xff)) {
224 word >>= 8;
225 r += 8;
226 }
227 if (!(word & 0xf)) {
228 word >>= 4;
229 r += 4;
230 }
231 if (!(word & 3)) {
232 word >>= 2;
233 r += 2;
234 }
235 if (!(word & 1)) {
236 word >>= 1;
237 r += 1;
238 }
239 return r;
240}
241
242/**
243 * find_next_bit - find the next set bit in a memory region
244 * @addr: The address to base the search on
245 * @offset: The bitnumber to start searching at
246 * @size: The maximum size to search
247 */
248static inline unsigned long find_next_bit(const unsigned long *addr,
249 unsigned long size, unsigned long offset)
250{
251 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
252 unsigned int result = offset & ~31UL;
253 unsigned int tmp;
254
255 if (offset >= size)
256 return size;
257 size -= result;
258 offset &= 31UL;
259 if (offset) {
260 tmp = *p++;
261 tmp &= ~0UL << offset;
262 if (size < 32)
263 goto found_first;
264 if (tmp)
265 goto found_middle;
266 size -= 32;
267 result += 32;
268 }
269 while (size >= 32) {
270 if ((tmp = *p++) != 0)
271 goto found_middle;
272 result += 32;
273 size -= 32;
274 }
275 if (!size)
276 return result;
277 tmp = *p;
278
279found_first:
280 tmp &= ~0UL >> (32 - size);
281 if (tmp == 0UL) /* Are any bits set? */
282 return result + size; /* Nope. */
283found_middle:
284 return result + __ffs(tmp);
285}
286
287/**
288 * find_first_bit - find the first set bit in a memory region
289 * @addr: The address to start the search at
290 * @size: The maximum size to search
291 *
292 * Returns the bit-number of the first set bit, not the number of the byte
293 * containing a bit.
294 */
295#define find_first_bit(addr, size) \
296 find_next_bit((addr), (size), 0)
297
298
299static inline int find_next_zero_bit(void *addr, int size, int offset)
300{
301 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
302 unsigned long result = offset & ~31UL;
303 unsigned long tmp;
304
305 if (offset >= size)
306 return size;
307 size -= result;
308 offset &= 31UL;
309 if (offset) {
310 tmp = *(p++);
311 tmp |= ~0UL >> (32-offset);
312 if (size < 32)
313 goto found_first;
314 if (~tmp)
315 goto found_middle;
316 size -= 32;
317 result += 32;
318 }
319 while (size & ~31UL) {
320 if (~(tmp = *(p++)))
321 goto found_middle;
322 result += 32;
323 size -= 32;
324 }
325 if (!size)
326 return result;
327 tmp = *p;
328
329found_first:
330 tmp |= ~0UL << size;
331found_middle:
332 return result + ffz(tmp);
333}
334
335#define find_first_zero_bit(addr, size) \
336 find_next_zero_bit((addr), (size), 0)
337
338/*
339 * hweightN: returns the hamming weight (i.e. the number
340 * of bits set) of a N-bit word
341 */
342
343#define hweight32(x) generic_hweight32(x)
344#define hweight16(x) generic_hweight16(x)
345#define hweight8(x) generic_hweight8(x)
346
347/*
348 * Every architecture must define this function. It's the fastest
349 * way of searching a 140-bit bitmap where the first 100 bits are
350 * unlikely to be set. It's guaranteed that at least one of the 140
351 * bits is cleared.
352 */
353
354static inline int sched_find_first_bit(unsigned long *b)
355{
356 if (unlikely(b[0]))
357 return __ffs(b[0]);
358 if (unlikely(b[1]))
359 return __ffs(b[1]) + 32;
360 if (unlikely(b[2]))
361 return __ffs(b[2]) + 64;
362 if (b[3])
363 return __ffs(b[3]) + 96;
364 return __ffs(b[4]) + 128;
365}
366
367/*
368 * ffs: find first bit set. This is defined the same way as
369 * the libc and compiler builtin ffs routines, therefore
370 * differs in spirit from the above ffz (man ffs).
371 */
372
373#define ffs(x) generic_ffs(x)
374
375/*
376 * hweightN: returns the hamming weight (i.e. the number
377 * of bits set) of a N-bit word
378 */
379
380#define hweight32(x) generic_hweight32(x)
381#define hweight16(x) generic_hweight16(x)
382#define hweight8(x) generic_hweight8(x)
383
384#ifdef __LITTLE_ENDIAN__
385#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
386#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
387#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
388#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
389#define ext2_find_next_zero_bit(addr, size, offset) \
390 find_next_zero_bit((addr), (size), (offset))
391#else
392static __inline__ int ext2_set_bit(int nr, volatile void * addr)
393{
394 int mask, retval;
395 unsigned long flags;
396 volatile unsigned char *ADDR = (unsigned char *) addr;
397
398 ADDR += nr >> 3;
399 mask = 1 << (nr & 0x07);
400 local_irq_save(flags);
401 retval = (mask & *ADDR) != 0;
402 *ADDR |= mask;
403 local_irq_restore(flags);
404 return retval;
405}
406
407static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
408{
409 int mask, retval;
410 unsigned long flags;
411 volatile unsigned char *ADDR = (unsigned char *) addr;
412
413 ADDR += nr >> 3;
414 mask = 1 << (nr & 0x07);
415 local_irq_save(flags);
416 retval = (mask & *ADDR) != 0;
417 *ADDR &= ~mask;
418 local_irq_restore(flags);
419 return retval;
420}
421
422static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
423{
424 int mask;
425 const volatile unsigned char *ADDR = (const unsigned char *) addr;
426
427 ADDR += nr >> 3;
428 mask = 1 << (nr & 0x07);
429 return ((mask & *ADDR) != 0);
430}
431
432#define ext2_find_first_zero_bit(addr, size) \
433 ext2_find_next_zero_bit((addr), (size), 0)
434
435static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
436{
437 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
438 unsigned long result = offset & ~31UL;
439 unsigned long tmp;
440
441 if (offset >= size)
442 return size;
443 size -= result;
444 offset &= 31UL;
445 if(offset) {
446 /* We hold the little endian value in tmp, but then the
447 * shift is illegal. So we could keep a big endian value
448 * in tmp, like this:
449 *
450 * tmp = __swab32(*(p++));
451 * tmp |= ~0UL >> (32-offset);
452 *
453 * but this would decrease performance, so we change the
454 * shift:
455 */
456 tmp = *(p++);
457 tmp |= __swab32(~0UL >> (32-offset));
458 if(size < 32)
459 goto found_first;
460 if(~tmp)
461 goto found_middle;
462 size -= 32;
463 result += 32;
464 }
465 while(size & ~31UL) {
466 if(~(tmp = *(p++)))
467 goto found_middle;
468 result += 32;
469 size -= 32;
470 }
471 if(!size)
472 return result;
473 tmp = *p;
474
475found_first:
476 /* tmp is little endian, so we would have to swab the shift,
477 * see above. But then we have to swab tmp below for ffz, so
478 * we might as well do this here.
479 */
480 return result + ffz(__swab32(tmp) | (~0UL << size));
481found_middle:
482 return result + ffz(__swab32(tmp));
483}
484#endif
485
486#define ext2_set_bit_atomic(lock, nr, addr) \
487 ({ \
488 int ret; \
489 spin_lock(lock); \
490 ret = ext2_set_bit((nr), (addr)); \
491 spin_unlock(lock); \
492 ret; \
493 })
494
495#define ext2_clear_bit_atomic(lock, nr, addr) \
496 ({ \
497 int ret; \
498 spin_lock(lock); \
499 ret = ext2_clear_bit((nr), (addr)); \
500 spin_unlock(lock); \
501 ret; \
502 })
503
504/* Bitmap functions for the minix filesystem. */
505#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
506#define minix_set_bit(nr,addr) set_bit(nr,addr)
507#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
508#define minix_test_bit(nr,addr) test_bit(nr,addr)
509#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
510
511#define ffs(x) generic_ffs(x)
512#define fls(x) generic_fls(x)
513#define fls64(x) generic_fls64(x)
514 146
515#endif /* __KERNEL__ */ 147#endif /* __KERNEL__ */
516 148
diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h
index c86df90f7cbd..e4937cdabebd 100644
--- a/include/asm-sh64/page.h
+++ b/include/asm-sh64/page.h
@@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
105 105
106/* PFN start number, because of __MEMORY_START */ 106/* PFN start number, because of __MEMORY_START */
107#define PFN_START (__MEMORY_START >> PAGE_SHIFT) 107#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
108 108#define ARCH_PFN_OFFSET (PFN_START)
109#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START)
110#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START)
111#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 109#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
112#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) 110#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr)
113#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 111#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
@@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
117 115
118#endif /* __KERNEL__ */ 116#endif /* __KERNEL__ */
119 117
118#include <asm-generic/memory_model.h>
120#include <asm-generic/page.h> 119#include <asm-generic/page.h>
121 120
122#endif /* __ASM_SH64_PAGE_H */ 121#endif /* __ASM_SH64_PAGE_H */
diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h
index 7046a9014027..bd0d9c405a80 100644
--- a/include/asm-sh64/platform.h
+++ b/include/asm-sh64/platform.h
@@ -61,9 +61,4 @@ extern int platform_int_priority[NR_INTC_IRQS];
61#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2]) 61#define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2])
62#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1]) 62#define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1])
63 63
64/* Be prepared to 64-bit sign extensions */
65#define PFN_UP(x) ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff)
66#define PFN_DOWN(x) (((x) >> PAGE_SHIFT) & 0x000fffff)
67#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
68
69#endif /* __ASM_SH64_PLATFORM_H */ 64#endif /* __ASM_SH64_PLATFORM_H */
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 41722b5e45ef..04aa3318f76a 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
152 : "memory", "cc"); 152 : "memory", "cc");
153} 153}
154 154
155/* 155#include <asm-generic/bitops/non-atomic.h>
156 * non-atomic versions
157 */
158static inline void __set_bit(int nr, volatile unsigned long *addr)
159{
160 unsigned long mask = 1UL << (nr & 0x1f);
161 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
162
163 *p |= mask;
164}
165
166static inline void __clear_bit(int nr, volatile unsigned long *addr)
167{
168 unsigned long mask = 1UL << (nr & 0x1f);
169 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
170
171 *p &= ~mask;
172}
173
174static inline void __change_bit(int nr, volatile unsigned long *addr)
175{
176 unsigned long mask = 1UL << (nr & 0x1f);
177 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
178
179 *p ^= mask;
180}
181
182static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
183{
184 unsigned long mask = 1UL << (nr & 0x1f);
185 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
186 unsigned long old = *p;
187
188 *p = old | mask;
189 return (old & mask) != 0;
190}
191
192static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
193{
194 unsigned long mask = 1UL << (nr & 0x1f);
195 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
196 unsigned long old = *p;
197
198 *p = old & ~mask;
199 return (old & mask) != 0;
200}
201
202static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
203{
204 unsigned long mask = 1UL << (nr & 0x1f);
205 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
206 unsigned long old = *p;
207
208 *p = old ^ mask;
209 return (old & mask) != 0;
210}
211 156
212#define smp_mb__before_clear_bit() do { } while(0) 157#define smp_mb__before_clear_bit() do { } while(0)
213#define smp_mb__after_clear_bit() do { } while(0) 158#define smp_mb__after_clear_bit() do { } while(0)
214 159
215/* The following routine need not be atomic. */ 160#include <asm-generic/bitops/ffz.h>
216static inline int test_bit(int nr, __const__ volatile unsigned long *addr) 161#include <asm-generic/bitops/__ffs.h>
217{ 162#include <asm-generic/bitops/sched.h>
218 return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL; 163#include <asm-generic/bitops/ffs.h>
219} 164#include <asm-generic/bitops/fls.h>
220 165#include <asm-generic/bitops/fls64.h>
221/* The easy/cheese version for now. */ 166#include <asm-generic/bitops/hweight.h>
222static inline unsigned long ffz(unsigned long word) 167#include <asm-generic/bitops/find.h>
223{ 168#include <asm-generic/bitops/ext2-non-atomic.h>
224 unsigned long result = 0; 169#include <asm-generic/bitops/ext2-atomic.h>
225 170#include <asm-generic/bitops/minix.h>
226 while(word & 1) {
227 result++;
228 word >>= 1;
229 }
230 return result;
231}
232
233/**
234 * __ffs - find first bit in word.
235 * @word: The word to search
236 *
237 * Undefined if no bit exists, so code should check against 0 first.
238 */
239static inline int __ffs(unsigned long word)
240{
241 int num = 0;
242
243 if ((word & 0xffff) == 0) {
244 num += 16;
245 word >>= 16;
246 }
247 if ((word & 0xff) == 0) {
248 num += 8;
249 word >>= 8;
250 }
251 if ((word & 0xf) == 0) {
252 num += 4;
253 word >>= 4;
254 }
255 if ((word & 0x3) == 0) {
256 num += 2;
257 word >>= 2;
258 }
259 if ((word & 0x1) == 0)
260 num += 1;
261 return num;
262}
263
264/*
265 * Every architecture must define this function. It's the fastest
266 * way of searching a 140-bit bitmap where the first 100 bits are
267 * unlikely to be set. It's guaranteed that at least one of the 140
268 * bits is cleared.
269 */
270static inline int sched_find_first_bit(unsigned long *b)
271{
272
273 if (unlikely(b[0]))
274 return __ffs(b[0]);
275 if (unlikely(b[1]))
276 return __ffs(b[1]) + 32;
277 if (unlikely(b[2]))
278 return __ffs(b[2]) + 64;
279 if (b[3])
280 return __ffs(b[3]) + 96;
281 return __ffs(b[4]) + 128;
282}
283
284/*
285 * ffs: find first bit set. This is defined the same way as
286 * the libc and compiler builtin ffs routines, therefore
287 * differs in spirit from the above ffz (man ffs).
288 */
289static inline int ffs(int x)
290{
291 if (!x)
292 return 0;
293 return __ffs((unsigned long)x) + 1;
294}
295
296/*
297 * fls: find last (most-significant) bit set.
298 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
299 */
300#define fls(x) generic_fls(x)
301#define fls64(x) generic_fls64(x)
302
303/*
304 * hweightN: returns the hamming weight (i.e. the number
305 * of bits set) of a N-bit word
306 */
307#define hweight32(x) generic_hweight32(x)
308#define hweight16(x) generic_hweight16(x)
309#define hweight8(x) generic_hweight8(x)
310
311/*
312 * find_next_zero_bit() finds the first zero bit in a bit string of length
313 * 'size' bits, starting the search at bit 'offset'. This is largely based
314 * on Linus's ALPHA routines, which are pretty portable BTW.
315 */
316static inline unsigned long find_next_zero_bit(const unsigned long *addr,
317 unsigned long size, unsigned long offset)
318{
319 const unsigned long *p = addr + (offset >> 5);
320 unsigned long result = offset & ~31UL;
321 unsigned long tmp;
322
323 if (offset >= size)
324 return size;
325 size -= result;
326 offset &= 31UL;
327 if (offset) {
328 tmp = *(p++);
329 tmp |= ~0UL >> (32-offset);
330 if (size < 32)
331 goto found_first;
332 if (~tmp)
333 goto found_middle;
334 size -= 32;
335 result += 32;
336 }
337 while (size & ~31UL) {
338 if (~(tmp = *(p++)))
339 goto found_middle;
340 result += 32;
341 size -= 32;
342 }
343 if (!size)
344 return result;
345 tmp = *p;
346
347found_first:
348 tmp |= ~0UL << size;
349 if (tmp == ~0UL) /* Are any bits zero? */
350 return result + size; /* Nope. */
351found_middle:
352 return result + ffz(tmp);
353}
354
355/*
356 * Linus sez that gcc can optimize the following correctly, we'll see if this
357 * holds on the Sparc as it does for the ALPHA.
358 */
359#define find_first_zero_bit(addr, size) \
360 find_next_zero_bit((addr), (size), 0)
361
362/**
363 * find_next_bit - find the first set bit in a memory region
364 * @addr: The address to base the search on
365 * @offset: The bitnumber to start searching at
366 * @size: The maximum size to search
367 *
368 * Scheduler induced bitop, do not use.
369 */
370static inline int find_next_bit(const unsigned long *addr, int size, int offset)
371{
372 const unsigned long *p = addr + (offset >> 5);
373 int num = offset & ~0x1f;
374 unsigned long word;
375
376 word = *p++;
377 word &= ~((1 << (offset & 0x1f)) - 1);
378 while (num < size) {
379 if (word != 0) {
380 return __ffs(word) + num;
381 }
382 word = *p++;
383 num += 0x20;
384 }
385 return num;
386}
387
388/**
389 * find_first_bit - find the first set bit in a memory region
390 * @addr: The address to start the search at
391 * @size: The maximum size to search
392 *
393 * Returns the bit-number of the first set bit, not the number of the byte
394 * containing a bit.
395 */
396#define find_first_bit(addr, size) \
397 find_next_bit((addr), (size), 0)
398
399/*
400 */
401static inline int test_le_bit(int nr, __const__ unsigned long * addr)
402{
403 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
404 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
405}
406
407/*
408 * non-atomic versions
409 */
410static inline void __set_le_bit(int nr, unsigned long *addr)
411{
412 unsigned char *ADDR = (unsigned char *)addr;
413
414 ADDR += nr >> 3;
415 *ADDR |= 1 << (nr & 0x07);
416}
417
418static inline void __clear_le_bit(int nr, unsigned long *addr)
419{
420 unsigned char *ADDR = (unsigned char *)addr;
421
422 ADDR += nr >> 3;
423 *ADDR &= ~(1 << (nr & 0x07));
424}
425
426static inline int __test_and_set_le_bit(int nr, unsigned long *addr)
427{
428 int mask, retval;
429 unsigned char *ADDR = (unsigned char *)addr;
430
431 ADDR += nr >> 3;
432 mask = 1 << (nr & 0x07);
433 retval = (mask & *ADDR) != 0;
434 *ADDR |= mask;
435 return retval;
436}
437
438static inline int __test_and_clear_le_bit(int nr, unsigned long *addr)
439{
440 int mask, retval;
441 unsigned char *ADDR = (unsigned char *)addr;
442
443 ADDR += nr >> 3;
444 mask = 1 << (nr & 0x07);
445 retval = (mask & *ADDR) != 0;
446 *ADDR &= ~mask;
447 return retval;
448}
449
450static inline unsigned long find_next_zero_le_bit(const unsigned long *addr,
451 unsigned long size, unsigned long offset)
452{
453 const unsigned long *p = addr + (offset >> 5);
454 unsigned long result = offset & ~31UL;
455 unsigned long tmp;
456
457 if (offset >= size)
458 return size;
459 size -= result;
460 offset &= 31UL;
461 if(offset) {
462 tmp = *(p++);
463 tmp |= __swab32(~0UL >> (32-offset));
464 if(size < 32)
465 goto found_first;
466 if(~tmp)
467 goto found_middle;
468 size -= 32;
469 result += 32;
470 }
471 while(size & ~31UL) {
472 if(~(tmp = *(p++)))
473 goto found_middle;
474 result += 32;
475 size -= 32;
476 }
477 if(!size)
478 return result;
479 tmp = *p;
480
481found_first:
482 tmp = __swab32(tmp) | (~0UL << size);
483 if (tmp == ~0UL) /* Are any bits zero? */
484 return result + size; /* Nope. */
485 return result + ffz(tmp);
486
487found_middle:
488 return result + ffz(__swab32(tmp));
489}
490
491#define find_first_zero_le_bit(addr, size) \
492 find_next_zero_le_bit((addr), (size), 0)
493
494#define ext2_set_bit(nr,addr) \
495 __test_and_set_le_bit((nr),(unsigned long *)(addr))
496#define ext2_clear_bit(nr,addr) \
497 __test_and_clear_le_bit((nr),(unsigned long *)(addr))
498
499#define ext2_set_bit_atomic(lock, nr, addr) \
500 ({ \
501 int ret; \
502 spin_lock(lock); \
503 ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
504 spin_unlock(lock); \
505 ret; \
506 })
507
508#define ext2_clear_bit_atomic(lock, nr, addr) \
509 ({ \
510 int ret; \
511 spin_lock(lock); \
512 ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
513 spin_unlock(lock); \
514 ret; \
515 })
516
517#define ext2_test_bit(nr,addr) \
518 test_le_bit((nr),(unsigned long *)(addr))
519#define ext2_find_first_zero_bit(addr, size) \
520 find_first_zero_le_bit((unsigned long *)(addr), (size))
521#define ext2_find_next_zero_bit(addr, size, off) \
522 find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
523
524/* Bitmap functions for the minix filesystem. */
525#define minix_test_and_set_bit(nr,addr) \
526 test_and_set_bit((nr),(unsigned long *)(addr))
527#define minix_set_bit(nr,addr) \
528 set_bit((nr),(unsigned long *)(addr))
529#define minix_test_and_clear_bit(nr,addr) \
530 test_and_clear_bit((nr),(unsigned long *)(addr))
531#define minix_test_bit(nr,addr) \
532 test_bit((nr),(unsigned long *)(addr))
533#define minix_find_first_zero_bit(addr,size) \
534 find_first_zero_bit((unsigned long *)(addr),(size))
535 171
536#endif /* __KERNEL__ */ 172#endif /* __KERNEL__ */
537 173
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h
index 9122684f6c1e..ec3274b7ddf4 100644
--- a/include/asm-sparc/page.h
+++ b/include/asm-sparc/page.h
@@ -152,8 +152,7 @@ extern unsigned long pfn_base;
152#define virt_to_phys __pa 152#define virt_to_phys __pa
153#define phys_to_virt __va 153#define phys_to_virt __va
154 154
155#define pfn_to_page(pfn) (mem_map + ((pfn)-(pfn_base))) 155#define ARCH_PFN_OFFSET (pfn_base)
156#define page_to_pfn(page) ((unsigned long)(((page) - mem_map) + pfn_base))
157#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) 156#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
158 157
159#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) 158#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
@@ -164,6 +163,7 @@ extern unsigned long pfn_base;
164 163
165#endif /* __KERNEL__ */ 164#endif /* __KERNEL__ */
166 165
166#include <asm-generic/memory_model.h>
167#include <asm-generic/page.h> 167#include <asm-generic/page.h>
168 168
169#endif /* _SPARC_PAGE_H */ 169#endif /* _SPARC_PAGE_H */
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 6efc0162fb09..71944b0f09de 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr);
18extern void clear_bit(unsigned long nr, volatile unsigned long *addr); 18extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
19extern void change_bit(unsigned long nr, volatile unsigned long *addr); 19extern void change_bit(unsigned long nr, volatile unsigned long *addr);
20 20
21/* "non-atomic" versions... */ 21#include <asm-generic/bitops/non-atomic.h>
22
23static inline void __set_bit(int nr, volatile unsigned long *addr)
24{
25 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
26
27 *m |= (1UL << (nr & 63));
28}
29
30static inline void __clear_bit(int nr, volatile unsigned long *addr)
31{
32 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
33
34 *m &= ~(1UL << (nr & 63));
35}
36
37static inline void __change_bit(int nr, volatile unsigned long *addr)
38{
39 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
40
41 *m ^= (1UL << (nr & 63));
42}
43
44static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
45{
46 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
47 unsigned long old = *m;
48 unsigned long mask = (1UL << (nr & 63));
49
50 *m = (old | mask);
51 return ((old & mask) != 0);
52}
53
54static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
55{
56 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
57 unsigned long old = *m;
58 unsigned long mask = (1UL << (nr & 63));
59
60 *m = (old & ~mask);
61 return ((old & mask) != 0);
62}
63
64static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
65{
66 unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
67 unsigned long old = *m;
68 unsigned long mask = (1UL << (nr & 63));
69
70 *m = (old ^ mask);
71 return ((old & mask) != 0);
72}
73 22
74#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
75#define smp_mb__before_clear_bit() membar_storeload_loadload() 24#define smp_mb__before_clear_bit() membar_storeload_loadload()
@@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
79#define smp_mb__after_clear_bit() barrier() 28#define smp_mb__after_clear_bit() barrier()
80#endif 29#endif
81 30
82static inline int test_bit(int nr, __const__ volatile unsigned long *addr) 31#include <asm-generic/bitops/ffz.h>
83{ 32#include <asm-generic/bitops/__ffs.h>
84 return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL; 33#include <asm-generic/bitops/fls.h>
85} 34#include <asm-generic/bitops/fls64.h>
86
87/* The easy/cheese version for now. */
88static inline unsigned long ffz(unsigned long word)
89{
90 unsigned long result;
91
92 result = 0;
93 while(word & 1) {
94 result++;
95 word >>= 1;
96 }
97 return result;
98}
99
100/**
101 * __ffs - find first bit in word.
102 * @word: The word to search
103 *
104 * Undefined if no bit exists, so code should check against 0 first.
105 */
106static inline unsigned long __ffs(unsigned long word)
107{
108 unsigned long result = 0;
109
110 while (!(word & 1UL)) {
111 result++;
112 word >>= 1;
113 }
114 return result;
115}
116
117/*
118 * fls: find last bit set.
119 */
120
121#define fls(x) generic_fls(x)
122#define fls64(x) generic_fls64(x)
123 35
124#ifdef __KERNEL__ 36#ifdef __KERNEL__
125 37
126/* 38#include <asm-generic/bitops/sched.h>
127 * Every architecture must define this function. It's the fastest 39#include <asm-generic/bitops/ffs.h>
128 * way of searching a 140-bit bitmap where the first 100 bits are
129 * unlikely to be set. It's guaranteed that at least one of the 140
130 * bits is cleared.
131 */
132static inline int sched_find_first_bit(unsigned long *b)
133{
134 if (unlikely(b[0]))
135 return __ffs(b[0]);
136 if (unlikely(((unsigned int)b[1])))
137 return __ffs(b[1]) + 64;
138 if (b[1] >> 32)
139 return __ffs(b[1] >> 32) + 96;
140 return __ffs(b[2]) + 128;
141}
142
143/*
144 * ffs: find first bit set. This is defined the same way as
145 * the libc and compiler builtin ffs routines, therefore
146 * differs in spirit from the above ffz (man ffs).
147 */
148static inline int ffs(int x)
149{
150 if (!x)
151 return 0;
152 return __ffs((unsigned long)x) + 1;
153}
154 40
155/* 41/*
156 * hweightN: returns the hamming weight (i.e. the number 42 * hweightN: returns the hamming weight (i.e. the number
@@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w)
193 79
194#else 80#else
195 81
196#define hweight64(x) generic_hweight64(x) 82#include <asm-generic/bitops/hweight.h>
197#define hweight32(x) generic_hweight32(x)
198#define hweight16(x) generic_hweight16(x)
199#define hweight8(x) generic_hweight8(x)
200 83
201#endif 84#endif
202#endif /* __KERNEL__ */ 85#endif /* __KERNEL__ */
203 86
204/** 87#include <asm-generic/bitops/find.h>
205 * find_next_bit - find the next set bit in a memory region
206 * @addr: The address to base the search on
207 * @offset: The bitnumber to start searching at
208 * @size: The maximum size to search
209 */
210extern unsigned long find_next_bit(const unsigned long *, unsigned long,
211 unsigned long);
212
213/**
214 * find_first_bit - find the first set bit in a memory region
215 * @addr: The address to start the search at
216 * @size: The maximum size to search
217 *
218 * Returns the bit-number of the first set bit, not the number of the byte
219 * containing a bit.
220 */
221#define find_first_bit(addr, size) \
222 find_next_bit((addr), (size), 0)
223
224/* find_next_zero_bit() finds the first zero bit in a bit string of length
225 * 'size' bits, starting the search at bit 'offset'. This is largely based
226 * on Linus's ALPHA routines, which are pretty portable BTW.
227 */
228
229extern unsigned long find_next_zero_bit(const unsigned long *,
230 unsigned long, unsigned long);
231
232#define find_first_zero_bit(addr, size) \
233 find_next_zero_bit((addr), (size), 0)
234
235#define test_and_set_le_bit(nr,addr) \
236 test_and_set_bit((nr) ^ 0x38, (addr))
237#define test_and_clear_le_bit(nr,addr) \
238 test_and_clear_bit((nr) ^ 0x38, (addr))
239
240static inline int test_le_bit(int nr, __const__ unsigned long * addr)
241{
242 int mask;
243 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
244
245 ADDR += nr >> 3;
246 mask = 1 << (nr & 0x07);
247 return ((mask & *ADDR) != 0);
248}
249
250#define find_first_zero_le_bit(addr, size) \
251 find_next_zero_le_bit((addr), (size), 0)
252
253extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long);
254 88
255#ifdef __KERNEL__ 89#ifdef __KERNEL__
256 90
257#define __set_le_bit(nr, addr) \ 91#include <asm-generic/bitops/ext2-non-atomic.h>
258 __set_bit((nr) ^ 0x38, (addr))
259#define __clear_le_bit(nr, addr) \
260 __clear_bit((nr) ^ 0x38, (addr))
261#define __test_and_clear_le_bit(nr, addr) \
262 __test_and_clear_bit((nr) ^ 0x38, (addr))
263#define __test_and_set_le_bit(nr, addr) \
264 __test_and_set_bit((nr) ^ 0x38, (addr))
265 92
266#define ext2_set_bit(nr,addr) \
267 __test_and_set_le_bit((nr),(unsigned long *)(addr))
268#define ext2_set_bit_atomic(lock,nr,addr) \ 93#define ext2_set_bit_atomic(lock,nr,addr) \
269 test_and_set_le_bit((nr),(unsigned long *)(addr)) 94 test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
270#define ext2_clear_bit(nr,addr) \
271 __test_and_clear_le_bit((nr),(unsigned long *)(addr))
272#define ext2_clear_bit_atomic(lock,nr,addr) \ 95#define ext2_clear_bit_atomic(lock,nr,addr) \
273 test_and_clear_le_bit((nr),(unsigned long *)(addr)) 96 test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
274#define ext2_test_bit(nr,addr) \
275 test_le_bit((nr),(unsigned long *)(addr))
276#define ext2_find_first_zero_bit(addr, size) \
277 find_first_zero_le_bit((unsigned long *)(addr), (size))
278#define ext2_find_next_zero_bit(addr, size, off) \
279 find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
280 97
281/* Bitmap functions for the minix filesystem. */ 98#include <asm-generic/bitops/minix.h>
282#define minix_test_and_set_bit(nr,addr) \
283 test_and_set_bit((nr),(unsigned long *)(addr))
284#define minix_set_bit(nr,addr) \
285 set_bit((nr),(unsigned long *)(addr))
286#define minix_test_and_clear_bit(nr,addr) \
287 test_and_clear_bit((nr),(unsigned long *)(addr))
288#define minix_test_bit(nr,addr) \
289 test_bit((nr),(unsigned long *)(addr))
290#define minix_find_first_zero_bit(addr,size) \
291 find_first_zero_bit((unsigned long *)(addr),(size))
292 99
293#endif /* __KERNEL__ */ 100#endif /* __KERNEL__ */
294 101
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index 49d49a285943..6a95d5d0c576 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -738,7 +738,7 @@ static unsigned long __init sun_floppy_init(void)
738 if (!sun_floppy_types[0] && sun_floppy_types[1]) { 738 if (!sun_floppy_types[0] && sun_floppy_types[1]) {
739 /* 739 /*
740 * Set the drive exchange bit in FCR on NS87303, 740 * Set the drive exchange bit in FCR on NS87303,
741 * make shure other bits are sane before doing so. 741 * make sure other bits are sane before doing so.
742 */ 742 */
743 ns87303_modify(config, FER, FER_EDM, 0); 743 ns87303_modify(config, FER, FER_EDM, 0);
744 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0); 744 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 34c4b43d3f98..dee40206b221 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -83,4 +83,28 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
83 return ret; 83 return ret;
84} 84}
85 85
86static inline int
87futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
88{
89 __asm__ __volatile__(
90 "\n1: lduwa [%2] %%asi, %0\n"
91 "2: casa [%2] %%asi, %0, %1\n"
92 "3:\n"
93 " .section .fixup,#alloc,#execinstr\n"
94 " .align 4\n"
95 "4: ba 3b\n"
96 " mov %3, %0\n"
97 " .previous\n"
98 " .section __ex_table,\"a\"\n"
99 " .align 4\n"
100 " .word 1b, 4b\n"
101 " .word 2b, 4b\n"
102 " .previous\n"
103 : "=&r" (oldval)
104 : "r" (newval), "r" (uaddr), "i" (-EFAULT)
105 : "memory");
106
107 return oldval;
108}
109
86#endif /* !(_SPARC64_FUTEX_H) */ 110#endif /* !(_SPARC64_FUTEX_H) */
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 6321f5a0198d..4040d127ac3e 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -15,12 +15,9 @@ struct die_args {
15 int signr; 15 int signr;
16}; 16};
17 17
18/* Note - you should never unregister because that can race with NMIs. 18extern int register_die_notifier(struct notifier_block *);
19 * If you really want to do it first unregister - then synchronize_sched 19extern int unregister_die_notifier(struct notifier_block *);
20 * - then free. 20extern struct atomic_notifier_head sparc64die_chain;
21 */
22int register_die_notifier(struct notifier_block *nb);
23extern struct notifier_block *sparc64die_chain;
24 21
25extern void bad_trap(struct pt_regs *, long); 22extern void bad_trap(struct pt_regs *, long);
26 23
@@ -46,7 +43,7 @@ static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs,
46 .trapnr = trap, 43 .trapnr = trap,
47 .signr = sig }; 44 .signr = sig };
48 45
49 return notifier_call_chain(&sparc64die_chain, val, &args); 46 return atomic_notifier_call_chain(&sparc64die_chain, val, &args);
50} 47}
51 48
52#endif 49#endif
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 66fe4ac59fd6..aabb21906724 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -111,6 +111,8 @@ typedef unsigned long pgprot_t;
111 (_AC(0x0000000070000000,UL)) : \ 111 (_AC(0x0000000070000000,UL)) : \
112 (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) 112 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
113 113
114#include <asm-generic/memory_model.h>
115
114#endif /* !(__ASSEMBLY__) */ 116#endif /* !(__ASSEMBLY__) */
115 117
116/* to align the pointer to the (next) page boundary */ 118/* to align the pointer to the (next) page boundary */
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 0229814af31e..41364330aff1 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -106,9 +106,6 @@ extern unsigned long uml_physmem;
106#define __pa(virt) to_phys((void *) (unsigned long) (virt)) 106#define __pa(virt) to_phys((void *) (unsigned long) (virt))
107#define __va(phys) to_virt((unsigned long) (phys)) 107#define __va(phys) to_virt((unsigned long) (phys))
108 108
109#define page_to_pfn(page) ((page) - mem_map)
110#define pfn_to_page(pfn) (mem_map + (pfn))
111
112#define phys_to_pfn(p) ((p) >> PAGE_SHIFT) 109#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
113#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 110#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
114 111
@@ -121,6 +118,7 @@ extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
121extern void arch_free_page(struct page *page, int order); 118extern void arch_free_page(struct page *page, int order);
122#define HAVE_ARCH_FREE_PAGE 119#define HAVE_ARCH_FREE_PAGE
123 120
121#include <asm-generic/memory_model.h>
124#include <asm-generic/page.h> 122#include <asm-generic/page.h>
125 123
126#endif 124#endif
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 2ee028b8de9d..4e460d6f5ac8 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -41,16 +41,16 @@
41 41
42#define __get_user(x, ptr) \ 42#define __get_user(x, ptr) \
43({ \ 43({ \
44 const __typeof__(ptr) __private_ptr = ptr; \ 44 const __typeof__(ptr) __private_ptr = ptr; \
45 __typeof__(*(__private_ptr)) __private_val; \ 45 __typeof__(x) __private_val; \
46 int __private_ret = -EFAULT; \ 46 int __private_ret = -EFAULT; \
47 (x) = (__typeof__(*(__private_ptr)))0; \ 47 (x) = (__typeof__(*(__private_ptr)))0; \
48 if (__copy_from_user(&__private_val, (__private_ptr), \ 48 if (__copy_from_user((void *) &__private_val, (__private_ptr), \
49 sizeof(*(__private_ptr))) == 0) {\ 49 sizeof(*(__private_ptr))) == 0) { \
50 (x) = (__typeof__(*(__private_ptr))) __private_val; \ 50 (x) = (__typeof__(*(__private_ptr))) __private_val; \
51 __private_ret = 0; \ 51 __private_ret = 0; \
52 } \ 52 } \
53 __private_ret; \ 53 __private_ret; \
54}) 54})
55 55
56#define get_user(x, ptr) \ 56#define get_user(x, ptr) \
@@ -89,14 +89,3 @@ struct exception_table_entry
89}; 89};
90 90
91#endif 91#endif
92
93/*
94 * Overrides for Emacs so that we follow Linus's tabbing style.
95 * Emacs will notice this stuff at the end of the file and automatically
96 * adjust the settings for this buffer only. This must remain at the end
97 * of the file.
98 * ---------------------------------------------------------------------------
99 * Local variables:
100 * c-file-style: "linux"
101 * End:
102 */
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 609b9e87222a..1f6fd5ab4177 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -22,25 +22,11 @@
22 22
23#ifdef __KERNEL__ 23#ifdef __KERNEL__
24 24
25/* 25#include <asm-generic/bitops/ffz.h>
26 * The __ functions are not atomic
27 */
28 26
29/* 27/*
30 * ffz = Find First Zero in word. Undefined if no zero exists, 28 * The __ functions are not atomic
31 * so code should check against ~0UL first..
32 */ 29 */
33static inline unsigned long ffz (unsigned long word)
34{
35 unsigned long result = 0;
36
37 while (word & 1) {
38 result++;
39 word >>= 1;
40 }
41 return result;
42}
43
44 30
45/* In the following constant-bit-op macros, a "g" constraint is used when 31/* In the following constant-bit-op macros, a "g" constraint is used when
46 we really need an integer ("i" constraint). This is to avoid 32 we really need an integer ("i" constraint). This is to avoid
@@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr)
153#define smp_mb__before_clear_bit() barrier () 139#define smp_mb__before_clear_bit() barrier ()
154#define smp_mb__after_clear_bit() barrier () 140#define smp_mb__after_clear_bit() barrier ()
155 141
142#include <asm-generic/bitops/ffs.h>
143#include <asm-generic/bitops/fls.h>
144#include <asm-generic/bitops/fls64.h>
145#include <asm-generic/bitops/__ffs.h>
146#include <asm-generic/bitops/find.h>
147#include <asm-generic/bitops/sched.h>
148#include <asm-generic/bitops/hweight.h>
156 149
157#define find_first_zero_bit(addr, size) \ 150#include <asm-generic/bitops/ext2-non-atomic.h>
158 find_next_zero_bit ((addr), (size), 0)
159
160static inline int find_next_zero_bit(const void *addr, int size, int offset)
161{
162 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
163 unsigned long result = offset & ~31UL;
164 unsigned long tmp;
165
166 if (offset >= size)
167 return size;
168 size -= result;
169 offset &= 31UL;
170 if (offset) {
171 tmp = * (p++);
172 tmp |= ~0UL >> (32-offset);
173 if (size < 32)
174 goto found_first;
175 if (~tmp)
176 goto found_middle;
177 size -= 32;
178 result += 32;
179 }
180 while (size & ~31UL) {
181 if (~ (tmp = * (p++)))
182 goto found_middle;
183 result += 32;
184 size -= 32;
185 }
186 if (!size)
187 return result;
188 tmp = *p;
189
190 found_first:
191 tmp |= ~0UL << size;
192 found_middle:
193 return result + ffz (tmp);
194}
195
196
197/* This is the same as generic_ffs, but we can't use that because it's
198 inline and the #include order mucks things up. */
199static inline int generic_ffs_for_find_next_bit(int x)
200{
201 int r = 1;
202
203 if (!x)
204 return 0;
205 if (!(x & 0xffff)) {
206 x >>= 16;
207 r += 16;
208 }
209 if (!(x & 0xff)) {
210 x >>= 8;
211 r += 8;
212 }
213 if (!(x & 0xf)) {
214 x >>= 4;
215 r += 4;
216 }
217 if (!(x & 3)) {
218 x >>= 2;
219 r += 2;
220 }
221 if (!(x & 1)) {
222 x >>= 1;
223 r += 1;
224 }
225 return r;
226}
227
228/*
229 * Find next one bit in a bitmap reasonably efficiently.
230 */
231static __inline__ unsigned long find_next_bit(const unsigned long *addr,
232 unsigned long size, unsigned long offset)
233{
234 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
235 unsigned int result = offset & ~31UL;
236 unsigned int tmp;
237
238 if (offset >= size)
239 return size;
240 size -= result;
241 offset &= 31UL;
242 if (offset) {
243 tmp = *p++;
244 tmp &= ~0UL << offset;
245 if (size < 32)
246 goto found_first;
247 if (tmp)
248 goto found_middle;
249 size -= 32;
250 result += 32;
251 }
252 while (size >= 32) {
253 if ((tmp = *p++) != 0)
254 goto found_middle;
255 result += 32;
256 size -= 32;
257 }
258 if (!size)
259 return result;
260 tmp = *p;
261
262found_first:
263 tmp &= ~0UL >> (32 - size);
264 if (tmp == 0UL) /* Are any bits set? */
265 return result + size; /* Nope. */
266found_middle:
267 return result + generic_ffs_for_find_next_bit(tmp);
268}
269
270/*
271 * find_first_bit - find the first set bit in a memory region
272 */
273#define find_first_bit(addr, size) \
274 find_next_bit((addr), (size), 0)
275
276
277#define ffs(x) generic_ffs (x)
278#define fls(x) generic_fls (x)
279#define fls64(x) generic_fls64(x)
280#define __ffs(x) ffs(x)
281
282
283/*
284 * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
285 * that at least one bit is set, and returns the real index of the bit
286 * (rather than the bit index + 1, like ffs does).
287 */
288static inline int sched_ffs(int x)
289{
290 int r = 0;
291
292 if (!(x & 0xffff)) {
293 x >>= 16;
294 r += 16;
295 }
296 if (!(x & 0xff)) {
297 x >>= 8;
298 r += 8;
299 }
300 if (!(x & 0xf)) {
301 x >>= 4;
302 r += 4;
303 }
304 if (!(x & 3)) {
305 x >>= 2;
306 r += 2;
307 }
308 if (!(x & 1)) {
309 x >>= 1;
310 r += 1;
311 }
312 return r;
313}
314
315/*
316 * Every architecture must define this function. It's the fastest
317 * way of searching a 140-bit bitmap where the first 100 bits are
318 * unlikely to be set. It's guaranteed that at least one of the 140
319 * bits is set.
320 */
321static inline int sched_find_first_bit(unsigned long *b)
322{
323 unsigned offs = 0;
324 while (! *b) {
325 b++;
326 offs += 32;
327 }
328 return sched_ffs (*b) + offs;
329}
330
331/*
332 * hweightN: returns the hamming weight (i.e. the number
333 * of bits set) of a N-bit word
334 */
335#define hweight32(x) generic_hweight32 (x)
336#define hweight16(x) generic_hweight16 (x)
337#define hweight8(x) generic_hweight8 (x)
338
339#define ext2_set_bit test_and_set_bit
340#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) 151#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
341#define ext2_clear_bit test_and_clear_bit
342#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) 152#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
343#define ext2_test_bit test_bit
344#define ext2_find_first_zero_bit find_first_zero_bit
345#define ext2_find_next_zero_bit find_next_zero_bit
346 153
347/* Bitmap functions for the minix filesystem. */ 154#include <asm-generic/bitops/minix.h>
348#define minix_test_and_set_bit test_and_set_bit
349#define minix_set_bit set_bit
350#define minix_test_and_clear_bit test_and_clear_bit
351#define minix_test_bit test_bit
352#define minix_find_first_zero_bit find_first_zero_bit
353 155
354#endif /* __KERNEL__ */ 156#endif /* __KERNEL__ */
355 157
diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h
index b4bc85e7b91a..ad03c46a1f92 100644
--- a/include/asm-v850/page.h
+++ b/include/asm-v850/page.h
@@ -111,8 +111,7 @@ typedef unsigned long pgprot_t;
111#define page_to_virt(page) \ 111#define page_to_virt(page) \
112 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) 112 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
113 113
114#define pfn_to_page(pfn) virt_to_page (pfn_to_virt (pfn)) 114#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
115#define page_to_pfn(page) virt_to_pfn (page_to_virt (page))
116#define pfn_valid(pfn) ((pfn) < max_mapnr) 115#define pfn_valid(pfn) ((pfn) < max_mapnr)
117 116
118#define virt_addr_valid(kaddr) \ 117#define virt_addr_valid(kaddr) \
@@ -125,6 +124,7 @@ typedef unsigned long pgprot_t;
125 124
126#endif /* KERNEL */ 125#endif /* KERNEL */
127 126
127#include <asm-generic/memory_model.h>
128#include <asm-generic/page.h> 128#include <asm-generic/page.h>
129 129
130#endif /* __V850_PAGE_H__ */ 130#endif /* __V850_PAGE_H__ */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index eb4df23e1e41..79212128d0f7 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word)
356 356
357#ifdef __KERNEL__ 357#ifdef __KERNEL__
358 358
359static inline int sched_find_first_bit(const unsigned long *b) 359#include <asm-generic/bitops/sched.h>
360{
361 if (b[0])
362 return __ffs(b[0]);
363 if (b[1])
364 return __ffs(b[1]) + 64;
365 return __ffs(b[2]) + 128;
366}
367 360
368/** 361/**
369 * ffs - find first bit set 362 * ffs - find first bit set
@@ -412,43 +405,20 @@ static __inline__ int fls(int x)
412 return r+1; 405 return r+1;
413} 406}
414 407
415/** 408#include <asm-generic/bitops/hweight.h>
416 * hweightN - returns the hamming weight of a N-bit word
417 * @x: the word to weigh
418 *
419 * The Hamming Weight of a number is the total number of bits set in it.
420 */
421
422#define hweight64(x) generic_hweight64(x)
423#define hweight32(x) generic_hweight32(x)
424#define hweight16(x) generic_hweight16(x)
425#define hweight8(x) generic_hweight8(x)
426 409
427#endif /* __KERNEL__ */ 410#endif /* __KERNEL__ */
428 411
429#ifdef __KERNEL__ 412#ifdef __KERNEL__
430 413
431#define ext2_set_bit(nr,addr) \ 414#include <asm-generic/bitops/ext2-non-atomic.h>
432 __test_and_set_bit((nr),(unsigned long*)addr) 415
433#define ext2_set_bit_atomic(lock,nr,addr) \ 416#define ext2_set_bit_atomic(lock,nr,addr) \
434 test_and_set_bit((nr),(unsigned long*)addr) 417 test_and_set_bit((nr),(unsigned long*)addr)
435#define ext2_clear_bit(nr, addr) \
436 __test_and_clear_bit((nr),(unsigned long*)addr)
437#define ext2_clear_bit_atomic(lock,nr,addr) \ 418#define ext2_clear_bit_atomic(lock,nr,addr) \
438 test_and_clear_bit((nr),(unsigned long*)addr) 419 test_and_clear_bit((nr),(unsigned long*)addr)
439#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) 420
440#define ext2_find_first_zero_bit(addr, size) \ 421#include <asm-generic/bitops/minix.h>
441 find_first_zero_bit((unsigned long*)addr, size)
442#define ext2_find_next_zero_bit(addr, size, off) \
443 find_next_zero_bit((unsigned long*)addr, size, off)
444
445/* Bitmap functions for the minix filesystem. */
446#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
447#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
448#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
449#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
450#define minix_find_first_zero_bit(addr,size) \
451 find_first_zero_bit((void*)addr,size)
452 422
453#endif /* __KERNEL__ */ 423#endif /* __KERNEL__ */
454 424
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89e..9804bf07b092 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
94 return ret; 94 return ret;
95} 95}
96 96
97static inline int
98futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
99{
100 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
101 return -EFAULT;
102
103 __asm__ __volatile__(
104 "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
105
106 "2: .section .fixup, \"ax\" \n"
107 "3: mov %2, %0 \n"
108 " jmp 2b \n"
109 " .previous \n"
110
111 " .section __ex_table, \"a\" \n"
112 " .align 8 \n"
113 " .quad 1b,3b \n"
114 " .previous \n"
115
116 : "=a" (oldval), "=m" (*uaddr)
117 : "i" (-EFAULT), "r" (newval), "0" (oldval)
118 : "memory"
119 );
120
121 return oldval;
122}
123
97#endif 124#endif
98#endif 125#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c8783..cf795631d9b4 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@
5 5
6struct pt_regs; 6struct pt_regs;
7 7
8struct die_args { 8struct die_args {
9 struct pt_regs *regs; 9 struct pt_regs *regs;
10 const char *str; 10 const char *str;
11 long err; 11 long err;
12 int trapnr; 12 int trapnr;
13 int signr; 13 int signr;
14}; 14};
15
16extern int register_die_notifier(struct notifier_block *);
17extern int unregister_die_notifier(struct notifier_block *);
18extern struct atomic_notifier_head die_chain;
15 19
16/* Note - you should never unregister because that can race with NMIs.
17 If you really want to do it first unregister - then synchronize_sched - then free.
18 */
19int register_die_notifier(struct notifier_block *nb);
20extern struct notifier_block *die_chain;
21/* Grossly misnamed. */ 20/* Grossly misnamed. */
22enum die_val { 21enum die_val {
23 DIE_OOPS = 1, 22 DIE_OOPS = 1,
24 DIE_INT3, 23 DIE_INT3,
25 DIE_DEBUG, 24 DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
33 DIE_CALL, 32 DIE_CALL,
34 DIE_NMI_IPI, 33 DIE_NMI_IPI,
35 DIE_PAGE_FAULT, 34 DIE_PAGE_FAULT,
36}; 35};
37 36
38static inline int notify_die(enum die_val val, const char *str, 37static inline int notify_die(enum die_val val, const char *str,
39 struct pt_regs *regs, long err, int trap, int sig) 38 struct pt_regs *regs, long err, int trap, int sig)
40{ 39{
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
45 .trapnr = trap, 44 .trapnr = trap,
46 .signr = sig 45 .signr = sig
47 }; 46 };
48 return notifier_call_chain(&die_chain, val, &args); 47 return atomic_notifier_call_chain(&die_chain, val, &args);
49} 48}
50 49
51extern int printk_address(unsigned long address); 50extern int printk_address(unsigned long address);
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 937f99b26883..6b18cd8f293d 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -44,12 +44,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
44#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 44#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
45#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 45#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
46 46
47extern struct page *pfn_to_page(unsigned long pfn);
48extern unsigned long page_to_pfn(struct page *page);
49extern int pfn_valid(unsigned long pfn); 47extern int pfn_valid(unsigned long pfn);
50#endif 48#endif
51 49
52#define local_mapnr(kvaddr) \
53 ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
54#endif 50#endif
55#endif 51#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 615e3e494929..408185bac351 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
123#define __boot_va(x) __va(x) 123#define __boot_va(x) __va(x)
124#define __boot_pa(x) __pa(x) 124#define __boot_pa(x) __pa(x)
125#ifdef CONFIG_FLATMEM 125#ifdef CONFIG_FLATMEM
126#define pfn_to_page(pfn) (mem_map + (pfn))
127#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
128#define pfn_valid(pfn) ((pfn) < end_pfn) 126#define pfn_valid(pfn) ((pfn) < end_pfn)
129#endif 127#endif
130 128
@@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
140 138
141#endif /* __KERNEL__ */ 139#endif /* __KERNEL__ */
142 140
141#include <asm-generic/memory_model.h>
143#include <asm-generic/page.h> 142#include <asm-generic/page.h>
144 143
145#endif /* _X86_64_PAGE_H */ 144#endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 8c8d88c036ed..37a3ec433ee5 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
20#include <asm/mmsegment.h> 20#include <asm/mmsegment.h>
21#include <asm/percpu.h> 21#include <asm/percpu.h>
22#include <linux/personality.h> 22#include <linux/personality.h>
23#include <linux/cpumask.h>
23 24
24#define TF_MASK 0x00000100 25#define TF_MASK 0x00000100
25#define IF_MASK 0x00000200 26#define IF_MASK 0x00000200
@@ -65,6 +66,9 @@ struct cpuinfo_x86 {
65 __u32 x86_power; 66 __u32 x86_power;
66 __u32 extended_cpuid_level; /* Max extended CPUID function supported */ 67 __u32 extended_cpuid_level; /* Max extended CPUID function supported */
67 unsigned long loops_per_jiffy; 68 unsigned long loops_per_jiffy;
69#ifdef CONFIG_SMP
70 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
71#endif
68 __u8 apicid; 72 __u8 apicid;
69 __u8 booted_cores; /* number of cores as seen by OS */ 73 __u8 booted_cores; /* number of cores as seen by OS */
70} ____cacheline_aligned; 74} ____cacheline_aligned;
@@ -354,9 +358,6 @@ struct extended_sigtable {
354 struct extended_signature sigs[0]; 358 struct extended_signature sigs[0];
355}; 359};
356 360
357/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
358#define MICROCODE_IOCFREE _IO('6',0)
359
360 361
361#define ASM_NOP1 K8_NOP1 362#define ASM_NOP1 K8_NOP1
362#define ASM_NOP2 K8_NOP2 363#define ASM_NOP2 K8_NOP2
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 9ccbb2cfd5c0..a4fdaeb5c397 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
56extern cpumask_t cpu_core_map[NR_CPUS]; 56extern cpumask_t cpu_core_map[NR_CPUS];
57extern u8 phys_proc_id[NR_CPUS]; 57extern u8 phys_proc_id[NR_CPUS];
58extern u8 cpu_core_id[NR_CPUS]; 58extern u8 cpu_core_id[NR_CPUS];
59extern u8 cpu_llc_id[NR_CPUS];
59 60
60#define SMP_TRAMPOLINE_BASE 0x6000 61#define SMP_TRAMPOLINE_BASE 0x6000
61 62
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c642f5d9882d..9db54e9d17bb 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -68,4 +68,6 @@ extern int __node_distance(int, int);
68 68
69#include <asm-generic/topology.h> 69#include <asm-generic/topology.h>
70 70
71extern cpumask_t cpu_coregroup_map(int cpu);
72
71#endif 73#endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c57949..fcc516353087 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
605__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ 605__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */
606#define __NR_unshare 272 606#define __NR_unshare 272
607__SYSCALL(__NR_unshare, sys_unshare) 607__SYSCALL(__NR_unshare, sys_unshare)
608#define __NR_set_robust_list 273
609__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
610#define __NR_get_robust_list 274
611__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
608 612
609#define __NR_syscall_max __NR_unshare 613#define __NR_syscall_max __NR_get_robust_list
610 614
611#ifndef __NO_STUBS 615#ifndef __NO_STUBS
612 616
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 0a2065f1a372..d815649617aa 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -23,156 +23,11 @@
23# error SMP not supported on this architecture 23# error SMP not supported on this architecture
24#endif 24#endif
25 25
26static __inline__ void set_bit(int nr, volatile void * addr)
27{
28 unsigned long mask = 1 << (nr & 0x1f);
29 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
30 unsigned long flags;
31
32 local_irq_save(flags);
33 *a |= mask;
34 local_irq_restore(flags);
35}
36
37static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
38{
39 unsigned long mask = 1 << (nr & 0x1f);
40 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
41
42 *a |= mask;
43}
44
45static __inline__ void clear_bit(int nr, volatile void * addr)
46{
47 unsigned long mask = 1 << (nr & 0x1f);
48 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
49 unsigned long flags;
50
51 local_irq_save(flags);
52 *a &= ~mask;
53 local_irq_restore(flags);
54}
55
56static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
57{
58 unsigned long mask = 1 << (nr & 0x1f);
59 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
60
61 *a &= ~mask;
62}
63
64/*
65 * clear_bit() doesn't provide any barrier for the compiler.
66 */
67
68#define smp_mb__before_clear_bit() barrier() 26#define smp_mb__before_clear_bit() barrier()
69#define smp_mb__after_clear_bit() barrier() 27#define smp_mb__after_clear_bit() barrier()
70 28
71static __inline__ void change_bit(int nr, volatile void * addr) 29#include <asm-generic/bitops/atomic.h>
72{ 30#include <asm-generic/bitops/non-atomic.h>
73 unsigned long mask = 1 << (nr & 0x1f);
74 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
75 unsigned long flags;
76
77 local_irq_save(flags);
78 *a ^= mask;
79 local_irq_restore(flags);
80}
81
82static __inline__ void __change_bit(int nr, volatile void * addr)
83{
84 unsigned long mask = 1 << (nr & 0x1f);
85 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
86
87 *a ^= mask;
88}
89
90static __inline__ int test_and_set_bit(int nr, volatile void * addr)
91{
92 unsigned long retval;
93 unsigned long mask = 1 << (nr & 0x1f);
94 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
95 unsigned long flags;
96
97 local_irq_save(flags);
98 retval = (mask & *a) != 0;
99 *a |= mask;
100 local_irq_restore(flags);
101
102 return retval;
103}
104
105static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
106{
107 unsigned long retval;
108 unsigned long mask = 1 << (nr & 0x1f);
109 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
110
111 retval = (mask & *a) != 0;
112 *a |= mask;
113
114 return retval;
115}
116
117static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
118{
119 unsigned long retval;
120 unsigned long mask = 1 << (nr & 0x1f);
121 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
122 unsigned long flags;
123
124 local_irq_save(flags);
125 retval = (mask & *a) != 0;
126 *a &= ~mask;
127 local_irq_restore(flags);
128
129 return retval;
130}
131
132static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
133{
134 unsigned long mask = 1 << (nr & 0x1f);
135 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
136 unsigned long old = *a;
137
138 *a = old & ~mask;
139 return (old & mask) != 0;
140}
141
142static __inline__ int test_and_change_bit(int nr, volatile void * addr)
143{
144 unsigned long retval;
145 unsigned long mask = 1 << (nr & 0x1f);
146 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
147 unsigned long flags;
148
149 local_irq_save(flags);
150
151 retval = (mask & *a) != 0;
152 *a ^= mask;
153 local_irq_restore(flags);
154
155 return retval;
156}
157
158/*
159 * non-atomic version; can be reordered
160 */
161
162static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
163{
164 unsigned long mask = 1 << (nr & 0x1f);
165 unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
166 unsigned long old = *a;
167
168 *a = old ^ mask;
169 return (old & mask) != 0;
170}
171
172static __inline__ int test_bit(int nr, const volatile void *addr)
173{
174 return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
175}
176 31
177#if XCHAL_HAVE_NSA 32#if XCHAL_HAVE_NSA
178 33
@@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x)
245{ 100{
246 return __cntlz(x); 101 return __cntlz(x);
247} 102}
248#define fls64(x) generic_fls64(x) 103#include <asm-generic/bitops/fls64.h>
249 104#include <asm-generic/bitops/find.h>
250static __inline__ int 105#include <asm-generic/bitops/ext2-non-atomic.h>
251find_next_bit(const unsigned long *addr, int size, int offset)
252{
253 const unsigned long *p = addr + (offset >> 5);
254 unsigned long result = offset & ~31UL;
255 unsigned long tmp;
256
257 if (offset >= size)
258 return size;
259 size -= result;
260 offset &= 31UL;
261 if (offset) {
262 tmp = *p++;
263 tmp &= ~0UL << offset;
264 if (size < 32)
265 goto found_first;
266 if (tmp)
267 goto found_middle;
268 size -= 32;
269 result += 32;
270 }
271 while (size >= 32) {
272 if ((tmp = *p++) != 0)
273 goto found_middle;
274 result += 32;
275 size -= 32;
276 }
277 if (!size)
278 return result;
279 tmp = *p;
280
281found_first:
282 tmp &= ~0UL >> (32 - size);
283 if (tmp == 0UL) /* Are any bits set? */
284 return result + size; /* Nope. */
285found_middle:
286 return result + __ffs(tmp);
287}
288
289/**
290 * find_first_bit - find the first set bit in a memory region
291 * @addr: The address to start the search at
292 * @size: The maximum size to search
293 *
294 * Returns the bit-number of the first set bit, not the number of the byte
295 * containing a bit.
296 */
297
298#define find_first_bit(addr, size) \
299 find_next_bit((addr), (size), 0)
300
301static __inline__ int
302find_next_zero_bit(const unsigned long *addr, int size, int offset)
303{
304 const unsigned long *p = addr + (offset >> 5);
305 unsigned long result = offset & ~31UL;
306 unsigned long tmp;
307
308 if (offset >= size)
309 return size;
310 size -= result;
311 offset &= 31UL;
312 if (offset) {
313 tmp = *p++;
314 tmp |= ~0UL >> (32-offset);
315 if (size < 32)
316 goto found_first;
317 if (~tmp)
318 goto found_middle;
319 size -= 32;
320 result += 32;
321 }
322 while (size & ~31UL) {
323 if (~(tmp = *p++))
324 goto found_middle;
325 result += 32;
326 size -= 32;
327 }
328 if (!size)
329 return result;
330 tmp = *p;
331
332found_first:
333 tmp |= ~0UL << size;
334found_middle:
335 return result + ffz(tmp);
336}
337
338#define find_first_zero_bit(addr, size) \
339 find_next_zero_bit((addr), (size), 0)
340 106
341#ifdef __XTENSA_EL__ 107#ifdef __XTENSA_EL__
342# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
343# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) 108# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
344# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
345# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) 109# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
346# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
347# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
348# define ext2_find_next_zero_bit(addr, size, offset) \
349 find_next_zero_bit((addr), (size), (offset))
350#elif defined(__XTENSA_EB__) 110#elif defined(__XTENSA_EB__)
351# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
352# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) 111# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
353# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr))
354# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) 112# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
355# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
356# define ext2_find_first_zero_bit(addr, size) \
357 ext2_find_next_zero_bit((addr), (size), 0)
358
359static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
360{
361 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
362 unsigned long result = offset & ~31UL;
363 unsigned long tmp;
364
365 if (offset >= size)
366 return size;
367 size -= result;
368 offset &= 31UL;
369 if(offset) {
370 /* We hold the little endian value in tmp, but then the
371 * shift is illegal. So we could keep a big endian value
372 * in tmp, like this:
373 *
374 * tmp = __swab32(*(p++));
375 * tmp |= ~0UL >> (32-offset);
376 *
377 * but this would decrease preformance, so we change the
378 * shift:
379 */
380 tmp = *(p++);
381 tmp |= __swab32(~0UL >> (32-offset));
382 if(size < 32)
383 goto found_first;
384 if(~tmp)
385 goto found_middle;
386 size -= 32;
387 result += 32;
388 }
389 while(size & ~31UL) {
390 if(~(tmp = *(p++)))
391 goto found_middle;
392 result += 32;
393 size -= 32;
394 }
395 if(!size)
396 return result;
397 tmp = *p;
398
399found_first:
400 /* tmp is little endian, so we would have to swab the shift,
401 * see above. But then we have to swab tmp below for ffz, so
402 * we might as well do this here.
403 */
404 return result + ffz(__swab32(tmp) | (~0UL << size));
405found_middle:
406 return result + ffz(__swab32(tmp));
407}
408
409#else 113#else
410# error processor byte order undefined! 114# error processor byte order undefined!
411#endif 115#endif
412 116
413 117#include <asm-generic/bitops/hweight.h>
414#define hweight32(x) generic_hweight32(x) 118#include <asm-generic/bitops/sched.h>
415#define hweight16(x) generic_hweight16(x) 119#include <asm-generic/bitops/minix.h>
416#define hweight8(x) generic_hweight8(x)
417
418/*
419 * Find the first bit set in a 140-bit bitmap.
420 * The first 100 bits are unlikely to be set.
421 */
422
423static inline int sched_find_first_bit(const unsigned long *b)
424{
425 if (unlikely(b[0]))
426 return __ffs(b[0]);
427 if (unlikely(b[1]))
428 return __ffs(b[1]) + 32;
429 if (unlikely(b[2]))
430 return __ffs(b[2]) + 64;
431 if (b[3])
432 return __ffs(b[3]) + 96;
433 return __ffs(b[4]) + 128;
434}
435
436
437/* Bitmap functions for the minix filesystem. */
438
439#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
440#define minix_set_bit(nr,addr) set_bit(nr,addr)
441#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
442#define minix_test_bit(nr,addr) test_bit(nr,addr)
443#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
444 120
445#endif /* __KERNEL__ */ 121#endif /* __KERNEL__ */
446 122
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 8ded36f255a2..992bac5c1258 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -109,10 +109,7 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
109#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 109#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
110#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 110#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
111#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) 111#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr)
112#ifndef CONFIG_DISCONTIGMEM 112#ifdef CONFIG_DISCONTIGMEM
113# define pfn_to_page(pfn) (mem_map + (pfn))
114# define page_to_pfn(page) ((unsigned long)((page) - mem_map))
115#else
116# error CONFIG_DISCONTIGMEM not supported 113# error CONFIG_DISCONTIGMEM not supported
117#endif 114#endif
118 115
@@ -130,4 +127,5 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
130 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 127 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
131 128
132#endif /* __KERNEL__ */ 129#endif /* __KERNEL__ */
130#include <asm-generic/memory_model.h>
133#endif /* _XTENSA_PAGE_H */ 131#endif /* _XTENSA_PAGE_H */
diff --git a/include/linux/adb.h b/include/linux/adb.h
index e9fdc63483c7..b7305b178279 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -85,7 +85,7 @@ enum adb_message {
85 ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ 85 ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
86}; 86};
87extern struct adb_driver *adb_controller; 87extern struct adb_driver *adb_controller;
88extern struct notifier_block *adb_client_list; 88extern struct blocking_notifier_head adb_client_list;
89 89
90int adb_request(struct adb_request *req, void (*done)(struct adb_request *), 90int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
91 int flags, int nbytes, ...); 91 int flags, int nbytes, ...);
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index 9343c89d843c..0a6bc52ffe88 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -19,18 +19,37 @@
19#undef AUTOFS_MIN_PROTO_VERSION 19#undef AUTOFS_MIN_PROTO_VERSION
20#undef AUTOFS_MAX_PROTO_VERSION 20#undef AUTOFS_MAX_PROTO_VERSION
21 21
22#define AUTOFS_PROTO_VERSION 4 22#define AUTOFS_PROTO_VERSION 5
23#define AUTOFS_MIN_PROTO_VERSION 3 23#define AUTOFS_MIN_PROTO_VERSION 3
24#define AUTOFS_MAX_PROTO_VERSION 4 24#define AUTOFS_MAX_PROTO_VERSION 5
25 25
26#define AUTOFS_PROTO_SUBVERSION 7 26#define AUTOFS_PROTO_SUBVERSION 0
27 27
28/* Mask for expire behaviour */ 28/* Mask for expire behaviour */
29#define AUTOFS_EXP_IMMEDIATE 1 29#define AUTOFS_EXP_IMMEDIATE 1
30#define AUTOFS_EXP_LEAVES 2 30#define AUTOFS_EXP_LEAVES 2
31 31
32/* New message type */ 32/* Daemon notification packet types */
33#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */ 33enum autofs_notify {
34 NFY_NONE,
35 NFY_MOUNT,
36 NFY_EXPIRE
37};
38
39/* Kernel protocol version 4 packet types */
40
41/* Expire entry (umount request) */
42#define autofs_ptype_expire_multi 2
43
44/* Kernel protocol version 5 packet types */
45
46/* Indirect mount missing and expire requests. */
47#define autofs_ptype_missing_indirect 3
48#define autofs_ptype_expire_indirect 4
49
50/* Direct mount missing and expire requests */
51#define autofs_ptype_missing_direct 5
52#define autofs_ptype_expire_direct 6
34 53
35/* v4 multi expire (via pipe) */ 54/* v4 multi expire (via pipe) */
36struct autofs_packet_expire_multi { 55struct autofs_packet_expire_multi {
@@ -40,14 +59,36 @@ struct autofs_packet_expire_multi {
40 char name[NAME_MAX+1]; 59 char name[NAME_MAX+1];
41}; 60};
42 61
62/* autofs v5 common packet struct */
63struct autofs_v5_packet {
64 struct autofs_packet_hdr hdr;
65 autofs_wqt_t wait_queue_token;
66 __u32 dev;
67 __u64 ino;
68 __u32 uid;
69 __u32 gid;
70 __u32 pid;
71 __u32 tgid;
72 __u32 len;
73 char name[NAME_MAX+1];
74};
75
76typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
77typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
78typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
79typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
80
43union autofs_packet_union { 81union autofs_packet_union {
44 struct autofs_packet_hdr hdr; 82 struct autofs_packet_hdr hdr;
45 struct autofs_packet_missing missing; 83 struct autofs_packet_missing missing;
46 struct autofs_packet_expire expire; 84 struct autofs_packet_expire expire;
47 struct autofs_packet_expire_multi expire_multi; 85 struct autofs_packet_expire_multi expire_multi;
86 struct autofs_v5_packet v5_packet;
48}; 87};
49 88
50#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) 89#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
90#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
91#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
51#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) 92#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int)
52#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) 93#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int)
53#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) 94#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index f17525a963d1..5d1eabcde5d5 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -3,88 +3,11 @@
3#include <asm/types.h> 3#include <asm/types.h>
4 4
5/* 5/*
6 * ffs: find first bit set. This is defined the same way as
7 * the libc and compiler builtin ffs routines, therefore
8 * differs in spirit from the above ffz (man ffs).
9 */
10
11static inline int generic_ffs(int x)
12{
13 int r = 1;
14
15 if (!x)
16 return 0;
17 if (!(x & 0xffff)) {
18 x >>= 16;
19 r += 16;
20 }
21 if (!(x & 0xff)) {
22 x >>= 8;
23 r += 8;
24 }
25 if (!(x & 0xf)) {
26 x >>= 4;
27 r += 4;
28 }
29 if (!(x & 3)) {
30 x >>= 2;
31 r += 2;
32 }
33 if (!(x & 1)) {
34 x >>= 1;
35 r += 1;
36 }
37 return r;
38}
39
40/*
41 * fls: find last bit set.
42 */
43
44static __inline__ int generic_fls(int x)
45{
46 int r = 32;
47
48 if (!x)
49 return 0;
50 if (!(x & 0xffff0000u)) {
51 x <<= 16;
52 r -= 16;
53 }
54 if (!(x & 0xff000000u)) {
55 x <<= 8;
56 r -= 8;
57 }
58 if (!(x & 0xf0000000u)) {
59 x <<= 4;
60 r -= 4;
61 }
62 if (!(x & 0xc0000000u)) {
63 x <<= 2;
64 r -= 2;
65 }
66 if (!(x & 0x80000000u)) {
67 x <<= 1;
68 r -= 1;
69 }
70 return r;
71}
72
73/*
74 * Include this here because some architectures need generic_ffs/fls in 6 * Include this here because some architectures need generic_ffs/fls in
75 * scope 7 * scope
76 */ 8 */
77#include <asm/bitops.h> 9#include <asm/bitops.h>
78 10
79
80static inline int generic_fls64(__u64 x)
81{
82 __u32 h = x >> 32;
83 if (h)
84 return fls(h) + 32;
85 return fls(x);
86}
87
88static __inline__ int get_bitmask_order(unsigned int count) 11static __inline__ int get_bitmask_order(unsigned int count)
89{ 12{
90 int order; 13 int order;
@@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count)
103 return order; 26 return order;
104} 27}
105 28
106/*
107 * hweightN: returns the hamming weight (i.e. the number
108 * of bits set) of a N-bit word
109 */
110
111static inline unsigned int generic_hweight32(unsigned int w)
112{
113 unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
114 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
115 res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
116 res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
117 return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
118}
119
120static inline unsigned int generic_hweight16(unsigned int w)
121{
122 unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
123 res = (res & 0x3333) + ((res >> 2) & 0x3333);
124 res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
125 return (res & 0x00FF) + ((res >> 8) & 0x00FF);
126}
127
128static inline unsigned int generic_hweight8(unsigned int w)
129{
130 unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
131 res = (res & 0x33) + ((res >> 2) & 0x33);
132 return (res & 0x0F) + ((res >> 4) & 0x0F);
133}
134
135static inline unsigned long generic_hweight64(__u64 w)
136{
137#if BITS_PER_LONG < 64
138 return generic_hweight32((unsigned int)(w >> 32)) +
139 generic_hweight32((unsigned int)w);
140#else
141 u64 res;
142 res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
143 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
144 res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
145 res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
146 res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
147 return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
148#endif
149}
150
151static inline unsigned long hweight_long(unsigned long w) 29static inline unsigned long hweight_long(unsigned long w)
152{ 30{
153 return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w); 31 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
154} 32}
155 33
156/* 34/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c179966f1a2f..d0cac8b58de7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -55,25 +55,29 @@ struct as_io_context {
55 55
56struct cfq_queue; 56struct cfq_queue;
57struct cfq_io_context { 57struct cfq_io_context {
58 /* 58 struct rb_node rb_node;
59 * circular list of cfq_io_contexts belonging to a process io context
60 */
61 struct list_head list;
62 struct cfq_queue *cfqq[2];
63 void *key; 59 void *key;
64 60
61 struct cfq_queue *cfqq[2];
62
65 struct io_context *ioc; 63 struct io_context *ioc;
66 64
67 unsigned long last_end_request; 65 unsigned long last_end_request;
68 unsigned long last_queue; 66 sector_t last_request_pos;
67 unsigned long last_queue;
68
69 unsigned long ttime_total; 69 unsigned long ttime_total;
70 unsigned long ttime_samples; 70 unsigned long ttime_samples;
71 unsigned long ttime_mean; 71 unsigned long ttime_mean;
72 72
73 unsigned int seek_samples;
74 u64 seek_total;
75 sector_t seek_mean;
76
73 struct list_head queue_list; 77 struct list_head queue_list;
74 78
75 void (*dtor)(struct cfq_io_context *); 79 void (*dtor)(struct io_context *); /* destructor */
76 void (*exit)(struct cfq_io_context *); 80 void (*exit)(struct io_context *); /* called on task exit */
77}; 81};
78 82
79/* 83/*
@@ -94,7 +98,7 @@ struct io_context {
94 int nr_batch_requests; /* Number of requests left in the batch */ 98 int nr_batch_requests; /* Number of requests left in the batch */
95 99
96 struct as_io_context *aic; 100 struct as_io_context *aic;
97 struct cfq_io_context *cic; 101 struct rb_root cic_root;
98}; 102};
99 103
100void put_io_context(struct io_context *ioc); 104void put_io_context(struct io_context *ioc);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7155452fb4a8..de3eb8d8ae26 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -38,6 +38,7 @@ typedef struct bootmem_data {
38 unsigned long last_pos; 38 unsigned long last_pos;
39 unsigned long last_success; /* Previous allocation point. To speed 39 unsigned long last_success; /* Previous allocation point. To speed
40 * up searching */ 40 * up searching */
41 struct list_head list;
41} bootmem_data_t; 42} bootmem_data_t;
42 43
43extern unsigned long __init bootmem_bootmap_pages (unsigned long); 44extern unsigned long __init bootmem_bootmap_pages (unsigned long);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 9f159baf153f..fb7e9b7ccbe3 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -46,25 +46,28 @@ struct address_space;
46typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); 46typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
47 47
48/* 48/*
49 * Keep related fields in common cachelines. The most commonly accessed 49 * Historically, a buffer_head was used to map a single block
50 * field (b_state) goes at the start so the compiler does not generate 50 * within a page, and of course as the unit of I/O through the
51 * indexed addressing for it. 51 * filesystem and block layers. Nowadays the basic I/O unit
52 * is the bio, and buffer_heads are used for extracting block
53 * mappings (via a get_block_t call), for tracking state within
54 * a page (via a page_mapping) and for wrapping bio submission
55 * for backward compatibility reasons (e.g. submit_bh).
52 */ 56 */
53struct buffer_head { 57struct buffer_head {
54 /* First cache line: */
55 unsigned long b_state; /* buffer state bitmap (see above) */ 58 unsigned long b_state; /* buffer state bitmap (see above) */
56 struct buffer_head *b_this_page;/* circular list of page's buffers */ 59 struct buffer_head *b_this_page;/* circular list of page's buffers */
57 struct page *b_page; /* the page this bh is mapped to */ 60 struct page *b_page; /* the page this bh is mapped to */
58 atomic_t b_count; /* users using this block */
59 u32 b_size; /* block size */
60 61
61 sector_t b_blocknr; /* block number */ 62 sector_t b_blocknr; /* start block number */
62 char *b_data; /* pointer to data block */ 63 size_t b_size; /* size of mapping */
64 char *b_data; /* pointer to data within the page */
63 65
64 struct block_device *b_bdev; 66 struct block_device *b_bdev;
65 bh_end_io_t *b_end_io; /* I/O completion */ 67 bh_end_io_t *b_end_io; /* I/O completion */
66 void *b_private; /* reserved for b_end_io */ 68 void *b_private; /* reserved for b_end_io */
67 struct list_head b_assoc_buffers; /* associated with another mapping */ 69 struct list_head b_assoc_buffers; /* associated with another mapping */
70 atomic_t b_count; /* users using this buffer_head */
68}; 71};
69 72
70/* 73/*
@@ -189,8 +192,8 @@ extern int buffer_heads_over_limit;
189 * address_spaces. 192 * address_spaces.
190 */ 193 */
191int try_to_release_page(struct page * page, gfp_t gfp_mask); 194int try_to_release_page(struct page * page, gfp_t gfp_mask);
192int block_invalidatepage(struct page *page, unsigned long offset); 195void block_invalidatepage(struct page *page, unsigned long offset);
193int do_invalidatepage(struct page *page, unsigned long offset); 196void do_invalidatepage(struct page *page, unsigned long offset);
194int block_write_full_page(struct page *page, get_block_t *get_block, 197int block_write_full_page(struct page *page, get_block_t *get_block,
195 struct writeback_control *wbc); 198 struct writeback_control *wbc);
196int block_read_full_page(struct page*, get_block_t*); 199int block_read_full_page(struct page*, get_block_t*);
@@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
200int generic_cont_expand(struct inode *inode, loff_t size); 203int generic_cont_expand(struct inode *inode, loff_t size);
201int generic_cont_expand_simple(struct inode *inode, loff_t size); 204int generic_cont_expand_simple(struct inode *inode, loff_t size);
202int block_commit_write(struct page *page, unsigned from, unsigned to); 205int block_commit_write(struct page *page, unsigned from, unsigned to);
203int block_sync_page(struct page *); 206void block_sync_page(struct page *);
204sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 207sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
205int generic_commit_write(struct file *, struct page *, unsigned, unsigned); 208int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
206int block_truncate_page(struct address_space *, loff_t, get_block_t *); 209int block_truncate_page(struct address_space *, loff_t, get_block_t *);
@@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
277 set_buffer_mapped(bh); 280 set_buffer_mapped(bh);
278 bh->b_bdev = sb->s_bdev; 281 bh->b_bdev = sb->s_bdev;
279 bh->b_blocknr = block; 282 bh->b_blocknr = block;
283 bh->b_size = sb->s_blocksize;
280} 284}
281 285
282/* 286/*
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 8da37e29cb87..2216638962d2 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -5,13 +5,13 @@
5struct cdev { 5struct cdev {
6 struct kobject kobj; 6 struct kobject kobj;
7 struct module *owner; 7 struct module *owner;
8 struct file_operations *ops; 8 const struct file_operations *ops;
9 struct list_head list; 9 struct list_head list;
10 dev_t dev; 10 dev_t dev;
11 unsigned int count; 11 unsigned int count;
12}; 12};
13 13
14void cdev_init(struct cdev *, struct file_operations *); 14void cdev_init(struct cdev *, const struct file_operations *);
15 15
16struct cdev *cdev_alloc(void); 16struct cdev *cdev_alloc(void);
17 17
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index cc621ec409d8..b3ecf8f71d97 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -30,9 +30,9 @@ extern struct inode_operations coda_ioctl_inode_operations;
30extern struct address_space_operations coda_file_aops; 30extern struct address_space_operations coda_file_aops;
31extern struct address_space_operations coda_symlink_aops; 31extern struct address_space_operations coda_symlink_aops;
32 32
33extern struct file_operations coda_dir_operations; 33extern const struct file_operations coda_dir_operations;
34extern struct file_operations coda_file_operations; 34extern const struct file_operations coda_file_operations;
35extern struct file_operations coda_ioctl_operations; 35extern const struct file_operations coda_ioctl_operations;
36 36
37/* operations shared over more than one file */ 37/* operations shared over more than one file */
38int coda_open(struct inode *i, struct file *f); 38int coda_open(struct inode *i, struct file *f);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c9ab2a26348c..6d3a654be1ae 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -45,6 +45,32 @@ struct compat_tms {
45 compat_clock_t tms_cstime; 45 compat_clock_t tms_cstime;
46}; 46};
47 47
48struct compat_timex {
49 compat_uint_t modes;
50 compat_long_t offset;
51 compat_long_t freq;
52 compat_long_t maxerror;
53 compat_long_t esterror;
54 compat_int_t status;
55 compat_long_t constant;
56 compat_long_t precision;
57 compat_long_t tolerance;
58 struct compat_timeval time;
59 compat_long_t tick;
60 compat_long_t ppsfreq;
61 compat_long_t jitter;
62 compat_int_t shift;
63 compat_long_t stabil;
64 compat_long_t jitcnt;
65 compat_long_t calcnt;
66 compat_long_t errcnt;
67 compat_long_t stbcnt;
68
69 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
70 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
71 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
72};
73
48#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) 74#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
49 75
50typedef struct { 76typedef struct {
@@ -121,6 +147,24 @@ typedef struct compat_sigevent {
121 } _sigev_un; 147 } _sigev_un;
122} compat_sigevent_t; 148} compat_sigevent_t;
123 149
150struct compat_robust_list {
151 compat_uptr_t next;
152};
153
154struct compat_robust_list_head {
155 struct compat_robust_list list;
156 compat_long_t futex_offset;
157 compat_uptr_t list_op_pending;
158};
159
160extern void compat_exit_robust_list(struct task_struct *curr);
161
162asmlinkage long
163compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
164 compat_size_t len);
165asmlinkage long
166compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
167 compat_size_t __user *len_ptr);
124 168
125long compat_sys_semctl(int first, int second, int third, void __user *uptr); 169long compat_sys_semctl(int first, int second, int third, void __user *uptr);
126long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); 170long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
@@ -181,5 +225,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
181 return lhs->tv_nsec - rhs->tv_nsec; 225 return lhs->tv_nsec - rhs->tv_nsec;
182} 226}
183 227
228asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
229
184#endif /* CONFIG_COMPAT */ 230#endif /* CONFIG_COMPAT */
185#endif /* _LINUX_COMPAT_H */ 231#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index efb518f16bb3..89ab677cb993 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -140,6 +140,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
140COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) 140COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
141COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) 141COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
142COMPATIBLE_IOCTL(DM_TARGET_MSG_32) 142COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
143COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
143COMPATIBLE_IOCTL(DM_VERSION) 144COMPATIBLE_IOCTL(DM_VERSION)
144COMPATIBLE_IOCTL(DM_REMOVE_ALL) 145COMPATIBLE_IOCTL(DM_REMOVE_ALL)
145COMPATIBLE_IOCTL(DM_LIST_DEVICES) 146COMPATIBLE_IOCTL(DM_LIST_DEVICES)
@@ -155,6 +156,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS)
155COMPATIBLE_IOCTL(DM_TABLE_STATUS) 156COMPATIBLE_IOCTL(DM_TABLE_STATUS)
156COMPATIBLE_IOCTL(DM_LIST_VERSIONS) 157COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
157COMPATIBLE_IOCTL(DM_TARGET_MSG) 158COMPATIBLE_IOCTL(DM_TARGET_MSG)
159COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
158/* Big K */ 160/* Big K */
159COMPATIBLE_IOCTL(PIO_FONT) 161COMPATIBLE_IOCTL(PIO_FONT)
160COMPATIBLE_IOCTL(GIO_FONT) 162COMPATIBLE_IOCTL(GIO_FONT)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 99e6115d8e52..9cbb781d6f80 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -67,7 +67,7 @@
67 * 67 *
68 * int any_online_cpu(mask) First online cpu in mask 68 * int any_online_cpu(mask) First online cpu in mask
69 * 69 *
70 * for_each_cpu(cpu) for-loop cpu over cpu_possible_map 70 * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
71 * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map 71 * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
72 * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map 72 * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
73 * 73 *
@@ -405,7 +405,8 @@ int __any_online_cpu(const cpumask_t *mask);
405#define any_online_cpu(mask) 0 405#define any_online_cpu(mask) 0
406#endif 406#endif
407 407
408#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) 408#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
409#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
409#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) 410#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
410#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) 411#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
411 412
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 534d750d922d..32503657f14f 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -11,7 +11,7 @@
11extern unsigned long long elfcorehdr_addr; 11extern unsigned long long elfcorehdr_addr;
12extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 12extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
13 unsigned long, int); 13 unsigned long, int);
14extern struct file_operations proc_vmcore_operations; 14extern const struct file_operations proc_vmcore_operations;
15extern struct proc_dir_entry *proc_vmcore; 15extern struct proc_dir_entry *proc_vmcore;
16 16
17#endif /* CONFIG_CRASH_DUMP */ 17#endif /* CONFIG_CRASH_DUMP */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4b0428e335be..176e2d371577 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -29,7 +29,7 @@ struct debugfs_blob_wrapper {
29#if defined(CONFIG_DEBUG_FS) 29#if defined(CONFIG_DEBUG_FS)
30struct dentry *debugfs_create_file(const char *name, mode_t mode, 30struct dentry *debugfs_create_file(const char *name, mode_t mode,
31 struct dentry *parent, void *data, 31 struct dentry *parent, void *data,
32 struct file_operations *fops); 32 const struct file_operations *fops);
33 33
34struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); 34struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
35 35
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51e0e95a421a..aee10b2ea4c6 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -97,6 +97,7 @@ struct io_restrictions {
97 unsigned short hardsect_size; 97 unsigned short hardsect_size;
98 unsigned int max_segment_size; 98 unsigned int max_segment_size;
99 unsigned long seg_boundary_mask; 99 unsigned long seg_boundary_mask;
100 unsigned char no_cluster; /* inverted so that 0 is default */
100}; 101};
101 102
102struct dm_target { 103struct dm_target {
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index fa75ba0d635e..c67c6786612a 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -80,6 +80,16 @@
80 * 80 *
81 * DM_TARGET_MSG: 81 * DM_TARGET_MSG:
82 * Pass a message string to the target at a specific offset of a device. 82 * Pass a message string to the target at a specific offset of a device.
83 *
84 * DM_DEV_SET_GEOMETRY:
85 * Set the geometry of a device by passing in a string in this format:
86 *
87 * "cylinders heads sectors_per_track start_sector"
88 *
89 * Beware that CHS geometry is nearly obsolete and only provided
90 * for compatibility with dm devices that can be booted by a PC
91 * BIOS. See struct hd_geometry for range limits. Also note that
92 * the geometry is erased if the device size changes.
83 */ 93 */
84 94
85/* 95/*
@@ -218,6 +228,7 @@ enum {
218 /* Added later */ 228 /* Added later */
219 DM_LIST_VERSIONS_CMD, 229 DM_LIST_VERSIONS_CMD,
220 DM_TARGET_MSG_CMD, 230 DM_TARGET_MSG_CMD,
231 DM_DEV_SET_GEOMETRY_CMD
221}; 232};
222 233
223/* 234/*
@@ -247,6 +258,7 @@ typedef char ioctl_struct[308];
247#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct) 258#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct)
248#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct) 259#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct)
249#define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct) 260#define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct)
261#define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct)
250#endif 262#endif
251 263
252#define DM_IOCTL 0xfd 264#define DM_IOCTL 0xfd
@@ -270,11 +282,12 @@ typedef char ioctl_struct[308];
270#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) 282#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl)
271 283
272#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) 284#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
285#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
273 286
274#define DM_VERSION_MAJOR 4 287#define DM_VERSION_MAJOR 4
275#define DM_VERSION_MINOR 5 288#define DM_VERSION_MINOR 6
276#define DM_VERSION_PATCHLEVEL 0 289#define DM_VERSION_PATCHLEVEL 0
277#define DM_VERSION_EXTRA "-ioctl (2005-10-04)" 290#define DM_VERSION_EXTRA "-ioctl (2006-02-17)"
278 291
279/* Status bits */ 292/* Status bits */
280#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 293#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index a8731062a74c..9b4751aecc23 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -21,6 +21,7 @@ enum dma_data_direction {
21#define DMA_30BIT_MASK 0x000000003fffffffULL 21#define DMA_30BIT_MASK 0x000000003fffffffULL
22#define DMA_29BIT_MASK 0x000000001fffffffULL 22#define DMA_29BIT_MASK 0x000000001fffffffULL
23#define DMA_28BIT_MASK 0x000000000fffffffULL 23#define DMA_28BIT_MASK 0x000000000fffffffULL
24#define DMA_24BIT_MASK 0x0000000000ffffffULL
24 25
25#include <asm/dma-mapping.h> 26#include <asm/dma-mapping.h>
26 27
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c7c5dd316182..e203613d3aec 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -240,19 +240,21 @@ struct efi_memory_map {
240 unsigned long desc_size; 240 unsigned long desc_size;
241}; 241};
242 242
243#define EFI_INVALID_TABLE_ADDR (~0UL)
244
243/* 245/*
244 * All runtime access to EFI goes through this structure: 246 * All runtime access to EFI goes through this structure:
245 */ 247 */
246extern struct efi { 248extern struct efi {
247 efi_system_table_t *systab; /* EFI system table */ 249 efi_system_table_t *systab; /* EFI system table */
248 void *mps; /* MPS table */ 250 unsigned long mps; /* MPS table */
249 void *acpi; /* ACPI table (IA64 ext 0.71) */ 251 unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
250 void *acpi20; /* ACPI table (ACPI 2.0) */ 252 unsigned long acpi20; /* ACPI table (ACPI 2.0) */
251 void *smbios; /* SM BIOS table */ 253 unsigned long smbios; /* SM BIOS table */
252 void *sal_systab; /* SAL system table */ 254 unsigned long sal_systab; /* SAL system table */
253 void *boot_info; /* boot info table */ 255 unsigned long boot_info; /* boot info table */
254 void *hcdp; /* HCDP table */ 256 unsigned long hcdp; /* HCDP table */
255 void *uga; /* UGA table */ 257 unsigned long uga; /* UGA table */
256 efi_get_time_t *get_time; 258 efi_get_time_t *get_time;
257 efi_set_time_t *set_time; 259 efi_set_time_t *set_time;
258 efi_get_wakeup_time_t *get_wakeup_time; 260 efi_get_wakeup_time_t *get_wakeup_time;
@@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
292extern u64 efi_get_iobase (void); 294extern u64 efi_get_iobase (void);
293extern u32 efi_mem_type (unsigned long phys_addr); 295extern u32 efi_mem_type (unsigned long phys_addr);
294extern u64 efi_mem_attributes (unsigned long phys_addr); 296extern u64 efi_mem_attributes (unsigned long phys_addr);
297extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
298 u64 attr);
295extern int __init efi_uart_console_only (void); 299extern int __init efi_uart_console_only (void);
296extern void efi_initialize_iomem_resources(struct resource *code_resource, 300extern void efi_initialize_iomem_resources(struct resource *code_resource,
297 struct resource *data_resource); 301 struct resource *data_resource);
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index 28f368c526fb..fbfa6b52e2fb 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -37,7 +37,7 @@ static inline struct efs_sb_info *SUPER_INFO(struct super_block *sb)
37struct statfs; 37struct statfs;
38 38
39extern struct inode_operations efs_dir_inode_operations; 39extern struct inode_operations efs_dir_inode_operations;
40extern struct file_operations efs_dir_operations; 40extern const struct file_operations efs_dir_operations;
41extern struct address_space_operations efs_symlink_aops; 41extern struct address_space_operations efs_symlink_aops;
42 42
43extern void efs_read_inode(struct inode *); 43extern void efs_read_inode(struct inode *);
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e7239f2f97a1..3ade6a4e3bdd 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -36,7 +36,8 @@ struct statfs;
36 * Define EXT3_RESERVATION to reserve data blocks for expanding files 36 * Define EXT3_RESERVATION to reserve data blocks for expanding files
37 */ 37 */
38#define EXT3_DEFAULT_RESERVE_BLOCKS 8 38#define EXT3_DEFAULT_RESERVE_BLOCKS 8
39#define EXT3_MAX_RESERVE_BLOCKS 1024 39/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
40#define EXT3_MAX_RESERVE_BLOCKS 1027
40#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 41#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
41/* 42/*
42 * Always enable hashed directories 43 * Always enable hashed directories
@@ -732,6 +733,8 @@ struct dir_private_info {
732extern int ext3_bg_has_super(struct super_block *sb, int group); 733extern int ext3_bg_has_super(struct super_block *sb, int group);
733extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); 734extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
734extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); 735extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
736extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long,
737 unsigned long *, int *);
735extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, 738extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
736 unsigned long); 739 unsigned long);
737extern void ext3_free_blocks_sb (handle_t *, struct super_block *, 740extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
@@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
775int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); 778int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
776struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); 779struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
777struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); 780struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
778int ext3_get_block_handle(handle_t *handle, struct inode *inode, 781int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
779 sector_t iblock, struct buffer_head *bh_result, int create, 782 sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
780 int extend_disksize); 783 int create, int extend_disksize);
781 784
782extern void ext3_read_inode (struct inode *); 785extern void ext3_read_inode (struct inode *);
783extern int ext3_write_inode (struct inode *, int); 786extern int ext3_write_inode (struct inode *, int);
@@ -830,11 +833,11 @@ do { \
830 */ 833 */
831 834
832/* dir.c */ 835/* dir.c */
833extern struct file_operations ext3_dir_operations; 836extern const struct file_operations ext3_dir_operations;
834 837
835/* file.c */ 838/* file.c */
836extern struct inode_operations ext3_file_inode_operations; 839extern struct inode_operations ext3_file_inode_operations;
837extern struct file_operations ext3_file_operations; 840extern const struct file_operations ext3_file_operations;
838 841
839/* namei.c */ 842/* namei.c */
840extern struct inode_operations ext3_dir_inode_operations; 843extern struct inode_operations ext3_dir_inode_operations;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 2cb19e6503aa..d03fadfcafe3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -734,7 +734,7 @@ struct fb_tile_ops {
734 734
735/* A driver may set this flag to indicate that it does want a set_par to be 735/* A driver may set this flag to indicate that it does want a set_par to be
736 * called every time when fbcon_switch is executed. The advantage is that with 736 * called every time when fbcon_switch is executed. The advantage is that with
737 * this flag set you can really be shure that set_par is always called before 737 * this flag set you can really be sure that set_par is always called before
738 * any of the functions dependant on the correct hardware state or altering 738 * any of the functions dependant on the correct hardware state or altering
739 * that state, even if you are using some broken X releases. The disadvantage 739 * that state, even if you are using some broken X releases. The disadvantage
740 * is that it introduces unwanted delays to every console switch if set_par 740 * is that it introduces unwanted delays to every console switch if set_par
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5adf32b90f36..408fe89498f4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -252,9 +252,6 @@ extern void __init files_init(unsigned long);
252struct buffer_head; 252struct buffer_head;
253typedef int (get_block_t)(struct inode *inode, sector_t iblock, 253typedef int (get_block_t)(struct inode *inode, sector_t iblock,
254 struct buffer_head *bh_result, int create); 254 struct buffer_head *bh_result, int create);
255typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
256 unsigned long max_blocks,
257 struct buffer_head *bh_result, int create);
258typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, 255typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
259 ssize_t bytes, void *private); 256 ssize_t bytes, void *private);
260 257
@@ -350,7 +347,7 @@ struct writeback_control;
350struct address_space_operations { 347struct address_space_operations {
351 int (*writepage)(struct page *page, struct writeback_control *wbc); 348 int (*writepage)(struct page *page, struct writeback_control *wbc);
352 int (*readpage)(struct file *, struct page *); 349 int (*readpage)(struct file *, struct page *);
353 int (*sync_page)(struct page *); 350 void (*sync_page)(struct page *);
354 351
355 /* Write back some dirty pages from this mapping. */ 352 /* Write back some dirty pages from this mapping. */
356 int (*writepages)(struct address_space *, struct writeback_control *); 353 int (*writepages)(struct address_space *, struct writeback_control *);
@@ -369,7 +366,7 @@ struct address_space_operations {
369 int (*commit_write)(struct file *, struct page *, unsigned, unsigned); 366 int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
370 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ 367 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
371 sector_t (*bmap)(struct address_space *, sector_t); 368 sector_t (*bmap)(struct address_space *, sector_t);
372 int (*invalidatepage) (struct page *, unsigned long); 369 void (*invalidatepage) (struct page *, unsigned long);
373 int (*releasepage) (struct page *, gfp_t); 370 int (*releasepage) (struct page *, gfp_t);
374 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 371 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
375 loff_t offset, unsigned long nr_segs); 372 loff_t offset, unsigned long nr_segs);
@@ -413,6 +410,9 @@ struct block_device {
413 struct list_head bd_inodes; 410 struct list_head bd_inodes;
414 void * bd_holder; 411 void * bd_holder;
415 int bd_holders; 412 int bd_holders;
413#ifdef CONFIG_SYSFS
414 struct list_head bd_holder_list;
415#endif
416 struct block_device * bd_contains; 416 struct block_device * bd_contains;
417 unsigned bd_block_size; 417 unsigned bd_block_size;
418 struct hd_struct * bd_part; 418 struct hd_struct * bd_part;
@@ -490,13 +490,13 @@ struct inode {
490 unsigned int i_blkbits; 490 unsigned int i_blkbits;
491 unsigned long i_blksize; 491 unsigned long i_blksize;
492 unsigned long i_version; 492 unsigned long i_version;
493 unsigned long i_blocks; 493 blkcnt_t i_blocks;
494 unsigned short i_bytes; 494 unsigned short i_bytes;
495 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 495 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
496 struct mutex i_mutex; 496 struct mutex i_mutex;
497 struct rw_semaphore i_alloc_sem; 497 struct rw_semaphore i_alloc_sem;
498 struct inode_operations *i_op; 498 struct inode_operations *i_op;
499 struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 499 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
500 struct super_block *i_sb; 500 struct super_block *i_sb;
501 struct file_lock *i_flock; 501 struct file_lock *i_flock;
502 struct address_space *i_mapping; 502 struct address_space *i_mapping;
@@ -636,7 +636,7 @@ struct file {
636 } f_u; 636 } f_u;
637 struct dentry *f_dentry; 637 struct dentry *f_dentry;
638 struct vfsmount *f_vfsmnt; 638 struct vfsmount *f_vfsmnt;
639 struct file_operations *f_op; 639 const struct file_operations *f_op;
640 atomic_t f_count; 640 atomic_t f_count;
641 unsigned int f_flags; 641 unsigned int f_flags;
642 mode_t f_mode; 642 mode_t f_mode;
@@ -763,6 +763,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *);
763extern void locks_remove_posix(struct file *, fl_owner_t); 763extern void locks_remove_posix(struct file *, fl_owner_t);
764extern void locks_remove_flock(struct file *); 764extern void locks_remove_flock(struct file *);
765extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); 765extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *);
766extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *);
766extern int posix_lock_file(struct file *, struct file_lock *); 767extern int posix_lock_file(struct file *, struct file_lock *);
767extern int posix_lock_file_wait(struct file *, struct file_lock *); 768extern int posix_lock_file_wait(struct file *, struct file_lock *);
768extern int posix_unblock_lock(struct file *, struct file_lock *); 769extern int posix_unblock_lock(struct file *, struct file_lock *);
@@ -1389,11 +1390,11 @@ extern void bd_set_size(struct block_device *, loff_t size);
1389extern void bd_forget(struct inode *inode); 1390extern void bd_forget(struct inode *inode);
1390extern void bdput(struct block_device *); 1391extern void bdput(struct block_device *);
1391extern struct block_device *open_by_devnum(dev_t, unsigned); 1392extern struct block_device *open_by_devnum(dev_t, unsigned);
1392extern struct file_operations def_blk_fops; 1393extern const struct file_operations def_blk_fops;
1393extern struct address_space_operations def_blk_aops; 1394extern struct address_space_operations def_blk_aops;
1394extern struct file_operations def_chr_fops; 1395extern const struct file_operations def_chr_fops;
1395extern struct file_operations bad_sock_fops; 1396extern const struct file_operations bad_sock_fops;
1396extern struct file_operations def_fifo_fops; 1397extern const struct file_operations def_fifo_fops;
1397extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); 1398extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
1398extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); 1399extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
1399extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); 1400extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
@@ -1401,12 +1402,19 @@ extern int blkdev_get(struct block_device *, mode_t, unsigned);
1401extern int blkdev_put(struct block_device *); 1402extern int blkdev_put(struct block_device *);
1402extern int bd_claim(struct block_device *, void *); 1403extern int bd_claim(struct block_device *, void *);
1403extern void bd_release(struct block_device *); 1404extern void bd_release(struct block_device *);
1405#ifdef CONFIG_SYSFS
1406extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
1407extern void bd_release_from_disk(struct block_device *, struct gendisk *);
1408#else
1409#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder)
1410#define bd_release_from_disk(bdev, disk) bd_release(bdev)
1411#endif
1404 1412
1405/* fs/char_dev.c */ 1413/* fs/char_dev.c */
1406extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); 1414extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
1407extern int register_chrdev_region(dev_t, unsigned, const char *); 1415extern int register_chrdev_region(dev_t, unsigned, const char *);
1408extern int register_chrdev(unsigned int, const char *, 1416extern int register_chrdev(unsigned int, const char *,
1409 struct file_operations *); 1417 const struct file_operations *);
1410extern int unregister_chrdev(unsigned int, const char *); 1418extern int unregister_chrdev(unsigned int, const char *);
1411extern void unregister_chrdev_region(dev_t, unsigned); 1419extern void unregister_chrdev_region(dev_t, unsigned);
1412extern int chrdev_open(struct inode *, struct file *); 1420extern int chrdev_open(struct inode *, struct file *);
@@ -1436,9 +1444,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
1436extern void make_bad_inode(struct inode *); 1444extern void make_bad_inode(struct inode *);
1437extern int is_bad_inode(struct inode *); 1445extern int is_bad_inode(struct inode *);
1438 1446
1439extern struct file_operations read_fifo_fops; 1447extern const struct file_operations read_fifo_fops;
1440extern struct file_operations write_fifo_fops; 1448extern const struct file_operations write_fifo_fops;
1441extern struct file_operations rdwr_fifo_fops; 1449extern const struct file_operations rdwr_fifo_fops;
1442 1450
1443extern int fs_may_remount_ro(struct super_block *); 1451extern int fs_may_remount_ro(struct super_block *);
1444 1452
@@ -1644,7 +1652,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
1644 1652
1645ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 1653ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1646 struct block_device *bdev, const struct iovec *iov, loff_t offset, 1654 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1647 unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, 1655 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1648 int lock_type); 1656 int lock_type);
1649 1657
1650enum { 1658enum {
@@ -1655,32 +1663,32 @@ enum {
1655 1663
1656static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 1664static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
1657 struct inode *inode, struct block_device *bdev, const struct iovec *iov, 1665 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
1658 loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, 1666 loff_t offset, unsigned long nr_segs, get_block_t get_block,
1659 dio_iodone_t end_io) 1667 dio_iodone_t end_io)
1660{ 1668{
1661 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 1669 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
1662 nr_segs, get_blocks, end_io, DIO_LOCKING); 1670 nr_segs, get_block, end_io, DIO_LOCKING);
1663} 1671}
1664 1672
1665static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, 1673static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
1666 struct inode *inode, struct block_device *bdev, const struct iovec *iov, 1674 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
1667 loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, 1675 loff_t offset, unsigned long nr_segs, get_block_t get_block,
1668 dio_iodone_t end_io) 1676 dio_iodone_t end_io)
1669{ 1677{
1670 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 1678 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
1671 nr_segs, get_blocks, end_io, DIO_NO_LOCKING); 1679 nr_segs, get_block, end_io, DIO_NO_LOCKING);
1672} 1680}
1673 1681
1674static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, 1682static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
1675 struct inode *inode, struct block_device *bdev, const struct iovec *iov, 1683 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
1676 loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, 1684 loff_t offset, unsigned long nr_segs, get_block_t get_block,
1677 dio_iodone_t end_io) 1685 dio_iodone_t end_io)
1678{ 1686{
1679 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 1687 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
1680 nr_segs, get_blocks, end_io, DIO_OWN_LOCKING); 1688 nr_segs, get_block, end_io, DIO_OWN_LOCKING);
1681} 1689}
1682 1690
1683extern struct file_operations generic_ro_fops; 1691extern const struct file_operations generic_ro_fops;
1684 1692
1685#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) 1693#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
1686 1694
@@ -1736,9 +1744,9 @@ extern int simple_commit_write(struct file *file, struct page *page,
1736 1744
1737extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); 1745extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
1738extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); 1746extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
1739extern struct file_operations simple_dir_operations; 1747extern const struct file_operations simple_dir_operations;
1740extern struct inode_operations simple_dir_inode_operations; 1748extern struct inode_operations simple_dir_inode_operations;
1741struct tree_descr { char *name; struct file_operations *ops; int mode; }; 1749struct tree_descr { char *name; const struct file_operations *ops; int mode; };
1742struct dentry *d_alloc_name(struct dentry *, const char *); 1750struct dentry *d_alloc_name(struct dentry *, const char *);
1743extern int simple_fill_super(struct super_block *, int, struct tree_descr *); 1751extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
1744extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count); 1752extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 10f96c31971e..966a5b3da439 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_FUTEX_H 1#ifndef _LINUX_FUTEX_H
2#define _LINUX_FUTEX_H 2#define _LINUX_FUTEX_H
3 3
4#include <linux/sched.h>
5
4/* Second argument to futex syscall */ 6/* Second argument to futex syscall */
5 7
6 8
@@ -11,10 +13,97 @@
11#define FUTEX_CMP_REQUEUE 4 13#define FUTEX_CMP_REQUEUE 4
12#define FUTEX_WAKE_OP 5 14#define FUTEX_WAKE_OP 5
13 15
16/*
17 * Support for robust futexes: the kernel cleans up held futexes at
18 * thread exit time.
19 */
20
21/*
22 * Per-lock list entry - embedded in user-space locks, somewhere close
23 * to the futex field. (Note: user-space uses a double-linked list to
24 * achieve O(1) list add and remove, but the kernel only needs to know
25 * about the forward link)
26 *
27 * NOTE: this structure is part of the syscall ABI, and must not be
28 * changed.
29 */
30struct robust_list {
31 struct robust_list __user *next;
32};
33
34/*
35 * Per-thread list head:
36 *
37 * NOTE: this structure is part of the syscall ABI, and must only be
38 * changed if the change is first communicated with the glibc folks.
39 * (When an incompatible change is done, we'll increase the structure
40 * size, which glibc will detect)
41 */
42struct robust_list_head {
43 /*
44 * The head of the list. Points back to itself if empty:
45 */
46 struct robust_list list;
47
48 /*
49 * This relative offset is set by user-space, it gives the kernel
50 * the relative position of the futex field to examine. This way
51 * we keep userspace flexible, to freely shape its data-structure,
52 * without hardcoding any particular offset into the kernel:
53 */
54 long futex_offset;
55
56 /*
57 * The death of the thread may race with userspace setting
58 * up a lock's links. So to handle this race, userspace first
59 * sets this field to the address of the to-be-taken lock,
60 * then does the lock acquire, and then adds itself to the
61 * list, and then clears this field. Hence the kernel will
62 * always have full knowledge of all locks that the thread
63 * _might_ have taken. We check the owner TID in any case,
64 * so only truly owned locks will be handled.
65 */
66 struct robust_list __user *list_op_pending;
67};
68
69/*
70 * Are there any waiters for this robust futex:
71 */
72#define FUTEX_WAITERS 0x80000000
73
74/*
75 * The kernel signals via this bit that a thread holding a futex
76 * has exited without unlocking the futex. The kernel also does
77 * a FUTEX_WAKE on such futexes, after setting the bit, to wake
78 * up any possible waiters:
79 */
80#define FUTEX_OWNER_DIED 0x40000000
81
82/*
83 * The rest of the robust-futex field is for the TID:
84 */
85#define FUTEX_TID_MASK 0x3fffffff
86
87/*
88 * This limit protects against a deliberately circular list.
89 * (Not worth introducing an rlimit for it)
90 */
91#define ROBUST_LIST_LIMIT 2048
92
14long do_futex(unsigned long uaddr, int op, int val, 93long do_futex(unsigned long uaddr, int op, int val,
15 unsigned long timeout, unsigned long uaddr2, int val2, 94 unsigned long timeout, unsigned long uaddr2, int val2,
16 int val3); 95 int val3);
17 96
97extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
98
99#ifdef CONFIG_FUTEX
100extern void exit_robust_list(struct task_struct *curr);
101#else
102static inline void exit_robust_list(struct task_struct *curr)
103{
104}
105#endif
106
18#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ 107#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
19#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ 108#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
20#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ 109#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 2401dea2b867..9c8e6da2393b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name
119} 119}
120 120
121/* 121/*
122 * Use the following fucntions to manipulate gameport's per-port 122 * Use the following functions to manipulate gameport's per-port
123 * driver-specific data. 123 * driver-specific data.
124 */ 124 */
125static inline void *gameport_get_drvdata(struct gameport *gameport) 125static inline void *gameport_get_drvdata(struct gameport *gameport)
@@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
133} 133}
134 134
135/* 135/*
136 * Use the following fucntions to pin gameport's driver in process context 136 * Use the following functions to pin gameport's driver in process context
137 */ 137 */
138static inline int gameport_pin_driver(struct gameport *gameport) 138static inline int gameport_pin_driver(struct gameport *gameport)
139{ 139{
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index fd647fde5ec1..10a27f29d692 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -78,6 +78,7 @@ struct hd_struct {
78 sector_t start_sect; 78 sector_t start_sect;
79 sector_t nr_sects; 79 sector_t nr_sects;
80 struct kobject kobj; 80 struct kobject kobj;
81 struct kobject *holder_dir;
81 unsigned ios[2], sectors[2]; /* READs and WRITEs */ 82 unsigned ios[2], sectors[2]; /* READs and WRITEs */
82 int policy, partno; 83 int policy, partno;
83}; 84};
@@ -89,12 +90,12 @@ struct hd_struct {
89#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 90#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
90 91
91struct disk_stats { 92struct disk_stats {
92 unsigned sectors[2]; /* READs and WRITEs */ 93 unsigned long sectors[2]; /* READs and WRITEs */
93 unsigned ios[2]; 94 unsigned long ios[2];
94 unsigned merges[2]; 95 unsigned long merges[2];
95 unsigned ticks[2]; 96 unsigned long ticks[2];
96 unsigned io_ticks; 97 unsigned long io_ticks;
97 unsigned time_in_queue; 98 unsigned long time_in_queue;
98}; 99};
99 100
100struct gendisk { 101struct gendisk {
@@ -114,6 +115,8 @@ struct gendisk {
114 int number; /* more of the same */ 115 int number; /* more of the same */
115 struct device *driverfs_dev; 116 struct device *driverfs_dev;
116 struct kobject kobj; 117 struct kobject kobj;
118 struct kobject *holder_dir;
119 struct kobject *slave_dir;
117 120
118 struct timer_rand_state *random; 121 struct timer_rand_state *random;
119 int policy; 122 int policy;
@@ -149,14 +152,14 @@ struct disk_attribute {
149({ \ 152({ \
150 typeof(gendiskp->dkstats->field) res = 0; \ 153 typeof(gendiskp->dkstats->field) res = 0; \
151 int i; \ 154 int i; \
152 for_each_cpu(i) \ 155 for_each_possible_cpu(i) \
153 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ 156 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
154 res; \ 157 res; \
155}) 158})
156 159
157static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 160static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
158 int i; 161 int i;
159 for_each_cpu(i) 162 for_each_possible_cpu(i)
160 memset(per_cpu_ptr(gendiskp->dkstats, i), value, 163 memset(per_cpu_ptr(gendiskp->dkstats, i), value,
161 sizeof (struct disk_stats)); 164 sizeof (struct disk_stats));
162} 165}
diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h
new file mode 100644
index 000000000000..70ad09c8ad1e
--- /dev/null
+++ b/include/linux/gigaset_dev.h
@@ -0,0 +1,32 @@
1/*
2 * interface to user space for the gigaset driver
3 *
4 * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 * Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $
13 * =====================================================================
14 */
15
16#ifndef GIGASET_INTERFACE_H
17#define GIGASET_INTERFACE_H
18
19#include <linux/ioctl.h>
20
21#define GIGASET_IOCTL 0x47
22
23#define GIGVER_DRIVER 0
24#define GIGVER_COMPAT 1
25#define GIGVER_FWBASE 2
26
27#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int)
28#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int)
29#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay?
30#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4])
31
32#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6bece9280eb7..892c4ea1b425 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,18 @@
7 7
8#include <asm/cacheflush.h> 8#include <asm/cacheflush.h>
9 9
10#ifndef ARCH_HAS_FLUSH_ANON_PAGE
11static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
12{
13}
14#endif
15
16#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
17static inline void flush_kernel_dcache_page(struct page *page)
18{
19}
20#endif
21
10#ifdef CONFIG_HIGHMEM 22#ifdef CONFIG_HIGHMEM
11 23
12#include <asm/highmem.h> 24#include <asm/highmem.h>
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 27238194b212..707f7cb9e795 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5 5
6#ifdef __KERNEL__
7
6/* 8/*
7 * Offsets into HPET Registers 9 * Offsets into HPET Registers
8 */ 10 */
@@ -85,22 +87,6 @@ struct hpet {
85#define Tn_FSB_INT_ADDR_SHIFT (32UL) 87#define Tn_FSB_INT_ADDR_SHIFT (32UL)
86#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) 88#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL)
87 89
88struct hpet_info {
89 unsigned long hi_ireqfreq; /* Hz */
90 unsigned long hi_flags; /* information */
91 unsigned short hi_hpet;
92 unsigned short hi_timer;
93};
94
95#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
96
97#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
98#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
99#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
100#define HPET_EPI _IO('h', 0x04) /* enable periodic */
101#define HPET_DPI _IO('h', 0x05) /* disable periodic */
102#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
103
104/* 90/*
105 * exported interfaces 91 * exported interfaces
106 */ 92 */
@@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int);
133int hpet_unregister(struct hpet_task *); 119int hpet_unregister(struct hpet_task *);
134int hpet_control(struct hpet_task *, unsigned int, unsigned long); 120int hpet_control(struct hpet_task *, unsigned int, unsigned long);
135 121
122#endif /* __KERNEL__ */
123
124struct hpet_info {
125 unsigned long hi_ireqfreq; /* Hz */
126 unsigned long hi_flags; /* information */
127 unsigned short hi_hpet;
128 unsigned short hi_timer;
129};
130
131#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
132
133#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
134#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
135#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
136#define HPET_EPI _IO('h', 0x04) /* enable periodic */
137#define HPET_DPI _IO('h', 0x05) /* disable periodic */
138#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
139
136#endif /* !__HPET__ */ 140#endif /* !__HPET__ */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6401c31d6add..93830158348e 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -34,15 +34,7 @@ enum hrtimer_restart {
34 HRTIMER_RESTART, 34 HRTIMER_RESTART,
35}; 35};
36 36
37/* 37#define HRTIMER_INACTIVE ((void *)1UL)
38 * Timer states:
39 */
40enum hrtimer_state {
41 HRTIMER_INACTIVE, /* Timer is inactive */
42 HRTIMER_EXPIRED, /* Timer is expired */
43 HRTIMER_RUNNING, /* Timer is running the callback function */
44 HRTIMER_PENDING, /* Timer is pending */
45};
46 38
47struct hrtimer_base; 39struct hrtimer_base;
48 40
@@ -53,9 +45,7 @@ struct hrtimer_base;
53 * @expires: the absolute expiry time in the hrtimers internal 45 * @expires: the absolute expiry time in the hrtimers internal
54 * representation. The time is related to the clock on 46 * representation. The time is related to the clock on
55 * which the timer is based. 47 * which the timer is based.
56 * @state: state of the timer
57 * @function: timer expiry callback function 48 * @function: timer expiry callback function
58 * @data: argument for the callback function
59 * @base: pointer to the timer base (per cpu and per clock) 49 * @base: pointer to the timer base (per cpu and per clock)
60 * 50 *
61 * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() 51 * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
@@ -63,23 +53,23 @@ struct hrtimer_base;
63struct hrtimer { 53struct hrtimer {
64 struct rb_node node; 54 struct rb_node node;
65 ktime_t expires; 55 ktime_t expires;
66 enum hrtimer_state state; 56 int (*function)(struct hrtimer *);
67 int (*function)(void *);
68 void *data;
69 struct hrtimer_base *base; 57 struct hrtimer_base *base;
70}; 58};
71 59
72/** 60/**
73 * struct hrtimer_base - the timer base for a specific clock 61 * struct hrtimer_base - the timer base for a specific clock
74 * 62 *
75 * @index: clock type index for per_cpu support when moving a timer 63 * @index: clock type index for per_cpu support when moving a timer
76 * to a base on another cpu. 64 * to a base on another cpu.
77 * @lock: lock protecting the base and associated timers 65 * @lock: lock protecting the base and associated timers
78 * @active: red black tree root node for the active timers 66 * @active: red black tree root node for the active timers
79 * @first: pointer to the timer node which expires first 67 * @first: pointer to the timer node which expires first
80 * @resolution: the resolution of the clock, in nanoseconds 68 * @resolution: the resolution of the clock, in nanoseconds
81 * @get_time: function to retrieve the current time of the clock 69 * @get_time: function to retrieve the current time of the clock
 82 * @curr_timer:	the timer which is executing a callback right now	 70 * @get_softirq_time:	function to retrieve the current time from the softirq
71 * @curr_timer: the timer which is executing a callback right now
72 * @softirq_time: the time when running the hrtimer queue in the softirq
83 */ 73 */
84struct hrtimer_base { 74struct hrtimer_base {
85 clockid_t index; 75 clockid_t index;
@@ -88,7 +78,9 @@ struct hrtimer_base {
88 struct rb_node *first; 78 struct rb_node *first;
89 ktime_t resolution; 79 ktime_t resolution;
90 ktime_t (*get_time)(void); 80 ktime_t (*get_time)(void);
81 ktime_t (*get_softirq_time)(void);
91 struct hrtimer *curr_timer; 82 struct hrtimer *curr_timer;
83 ktime_t softirq_time;
92}; 84};
93 85
94/* 86/*
@@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void);
122 114
123static inline int hrtimer_active(const struct hrtimer *timer) 115static inline int hrtimer_active(const struct hrtimer *timer)
124{ 116{
125 return timer->state == HRTIMER_PENDING; 117 return timer->node.rb_parent != HRTIMER_INACTIVE;
126} 118}
127 119
128/* Forward a hrtimer so it expires after now: */ 120/* Forward a hrtimer so it expires after now: */
129extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval); 121extern unsigned long
122hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
130 123
131/* Precise sleep: */ 124/* Precise sleep: */
132extern long hrtimer_nanosleep(struct timespec *rqtp, 125extern long hrtimer_nanosleep(struct timespec *rqtp,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d6f1019625af..4c5e610fe442 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -154,7 +154,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
154 return sb->s_fs_info; 154 return sb->s_fs_info;
155} 155}
156 156
157extern struct file_operations hugetlbfs_file_operations; 157extern const struct file_operations hugetlbfs_file_operations;
158extern struct vm_operations_struct hugetlb_vm_ops; 158extern struct vm_operations_struct hugetlb_vm_ops;
159struct file *hugetlb_zero_setup(size_t); 159struct file *hugetlb_zero_setup(size_t);
160int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info, 160int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 679b46a6a565..c8b81f419fd8 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -108,6 +108,10 @@
108#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ 108#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
109#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ 109#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
110#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ 110#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
111#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */
112#define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */
113#define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */
114#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */
111 115
112#define I2C_DRIVERID_I2CDEV 900 116#define I2C_DRIVERID_I2CDEV 900
113#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ 117#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 5a9d8c599171..dd7d627bf66f 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
950 if (!pool->slab) 950 if (!pool->slab)
951 goto free_name; 951 goto free_name;
952 952
953 pool->mempool = 953 pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
954 mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
955 pool->slab);
956 if (!pool->mempool) 954 if (!pool->mempool)
957 goto free_slab; 955 goto free_slab;
958 956
diff --git a/include/linux/input.h b/include/linux/input.h
index 6d4cc3c110d6..1d4e341b72e6 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -957,7 +957,7 @@ struct input_handler {
957 struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id); 957 struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id);
958 void (*disconnect)(struct input_handle *handle); 958 void (*disconnect)(struct input_handle *handle);
959 959
960 struct file_operations *fops; 960 const struct file_operations *fops;
961 int minor; 961 int minor;
962 char *name; 962 char *name;
963 963
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index d6276e60b3bf..0a84b56935c2 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -36,6 +36,7 @@
36 36
37#include <linux/ipmi_msgdefs.h> 37#include <linux/ipmi_msgdefs.h>
38#include <linux/compiler.h> 38#include <linux/compiler.h>
39#include <linux/device.h>
39 40
40/* 41/*
41 * This file describes an interface to an IPMI driver. You have to 42 * This file describes an interface to an IPMI driver. You have to
@@ -397,7 +398,7 @@ struct ipmi_smi_watcher
397 the watcher list. So you can add and remove users from the 398 the watcher list. So you can add and remove users from the
398 IPMI interface, send messages, etc., but you cannot add 399 IPMI interface, send messages, etc., but you cannot add
399 or remove SMI watchers or SMI interfaces. */ 400 or remove SMI watchers or SMI interfaces. */
400 void (*new_smi)(int if_num); 401 void (*new_smi)(int if_num, struct device *dev);
401 void (*smi_gone)(int if_num); 402 void (*smi_gone)(int if_num);
402}; 403};
403 404
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 03bc64dc2ec1..22f5e2afda4f 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -47,6 +47,7 @@
47#define IPMI_NETFN_APP_RESPONSE 0x07 47#define IPMI_NETFN_APP_RESPONSE 0x07
48#define IPMI_GET_DEVICE_ID_CMD 0x01 48#define IPMI_GET_DEVICE_ID_CMD 0x01
49#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 49#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
50#define IPMI_GET_DEVICE_GUID_CMD 0x08
50#define IPMI_GET_MSG_FLAGS_CMD 0x31 51#define IPMI_GET_MSG_FLAGS_CMD 0x31
51#define IPMI_SEND_MSG_CMD 0x34 52#define IPMI_SEND_MSG_CMD 0x34
52#define IPMI_GET_MSG_CMD 0x33 53#define IPMI_GET_MSG_CMD 0x33
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index e36ee157ad67..53571288a9fc 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -37,6 +37,9 @@
37#include <linux/ipmi_msgdefs.h> 37#include <linux/ipmi_msgdefs.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/device.h>
41#include <linux/platform_device.h>
42#include <linux/ipmi_smi.h>
40 43
41/* This files describes the interface for IPMI system management interface 44/* This files describes the interface for IPMI system management interface
42 drivers to bind into the IPMI message handler. */ 45 drivers to bind into the IPMI message handler. */
@@ -113,12 +116,52 @@ struct ipmi_smi_handlers
113 void (*dec_usecount)(void *send_info); 116 void (*dec_usecount)(void *send_info);
114}; 117};
115 118
119struct ipmi_device_id {
120 unsigned char device_id;
121 unsigned char device_revision;
122 unsigned char firmware_revision_1;
123 unsigned char firmware_revision_2;
124 unsigned char ipmi_version;
125 unsigned char additional_device_support;
126 unsigned int manufacturer_id;
127 unsigned int product_id;
128 unsigned char aux_firmware_revision[4];
129 unsigned int aux_firmware_revision_set : 1;
130};
131
132#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
133#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
134
135/* Take a pointer to a raw data buffer and a length and extract device
136 id information from it. The first byte of data must point to the
137 byte from the get device id response after the completion code.
138 The caller is responsible for making sure the length is at least
139 11 and the command completed without error. */
140static inline void ipmi_demangle_device_id(unsigned char *data,
141 unsigned int data_len,
142 struct ipmi_device_id *id)
143{
144 id->device_id = data[0];
145 id->device_revision = data[1];
146 id->firmware_revision_1 = data[2];
147 id->firmware_revision_2 = data[3];
148 id->ipmi_version = data[4];
149 id->additional_device_support = data[5];
150 id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16);
151 id->product_id = data[9] | (data[10] << 8);
152 if (data_len >= 15) {
153 memcpy(id->aux_firmware_revision, data+11, 4);
154 id->aux_firmware_revision_set = 1;
155 } else
156 id->aux_firmware_revision_set = 0;
157}
158
116/* Add a low-level interface to the IPMI driver. Note that if the 159/* Add a low-level interface to the IPMI driver. Note that if the
117 interface doesn't know its slave address, it should pass in zero. */ 160 interface doesn't know its slave address, it should pass in zero. */
118int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 161int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
119 void *send_info, 162 void *send_info,
120 unsigned char version_major, 163 struct ipmi_device_id *device_id,
121 unsigned char version_minor, 164 struct device *dev,
122 unsigned char slave_addr, 165 unsigned char slave_addr,
123 ipmi_smi_t *intf); 166 ipmi_smi_t *intf);
124 167
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 4fc7dffd66ef..6a425e370cb3 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -895,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
895extern void journal_release_buffer (handle_t *, struct buffer_head *); 895extern void journal_release_buffer (handle_t *, struct buffer_head *);
896extern int journal_forget (handle_t *, struct buffer_head *); 896extern int journal_forget (handle_t *, struct buffer_head *);
897extern void journal_sync_buffer (struct buffer_head *); 897extern void journal_sync_buffer (struct buffer_head *);
898extern int journal_invalidatepage(journal_t *, 898extern void journal_invalidatepage(journal_t *,
899 struct page *, unsigned long); 899 struct page *, unsigned long);
900extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); 900extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
901extern int journal_stop(handle_t *); 901extern int journal_stop(handle_t *);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 03d6cfaa5b8a..a3720f973ea5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -87,7 +87,7 @@ extern int cond_resched(void);
87 (__x < 0) ? -__x : __x; \ 87 (__x < 0) ? -__x : __x; \
88 }) 88 })
89 89
90extern struct notifier_block *panic_notifier_list; 90extern struct atomic_notifier_head panic_notifier_list;
91extern long (*panic_blink)(long time); 91extern long (*panic_blink)(long time);
92NORET_TYPE void panic(const char * fmt, ...) 92NORET_TYPE void panic(const char * fmt, ...)
93 __attribute__ ((NORET_AND format (printf, 1, 2))); 93 __attribute__ ((NORET_AND format (printf, 1, 2)));
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index a484572c302e..b46249082cca 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq)
46{ 46{
47 int cpu, sum = 0; 47 int cpu, sum = 0;
48 48
49 for_each_cpu(cpu) 49 for_each_possible_cpu(cpu)
50 sum += kstat_cpu(cpu).irqs[irq]; 50 sum += kstat_cpu(cpu).irqs[irq];
51 51
52 return sum; 52 return sum;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index f3dec45ef874..62bc57580707 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -64,9 +64,6 @@ typedef union {
64 64
65#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) 65#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
66 66
67/* Define a ktime_t variable and initialize it to zero: */
68#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
69
70/** 67/**
71 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value 68 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
72 * 69 *
@@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
113/* Map the ktime_t to timeval conversion to ns_to_timeval function */ 110/* Map the ktime_t to timeval conversion to ns_to_timeval function */
114#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) 111#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
115 112
116/* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */
117#define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64)
118
119/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ 113/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
120#define ktime_to_ns(kt) ((kt).tv64) 114#define ktime_to_ns(kt) ((kt).tv64)
121 115
@@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
136 * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC 130 * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
137 */ 131 */
138 132
139/* Define a ktime_t variable and initialize it to zero: */
140#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
141
142/* Set a ktime_t variable to a value in sec/nsec representation: */ 133/* Set a ktime_t variable to a value in sec/nsec representation: */
143static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) 134static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
144{ 135{
@@ -255,17 +246,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
255} 246}
256 247
257/** 248/**
258 * ktime_to_clock_t - convert a ktime_t variable to clock_t format
259 * @kt: the ktime_t variable to convert
260 *
261 * Returns a clock_t variable with the converted value
262 */
263static inline clock_t ktime_to_clock_t(const ktime_t kt)
264{
265 return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec);
266}
267
268/**
269 * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds 249 * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
270 * @kt: the ktime_t variable to convert 250 * @kt: the ktime_t variable to convert
271 * 251 *
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
new file mode 100644
index 000000000000..9065199319d0
--- /dev/null
+++ b/include/linux/m48t86.h
@@ -0,0 +1,16 @@
1/*
2 * ST M48T86 / Dallas DS12887 RTC driver
3 * Copyright (c) 2006 Tower Technologies
4 *
5 * Author: Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12struct m48t86_ops
13{
14 void (*writeb)(unsigned char value, unsigned long addr);
15 unsigned char (*readb)(unsigned long addr);
16};
diff --git a/include/linux/memory.h b/include/linux/memory.h
index e251dc43d0f5..8f04143ca363 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int);
77 77
78#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) 78#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
79 79
80struct notifier_block;
81 80
82#endif /* CONFIG_MEMORY_HOTPLUG */ 81#endif /* CONFIG_MEMORY_HOTPLUG */
83 82
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index f2427d7394b0..9be484d11283 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -6,6 +6,8 @@
6 6
7#include <linux/wait.h> 7#include <linux/wait.h>
8 8
9struct kmem_cache;
10
9typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); 11typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
10typedef void (mempool_free_t)(void *element, void *pool_data); 12typedef void (mempool_free_t)(void *element, void *pool_data);
11 13
@@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool);
37 */ 39 */
38void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 40void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
39void mempool_free_slab(void *element, void *pool_data); 41void mempool_free_slab(void *element, void *pool_data);
42static inline mempool_t *
43mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
44{
45 return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
46 (void *) kc);
47}
48
49/*
50 * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
51 * the amount of memory specified by pool_data
52 */
53void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
54void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
55void mempool_kfree(void *element, void *pool_data);
56static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
57{
58 return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
59 (void *) size);
60}
61static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size)
62{
63 return mempool_create(min_nr, mempool_kzalloc, mempool_kfree,
64 (void *) size);
65}
66
67/*
68 * A mempool_alloc_t and mempool_free_t for a simple page allocator that
69 * allocates pages of the order specified by pool_data
70 */
71void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
72void mempool_free_pages(void *element, void *pool_data);
73static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
74{
75 return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
76 (void *)(long)order);
77}
40 78
41#endif /* _LINUX_MEMPOOL_H */ 79#endif /* _LINUX_MEMPOOL_H */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 14ceebfc1efa..5b584dafb5a6 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -36,7 +36,7 @@ struct class_device;
36struct miscdevice { 36struct miscdevice {
37 int minor; 37 int minor;
38 const char *name; 38 const char *name;
39 struct file_operations *fops; 39 const struct file_operations *fops;
40 struct list_head list; 40 struct list_head list;
41 struct device *dev; 41 struct device *dev;
42 struct class_device *class; 42 struct class_device *class;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ebfc238cc243..b5c21122c299 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -13,6 +13,7 @@
13#include <linux/numa.h> 13#include <linux/numa.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/seqlock.h> 15#include <linux/seqlock.h>
16#include <linux/nodemask.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17 18
18/* Free memory management - zoned buddy allocator. */ 19/* Free memory management - zoned buddy allocator. */
@@ -225,7 +226,6 @@ struct zone {
225 * Discontig memory support fields. 226 * Discontig memory support fields.
226 */ 227 */
227 struct pglist_data *zone_pgdat; 228 struct pglist_data *zone_pgdat;
228 struct page *zone_mem_map;
229 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ 229 /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
230 unsigned long zone_start_pfn; 230 unsigned long zone_start_pfn;
231 231
@@ -307,7 +307,6 @@ typedef struct pglist_data {
307 unsigned long node_spanned_pages; /* total size of physical page 307 unsigned long node_spanned_pages; /* total size of physical page
308 range, including holes */ 308 range, including holes */
309 int node_id; 309 int node_id;
310 struct pglist_data *pgdat_next;
311 wait_queue_head_t kswapd_wait; 310 wait_queue_head_t kswapd_wait;
312 struct task_struct *kswapd; 311 struct task_struct *kswapd;
313 int kswapd_max_order; 312 int kswapd_max_order;
@@ -324,8 +323,6 @@ typedef struct pglist_data {
324 323
325#include <linux/memory_hotplug.h> 324#include <linux/memory_hotplug.h>
326 325
327extern struct pglist_data *pgdat_list;
328
329void __get_zone_counts(unsigned long *active, unsigned long *inactive, 326void __get_zone_counts(unsigned long *active, unsigned long *inactive,
330 unsigned long *free, struct pglist_data *pgdat); 327 unsigned long *free, struct pglist_data *pgdat);
331void get_zone_counts(unsigned long *active, unsigned long *inactive, 328void get_zone_counts(unsigned long *active, unsigned long *inactive,
@@ -350,57 +347,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
350 */ 347 */
351#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) 348#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
352 349
353/**
354 * for_each_pgdat - helper macro to iterate over all nodes
355 * @pgdat - pointer to a pg_data_t variable
356 *
357 * Meant to help with common loops of the form
358 * pgdat = pgdat_list;
359 * while(pgdat) {
360 * ...
361 * pgdat = pgdat->pgdat_next;
362 * }
363 */
364#define for_each_pgdat(pgdat) \
365 for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
366
367/*
368 * next_zone - helper magic for for_each_zone()
369 * Thanks to William Lee Irwin III for this piece of ingenuity.
370 */
371static inline struct zone *next_zone(struct zone *zone)
372{
373 pg_data_t *pgdat = zone->zone_pgdat;
374
375 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
376 zone++;
377 else if (pgdat->pgdat_next) {
378 pgdat = pgdat->pgdat_next;
379 zone = pgdat->node_zones;
380 } else
381 zone = NULL;
382
383 return zone;
384}
385
386/**
387 * for_each_zone - helper macro to iterate over all memory zones
388 * @zone - pointer to struct zone variable
389 *
390 * The user only needs to declare the zone variable, for_each_zone
391 * fills it in. This basically means for_each_zone() is an
392 * easier to read version of this piece of code:
393 *
394 * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
395 * for (i = 0; i < MAX_NR_ZONES; ++i) {
396 * struct zone * z = pgdat->node_zones + i;
397 * ...
398 * }
399 * }
400 */
401#define for_each_zone(zone) \
402 for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
403
404static inline int populated_zone(struct zone *zone) 350static inline int populated_zone(struct zone *zone)
405{ 351{
406 return (!!zone->present_pages); 352 return (!!zone->present_pages);
@@ -472,6 +418,30 @@ extern struct pglist_data contig_page_data;
472 418
473#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 419#endif /* !CONFIG_NEED_MULTIPLE_NODES */
474 420
421extern struct pglist_data *first_online_pgdat(void);
422extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
423extern struct zone *next_zone(struct zone *zone);
424
425/**
426 * for_each_pgdat - helper macro to iterate over all nodes
427 * @pgdat - pointer to a pg_data_t variable
428 */
429#define for_each_online_pgdat(pgdat) \
430 for (pgdat = first_online_pgdat(); \
431 pgdat; \
432 pgdat = next_online_pgdat(pgdat))
433/**
434 * for_each_zone - helper macro to iterate over all memory zones
435 * @zone - pointer to struct zone variable
436 *
437 * The user only needs to declare the zone variable, for_each_zone
438 * fills it in.
439 */
440#define for_each_zone(zone) \
441 for (zone = (first_online_pgdat())->node_zones; \
442 zone; \
443 zone = next_zone(zone))
444
475#ifdef CONFIG_SPARSEMEM 445#ifdef CONFIG_SPARSEMEM
476#include <asm/sparsemem.h> 446#include <asm/sparsemem.h>
477#endif 447#endif
@@ -602,17 +572,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
602 return __nr_to_section(pfn_to_section_nr(pfn)); 572 return __nr_to_section(pfn_to_section_nr(pfn));
603} 573}
604 574
605#define pfn_to_page(pfn) \
606({ \
607 unsigned long __pfn = (pfn); \
608 __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \
609})
610#define page_to_pfn(page) \
611({ \
612 page - __section_mem_map_addr(__nr_to_section( \
613 page_to_section(page))); \
614})
615
616static inline int pfn_valid(unsigned long pfn) 575static inline int pfn_valid(unsigned long pfn)
617{ 576{
618 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) 577 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 779e6a5744c7..d9035c73e5d1 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -334,7 +334,7 @@ extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
334 unsigned long *mapped_blocks); 334 unsigned long *mapped_blocks);
335 335
336/* fat/dir.c */ 336/* fat/dir.c */
337extern struct file_operations fat_dir_operations; 337extern const struct file_operations fat_dir_operations;
338extern int fat_search_long(struct inode *inode, const unsigned char *name, 338extern int fat_search_long(struct inode *inode, const unsigned char *name,
339 int name_len, struct fat_slot_info *sinfo); 339 int name_len, struct fat_slot_info *sinfo);
340extern int fat_dir_empty(struct inode *dir); 340extern int fat_dir_empty(struct inode *dir);
@@ -397,7 +397,7 @@ extern int fat_count_free_clusters(struct super_block *sb);
397/* fat/file.c */ 397/* fat/file.c */
398extern int fat_generic_ioctl(struct inode *inode, struct file *filp, 398extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
399 unsigned int cmd, unsigned long arg); 399 unsigned int cmd, unsigned long arg);
400extern struct file_operations fat_file_operations; 400extern const struct file_operations fat_file_operations;
401extern struct inode_operations fat_file_inode_operations; 401extern struct inode_operations fat_file_inode_operations;
402extern int fat_notify_change(struct dentry * dentry, struct iattr * attr); 402extern int fat_notify_change(struct dentry * dentry, struct iattr * attr);
403extern void fat_truncate(struct inode *inode); 403extern void fat_truncate(struct inode *inode);
@@ -420,6 +420,9 @@ extern int date_dos2unix(unsigned short time, unsigned short date);
420extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date); 420extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date);
421extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); 421extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
422 422
423int fat_cache_init(void);
424void fat_cache_destroy(void);
425
423#endif /* __KERNEL__ */ 426#endif /* __KERNEL__ */
424 427
425#endif 428#endif
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index e01342568530..96dc237b8f03 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -209,7 +209,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
209 209
210/* linux/fs/ncpfs/dir.c */ 210/* linux/fs/ncpfs/dir.c */
211extern struct inode_operations ncp_dir_inode_operations; 211extern struct inode_operations ncp_dir_inode_operations;
212extern struct file_operations ncp_dir_operations; 212extern const struct file_operations ncp_dir_operations;
213int ncp_conn_logged_in(struct super_block *); 213int ncp_conn_logged_in(struct super_block *);
214int ncp_date_dos2unix(__le16 time, __le16 date); 214int ncp_date_dos2unix(__le16 time, __le16 date);
215void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); 215void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
@@ -230,7 +230,7 @@ void ncp_unlock_server(struct ncp_server *server);
230 230
231/* linux/fs/ncpfs/file.c */ 231/* linux/fs/ncpfs/file.c */
232extern struct inode_operations ncp_file_inode_operations; 232extern struct inode_operations ncp_file_inode_operations;
233extern struct file_operations ncp_file_operations; 233extern const struct file_operations ncp_file_operations;
234int ncp_make_open(struct inode *, int); 234int ncp_make_open(struct inode *, int);
235 235
236/* linux/fs/ncpfs/mmap.c */ 236/* linux/fs/ncpfs/mmap.c */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index f32d75c4f4cf..d54d7b278e96 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
308 308
309#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x) 309#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x)
310 310
311extern struct notifier_block *ip_conntrack_chain; 311extern struct atomic_notifier_head ip_conntrack_chain;
312extern struct notifier_block *ip_conntrack_expect_chain; 312extern struct atomic_notifier_head ip_conntrack_expect_chain;
313 313
314static inline int ip_conntrack_register_notifier(struct notifier_block *nb) 314static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
315{ 315{
316 return notifier_chain_register(&ip_conntrack_chain, nb); 316 return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
317} 317}
318 318
319static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb) 319static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
320{ 320{
321 return notifier_chain_unregister(&ip_conntrack_chain, nb); 321 return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
322} 322}
323 323
324static inline int 324static inline int
325ip_conntrack_expect_register_notifier(struct notifier_block *nb) 325ip_conntrack_expect_register_notifier(struct notifier_block *nb)
326{ 326{
327 return notifier_chain_register(&ip_conntrack_expect_chain, nb); 327 return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
328} 328}
329 329
330static inline int 330static inline int
331ip_conntrack_expect_unregister_notifier(struct notifier_block *nb) 331ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
332{ 332{
333 return notifier_chain_unregister(&ip_conntrack_expect_chain, nb); 333 return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
334 nb);
334} 335}
335 336
336extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct); 337extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
@@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event,
355 struct ip_conntrack *ct) 356 struct ip_conntrack *ct)
356{ 357{
357 if (is_confirmed(ct) && !is_dying(ct)) 358 if (is_confirmed(ct) && !is_dying(ct))
358 notifier_call_chain(&ip_conntrack_chain, event, ct); 359 atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
359} 360}
360 361
361static inline void 362static inline void
362ip_conntrack_expect_event(enum ip_conntrack_expect_events event, 363ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
363 struct ip_conntrack_expect *exp) 364 struct ip_conntrack_expect *exp)
364{ 365{
365 notifier_call_chain(&ip_conntrack_expect_chain, event, exp); 366 atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
366} 367}
367#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */ 368#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
368static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, 369static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index cbebd7d1b9e8..c71227dd4389 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -324,7 +324,7 @@ extern struct inode_operations nfs_file_inode_operations;
324#ifdef CONFIG_NFS_V3 324#ifdef CONFIG_NFS_V3
325extern struct inode_operations nfs3_file_inode_operations; 325extern struct inode_operations nfs3_file_inode_operations;
326#endif /* CONFIG_NFS_V3 */ 326#endif /* CONFIG_NFS_V3 */
327extern struct file_operations nfs_file_operations; 327extern const struct file_operations nfs_file_operations;
328extern struct address_space_operations nfs_file_aops; 328extern struct address_space_operations nfs_file_aops;
329 329
330static inline struct rpc_cred *nfs_file_cred(struct file *file) 330static inline struct rpc_cred *nfs_file_cred(struct file *file)
@@ -371,7 +371,7 @@ extern struct inode_operations nfs_dir_inode_operations;
371#ifdef CONFIG_NFS_V3 371#ifdef CONFIG_NFS_V3
372extern struct inode_operations nfs3_dir_inode_operations; 372extern struct inode_operations nfs3_dir_inode_operations;
373#endif /* CONFIG_NFS_V3 */ 373#endif /* CONFIG_NFS_V3 */
374extern struct file_operations nfs_dir_operations; 374extern const struct file_operations nfs_dir_operations;
375extern struct dentry_operations nfs_dentry_operations; 375extern struct dentry_operations nfs_dentry_operations;
376 376
377extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); 377extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 6bad4766d3d9..d2a8abb5011a 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -67,7 +67,8 @@ struct svc_expkey {
67 int ek_fsidtype; 67 int ek_fsidtype;
68 u32 ek_fsid[3]; 68 u32 ek_fsid[3];
69 69
70 struct svc_export * ek_export; 70 struct vfsmount * ek_mnt;
71 struct dentry * ek_dentry;
71}; 72};
72 73
73#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) 74#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
@@ -85,9 +86,6 @@ void nfsd_export_shutdown(void);
85void nfsd_export_flush(void); 86void nfsd_export_flush(void);
86void exp_readlock(void); 87void exp_readlock(void);
87void exp_readunlock(void); 88void exp_readunlock(void);
88struct svc_expkey * exp_find_key(struct auth_domain *clp,
89 int fsid_type, u32 *fsidv,
90 struct cache_req *reqp);
91struct svc_export * exp_get_by_name(struct auth_domain *clp, 89struct svc_export * exp_get_by_name(struct auth_domain *clp,
92 struct vfsmount *mnt, 90 struct vfsmount *mnt,
93 struct dentry *dentry, 91 struct dentry *dentry,
@@ -101,35 +99,20 @@ int exp_rootfh(struct auth_domain *,
101int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq); 99int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq);
102int nfserrno(int errno); 100int nfserrno(int errno);
103 101
104extern void expkey_put(struct cache_head *item, struct cache_detail *cd); 102extern struct cache_detail svc_export_cache;
105extern void svc_export_put(struct cache_head *item, struct cache_detail *cd);
106extern struct cache_detail svc_export_cache, svc_expkey_cache;
107 103
108static inline void exp_put(struct svc_export *exp) 104static inline void exp_put(struct svc_export *exp)
109{ 105{
110 svc_export_put(&exp->h, &svc_export_cache); 106 cache_put(&exp->h, &svc_export_cache);
111} 107}
112 108
113static inline void exp_get(struct svc_export *exp) 109static inline void exp_get(struct svc_export *exp)
114{ 110{
115 cache_get(&exp->h); 111 cache_get(&exp->h);
116} 112}
117static inline struct svc_export * 113extern struct svc_export *
118exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, 114exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
119 struct cache_req *reqp) 115 struct cache_req *reqp);
120{
121 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
122 if (ek && !IS_ERR(ek)) {
123 struct svc_export *exp = ek->ek_export;
124 int err;
125 exp_get(exp);
126 expkey_put(&ek->h, &svc_expkey_cache);
127 if ((err = cache_check(&svc_export_cache, &exp->h, reqp)))
128 exp = ERR_PTR(err);
129 return exp;
130 } else
131 return ERR_PTR(PTR_ERR(ek));
132}
133 116
134#endif /* __KERNEL__ */ 117#endif /* __KERNEL__ */
135 118
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b959a4525cbd..1a9ef3e627d1 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -350,11 +350,15 @@ extern nodemask_t node_possible_map;
350#define num_possible_nodes() nodes_weight(node_possible_map) 350#define num_possible_nodes() nodes_weight(node_possible_map)
351#define node_online(node) node_isset((node), node_online_map) 351#define node_online(node) node_isset((node), node_online_map)
352#define node_possible(node) node_isset((node), node_possible_map) 352#define node_possible(node) node_isset((node), node_possible_map)
353#define first_online_node first_node(node_online_map)
354#define next_online_node(nid) next_node((nid), node_online_map)
353#else 355#else
354#define num_online_nodes() 1 356#define num_online_nodes() 1
355#define num_possible_nodes() 1 357#define num_possible_nodes() 1
356#define node_online(node) ((node) == 0) 358#define node_online(node) ((node) == 0)
357#define node_possible(node) ((node) == 0) 359#define node_possible(node) ((node) == 0)
360#define first_online_node 0
361#define next_online_node(nid) (MAX_NUMNODES)
358#endif 362#endif
359 363
360#define any_online_node(mask) \ 364#define any_online_node(mask) \
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 5937dd6053c3..51dbab9710c7 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -10,25 +10,107 @@
10#ifndef _LINUX_NOTIFIER_H 10#ifndef _LINUX_NOTIFIER_H
11#define _LINUX_NOTIFIER_H 11#define _LINUX_NOTIFIER_H
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/mutex.h>
14#include <linux/rwsem.h>
13 15
14struct notifier_block 16/*
15{ 17 * Notifier chains are of three types:
16 int (*notifier_call)(struct notifier_block *self, unsigned long, void *); 18 *
19 * Atomic notifier chains: Chain callbacks run in interrupt/atomic
20 * context. Callouts are not allowed to block.
21 * Blocking notifier chains: Chain callbacks run in process context.
22 * Callouts are allowed to block.
23 * Raw notifier chains: There are no restrictions on callbacks,
24 * registration, or unregistration. All locking and protection
25 * must be provided by the caller.
26 *
27 * atomic_notifier_chain_register() may be called from an atomic context,
28 * but blocking_notifier_chain_register() must be called from a process
29 * context. Ditto for the corresponding _unregister() routines.
30 *
31 * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister()
32 * _must not_ be called from within the call chain.
33 */
34
35struct notifier_block {
36 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
17 struct notifier_block *next; 37 struct notifier_block *next;
18 int priority; 38 int priority;
19}; 39};
20 40
41struct atomic_notifier_head {
42 spinlock_t lock;
43 struct notifier_block *head;
44};
45
46struct blocking_notifier_head {
47 struct rw_semaphore rwsem;
48 struct notifier_block *head;
49};
50
51struct raw_notifier_head {
52 struct notifier_block *head;
53};
54
55#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
56 spin_lock_init(&(name)->lock); \
57 (name)->head = NULL; \
58 } while (0)
59#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
60 init_rwsem(&(name)->rwsem); \
61 (name)->head = NULL; \
62 } while (0)
63#define RAW_INIT_NOTIFIER_HEAD(name) do { \
64 (name)->head = NULL; \
65 } while (0)
66
67#define ATOMIC_NOTIFIER_INIT(name) { \
68 .lock = SPIN_LOCK_UNLOCKED, \
69 .head = NULL }
70#define BLOCKING_NOTIFIER_INIT(name) { \
71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
72 .head = NULL }
73#define RAW_NOTIFIER_INIT(name) { \
74 .head = NULL }
75
76#define ATOMIC_NOTIFIER_HEAD(name) \
77 struct atomic_notifier_head name = \
78 ATOMIC_NOTIFIER_INIT(name)
79#define BLOCKING_NOTIFIER_HEAD(name) \
80 struct blocking_notifier_head name = \
81 BLOCKING_NOTIFIER_INIT(name)
82#define RAW_NOTIFIER_HEAD(name) \
83 struct raw_notifier_head name = \
84 RAW_NOTIFIER_INIT(name)
21 85
22#ifdef __KERNEL__ 86#ifdef __KERNEL__
23 87
24extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n); 88extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
25extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n); 89 struct notifier_block *);
26extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v); 90extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
91 struct notifier_block *);
92extern int raw_notifier_chain_register(struct raw_notifier_head *,
93 struct notifier_block *);
94
95extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
96 struct notifier_block *);
97extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
98 struct notifier_block *);
99extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
100 struct notifier_block *);
101
102extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
103 unsigned long val, void *v);
104extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
105 unsigned long val, void *v);
106extern int raw_notifier_call_chain(struct raw_notifier_head *,
107 unsigned long val, void *v);
27 108
28#define NOTIFY_DONE 0x0000 /* Don't care */ 109#define NOTIFY_DONE 0x0000 /* Don't care */
29#define NOTIFY_OK 0x0001 /* Suits me */ 110#define NOTIFY_OK 0x0001 /* Suits me */
30#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ 111#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
31#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */ 112#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
113 /* Bad/Veto action */
32/* 114/*
33 * Clean way to return from the notifier and stop further calls. 115 * Clean way to return from the notifier and stop further calls.
34 */ 116 */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 559c4c38a9c7..0d514b252454 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -61,6 +61,16 @@ void oprofile_arch_exit(void);
61 */ 61 */
62void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); 62void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
63 63
64/**
65 * Add an extended sample. Use this when the PC is not from the regs, and
66 * we cannot determine if we're in kernel mode from the regs.
67 *
68 * This function does perform a backtrace.
69 *
70 */
71void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
72 unsigned long event, int is_kernel);
73
64/* Use this instead when the PC value is not from the regs. Doesn't 74/* Use this instead when the PC value is not from the regs. Doesn't
65 * backtrace. */ 75 * backtrace. */
66void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); 76void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
@@ -74,10 +84,10 @@ void oprofile_add_trace(unsigned long eip);
74 * the specified file operations. 84 * the specified file operations.
75 */ 85 */
76int oprofilefs_create_file(struct super_block * sb, struct dentry * root, 86int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
77 char const * name, struct file_operations * fops); 87 char const * name, const struct file_operations * fops);
78 88
79int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, 89int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
80 char const * name, struct file_operations * fops, int perm); 90 char const * name, const struct file_operations * fops, int perm);
81 91
82/** Create a file for read/write access to an unsigned long. */ 92/** Create a file for read/write access to an unsigned long. */
83int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, 93int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6f080ae59286..e2ab2ac18d6b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -642,6 +642,7 @@
642#define PCI_DEVICE_ID_SI_965 0x0965 642#define PCI_DEVICE_ID_SI_965 0x0965
643#define PCI_DEVICE_ID_SI_5511 0x5511 643#define PCI_DEVICE_ID_SI_5511 0x5511
644#define PCI_DEVICE_ID_SI_5513 0x5513 644#define PCI_DEVICE_ID_SI_5513 0x5513
645#define PCI_DEVICE_ID_SI_5517 0x5517
645#define PCI_DEVICE_ID_SI_5518 0x5518 646#define PCI_DEVICE_ID_SI_5518 0x5518
646#define PCI_DEVICE_ID_SI_5571 0x5571 647#define PCI_DEVICE_ID_SI_5571 0x5571
647#define PCI_DEVICE_ID_SI_5581 0x5581 648#define PCI_DEVICE_ID_SI_5581 0x5581
@@ -1052,6 +1053,7 @@
1052#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 1053#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
1053#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 1054#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
1054#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 1055#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
1056#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
1055#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 1057#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
1056#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 1058#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
1057#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 1059#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
new file mode 100644
index 000000000000..bb01f8b92b56
--- /dev/null
+++ b/include/linux/pfn.h
@@ -0,0 +1,9 @@
1#ifndef _LINUX_PFN_H_
2#define _LINUX_PFN_H_
3
4#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
5#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
6#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
7#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
8
9#endif
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 8e8f6098508a..51e1b56741fb 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -11,6 +11,15 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13 13
14/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
15 additional memory. */
16#define MAX_STACK_ALLOC 832
17#define FRONTEND_STACK_ALLOC 256
18#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
19#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
20#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
21#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
22
14struct poll_table_struct; 23struct poll_table_struct;
15 24
16/* 25/*
@@ -33,6 +42,12 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
33 pt->qproc = qproc; 42 pt->qproc = qproc;
34} 43}
35 44
45struct poll_table_entry {
46 struct file * filp;
47 wait_queue_t wait;
48 wait_queue_head_t * wait_address;
49};
50
36/* 51/*
37 * Structures and helpers for sys_poll/sys_poll 52 * Structures and helpers for sys_poll/sys_poll
38 */ 53 */
@@ -40,6 +55,8 @@ struct poll_wqueues {
40 poll_table pt; 55 poll_table pt;
41 struct poll_table_page * table; 56 struct poll_table_page * table;
42 int error; 57 int error;
58 int inline_index;
59 struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
43}; 60};
44 61
45extern void poll_initwait(struct poll_wqueues *pwq); 62extern void poll_initwait(struct poll_wqueues *pwq);
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index aa6322d45198..135871df9911 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -4,6 +4,7 @@
4#include <linux/config.h> 4#include <linux/config.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/fs.h> 6#include <linux/fs.h>
7#include <linux/spinlock.h>
7#include <asm/atomic.h> 8#include <asm/atomic.h>
8 9
9/* 10/*
@@ -55,9 +56,9 @@ struct proc_dir_entry {
55 nlink_t nlink; 56 nlink_t nlink;
56 uid_t uid; 57 uid_t uid;
57 gid_t gid; 58 gid_t gid;
58 unsigned long size; 59 loff_t size;
59 struct inode_operations * proc_iops; 60 struct inode_operations * proc_iops;
60 struct file_operations * proc_fops; 61 const struct file_operations * proc_fops;
61 get_info_t *get_info; 62 get_info_t *get_info;
62 struct module *owner; 63 struct module *owner;
63 struct proc_dir_entry *next, *parent, *subdir; 64 struct proc_dir_entry *next, *parent, *subdir;
@@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus;
92extern struct proc_dir_entry *proc_root_driver; 93extern struct proc_dir_entry *proc_root_driver;
93extern struct proc_dir_entry *proc_root_kcore; 94extern struct proc_dir_entry *proc_root_kcore;
94 95
96extern spinlock_t proc_subdir_lock;
97
95extern void proc_root_init(void); 98extern void proc_root_init(void);
96extern void proc_misc_init(void); 99extern void proc_misc_init(void);
97 100
@@ -125,9 +128,9 @@ extern int proc_match(int, const char *,struct proc_dir_entry *);
125extern int proc_readdir(struct file *, void *, filldir_t); 128extern int proc_readdir(struct file *, void *, filldir_t);
126extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); 129extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
127 130
128extern struct file_operations proc_kcore_operations; 131extern const struct file_operations proc_kcore_operations;
129extern struct file_operations proc_kmsg_operations; 132extern const struct file_operations proc_kmsg_operations;
130extern struct file_operations ppc_htab_operations; 133extern const struct file_operations ppc_htab_operations;
131 134
132/* 135/*
133 * proc_tty.c 136 * proc_tty.c
@@ -186,7 +189,7 @@ static inline struct proc_dir_entry *proc_net_create(const char *name,
186} 189}
187 190
188static inline struct proc_dir_entry *proc_net_fops_create(const char *name, 191static inline struct proc_dir_entry *proc_net_fops_create(const char *name,
189 mode_t mode, struct file_operations *fops) 192 mode_t mode, const struct file_operations *fops)
190{ 193{
191 struct proc_dir_entry *res = create_proc_entry(name, mode, proc_net); 194 struct proc_dir_entry *res = create_proc_entry(name, mode, proc_net);
192 if (res) 195 if (res)
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index fc610bb0f733..27f49c85d5d6 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -118,8 +118,8 @@ extern struct buffer_head *qnx4_bread(struct inode *, int, int);
118 118
119extern struct inode_operations qnx4_file_inode_operations; 119extern struct inode_operations qnx4_file_inode_operations;
120extern struct inode_operations qnx4_dir_inode_operations; 120extern struct inode_operations qnx4_dir_inode_operations;
121extern struct file_operations qnx4_file_operations; 121extern const struct file_operations qnx4_file_operations;
122extern struct file_operations qnx4_dir_operations; 122extern const struct file_operations qnx4_dir_operations;
123extern int qnx4_is_free(struct super_block *sb, long block); 123extern int qnx4_is_free(struct super_block *sb, long block);
124extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); 124extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
125extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); 125extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd);
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index b6e0bcad84e1..66b44e5e0d6e 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -92,7 +92,10 @@ extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
92extern void md_super_wait(mddev_t *mddev); 92extern void md_super_wait(mddev_t *mddev);
93extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, 93extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
94 struct page *page, int rw); 94 struct page *page, int rw);
95extern void md_do_sync(mddev_t *mddev);
96extern void md_new_event(mddev_t *mddev);
95 97
98extern void md_update_sb(mddev_t * mddev);
96 99
97#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } 100#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
98 101
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 617b9506c760..e2df61f5b09a 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -132,6 +132,14 @@ struct mddev_s
132 132
133 char uuid[16]; 133 char uuid[16];
134 134
135 /* If the array is being reshaped, we need to record the
136 * new shape and an indication of where we are up to.
137 * This is written to the superblock.
138 * If reshape_position is MaxSector, then no reshape is happening (yet).
139 */
140 sector_t reshape_position;
141 int delta_disks, new_level, new_layout, new_chunk;
142
135 struct mdk_thread_s *thread; /* management thread */ 143 struct mdk_thread_s *thread; /* management thread */
136 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ 144 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
137 sector_t curr_resync; /* blocks scheduled */ 145 sector_t curr_resync; /* blocks scheduled */
@@ -143,6 +151,10 @@ struct mddev_s
143 sector_t resync_mismatches; /* count of sectors where 151 sector_t resync_mismatches; /* count of sectors where
144 * parity/replica mismatch found 152 * parity/replica mismatch found
145 */ 153 */
154
155 /* allow user-space to request suspension of IO to regions of the array */
156 sector_t suspend_lo;
157 sector_t suspend_hi;
146 /* if zero, use the system-wide default */ 158 /* if zero, use the system-wide default */
147 int sync_speed_min; 159 int sync_speed_min;
148 int sync_speed_max; 160 int sync_speed_max;
@@ -157,6 +169,9 @@ struct mddev_s
157 * DONE: thread is done and is waiting to be reaped 169 * DONE: thread is done and is waiting to be reaped
158 * REQUEST: user-space has requested a sync (used with SYNC) 170 * REQUEST: user-space has requested a sync (used with SYNC)
159 * CHECK: user-space request for for check-only, no repair 171 * CHECK: user-space request for for check-only, no repair
172 * RESHAPE: A reshape is happening
173 *
174 * If neither SYNC or RESHAPE are set, then it is a recovery.
160 */ 175 */
161#define MD_RECOVERY_RUNNING 0 176#define MD_RECOVERY_RUNNING 0
162#define MD_RECOVERY_SYNC 1 177#define MD_RECOVERY_SYNC 1
@@ -166,10 +181,11 @@ struct mddev_s
166#define MD_RECOVERY_NEEDED 5 181#define MD_RECOVERY_NEEDED 5
167#define MD_RECOVERY_REQUESTED 6 182#define MD_RECOVERY_REQUESTED 6
168#define MD_RECOVERY_CHECK 7 183#define MD_RECOVERY_CHECK 7
184#define MD_RECOVERY_RESHAPE 8
169 unsigned long recovery; 185 unsigned long recovery;
170 186
171 int in_sync; /* know to not need resync */ 187 int in_sync; /* know to not need resync */
172 struct semaphore reconfig_sem; 188 struct mutex reconfig_mutex;
173 atomic_t active; 189 atomic_t active;
174 190
175 int changed; /* true if we might need to reread partition info */ 191 int changed; /* true if we might need to reread partition info */
@@ -249,7 +265,8 @@ struct mdk_personality
249 int (*spare_active) (mddev_t *mddev); 265 int (*spare_active) (mddev_t *mddev);
250 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); 266 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
251 int (*resize) (mddev_t *mddev, sector_t sectors); 267 int (*resize) (mddev_t *mddev, sector_t sectors);
252 int (*reshape) (mddev_t *mddev, int raid_disks); 268 int (*check_reshape) (mddev_t *mddev);
269 int (*start_reshape) (mddev_t *mddev);
253 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); 270 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
254 /* quiesce moves between quiescence states 271 /* quiesce moves between quiescence states
255 * 0 - fully active 272 * 0 - fully active
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index c100fa5d4bfa..774e1acfb8c4 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -102,6 +102,18 @@ typedef struct mdp_device_descriptor_s {
102#define MD_SB_ERRORS 1 102#define MD_SB_ERRORS 1
103 103
104#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ 104#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
105
106/*
107 * Notes:
108 * - if an array is being reshaped (restriped) in order to change the
109 * the number of active devices in the array, 'raid_disks' will be
110 * the larger of the old and new numbers. 'delta_disks' will
111 * be the "new - old". So if +ve, raid_disks is the new value, and
112 * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the
113 * old value and "raid_disks+delta_disks" is the new (smaller) value.
114 */
115
116
105typedef struct mdp_superblock_s { 117typedef struct mdp_superblock_s {
106 /* 118 /*
107 * Constant generic information 119 * Constant generic information
@@ -146,7 +158,13 @@ typedef struct mdp_superblock_s {
146 __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ 158 __u32 cp_events_hi; /* 10 high-order of checkpoint update count */
147#endif 159#endif
148 __u32 recovery_cp; /* 11 recovery checkpoint sector count */ 160 __u32 recovery_cp; /* 11 recovery checkpoint sector count */
149 __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 12]; 161 /* There are only valid for minor_version > 90 */
162 __u64 reshape_position; /* 12,13 next address in array-space for reshape */
163 __u32 new_level; /* 14 new level we are reshaping to */
164 __u32 delta_disks; /* 15 change in number of raid_disks */
165 __u32 new_layout; /* 16 new layout */
166 __u32 new_chunk; /* 17 new chunk size (bytes) */
167 __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
150 168
151 /* 169 /*
152 * Personality information 170 * Personality information
@@ -207,7 +225,14 @@ struct mdp_superblock_1 {
207 * NOTE: signed, so bitmap can be before superblock 225 * NOTE: signed, so bitmap can be before superblock
208 * only meaningful of feature_map[0] is set. 226 * only meaningful of feature_map[0] is set.
209 */ 227 */
210 __u8 pad1[128-100]; /* set to 0 when written */ 228
229 /* These are only valid with feature bit '4' */
230 __u64 reshape_position; /* next address in array-space for reshape */
231 __u32 new_level; /* new level we are reshaping to */
232 __u32 delta_disks; /* change in number of raid_disks */
233 __u32 new_layout; /* new layout */
234 __u32 new_chunk; /* new chunk size (bytes) */
235 __u8 pad1[128-124]; /* set to 0 when written */
211 236
212 /* constant this-device information - 64 bytes */ 237 /* constant this-device information - 64 bytes */
213 __u64 data_offset; /* sector start of data, often 0 */ 238 __u64 data_offset; /* sector start of data, often 0 */
@@ -240,8 +265,9 @@ struct mdp_superblock_1 {
240 265
241/* feature_map bits */ 266/* feature_map bits */
242#define MD_FEATURE_BITMAP_OFFSET 1 267#define MD_FEATURE_BITMAP_OFFSET 1
268#define MD_FEATURE_RESHAPE_ACTIVE 4
243 269
244#define MD_FEATURE_ALL 1 270#define MD_FEATURE_ALL 5
245 271
246#endif 272#endif
247 273
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 394da8207b34..914af667044f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -135,6 +135,7 @@ struct stripe_head {
135 atomic_t count; /* nr of active thread/requests */ 135 atomic_t count; /* nr of active thread/requests */
136 spinlock_t lock; 136 spinlock_t lock;
137 int bm_seq; /* sequence number for bitmap flushes */ 137 int bm_seq; /* sequence number for bitmap flushes */
138 int disks; /* disks in stripe */
138 struct r5dev { 139 struct r5dev {
139 struct bio req; 140 struct bio req;
140 struct bio_vec vec; 141 struct bio_vec vec;
@@ -156,6 +157,7 @@ struct stripe_head {
156#define R5_ReadError 8 /* seen a read error here recently */ 157#define R5_ReadError 8 /* seen a read error here recently */
157#define R5_ReWrite 9 /* have tried to over-write the readerror */ 158#define R5_ReWrite 9 /* have tried to over-write the readerror */
158 159
160#define R5_Expanded 10 /* This block now has post-expand data */
159/* 161/*
160 * Write method 162 * Write method
161 */ 163 */
@@ -174,7 +176,9 @@ struct stripe_head {
174#define STRIPE_DELAYED 6 176#define STRIPE_DELAYED 6
175#define STRIPE_DEGRADED 7 177#define STRIPE_DEGRADED 7
176#define STRIPE_BIT_DELAY 8 178#define STRIPE_BIT_DELAY 8
177 179#define STRIPE_EXPANDING 9
180#define STRIPE_EXPAND_SOURCE 10
181#define STRIPE_EXPAND_READY 11
178/* 182/*
179 * Plugging: 183 * Plugging:
180 * 184 *
@@ -211,12 +215,24 @@ struct raid5_private_data {
211 int raid_disks, working_disks, failed_disks; 215 int raid_disks, working_disks, failed_disks;
212 int max_nr_stripes; 216 int max_nr_stripes;
213 217
218 /* used during an expand */
219 sector_t expand_progress; /* MaxSector when no expand happening */
220 sector_t expand_lo; /* from here up to expand_progress it out-of-bounds
221 * as we haven't flushed the metadata yet
222 */
223 int previous_raid_disks;
224
214 struct list_head handle_list; /* stripes needing handling */ 225 struct list_head handle_list; /* stripes needing handling */
215 struct list_head delayed_list; /* stripes that have plugged requests */ 226 struct list_head delayed_list; /* stripes that have plugged requests */
216 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ 227 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
217 atomic_t preread_active_stripes; /* stripes with scheduled io */ 228 atomic_t preread_active_stripes; /* stripes with scheduled io */
218 229
219 char cache_name[20]; 230 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
231 /* unfortunately we need two cache names as we temporarily have
232 * two caches.
233 */
234 int active_name;
235 char cache_name[2][20];
220 kmem_cache_t *slab_cache; /* for allocating stripes */ 236 kmem_cache_t *slab_cache; /* for allocating stripes */
221 237
222 int seq_flush, seq_write; 238 int seq_flush, seq_write;
@@ -238,9 +254,10 @@ struct raid5_private_data {
238 wait_queue_head_t wait_for_overlap; 254 wait_queue_head_t wait_for_overlap;
239 int inactive_blocked; /* release of inactive stripes blocked, 255 int inactive_blocked; /* release of inactive stripes blocked,
240 * waiting for 25% to be free 256 * waiting for 25% to be free
241 */ 257 */
258 int pool_size; /* number of disks in stripeheads in pool */
242 spinlock_t device_lock; 259 spinlock_t device_lock;
243 struct disk_info disks[0]; 260 struct disk_info *disks;
244}; 261};
245 262
246typedef struct raid5_private_data raid5_conf_t; 263typedef struct raid5_private_data raid5_conf_t;
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 953b6df5d037..78ecfa28b1c2 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -15,7 +15,7 @@ extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
15extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); 15extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
16#endif 16#endif
17 17
18extern struct file_operations ramfs_file_operations; 18extern const struct file_operations ramfs_file_operations;
19extern struct vm_operations_struct generic_file_vm_ops; 19extern struct vm_operations_struct generic_file_vm_ops;
20 20
21#endif 21#endif
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 912f1b7cb18f..5676c4210e2c 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1960,7 +1960,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
1960extern struct inode_operations reiserfs_dir_inode_operations; 1960extern struct inode_operations reiserfs_dir_inode_operations;
1961extern struct inode_operations reiserfs_symlink_inode_operations; 1961extern struct inode_operations reiserfs_symlink_inode_operations;
1962extern struct inode_operations reiserfs_special_inode_operations; 1962extern struct inode_operations reiserfs_special_inode_operations;
1963extern struct file_operations reiserfs_dir_operations; 1963extern const struct file_operations reiserfs_dir_operations;
1964 1964
1965/* tail_conversion.c */ 1965/* tail_conversion.c */
1966int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, 1966int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
@@ -1972,7 +1972,7 @@ void reiserfs_unmap_buffer(struct buffer_head *);
1972 1972
1973/* file.c */ 1973/* file.c */
1974extern struct inode_operations reiserfs_file_inode_operations; 1974extern struct inode_operations reiserfs_file_inode_operations;
1975extern struct file_operations reiserfs_file_operations; 1975extern const struct file_operations reiserfs_file_operations;
1976extern struct address_space_operations reiserfs_address_space_operations; 1976extern struct address_space_operations reiserfs_address_space_operations;
1977 1977
1978/* fix_nodes.c */ 1978/* fix_nodes.c */
diff --git a/include/linux/relayfs_fs.h b/include/linux/relayfs_fs.h
deleted file mode 100644
index 7342e66247fb..000000000000
--- a/include/linux/relayfs_fs.h
+++ /dev/null
@@ -1,287 +0,0 @@
1/*
2 * linux/include/linux/relayfs_fs.h
3 *
4 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
6 *
7 * RelayFS definitions and declarations
8 */
9
10#ifndef _LINUX_RELAYFS_FS_H
11#define _LINUX_RELAYFS_FS_H
12
13#include <linux/config.h>
14#include <linux/types.h>
15#include <linux/sched.h>
16#include <linux/wait.h>
17#include <linux/list.h>
18#include <linux/fs.h>
19#include <linux/poll.h>
20#include <linux/kref.h>
21
22/*
23 * Tracks changes to rchan/rchan_buf structs
24 */
25#define RELAYFS_CHANNEL_VERSION 6
26
27/*
28 * Per-cpu relay channel buffer
29 */
30struct rchan_buf
31{
32 void *start; /* start of channel buffer */
33 void *data; /* start of current sub-buffer */
34 size_t offset; /* current offset into sub-buffer */
35 size_t subbufs_produced; /* count of sub-buffers produced */
36 size_t subbufs_consumed; /* count of sub-buffers consumed */
37 struct rchan *chan; /* associated channel */
38 wait_queue_head_t read_wait; /* reader wait queue */
39 struct work_struct wake_readers; /* reader wake-up work struct */
40 struct dentry *dentry; /* channel file dentry */
41 struct kref kref; /* channel buffer refcount */
42 struct page **page_array; /* array of current buffer pages */
43 unsigned int page_count; /* number of current buffer pages */
44 unsigned int finalized; /* buffer has been finalized */
45 size_t *padding; /* padding counts per sub-buffer */
46 size_t prev_padding; /* temporary variable */
47 size_t bytes_consumed; /* bytes consumed in cur read subbuf */
48 unsigned int cpu; /* this buf's cpu */
49} ____cacheline_aligned;
50
51/*
52 * Relay channel data structure
53 */
54struct rchan
55{
56 u32 version; /* the version of this struct */
57 size_t subbuf_size; /* sub-buffer size */
58 size_t n_subbufs; /* number of sub-buffers per buffer */
59 size_t alloc_size; /* total buffer size allocated */
60 struct rchan_callbacks *cb; /* client callbacks */
61 struct kref kref; /* channel refcount */
62 void *private_data; /* for user-defined data */
63 size_t last_toobig; /* tried to log event > subbuf size */
64 struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
65};
66
67/*
68 * Relay channel client callbacks
69 */
70struct rchan_callbacks
71{
72 /*
73 * subbuf_start - called on buffer-switch to a new sub-buffer
74 * @buf: the channel buffer containing the new sub-buffer
75 * @subbuf: the start of the new sub-buffer
76 * @prev_subbuf: the start of the previous sub-buffer
77 * @prev_padding: unused space at the end of previous sub-buffer
78 *
79 * The client should return 1 to continue logging, 0 to stop
80 * logging.
81 *
82 * NOTE: subbuf_start will also be invoked when the buffer is
83 * created, so that the first sub-buffer can be initialized
84 * if necessary. In this case, prev_subbuf will be NULL.
85 *
86 * NOTE: the client can reserve bytes at the beginning of the new
87 * sub-buffer by calling subbuf_start_reserve() in this callback.
88 */
89 int (*subbuf_start) (struct rchan_buf *buf,
90 void *subbuf,
91 void *prev_subbuf,
92 size_t prev_padding);
93
94 /*
95 * buf_mapped - relayfs buffer mmap notification
96 * @buf: the channel buffer
97 * @filp: relayfs file pointer
98 *
99 * Called when a relayfs file is successfully mmapped
100 */
101 void (*buf_mapped)(struct rchan_buf *buf,
102 struct file *filp);
103
104 /*
105 * buf_unmapped - relayfs buffer unmap notification
106 * @buf: the channel buffer
107 * @filp: relayfs file pointer
108 *
109 * Called when a relayfs file is successfully unmapped
110 */
111 void (*buf_unmapped)(struct rchan_buf *buf,
112 struct file *filp);
113 /*
114 * create_buf_file - create file to represent a relayfs channel buffer
115 * @filename: the name of the file to create
116 * @parent: the parent of the file to create
117 * @mode: the mode of the file to create
118 * @buf: the channel buffer
119 * @is_global: outparam - set non-zero if the buffer should be global
120 *
121 * Called during relay_open(), once for each per-cpu buffer,
122 * to allow the client to create a file to be used to
123 * represent the corresponding channel buffer. If the file is
124 * created outside of relayfs, the parent must also exist in
125 * that filesystem.
126 *
127 * The callback should return the dentry of the file created
128 * to represent the relay buffer.
129 *
130 * Setting the is_global outparam to a non-zero value will
131 * cause relay_open() to create a single global buffer rather
132 * than the default set of per-cpu buffers.
133 *
134 * See Documentation/filesystems/relayfs.txt for more info.
135 */
136 struct dentry *(*create_buf_file)(const char *filename,
137 struct dentry *parent,
138 int mode,
139 struct rchan_buf *buf,
140 int *is_global);
141
142 /*
143 * remove_buf_file - remove file representing a relayfs channel buffer
144 * @dentry: the dentry of the file to remove
145 *
146 * Called during relay_close(), once for each per-cpu buffer,
147 * to allow the client to remove a file used to represent a
148 * channel buffer.
149 *
150 * The callback should return 0 if successful, negative if not.
151 */
152 int (*remove_buf_file)(struct dentry *dentry);
153};
154
155/*
156 * relayfs kernel API, fs/relayfs/relay.c
157 */
158
159struct rchan *relay_open(const char *base_filename,
160 struct dentry *parent,
161 size_t subbuf_size,
162 size_t n_subbufs,
163 struct rchan_callbacks *cb);
164extern void relay_close(struct rchan *chan);
165extern void relay_flush(struct rchan *chan);
166extern void relay_subbufs_consumed(struct rchan *chan,
167 unsigned int cpu,
168 size_t consumed);
169extern void relay_reset(struct rchan *chan);
170extern int relay_buf_full(struct rchan_buf *buf);
171
172extern size_t relay_switch_subbuf(struct rchan_buf *buf,
173 size_t length);
174extern struct dentry *relayfs_create_dir(const char *name,
175 struct dentry *parent);
176extern int relayfs_remove_dir(struct dentry *dentry);
177extern struct dentry *relayfs_create_file(const char *name,
178 struct dentry *parent,
179 int mode,
180 struct file_operations *fops,
181 void *data);
182extern int relayfs_remove_file(struct dentry *dentry);
183
184/**
185 * relay_write - write data into the channel
186 * @chan: relay channel
187 * @data: data to be written
188 * @length: number of bytes to write
189 *
190 * Writes data into the current cpu's channel buffer.
191 *
192 * Protects the buffer by disabling interrupts. Use this
193 * if you might be logging from interrupt context. Try
194 * __relay_write() if you know you won't be logging from
195 * interrupt context.
196 */
197static inline void relay_write(struct rchan *chan,
198 const void *data,
199 size_t length)
200{
201 unsigned long flags;
202 struct rchan_buf *buf;
203
204 local_irq_save(flags);
205 buf = chan->buf[smp_processor_id()];
206 if (unlikely(buf->offset + length > chan->subbuf_size))
207 length = relay_switch_subbuf(buf, length);
208 memcpy(buf->data + buf->offset, data, length);
209 buf->offset += length;
210 local_irq_restore(flags);
211}
212
213/**
214 * __relay_write - write data into the channel
215 * @chan: relay channel
216 * @data: data to be written
217 * @length: number of bytes to write
218 *
219 * Writes data into the current cpu's channel buffer.
220 *
221 * Protects the buffer by disabling preemption. Use
222 * relay_write() if you might be logging from interrupt
223 * context.
224 */
225static inline void __relay_write(struct rchan *chan,
226 const void *data,
227 size_t length)
228{
229 struct rchan_buf *buf;
230
231 buf = chan->buf[get_cpu()];
232 if (unlikely(buf->offset + length > buf->chan->subbuf_size))
233 length = relay_switch_subbuf(buf, length);
234 memcpy(buf->data + buf->offset, data, length);
235 buf->offset += length;
236 put_cpu();
237}
238
239/**
240 * relay_reserve - reserve slot in channel buffer
241 * @chan: relay channel
242 * @length: number of bytes to reserve
243 *
244 * Returns pointer to reserved slot, NULL if full.
245 *
246 * Reserves a slot in the current cpu's channel buffer.
247 * Does not protect the buffer at all - caller must provide
248 * appropriate synchronization.
249 */
250static inline void *relay_reserve(struct rchan *chan, size_t length)
251{
252 void *reserved;
253 struct rchan_buf *buf = chan->buf[smp_processor_id()];
254
255 if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
256 length = relay_switch_subbuf(buf, length);
257 if (!length)
258 return NULL;
259 }
260 reserved = buf->data + buf->offset;
261 buf->offset += length;
262
263 return reserved;
264}
265
266/**
267 * subbuf_start_reserve - reserve bytes at the start of a sub-buffer
268 * @buf: relay channel buffer
269 * @length: number of bytes to reserve
270 *
271 * Helper function used to reserve bytes at the beginning of
272 * a sub-buffer in the subbuf_start() callback.
273 */
274static inline void subbuf_start_reserve(struct rchan_buf *buf,
275 size_t length)
276{
277 BUG_ON(length >= buf->chan->subbuf_size - 1);
278 buf->offset = length;
279}
280
281/*
282 * exported relay file operations, fs/relayfs/inode.c
283 */
284extern struct file_operations relay_file_operations;
285
286#endif /* _LINUX_RELAYFS_FS_H */
287
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b739ac1f7ca0..ab61cd1199f2 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,10 +91,102 @@ struct rtc_pll_info {
91#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ 91#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ 92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
93 93
94/* interrupt flags */
95#define RTC_IRQF 0x80 /* any of the following is active */
96#define RTC_PF 0x40
97#define RTC_AF 0x20
98#define RTC_UF 0x10
99
94#ifdef __KERNEL__ 100#ifdef __KERNEL__
95 101
96#include <linux/interrupt.h> 102#include <linux/interrupt.h>
97 103
104extern int rtc_month_days(unsigned int month, unsigned int year);
105extern int rtc_valid_tm(struct rtc_time *tm);
106extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
107extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
108
109#include <linux/device.h>
110#include <linux/seq_file.h>
111#include <linux/cdev.h>
112#include <linux/poll.h>
113#include <linux/mutex.h>
114
115extern struct class *rtc_class;
116
117struct rtc_class_ops {
118 int (*open)(struct device *);
119 void (*release)(struct device *);
120 int (*ioctl)(struct device *, unsigned int, unsigned long);
121 int (*read_time)(struct device *, struct rtc_time *);
122 int (*set_time)(struct device *, struct rtc_time *);
123 int (*read_alarm)(struct device *, struct rtc_wkalrm *);
124 int (*set_alarm)(struct device *, struct rtc_wkalrm *);
125 int (*proc)(struct device *, struct seq_file *);
126 int (*set_mmss)(struct device *, unsigned long secs);
127 int (*irq_set_state)(struct device *, int enabled);
128 int (*irq_set_freq)(struct device *, int freq);
129 int (*read_callback)(struct device *, int data);
130};
131
132#define RTC_DEVICE_NAME_SIZE 20
133struct rtc_task;
134
135struct rtc_device
136{
137 struct class_device class_dev;
138 struct module *owner;
139
140 int id;
141 char name[RTC_DEVICE_NAME_SIZE];
142
143 struct rtc_class_ops *ops;
144 struct mutex ops_lock;
145
146 struct class_device *rtc_dev;
147 struct cdev char_dev;
148 struct mutex char_lock;
149
150 unsigned long irq_data;
151 spinlock_t irq_lock;
152 wait_queue_head_t irq_queue;
153 struct fasync_struct *async_queue;
154
155 struct rtc_task *irq_task;
156 spinlock_t irq_task_lock;
157 int irq_freq;
158};
159#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev)
160
161extern struct rtc_device *rtc_device_register(const char *name,
162 struct device *dev,
163 struct rtc_class_ops *ops,
164 struct module *owner);
165extern void rtc_device_unregister(struct rtc_device *rdev);
166extern int rtc_interface_register(struct class_interface *intf);
167
168extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm);
169extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm);
170extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs);
171extern int rtc_read_alarm(struct class_device *class_dev,
172 struct rtc_wkalrm *alrm);
173extern int rtc_set_alarm(struct class_device *class_dev,
174 struct rtc_wkalrm *alrm);
175extern void rtc_update_irq(struct class_device *class_dev,
176 unsigned long num, unsigned long events);
177
178extern struct class_device *rtc_class_open(char *name);
179extern void rtc_class_close(struct class_device *class_dev);
180
181extern int rtc_irq_register(struct class_device *class_dev,
182 struct rtc_task *task);
183extern void rtc_irq_unregister(struct class_device *class_dev,
184 struct rtc_task *task);
185extern int rtc_irq_set_state(struct class_device *class_dev,
186 struct rtc_task *task, int enabled);
187extern int rtc_irq_set_freq(struct class_device *class_dev,
188 struct rtc_task *task, int freq);
189
98typedef struct rtc_task { 190typedef struct rtc_task {
99 void (*func)(void *private_data); 191 void (*func)(void *private_data);
100 void *private_data; 192 void *private_data;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0054c1b9a09..20b4f0372e44 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,7 @@
35#include <linux/topology.h> 35#include <linux/topology.h>
36#include <linux/seccomp.h> 36#include <linux/seccomp.h>
37#include <linux/rcupdate.h> 37#include <linux/rcupdate.h>
38#include <linux/futex.h>
38 39
39#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */ 40#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
40 41
@@ -402,6 +403,7 @@ struct signal_struct {
402 403
403 /* ITIMER_REAL timer for the process */ 404 /* ITIMER_REAL timer for the process */
404 struct hrtimer real_timer; 405 struct hrtimer real_timer;
406 struct task_struct *tsk;
405 ktime_t it_real_incr; 407 ktime_t it_real_incr;
406 408
407 /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ 409 /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
@@ -871,6 +873,11 @@ struct task_struct {
871 int cpuset_mems_generation; 873 int cpuset_mems_generation;
872 int cpuset_mem_spread_rotor; 874 int cpuset_mem_spread_rotor;
873#endif 875#endif
876 struct robust_list_head __user *robust_list;
877#ifdef CONFIG_COMPAT
878 struct compat_robust_list_head __user *compat_robust_list;
879#endif
880
874 atomic_t fs_excl; /* holding fs exclusive resources */ 881 atomic_t fs_excl; /* holding fs exclusive resources */
875 struct rcu_head rcu; 882 struct rcu_head rcu;
876}; 883};
diff --git a/include/linux/serio.h b/include/linux/serio.h
index aa4d6493a034..690aabca8ed0 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio)
119} 119}
120 120
121/* 121/*
122 * Use the following fucntions to manipulate serio's per-port 122 * Use the following functions to manipulate serio's per-port
123 * driver-specific data. 123 * driver-specific data.
124 */ 124 */
125static inline void *serio_get_drvdata(struct serio *serio) 125static inline void *serio_get_drvdata(struct serio *serio)
@@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data)
133} 133}
134 134
135/* 135/*
136 * Use the following fucntions to protect critical sections in 136 * Use the following functions to protect critical sections in
137 * driver code from port's interrupt handler 137 * driver code from port's interrupt handler
138 */ 138 */
139static inline void serio_pause_rx(struct serio *serio) 139static inline void serio_pause_rx(struct serio *serio)
@@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio)
147} 147}
148 148
149/* 149/*
150 * Use the following fucntions to pin serio's driver in process context 150 * Use the following functions to pin serio's driver in process context
151 */ 151 */
152static inline int serio_pin_driver(struct serio *serio) 152static inline int serio_pin_driver(struct serio *serio)
153{ 153{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index d699a16b0cb2..e2fa3ab4afc5 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void);
82 */ 82 */
83#define raw_smp_processor_id() 0 83#define raw_smp_processor_id() 0
84#define hard_smp_processor_id() 0 84#define hard_smp_processor_id() 0
85#define smp_call_function(func,info,retry,wait) ({ 0; }) 85static inline int up_smp_call_function(void)
86{
87 return 0;
88}
89#define smp_call_function(func,info,retry,wait) (up_smp_call_function())
86#define on_each_cpu(func,info,retry,wait) \ 90#define on_each_cpu(func,info,retry,wait) \
87 ({ \ 91 ({ \
88 local_irq_disable(); \ 92 local_irq_disable(); \
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 72b9af4c3fd4..f63d8342ffa3 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -30,12 +30,12 @@
30 */ 30 */
31 31
32struct device; 32struct device;
33extern int register_sound_special(struct file_operations *fops, int unit); 33extern int register_sound_special(const struct file_operations *fops, int unit);
34extern int register_sound_special_device(struct file_operations *fops, int unit, struct device *dev); 34extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
35extern int register_sound_mixer(struct file_operations *fops, int dev); 35extern int register_sound_mixer(const struct file_operations *fops, int dev);
36extern int register_sound_midi(struct file_operations *fops, int dev); 36extern int register_sound_midi(const struct file_operations *fops, int dev);
37extern int register_sound_dsp(struct file_operations *fops, int dev); 37extern int register_sound_dsp(const struct file_operations *fops, int dev);
38extern int register_sound_synth(struct file_operations *fops, int dev); 38extern int register_sound_synth(const struct file_operations *fops, int dev);
39 39
40extern void unregister_sound_special(int unit); 40extern void unregister_sound_special(int unit);
41extern void unregister_sound_mixer(int unit); 41extern void unregister_sound_mixer(int unit);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 8ff2a122dfef..8669291352db 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -69,7 +69,7 @@ struct kstat {
69 struct timespec mtime; 69 struct timespec mtime;
70 struct timespec ctime; 70 struct timespec ctime;
71 unsigned long blksize; 71 unsigned long blksize;
72 unsigned long blocks; 72 unsigned long long blocks;
73}; 73};
74 74
75#endif 75#endif
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
index ad83a2bdb821..b34cc829f98d 100644
--- a/include/linux/statfs.h
+++ b/include/linux/statfs.h
@@ -8,11 +8,11 @@
8struct kstatfs { 8struct kstatfs {
9 long f_type; 9 long f_type;
10 long f_bsize; 10 long f_bsize;
11 sector_t f_blocks; 11 u64 f_blocks;
12 sector_t f_bfree; 12 u64 f_bfree;
13 sector_t f_bavail; 13 u64 f_bavail;
14 sector_t f_files; 14 u64 f_files;
15 sector_t f_ffree; 15 u64 f_ffree;
16 __kernel_fsid_t f_fsid; 16 __kernel_fsid_t f_fsid;
17 long f_namelen; 17 long f_namelen;
18 long f_frsize; 18 long f_frsize;
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index c4e3ea7cf154..b5612c958cce 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -50,7 +50,7 @@ struct cache_head {
50 time_t last_refresh; /* If CACHE_PENDING, this is when upcall 50 time_t last_refresh; /* If CACHE_PENDING, this is when upcall
51 * was sent, else this is when update was received 51 * was sent, else this is when update was received
52 */ 52 */
53 atomic_t refcnt; 53 struct kref ref;
54 unsigned long flags; 54 unsigned long flags;
55}; 55};
56#define CACHE_VALID 0 /* Entry contains valid data */ 56#define CACHE_VALID 0 /* Entry contains valid data */
@@ -68,8 +68,7 @@ struct cache_detail {
68 atomic_t inuse; /* active user-space update or lookup */ 68 atomic_t inuse; /* active user-space update or lookup */
69 69
70 char *name; 70 char *name;
71 void (*cache_put)(struct cache_head *, 71 void (*cache_put)(struct kref *);
72 struct cache_detail*);
73 72
74 void (*cache_request)(struct cache_detail *cd, 73 void (*cache_request)(struct cache_detail *cd,
75 struct cache_head *h, 74 struct cache_head *h,
@@ -81,6 +80,11 @@ struct cache_detail {
81 struct cache_detail *cd, 80 struct cache_detail *cd,
82 struct cache_head *h); 81 struct cache_head *h);
83 82
83 struct cache_head * (*alloc)(void);
84 int (*match)(struct cache_head *orig, struct cache_head *new);
85 void (*init)(struct cache_head *orig, struct cache_head *new);
86 void (*update)(struct cache_head *orig, struct cache_head *new);
87
84 /* fields below this comment are for internal use 88 /* fields below this comment are for internal use
85 * and should not be touched by cache owners 89 * and should not be touched by cache owners
86 */ 90 */
@@ -123,126 +127,14 @@ struct cache_deferred_req {
123 int too_many); 127 int too_many);
124}; 128};
125 129
126/*
127 * just like a template in C++, this macro does cache lookup
128 * for us.
129 * The function is passed some sort of HANDLE from which a cache_detail
130 * structure can be determined (via SETUP, DETAIL), a template
131 * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the
132 * TEST, the function will try to find a matching cache entry in the cache.
133 * If "set" == 0 :
134 * If an entry is found, it is returned
135 * If no entry is found, a new non-VALID entry is created.
136 * If "set" == 1 and INPLACE == 0 :
137 * If no entry is found a new one is inserted with data from "template"
138 * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE
139 * If a CACHE_VALID entry is found, a new entry is swapped in with data
140 * from "template"
141 * If set == 1, and INPLACE == 1 :
142 * As above, except that if a CACHE_VALID entry is found, we UPDATE in place
143 * instead of swapping in a new entry.
144 *
145 * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not
146 * run but insteead CACHE_NEGATIVE is set in any new item.
147 130
148 * In any case, the new entry is returned with a reference count. 131extern struct cache_head *
149 * 132sunrpc_cache_lookup(struct cache_detail *detail,
150 * 133 struct cache_head *key, int hash);
151 * RTN is a struct type for a cache entry 134extern struct cache_head *
152 * MEMBER is the member of the cache which is cache_head, which must be first 135sunrpc_cache_update(struct cache_detail *detail,
153 * FNAME is the name for the function 136 struct cache_head *new, struct cache_head *old, int hash);
154 * ARGS are arguments to function and must contain RTN *item, int set. May
155 * also contain something to be usedby SETUP or DETAIL to find cache_detail.
156 * SETUP locates the cache detail and makes it available as...
157 * DETAIL identifies the cache detail, possibly set up by SETUP
158 * HASHFN returns a hash value of the cache entry "item"
159 * TEST tests if "tmp" matches "item"
160 * INIT copies key information from "item" to "new"
161 * UPDATE copies content information from "item" to "tmp"
162 * INPLACE is true if updates can happen inplace rather than allocating a new structure
163 *
164 * WARNING: any substantial changes to this must be reflected in
165 * net/sunrpc/svcauth.c(auth_domain_lookup)
166 * which is a similar routine that is open-coded.
167 */
168#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \
169RTN *FNAME ARGS \
170{ \
171 RTN *tmp, *new=NULL; \
172 struct cache_head **hp, **head; \
173 SETUP; \
174 head = &(DETAIL)->hash_table[HASHFN]; \
175 retry: \
176 if (set||new) write_lock(&(DETAIL)->hash_lock); \
177 else read_lock(&(DETAIL)->hash_lock); \
178 for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \
179 tmp = container_of(*hp, RTN, MEMBER); \
180 if (TEST) { /* found a match */ \
181 \
182 if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
183 break; \
184 \
185 if (new) \
186 {INIT;} \
187 if (set) { \
188 if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
189 { /* need to swap in new */ \
190 RTN *t2; \
191 \
192 new->MEMBER.next = tmp->MEMBER.next; \
193 *hp = &new->MEMBER; \
194 tmp->MEMBER.next = NULL; \
195 t2 = tmp; tmp = new; new = t2; \
196 } \
197 if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
198 set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
199 else { \
200 UPDATE; \
201 clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
202 } \
203 } \
204 cache_get(&tmp->MEMBER); \
205 if (set||new) write_unlock(&(DETAIL)->hash_lock); \
206 else read_unlock(&(DETAIL)->hash_lock); \
207 if (set) \
208 cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
209 if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
210 if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \
211 return tmp; \
212 } \
213 } \
214 /* Didn't find anything */ \
215 if (new) { \
216 INIT; \
217 new->MEMBER.next = *head; \
218 *head = &new->MEMBER; \
219 (DETAIL)->entries ++; \
220 cache_get(&new->MEMBER); \
221 if (set) { \
222 tmp = new; \
223 if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
224 set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
225 else {UPDATE;} \
226 } \
227 } \
228 if (set||new) write_unlock(&(DETAIL)->hash_lock); \
229 else read_unlock(&(DETAIL)->hash_lock); \
230 if (new && set) \
231 cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \
232 if (new) \
233 return new; \
234 new = kmalloc(sizeof(*new), GFP_KERNEL); \
235 if (new) { \
236 cache_init(&new->MEMBER); \
237 goto retry; \
238 } \
239 return NULL; \
240}
241 137
242#define DefineSimpleCacheLookup(STRUCT,INPLACE) \
243 DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \
244 & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\
245 STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE)
246 138
247#define cache_for_each(pos, detail, index, member) \ 139#define cache_for_each(pos, detail, index, member) \
248 for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ 140 for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \
@@ -258,22 +150,19 @@ extern void cache_clean_deferred(void *owner);
258 150
259static inline struct cache_head *cache_get(struct cache_head *h) 151static inline struct cache_head *cache_get(struct cache_head *h)
260{ 152{
261 atomic_inc(&h->refcnt); 153 kref_get(&h->ref);
262 return h; 154 return h;
263} 155}
264 156
265 157
266static inline int cache_put(struct cache_head *h, struct cache_detail *cd) 158static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
267{ 159{
268 if (atomic_read(&h->refcnt) <= 2 && 160 if (atomic_read(&h->ref.refcount) <= 2 &&
269 h->expiry_time < cd->nextcheck) 161 h->expiry_time < cd->nextcheck)
270 cd->nextcheck = h->expiry_time; 162 cd->nextcheck = h->expiry_time;
271 return atomic_dec_and_test(&h->refcnt); 163 kref_put(&h->ref, cd->cache_put);
272} 164}
273 165
274extern void cache_init(struct cache_head *h);
275extern void cache_fresh(struct cache_detail *detail,
276 struct cache_head *head, time_t expiry);
277extern int cache_check(struct cache_detail *detail, 166extern int cache_check(struct cache_detail *detail,
278 struct cache_head *h, struct cache_req *rqstp); 167 struct cache_head *h, struct cache_req *rqstp);
279extern void cache_flush(void); 168extern void cache_flush(void);
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index 0d6ed3c8bdc4..d93c24b47f3f 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -50,7 +50,7 @@ struct proc_dir_entry * rpc_proc_register(struct rpc_stat *);
50void rpc_proc_unregister(const char *); 50void rpc_proc_unregister(const char *);
51void rpc_proc_zero(struct rpc_program *); 51void rpc_proc_zero(struct rpc_program *);
52struct proc_dir_entry * svc_proc_register(struct svc_stat *, 52struct proc_dir_entry * svc_proc_register(struct svc_stat *,
53 struct file_operations *); 53 const struct file_operations *);
54void svc_proc_unregister(const char *); 54void svc_proc_unregister(const char *);
55 55
56void svc_seq_show(struct seq_file *, 56void svc_seq_show(struct seq_file *,
@@ -65,7 +65,7 @@ static inline void rpc_proc_unregister(const char *p) {}
65static inline void rpc_proc_zero(struct rpc_program *p) {} 65static inline void rpc_proc_zero(struct rpc_program *p) {}
66 66
67static inline struct proc_dir_entry *svc_proc_register(struct svc_stat *s, 67static inline struct proc_dir_entry *svc_proc_register(struct svc_stat *s,
68 struct file_operations *f) { return NULL; } 68 const struct file_operations *f) { return NULL; }
69static inline void svc_proc_unregister(const char *p) {} 69static inline void svc_proc_unregister(const char *p) {}
70 70
71static inline void svc_seq_show(struct seq_file *seq, 71static inline void svc_seq_show(struct seq_file *seq,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c119ce7cbd22..2fe2087edd66 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -45,9 +45,10 @@ struct svc_rqst; /* forward decl */
45 * of ip addresses to the given client. 45 * of ip addresses to the given client.
46 */ 46 */
47struct auth_domain { 47struct auth_domain {
48 struct cache_head h; 48 struct kref ref;
49 struct hlist_node hash;
49 char *name; 50 char *name;
50 int flavour; 51 struct auth_ops *flavour;
51}; 52};
52 53
53/* 54/*
@@ -86,6 +87,9 @@ struct auth_domain {
86 * 87 *
87 * domain_release() 88 * domain_release()
88 * This call releases a domain. 89 * This call releases a domain.
90 * set_client()
91 * Givens a pending request (struct svc_rqst), finds and assigns
92 * an appropriate 'auth_domain' as the client.
89 */ 93 */
90struct auth_ops { 94struct auth_ops {
91 char * name; 95 char * name;
@@ -117,7 +121,7 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor);
117extern struct auth_domain *unix_domain_find(char *name); 121extern struct auth_domain *unix_domain_find(char *name);
118extern void auth_domain_put(struct auth_domain *item); 122extern void auth_domain_put(struct auth_domain *item);
119extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom); 123extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom);
120extern struct auth_domain *auth_domain_lookup(struct auth_domain *item, int set); 124extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
121extern struct auth_domain *auth_domain_find(char *name); 125extern struct auth_domain *auth_domain_find(char *name);
122extern struct auth_domain *auth_unix_lookup(struct in_addr addr); 126extern struct auth_domain *auth_unix_lookup(struct in_addr addr);
123extern int auth_unix_forget_old(struct auth_domain *dom); 127extern int auth_unix_forget_old(struct auth_domain *dom);
@@ -160,8 +164,6 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
160 return hash >> (BITS_PER_LONG - bits); 164 return hash >> (BITS_PER_LONG - bits);
161} 165}
162 166
163extern struct cache_detail auth_domain_cache, ip_map_cache;
164
165#endif /* __KERNEL__ */ 167#endif /* __KERNEL__ */
166 168
167#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ 169#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 1b7cd8d1a71b..2993302f7923 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * SyncLink Multiprotocol Serial Adapter Driver 2 * SyncLink Multiprotocol Serial Adapter Driver
3 * 3 *
4 * $Id: synclink.h,v 3.10 2005/11/08 19:50:54 paulkf Exp $ 4 * $Id: synclink.h,v 3.11 2006/02/06 21:20:29 paulkf Exp $
5 * 5 *
6 * Copyright (C) 1998-2000 by Microgate Corporation 6 * Copyright (C) 1998-2000 by Microgate Corporation
7 * 7 *
@@ -221,6 +221,12 @@ struct mgsl_icount {
221 __u32 rxidle; 221 __u32 rxidle;
222}; 222};
223 223
224struct gpio_desc {
225 __u32 state;
226 __u32 smask;
227 __u32 dir;
228 __u32 dmask;
229};
224 230
225#define DEBUG_LEVEL_DATA 1 231#define DEBUG_LEVEL_DATA 1
226#define DEBUG_LEVEL_ERROR 2 232#define DEBUG_LEVEL_ERROR 2
@@ -276,5 +282,8 @@ struct mgsl_icount {
276#define MGSL_IOCLOOPTXDONE _IO(MGSL_MAGIC_IOC,9) 282#define MGSL_IOCLOOPTXDONE _IO(MGSL_MAGIC_IOC,9)
277#define MGSL_IOCSIF _IO(MGSL_MAGIC_IOC,10) 283#define MGSL_IOCSIF _IO(MGSL_MAGIC_IOC,10)
278#define MGSL_IOCGIF _IO(MGSL_MAGIC_IOC,11) 284#define MGSL_IOCGIF _IO(MGSL_MAGIC_IOC,11)
285#define MGSL_IOCSGPIO _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc)
286#define MGSL_IOCGGPIO _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc)
287#define MGSL_IOCWAITGPIO _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc)
279 288
280#endif /* _SYNCLINK_H_ */ 289#endif /* _SYNCLINK_H_ */
diff --git a/include/linux/threads.h b/include/linux/threads.h
index b59738ac6197..e646bcdf2614 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -28,7 +28,8 @@
28#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) 28#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
29 29
30/* 30/*
31 * A maximum of 4 million PIDs should be enough for a while: 31 * A maximum of 4 million PIDs should be enough for a while.
32 * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.]
32 */ 33 */
33#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ 34#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
34 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) 35 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
diff --git a/include/linux/time.h b/include/linux/time.h
index bf0e785e2e03..0cd696cee998 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -73,12 +73,6 @@ extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
73#define timespec_valid(ts) \ 73#define timespec_valid(ts) \
74 (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) 74 (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
75 75
76/*
77 * 64-bit nanosec type. Large enough to span 292+ years in nanosecond
78 * resolution. Ought to be enough for a while.
79 */
80typedef s64 nsec_t;
81
82extern struct timespec xtime; 76extern struct timespec xtime;
83extern struct timespec wall_to_monotonic; 77extern struct timespec wall_to_monotonic;
84extern seqlock_t xtime_lock; 78extern seqlock_t xtime_lock;
@@ -114,9 +108,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
114 * Returns the scalar nanosecond representation of the timespec 108 * Returns the scalar nanosecond representation of the timespec
115 * parameter. 109 * parameter.
116 */ 110 */
117static inline nsec_t timespec_to_ns(const struct timespec *ts) 111static inline s64 timespec_to_ns(const struct timespec *ts)
118{ 112{
119 return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; 113 return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
120} 114}
121 115
122/** 116/**
@@ -126,9 +120,9 @@ static inline nsec_t timespec_to_ns(const struct timespec *ts)
126 * Returns the scalar nanosecond representation of the timeval 120 * Returns the scalar nanosecond representation of the timeval
127 * parameter. 121 * parameter.
128 */ 122 */
129static inline nsec_t timeval_to_ns(const struct timeval *tv) 123static inline s64 timeval_to_ns(const struct timeval *tv)
130{ 124{
131 return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) + 125 return ((s64) tv->tv_sec * NSEC_PER_SEC) +
132 tv->tv_usec * NSEC_PER_USEC; 126 tv->tv_usec * NSEC_PER_USEC;
133} 127}
134 128
@@ -138,7 +132,7 @@ static inline nsec_t timeval_to_ns(const struct timeval *tv)
138 * 132 *
139 * Returns the timespec representation of the nsec parameter. 133 * Returns the timespec representation of the nsec parameter.
140 */ 134 */
141extern struct timespec ns_to_timespec(const nsec_t nsec); 135extern struct timespec ns_to_timespec(const s64 nsec);
142 136
143/** 137/**
144 * ns_to_timeval - Convert nanoseconds to timeval 138 * ns_to_timeval - Convert nanoseconds to timeval
@@ -146,7 +140,7 @@ extern struct timespec ns_to_timespec(const nsec_t nsec);
146 * 140 *
147 * Returns the timeval representation of the nsec parameter. 141 * Returns the timeval representation of the nsec parameter.
148 */ 142 */
149extern struct timeval ns_to_timeval(const nsec_t nsec); 143extern struct timeval ns_to_timeval(const s64 nsec);
150 144
151#endif /* __KERNEL__ */ 145#endif /* __KERNEL__ */
152 146
diff --git a/include/linux/timer.h b/include/linux/timer.h
index ee5a09e806e8..b5caabca553c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -96,6 +96,7 @@ static inline void add_timer(struct timer_list *timer)
96 96
97extern void init_timers(void); 97extern void init_timers(void);
98extern void run_local_timers(void); 98extern void run_local_timers(void);
99extern int it_real_fn(void *); 99struct hrtimer;
100extern int it_real_fn(struct hrtimer *);
100 101
101#endif 102#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 82dc9ae79d37..03914b7e41b1 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -307,6 +307,8 @@ time_interpolator_reset(void)
307/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ 307/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
308extern u64 current_tick_length(void); 308extern u64 current_tick_length(void);
309 309
310extern int do_adjtimex(struct timex *);
311
310#endif /* KERNEL */ 312#endif /* KERNEL */
311 313
312#endif /* LINUX_TIMEX_H */ 314#endif /* LINUX_TIMEX_H */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e8eb0040ce3a..a305ae2e44b6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,15 @@
164 .nr_balance_failed = 0, \ 164 .nr_balance_failed = 0, \
165} 165}
166 166
167#ifdef CONFIG_SCHED_MC
168#ifndef SD_MC_INIT
169/* for now its same as SD_CPU_INIT.
170 * TBD: Tune Domain parameters!
171 */
172#define SD_MC_INIT SD_CPU_INIT
173#endif
174#endif
175
167#ifdef CONFIG_NUMA 176#ifdef CONFIG_NUMA
168#ifndef SD_NODE_INIT 177#ifndef SD_NODE_INIT
169#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! 178#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
diff --git a/include/linux/types.h b/include/linux/types.h
index 54ae2d59e71b..1046c7ad86d9 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -137,6 +137,10 @@ typedef __s64 int64_t;
137typedef unsigned long sector_t; 137typedef unsigned long sector_t;
138#endif 138#endif
139 139
140#ifndef HAVE_BLKCNT_T
141typedef unsigned long blkcnt_t;
142#endif
143
140/* 144/*
141 * The type of an index into the pagecache. Use a #define so asm/types.h 145 * The type of an index into the pagecache. Use a #define so asm/types.h
142 * can override it. 146 * can override it.
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index b0ffe4356e5a..843aeaaa79d4 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -895,7 +895,7 @@ extern void ufs_set_link(struct inode *, struct ufs_dir_entry *, struct buffer_h
895 895
896/* file.c */ 896/* file.c */
897extern struct inode_operations ufs_file_inode_operations; 897extern struct inode_operations ufs_file_inode_operations;
898extern struct file_operations ufs_file_operations; 898extern const struct file_operations ufs_file_operations;
899 899
900extern struct address_space_operations ufs_aops; 900extern struct address_space_operations ufs_aops;
901 901
@@ -915,7 +915,7 @@ extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
915extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create); 915extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create);
916 916
917/* namei.c */ 917/* namei.c */
918extern struct file_operations ufs_dir_operations; 918extern const struct file_operations ufs_dir_operations;
919 919
920/* super.c */ 920/* super.c */
921extern void ufs_warning (struct super_block *, const char *, const char *, ...) __attribute__ ((format (printf, 3, 4))); 921extern void ufs_warning (struct super_block *, const char *, const char *, ...) __attribute__ ((format (printf, 3, 4)));
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 130d125fda12..e34e5e3dce52 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -615,7 +615,7 @@ extern struct bus_type usb_bus_type;
615 */ 615 */
616struct usb_class_driver { 616struct usb_class_driver {
617 char *name; 617 char *name;
618 struct file_operations *fops; 618 const struct file_operations *fops;
619 int minor_base; 619 int minor_base;
620}; 620};
621 621
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 2275bfec5b68..af2d6155d3fe 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -75,7 +75,7 @@ struct video_device
75 int minor; 75 int minor;
76 76
77 /* device ops + callbacks */ 77 /* device ops + callbacks */
78 struct file_operations *fops; 78 const struct file_operations *fops;
79 void (*release)(struct video_device *vfd); 79 void (*release)(struct video_device *vfd);
80 80
81 81
diff --git a/include/linux/x1205.h b/include/linux/x1205.h
deleted file mode 100644
index 64fd3af894a5..000000000000
--- a/include/linux/x1205.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * x1205.h - defines for drivers/i2c/chips/x1205.c
3 * Copyright 2004 Karen Spearel
4 * Copyright 2005 Alessandro Zummo
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __LINUX_X1205_H__
13#define __LINUX_X1205_H__
14
15/* commands */
16
17#define X1205_CMD_GETDATETIME 0
18#define X1205_CMD_SETTIME 1
19#define X1205_CMD_SETDATETIME 2
20#define X1205_CMD_GETALARM 3
21#define X1205_CMD_SETALARM 4
22#define X1205_CMD_GETDTRIM 5
23#define X1205_CMD_SETDTRIM 6
24#define X1205_CMD_GETATRIM 7
25#define X1205_CMD_SETATRIM 8
26
27extern int x1205_do_command(unsigned int cmd, void *arg);
28extern int x1205_direct_attach(int adapter_id,
29 struct i2c_client_address_data *address_data);
30
31#endif /* __LINUX_X1205_H__ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index b6f0905a4ee2..916013ca4a5c 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -300,29 +300,30 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
300 300
301#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) 301#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x)
302 302
303extern struct notifier_block *nf_conntrack_chain; 303extern struct atomic_notifier_head nf_conntrack_chain;
304extern struct notifier_block *nf_conntrack_expect_chain; 304extern struct atomic_notifier_head nf_conntrack_expect_chain;
305 305
306static inline int nf_conntrack_register_notifier(struct notifier_block *nb) 306static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
307{ 307{
308 return notifier_chain_register(&nf_conntrack_chain, nb); 308 return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
309} 309}
310 310
311static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) 311static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
312{ 312{
313 return notifier_chain_unregister(&nf_conntrack_chain, nb); 313 return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
314} 314}
315 315
316static inline int 316static inline int
317nf_conntrack_expect_register_notifier(struct notifier_block *nb) 317nf_conntrack_expect_register_notifier(struct notifier_block *nb)
318{ 318{
319 return notifier_chain_register(&nf_conntrack_expect_chain, nb); 319 return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
320} 320}
321 321
322static inline int 322static inline int
323nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) 323nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
324{ 324{
325 return notifier_chain_unregister(&nf_conntrack_expect_chain, nb); 325 return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
326 nb);
326} 327}
327 328
328extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); 329extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
@@ -347,14 +348,14 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event,
347 struct nf_conn *ct) 348 struct nf_conn *ct)
348{ 349{
349 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 350 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
350 notifier_call_chain(&nf_conntrack_chain, event, ct); 351 atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
351} 352}
352 353
353static inline void 354static inline void
354nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 355nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
355 struct nf_conntrack_expect *exp) 356 struct nf_conntrack_expect *exp)
356{ 357{
357 notifier_call_chain(&nf_conntrack_expect_chain, event, exp); 358 atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
358} 359}
359#else /* CONFIG_NF_CONNTRACK_EVENTS */ 360#else /* CONFIG_NF_CONNTRACK_EVENTS */
360static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, 361static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 11641c9384f7..c5d7f920c352 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -145,7 +145,7 @@ static inline struct request_sock *
145{ 145{
146 struct request_sock *req = queue->rskq_accept_head; 146 struct request_sock *req = queue->rskq_accept_head;
147 147
148 queue->rskq_accept_head = queue->rskq_accept_head = NULL; 148 queue->rskq_accept_head = NULL;
149 return req; 149 return req;
150} 150}
151 151
diff --git a/include/sound/core.h b/include/sound/core.h
index 144bdc2f217f..7f32c12b4a0a 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -186,7 +186,7 @@ struct snd_minor {
186 int type; /* SNDRV_DEVICE_TYPE_XXX */ 186 int type; /* SNDRV_DEVICE_TYPE_XXX */
187 int card; /* card number */ 187 int card; /* card number */
188 int device; /* device number */ 188 int device; /* device number */
189 struct file_operations *f_ops; /* file operations */ 189 const struct file_operations *f_ops; /* file operations */
190 void *private_data; /* private data for f_ops->open */ 190 void *private_data; /* private data for f_ops->open */
191 char name[0]; /* device name (keep at the end of 191 char name[0]; /* device name (keep at the end of
192 structure) */ 192 structure) */
@@ -200,14 +200,14 @@ extern int snd_ecards_limit;
200void snd_request_card(int card); 200void snd_request_card(int card);
201 201
202int snd_register_device(int type, struct snd_card *card, int dev, 202int snd_register_device(int type, struct snd_card *card, int dev,
203 struct file_operations *f_ops, void *private_data, 203 const struct file_operations *f_ops, void *private_data,
204 const char *name); 204 const char *name);
205int snd_unregister_device(int type, struct snd_card *card, int dev); 205int snd_unregister_device(int type, struct snd_card *card, int dev);
206void *snd_lookup_minor_data(unsigned int minor, int type); 206void *snd_lookup_minor_data(unsigned int minor, int type);
207 207
208#ifdef CONFIG_SND_OSSEMUL 208#ifdef CONFIG_SND_OSSEMUL
209int snd_register_oss_device(int type, struct snd_card *card, int dev, 209int snd_register_oss_device(int type, struct snd_card *card, int dev,
210 struct file_operations *f_ops, void *private_data, 210 const struct file_operations *f_ops, void *private_data,
211 const char *name); 211 const char *name);
212int snd_unregister_oss_device(int type, struct snd_card *card, int dev); 212int snd_unregister_oss_device(int type, struct snd_card *card, int dev);
213void *snd_lookup_oss_minor_data(unsigned int minor, int type); 213void *snd_lookup_oss_minor_data(unsigned int minor, int type);
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 8b671fe68afa..adb7cad3e6ee 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -23,7 +23,6 @@ int root_mountflags = MS_RDONLY | MS_SILENT;
23char * __initdata root_device_name; 23char * __initdata root_device_name;
24static char __initdata saved_root_name[64]; 24static char __initdata saved_root_name[64];
25 25
26/* this is initialized in init/main.c */
27dev_t ROOT_DEV; 26dev_t ROOT_DEV;
28 27
29static int __init load_ramdisk(char *str) 28static int __init load_ramdisk(char *str)
diff --git a/init/initramfs.c b/init/initramfs.c
index 77b934cccefe..679d870d991b 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -519,7 +519,7 @@ void __init populate_rootfs(void)
519 return; 519 return;
520 } 520 }
521 printk("it isn't (%s); looks like an initrd\n", err); 521 printk("it isn't (%s); looks like an initrd\n", err);
522 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700); 522 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
523 if (fd >= 0) { 523 if (fd >= 0) {
524 sys_write(fd, (char *)initrd_start, 524 sys_write(fd, (char *)initrd_start,
525 initrd_end - initrd_start); 525 initrd_end - initrd_start);
diff --git a/init/main.c b/init/main.c
index 006dcd547dc2..4a2f0898dda1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -341,7 +341,7 @@ static void __init setup_per_cpu_areas(void)
341#endif 341#endif
342 ptr = alloc_bootmem(size * nr_possible_cpus); 342 ptr = alloc_bootmem(size * nr_possible_cpus);
343 343
344 for_each_cpu(i) { 344 for_each_possible_cpu(i) {
345 __per_cpu_offset[i] = ptr - __per_cpu_start; 345 __per_cpu_offset[i] = ptr - __per_cpu_start;
346 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 346 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
347 ptr += size; 347 ptr += size;
@@ -645,24 +645,6 @@ static void run_init_process(char *init_filename)
645 execve(init_filename, argv_init, envp_init); 645 execve(init_filename, argv_init, envp_init);
646} 646}
647 647
648static inline void fixup_cpu_present_map(void)
649{
650#ifdef CONFIG_SMP
651 int i;
652
653 /*
654 * If arch is not hotplug ready and did not populate
655 * cpu_present_map, just make cpu_present_map same as cpu_possible_map
656 * for other cpu bringup code to function as normal. e.g smp_init() etc.
657 */
658 if (cpus_empty(cpu_present_map)) {
659 for_each_cpu(i) {
660 cpu_set(i, cpu_present_map);
661 }
662 }
663#endif
664}
665
666static int init(void * unused) 648static int init(void * unused)
667{ 649{
668 lock_kernel(); 650 lock_kernel();
@@ -684,7 +666,6 @@ static int init(void * unused)
684 666
685 do_pre_smp_initcalls(); 667 do_pre_smp_initcalls();
686 668
687 fixup_cpu_present_map();
688 smp_init(); 669 smp_init();
689 sched_init_smp(); 670 sched_init_smp();
690 671
diff --git a/ipc/compat.c b/ipc/compat.c
index 1fe95f6659dd..a544dfbb082a 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/syscalls.h> 31#include <linux/syscalls.h>
32 32
33#include <asm/semaphore.h> 33#include <linux/mutex.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35 35
36#include "util.h" 36#include "util.h"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 85c52fd26bff..41ecbd440fed 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -25,6 +25,8 @@
25#include <linux/netlink.h> 25#include <linux/netlink.h>
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/signal.h> 27#include <linux/signal.h>
28#include <linux/mutex.h>
29
28#include <net/sock.h> 30#include <net/sock.h>
29#include "util.h" 31#include "util.h"
30 32
@@ -760,7 +762,7 @@ out_unlock:
760 * The receiver accepts the message and returns without grabbing the queue 762 * The receiver accepts the message and returns without grabbing the queue
761 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers 763 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
762 * are necessary. The same algorithm is used for sysv semaphores, see 764 * are necessary. The same algorithm is used for sysv semaphores, see
763 * ipc/sem.c fore more details. 765 * ipc/sem.c for more details.
764 * 766 *
765 * The same algorithm is used for senders. 767 * The same algorithm is used for senders.
766 */ 768 */
diff --git a/ipc/msg.c b/ipc/msg.c
index 7eec5ed32379..48a7f17a7236 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -28,6 +28,8 @@
28#include <linux/syscalls.h> 28#include <linux/syscalls.h>
29#include <linux/audit.h> 29#include <linux/audit.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/mutex.h>
32
31#include <asm/current.h> 33#include <asm/current.h>
32#include <asm/uaccess.h> 34#include <asm/uaccess.h>
33#include "util.h" 35#include "util.h"
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res)
179 * removes the message queue from message queue ID 181 * removes the message queue from message queue ID
180 * array, and cleans up all the messages associated with this queue. 182 * array, and cleans up all the messages associated with this queue.
181 * 183 *
182 * msg_ids.sem and the spinlock for this message queue is hold 184 * msg_ids.mutex and the spinlock for this message queue is hold
183 * before freeque() is called. msg_ids.sem remains locked on exit. 185 * before freeque() is called. msg_ids.mutex remains locked on exit.
184 */ 186 */
185static void freeque (struct msg_queue *msq, int id) 187static void freeque (struct msg_queue *msq, int id)
186{ 188{
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
208 int id, ret = -EPERM; 210 int id, ret = -EPERM;
209 struct msg_queue *msq; 211 struct msg_queue *msq;
210 212
211 down(&msg_ids.sem); 213 mutex_lock(&msg_ids.mutex);
212 if (key == IPC_PRIVATE) 214 if (key == IPC_PRIVATE)
213 ret = newque(key, msgflg); 215 ret = newque(key, msgflg);
214 else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ 216 else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
231 } 233 }
232 msg_unlock(msq); 234 msg_unlock(msq);
233 } 235 }
234 up(&msg_ids.sem); 236 mutex_unlock(&msg_ids.mutex);
235 return ret; 237 return ret;
236} 238}
237 239
@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
361 msginfo.msgmnb = msg_ctlmnb; 363 msginfo.msgmnb = msg_ctlmnb;
362 msginfo.msgssz = MSGSSZ; 364 msginfo.msgssz = MSGSSZ;
363 msginfo.msgseg = MSGSEG; 365 msginfo.msgseg = MSGSEG;
364 down(&msg_ids.sem); 366 mutex_lock(&msg_ids.mutex);
365 if (cmd == MSG_INFO) { 367 if (cmd == MSG_INFO) {
366 msginfo.msgpool = msg_ids.in_use; 368 msginfo.msgpool = msg_ids.in_use;
367 msginfo.msgmap = atomic_read(&msg_hdrs); 369 msginfo.msgmap = atomic_read(&msg_hdrs);
@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
372 msginfo.msgtql = MSGTQL; 374 msginfo.msgtql = MSGTQL;
373 } 375 }
374 max_id = msg_ids.max_id; 376 max_id = msg_ids.max_id;
375 up(&msg_ids.sem); 377 mutex_unlock(&msg_ids.mutex);
376 if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) 378 if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
377 return -EFAULT; 379 return -EFAULT;
378 return (max_id < 0) ? 0: max_id; 380 return (max_id < 0) ? 0: max_id;
@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
435 return -EINVAL; 437 return -EINVAL;
436 } 438 }
437 439
438 down(&msg_ids.sem); 440 mutex_lock(&msg_ids.mutex);
439 msq = msg_lock(msqid); 441 msq = msg_lock(msqid);
440 err=-EINVAL; 442 err=-EINVAL;
441 if (msq == NULL) 443 if (msq == NULL)
@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
489 } 491 }
490 err = 0; 492 err = 0;
491out_up: 493out_up:
492 up(&msg_ids.sem); 494 mutex_unlock(&msg_ids.mutex);
493 return err; 495 return err;
494out_unlock_up: 496out_unlock_up:
495 msg_unlock(msq); 497 msg_unlock(msq);
diff --git a/ipc/sem.c b/ipc/sem.c
index 59696a840be1..642659cd596b 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -75,6 +75,8 @@
75#include <linux/audit.h> 75#include <linux/audit.h>
76#include <linux/capability.h> 76#include <linux/capability.h>
77#include <linux/seq_file.h> 77#include <linux/seq_file.h>
78#include <linux/mutex.h>
79
78#include <asm/uaccess.h> 80#include <asm/uaccess.h>
79#include "util.h" 81#include "util.h"
80 82
@@ -139,7 +141,7 @@ void __init sem_init (void)
139 * * if it's IN_WAKEUP, then it must wait until the value changes 141 * * if it's IN_WAKEUP, then it must wait until the value changes
140 * * if it's not -EINTR, then the operation was completed by 142 * * if it's not -EINTR, then the operation was completed by
141 * update_queue. semtimedop can return queue.status without 143 * update_queue. semtimedop can return queue.status without
142 * performing any operation on the semaphore array. 144 * performing any operation on the sem array.
143 * * otherwise it must acquire the spinlock and check what's up. 145 * * otherwise it must acquire the spinlock and check what's up.
144 * 146 *
145 * The two-stage algorithm is necessary to protect against the following 147 * The two-stage algorithm is necessary to protect against the following
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
214 216
215 if (nsems < 0 || nsems > sc_semmsl) 217 if (nsems < 0 || nsems > sc_semmsl)
216 return -EINVAL; 218 return -EINVAL;
217 down(&sem_ids.sem); 219 mutex_lock(&sem_ids.mutex);
218 220
219 if (key == IPC_PRIVATE) { 221 if (key == IPC_PRIVATE) {
220 err = newary(key, nsems, semflg); 222 err = newary(key, nsems, semflg);
@@ -227,8 +229,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
227 err = -EEXIST; 229 err = -EEXIST;
228 } else { 230 } else {
229 sma = sem_lock(id); 231 sma = sem_lock(id);
230 if(sma==NULL) 232 BUG_ON(sma==NULL);
231 BUG();
232 if (nsems > sma->sem_nsems) 233 if (nsems > sma->sem_nsems)
233 err = -EINVAL; 234 err = -EINVAL;
234 else if (ipcperms(&sma->sem_perm, semflg)) 235 else if (ipcperms(&sma->sem_perm, semflg))
@@ -242,7 +243,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
242 sem_unlock(sma); 243 sem_unlock(sma);
243 } 244 }
244 245
245 up(&sem_ids.sem); 246 mutex_unlock(&sem_ids.mutex);
246 return err; 247 return err;
247} 248}
248 249
@@ -437,8 +438,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
437 return semzcnt; 438 return semzcnt;
438} 439}
439 440
440/* Free a semaphore set. freeary() is called with sem_ids.sem down and 441/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
441 * the spinlock for this semaphore set hold. sem_ids.sem remains locked 442 * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
442 * on exit. 443 * on exit.
443 */ 444 */
444static void freeary (struct sem_array *sma, int id) 445static void freeary (struct sem_array *sma, int id)
@@ -525,7 +526,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
525 seminfo.semmnu = SEMMNU; 526 seminfo.semmnu = SEMMNU;
526 seminfo.semmap = SEMMAP; 527 seminfo.semmap = SEMMAP;
527 seminfo.semume = SEMUME; 528 seminfo.semume = SEMUME;
528 down(&sem_ids.sem); 529 mutex_lock(&sem_ids.mutex);
529 if (cmd == SEM_INFO) { 530 if (cmd == SEM_INFO) {
530 seminfo.semusz = sem_ids.in_use; 531 seminfo.semusz = sem_ids.in_use;
531 seminfo.semaem = used_sems; 532 seminfo.semaem = used_sems;
@@ -534,7 +535,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
534 seminfo.semaem = SEMAEM; 535 seminfo.semaem = SEMAEM;
535 } 536 }
536 max_id = sem_ids.max_id; 537 max_id = sem_ids.max_id;
537 up(&sem_ids.sem); 538 mutex_unlock(&sem_ids.mutex);
538 if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 539 if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
539 return -EFAULT; 540 return -EFAULT;
540 return (max_id < 0) ? 0: max_id; 541 return (max_id < 0) ? 0: max_id;
@@ -885,9 +886,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
885 return err; 886 return err;
886 case IPC_RMID: 887 case IPC_RMID:
887 case IPC_SET: 888 case IPC_SET:
888 down(&sem_ids.sem); 889 mutex_lock(&sem_ids.mutex);
889 err = semctl_down(semid,semnum,cmd,version,arg); 890 err = semctl_down(semid,semnum,cmd,version,arg);
890 up(&sem_ids.sem); 891 mutex_unlock(&sem_ids.mutex);
891 return err; 892 return err;
892 default: 893 default:
893 return -EINVAL; 894 return -EINVAL;
@@ -1181,8 +1182,7 @@ retry_undos:
1181 1182
1182 sma = sem_lock(semid); 1183 sma = sem_lock(semid);
1183 if(sma==NULL) { 1184 if(sma==NULL) {
1184 if(queue.prev != NULL) 1185 BUG_ON(queue.prev != NULL);
1185 BUG();
1186 error = -EIDRM; 1186 error = -EIDRM;
1187 goto out_free; 1187 goto out_free;
1188 } 1188 }
@@ -1299,9 +1299,9 @@ found:
1299 /* perform adjustments registered in u */ 1299 /* perform adjustments registered in u */
1300 nsems = sma->sem_nsems; 1300 nsems = sma->sem_nsems;
1301 for (i = 0; i < nsems; i++) { 1301 for (i = 0; i < nsems; i++) {
1302 struct sem * sem = &sma->sem_base[i]; 1302 struct sem * semaphore = &sma->sem_base[i];
1303 if (u->semadj[i]) { 1303 if (u->semadj[i]) {
1304 sem->semval += u->semadj[i]; 1304 semaphore->semval += u->semadj[i];
1305 /* 1305 /*
1306 * Range checks of the new semaphore value, 1306 * Range checks of the new semaphore value,
1307 * not defined by sus: 1307 * not defined by sus:
@@ -1315,11 +1315,11 @@ found:
1315 * 1315 *
1316 * Manfred <manfred@colorfullife.com> 1316 * Manfred <manfred@colorfullife.com>
1317 */ 1317 */
1318 if (sem->semval < 0) 1318 if (semaphore->semval < 0)
1319 sem->semval = 0; 1319 semaphore->semval = 0;
1320 if (sem->semval > SEMVMX) 1320 if (semaphore->semval > SEMVMX)
1321 sem->semval = SEMVMX; 1321 semaphore->semval = SEMVMX;
1322 sem->sempid = current->tgid; 1322 semaphore->sempid = current->tgid;
1323 } 1323 }
1324 } 1324 }
1325 sma->sem_otime = get_seconds(); 1325 sma->sem_otime = get_seconds();
diff --git a/ipc/shm.c b/ipc/shm.c
index 6f9615c09fb2..f806a2e314e0 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -30,6 +30,7 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/ptrace.h> 31#include <linux/ptrace.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/mutex.h>
33 34
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35 36
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd)
109 * 110 *
110 * @shp: struct to free 111 * @shp: struct to free
111 * 112 *
112 * It has to be called with shp and shm_ids.sem locked, 113 * It has to be called with shp and shm_ids.mutex locked,
113 * but returns with shp unlocked and freed. 114 * but returns with shp unlocked and freed.
114 */ 115 */
115static void shm_destroy (struct shmid_kernel *shp) 116static void shm_destroy (struct shmid_kernel *shp)
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd)
139 int id = file->f_dentry->d_inode->i_ino; 140 int id = file->f_dentry->d_inode->i_ino;
140 struct shmid_kernel *shp; 141 struct shmid_kernel *shp;
141 142
142 down (&shm_ids.sem); 143 mutex_lock(&shm_ids.mutex);
143 /* remove from the list of attaches of the shm segment */ 144 /* remove from the list of attaches of the shm segment */
144 if(!(shp = shm_lock(id))) 145 if(!(shp = shm_lock(id)))
145 BUG(); 146 BUG();
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd)
151 shm_destroy (shp); 152 shm_destroy (shp);
152 else 153 else
153 shm_unlock(shp); 154 shm_unlock(shp);
154 up (&shm_ids.sem); 155 mutex_unlock(&shm_ids.mutex);
155} 156}
156 157
157static int shm_mmap(struct file * file, struct vm_area_struct * vma) 158static int shm_mmap(struct file * file, struct vm_area_struct * vma)
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
270 struct shmid_kernel *shp; 271 struct shmid_kernel *shp;
271 int err, id = 0; 272 int err, id = 0;
272 273
273 down(&shm_ids.sem); 274 mutex_lock(&shm_ids.mutex);
274 if (key == IPC_PRIVATE) { 275 if (key == IPC_PRIVATE) {
275 err = newseg(key, shmflg, size); 276 err = newseg(key, shmflg, size);
276 } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { 277 } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
296 } 297 }
297 shm_unlock(shp); 298 shm_unlock(shp);
298 } 299 }
299 up(&shm_ids.sem); 300 mutex_unlock(&shm_ids.mutex);
300 301
301 return err; 302 return err;
302} 303}
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
467 return err; 468 return err;
468 469
469 memset(&shm_info,0,sizeof(shm_info)); 470 memset(&shm_info,0,sizeof(shm_info));
470 down(&shm_ids.sem); 471 mutex_lock(&shm_ids.mutex);
471 shm_info.used_ids = shm_ids.in_use; 472 shm_info.used_ids = shm_ids.in_use;
472 shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); 473 shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
473 shm_info.shm_tot = shm_tot; 474 shm_info.shm_tot = shm_tot;
474 shm_info.swap_attempts = 0; 475 shm_info.swap_attempts = 0;
475 shm_info.swap_successes = 0; 476 shm_info.swap_successes = 0;
476 err = shm_ids.max_id; 477 err = shm_ids.max_id;
477 up(&shm_ids.sem); 478 mutex_unlock(&shm_ids.mutex);
478 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { 479 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
479 err = -EFAULT; 480 err = -EFAULT;
480 goto out; 481 goto out;
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
583 * Instead we set a destroyed flag, and then blow 584 * Instead we set a destroyed flag, and then blow
584 * the name away when the usage hits zero. 585 * the name away when the usage hits zero.
585 */ 586 */
586 down(&shm_ids.sem); 587 mutex_lock(&shm_ids.mutex);
587 shp = shm_lock(shmid); 588 shp = shm_lock(shmid);
588 err = -EINVAL; 589 err = -EINVAL;
589 if (shp == NULL) 590 if (shp == NULL)
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
610 shm_unlock(shp); 611 shm_unlock(shp);
611 } else 612 } else
612 shm_destroy (shp); 613 shm_destroy (shp);
613 up(&shm_ids.sem); 614 mutex_unlock(&shm_ids.mutex);
614 goto out; 615 goto out;
615 } 616 }
616 617
@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
620 err = -EFAULT; 621 err = -EFAULT;
621 goto out; 622 goto out;
622 } 623 }
623 down(&shm_ids.sem); 624 mutex_lock(&shm_ids.mutex);
624 shp = shm_lock(shmid); 625 shp = shm_lock(shmid);
625 err=-EINVAL; 626 err=-EINVAL;
626 if(shp==NULL) 627 if(shp==NULL)
627 goto out_up; 628 goto out_up;
628 if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm)))) 629 if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
630 setbuf.mode, &(shp->shm_perm))))
629 goto out_unlock_up; 631 goto out_unlock_up;
630 err = shm_checkid(shp,shmid); 632 err = shm_checkid(shp,shmid);
631 if(err) 633 if(err)
@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
658out_unlock_up: 660out_unlock_up:
659 shm_unlock(shp); 661 shm_unlock(shp);
660out_up: 662out_up:
661 up(&shm_ids.sem); 663 mutex_unlock(&shm_ids.mutex);
662 goto out; 664 goto out;
663out_unlock: 665out_unlock:
664 shm_unlock(shp); 666 shm_unlock(shp);
@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
771invalid: 773invalid:
772 up_write(&current->mm->mmap_sem); 774 up_write(&current->mm->mmap_sem);
773 775
774 down (&shm_ids.sem); 776 mutex_lock(&shm_ids.mutex);
775 if(!(shp = shm_lock(shmid))) 777 if(!(shp = shm_lock(shmid)))
776 BUG(); 778 BUG();
777 shp->shm_nattch--; 779 shp->shm_nattch--;
@@ -780,7 +782,7 @@ invalid:
780 shm_destroy (shp); 782 shm_destroy (shp);
781 else 783 else
782 shm_unlock(shp); 784 shm_unlock(shp);
783 up (&shm_ids.sem); 785 mutex_unlock(&shm_ids.mutex);
784 786
785 *raddr = (unsigned long) user_addr; 787 *raddr = (unsigned long) user_addr;
786 err = 0; 788 err = 0;
diff --git a/ipc/util.c b/ipc/util.c
index 862621980b01..23151ef32590 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -68,7 +68,8 @@ __initcall(ipc_init);
68void __init ipc_init_ids(struct ipc_ids* ids, int size) 68void __init ipc_init_ids(struct ipc_ids* ids, int size)
69{ 69{
70 int i; 70 int i;
71 sema_init(&ids->sem,1); 71
72 mutex_init(&ids->mutex);
72 73
73 if(size > IPCMNI) 74 if(size > IPCMNI)
74 size = IPCMNI; 75 size = IPCMNI;
@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
138 * @ids: Identifier set 139 * @ids: Identifier set
139 * @key: The key to find 140 * @key: The key to find
140 * 141 *
141 * Requires ipc_ids.sem locked. 142 * Requires ipc_ids.mutex locked.
142 * Returns the identifier if found or -1 if not. 143 * Returns the identifier if found or -1 if not.
143 */ 144 */
144 145
@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
150 151
151 /* 152 /*
152 * rcu_dereference() is not needed here 153 * rcu_dereference() is not needed here
153 * since ipc_ids.sem is held 154 * since ipc_ids.mutex is held
154 */ 155 */
155 for (id = 0; id <= max_id; id++) { 156 for (id = 0; id <= max_id; id++) {
156 p = ids->entries->p[id]; 157 p = ids->entries->p[id];
@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
163} 164}
164 165
165/* 166/*
166 * Requires ipc_ids.sem locked 167 * Requires ipc_ids.mutex locked
167 */ 168 */
168static int grow_ary(struct ipc_ids* ids, int newsize) 169static int grow_ary(struct ipc_ids* ids, int newsize)
169{ 170{
@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
210 * is returned. The list is returned in a locked state on success. 211 * is returned. The list is returned in a locked state on success.
211 * On failure the list is not locked and -1 is returned. 212 * On failure the list is not locked and -1 is returned.
212 * 213 *
213 * Called with ipc_ids.sem held. 214 * Called with ipc_ids.mutex held.
214 */ 215 */
215 216
216int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) 217int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
221 222
222 /* 223 /*
223 * rcu_dereference()() is not needed here since 224 * rcu_dereference()() is not needed here since
224 * ipc_ids.sem is held 225 * ipc_ids.mutex is held
225 */ 226 */
226 for (id = 0; id < size; id++) { 227 for (id = 0; id < size; id++) {
227 if(ids->entries->p[id] == NULL) 228 if(ids->entries->p[id] == NULL)
@@ -257,7 +258,7 @@ found:
257 * fed an invalid identifier. The entry is removed and internal 258 * fed an invalid identifier. The entry is removed and internal
258 * variables recomputed. The object associated with the identifier 259 * variables recomputed. The object associated with the identifier
259 * is returned. 260 * is returned.
260 * ipc_ids.sem and the spinlock for this ID is hold before this function 261 * ipc_ids.mutex and the spinlock for this ID is hold before this function
261 * is called, and remain locked on the exit. 262 * is called, and remain locked on the exit.
262 */ 263 */
263 264
@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
270 271
271 /* 272 /*
272 * do not need a rcu_dereference()() here to force ordering 273 * do not need a rcu_dereference()() here to force ordering
273 * on Alpha, since the ipc_ids.sem is held. 274 * on Alpha, since the ipc_ids.mutex is held.
274 */ 275 */
275 p = ids->entries->p[lid]; 276 p = ids->entries->p[lid];
276 ids->entries->p[lid] = NULL; 277 ids->entries->p[lid] = NULL;
@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
530 531
531/* 532/*
532 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() 533 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
533 * is called with shm_ids.sem locked. Since grow_ary() is also called with 534 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
534 * shm_ids.sem down(for Shared Memory), there is no need to add read 535 * shm_ids.mutex down(for Shared Memory), there is no need to add read
535 * barriers here to gurantee the writes in grow_ary() are seen in order 536 * barriers here to gurantee the writes in grow_ary() are seen in order
536 * here (for Alpha). 537 * here (for Alpha).
537 * 538 *
538 * However ipc_get() itself does not necessary require ipc_ids.sem down. So 539 * However ipc_get() itself does not necessary require ipc_ids.mutex down. So
539 * if in the future ipc_get() is used by other places without ipc_ids.sem 540 * if in the future ipc_get() is used by other places without ipc_ids.mutex
540 * down, then ipc_get() needs read memery barriers as ipc_lock() does. 541 * down, then ipc_get() needs read memery barriers as ipc_lock() does.
541 */ 542 */
542struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) 543struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
667 * Take the lock - this will be released by the corresponding 668 * Take the lock - this will be released by the corresponding
668 * call to stop(). 669 * call to stop().
669 */ 670 */
670 down(&iface->ids->sem); 671 mutex_lock(&iface->ids->mutex);
671 672
672 /* pos < 0 is invalid */ 673 /* pos < 0 is invalid */
673 if (*pos < 0) 674 if (*pos < 0)
@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
697 ipc_unlock(ipc); 698 ipc_unlock(ipc);
698 699
699 /* Release the lock we took in start() */ 700 /* Release the lock we took in start() */
700 up(&iface->ids->sem); 701 mutex_unlock(&iface->ids->mutex);
701} 702}
702 703
703static int sysvipc_proc_show(struct seq_file *s, void *it) 704static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index efaff3ee7de7..0181553d31d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -25,7 +25,7 @@ struct ipc_ids {
25 int max_id; 25 int max_id;
26 unsigned short seq; 26 unsigned short seq;
27 unsigned short seq_max; 27 unsigned short seq_max;
28 struct semaphore sem; 28 struct mutex mutex;
29 struct ipc_id_ary nullentry; 29 struct ipc_id_ary nullentry;
30 struct ipc_id_ary* entries; 30 struct ipc_id_ary* entries;
31}; 31};
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
40#define ipc_init_proc_interface(path, header, ids, show) do {} while (0) 40#define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
41#endif 41#endif
42 42
43/* must be called with ids->sem acquired.*/ 43/* must be called with ids->mutex acquired.*/
44int ipc_findkey(struct ipc_ids* ids, key_t key); 44int ipc_findkey(struct ipc_ids* ids, key_t key);
45int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); 45int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
46 46
diff --git a/kernel/Makefile b/kernel/Makefile
index ff1c11dc12cf..58908f9d156a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -12,6 +12,9 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
12 12
13obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o 13obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
14obj-$(CONFIG_FUTEX) += futex.o 14obj-$(CONFIG_FUTEX) += futex.o
15ifeq ($(CONFIG_COMPAT),y)
16obj-$(CONFIG_FUTEX) += futex_compat.o
17endif
15obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o 18obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
16obj-$(CONFIG_SMP) += cpu.o spinlock.o 19obj-$(CONFIG_SMP) += cpu.o spinlock.o
17obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o 20obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
diff --git a/kernel/compat.c b/kernel/compat.c
index 8c9cd88b6785..c1601a84f8d8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -17,10 +17,10 @@
17#include <linux/time.h> 17#include <linux/time.h>
18#include <linux/signal.h> 18#include <linux/signal.h>
19#include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */ 19#include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */
20#include <linux/futex.h> /* for FUTEX_WAIT */
21#include <linux/syscalls.h> 20#include <linux/syscalls.h>
22#include <linux/unistd.h> 21#include <linux/unistd.h>
23#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/timex.h>
24 24
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26 26
@@ -238,28 +238,6 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
238 return ret; 238 return ret;
239} 239}
240 240
241#ifdef CONFIG_FUTEX
242asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
243 struct compat_timespec __user *utime, u32 __user *uaddr2,
244 int val3)
245{
246 struct timespec t;
247 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
248 int val2 = 0;
249
250 if ((op == FUTEX_WAIT) && utime) {
251 if (get_compat_timespec(&t, utime))
252 return -EFAULT;
253 timeout = timespec_to_jiffies(&t) + 1;
254 }
255 if (op >= FUTEX_REQUEUE)
256 val2 = (int) (unsigned long) utime;
257
258 return do_futex((unsigned long)uaddr, op, val, timeout,
259 (unsigned long)uaddr2, val2, val3);
260}
261#endif
262
263asmlinkage long compat_sys_setrlimit(unsigned int resource, 241asmlinkage long compat_sys_setrlimit(unsigned int resource,
264 struct compat_rlimit __user *rlim) 242 struct compat_rlimit __user *rlim)
265{ 243{
@@ -898,3 +876,61 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
898 return -ERESTARTNOHAND; 876 return -ERESTARTNOHAND;
899} 877}
900#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ 878#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
879
880asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
881{
882 struct timex txc;
883 int ret;
884
885 memset(&txc, 0, sizeof(struct timex));
886
887 if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
888 __get_user(txc.modes, &utp->modes) ||
889 __get_user(txc.offset, &utp->offset) ||
890 __get_user(txc.freq, &utp->freq) ||
891 __get_user(txc.maxerror, &utp->maxerror) ||
892 __get_user(txc.esterror, &utp->esterror) ||
893 __get_user(txc.status, &utp->status) ||
894 __get_user(txc.constant, &utp->constant) ||
895 __get_user(txc.precision, &utp->precision) ||
896 __get_user(txc.tolerance, &utp->tolerance) ||
897 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
898 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
899 __get_user(txc.tick, &utp->tick) ||
900 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
901 __get_user(txc.jitter, &utp->jitter) ||
902 __get_user(txc.shift, &utp->shift) ||
903 __get_user(txc.stabil, &utp->stabil) ||
904 __get_user(txc.jitcnt, &utp->jitcnt) ||
905 __get_user(txc.calcnt, &utp->calcnt) ||
906 __get_user(txc.errcnt, &utp->errcnt) ||
907 __get_user(txc.stbcnt, &utp->stbcnt))
908 return -EFAULT;
909
910 ret = do_adjtimex(&txc);
911
912 if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
913 __put_user(txc.modes, &utp->modes) ||
914 __put_user(txc.offset, &utp->offset) ||
915 __put_user(txc.freq, &utp->freq) ||
916 __put_user(txc.maxerror, &utp->maxerror) ||
917 __put_user(txc.esterror, &utp->esterror) ||
918 __put_user(txc.status, &utp->status) ||
919 __put_user(txc.constant, &utp->constant) ||
920 __put_user(txc.precision, &utp->precision) ||
921 __put_user(txc.tolerance, &utp->tolerance) ||
922 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
923 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
924 __put_user(txc.tick, &utp->tick) ||
925 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
926 __put_user(txc.jitter, &utp->jitter) ||
927 __put_user(txc.shift, &utp->shift) ||
928 __put_user(txc.stabil, &utp->stabil) ||
929 __put_user(txc.jitcnt, &utp->jitcnt) ||
930 __put_user(txc.calcnt, &utp->calcnt) ||
931 __put_user(txc.errcnt, &utp->errcnt) ||
932 __put_user(txc.stbcnt, &utp->stbcnt))
933 ret = -EFAULT;
934
935 return ret;
936}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8be22bd80933..fe2b8d0bfe4c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -18,7 +18,7 @@
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19static DECLARE_MUTEX(cpucontrol); 19static DECLARE_MUTEX(cpucontrol);
20 20
21static struct notifier_block *cpu_chain; 21static BLOCKING_NOTIFIER_HEAD(cpu_chain);
22 22
23#ifdef CONFIG_HOTPLUG_CPU 23#ifdef CONFIG_HOTPLUG_CPU
24static struct task_struct *lock_cpu_hotplug_owner; 24static struct task_struct *lock_cpu_hotplug_owner;
@@ -71,21 +71,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
71/* Need to know about CPUs going up/down? */ 71/* Need to know about CPUs going up/down? */
72int register_cpu_notifier(struct notifier_block *nb) 72int register_cpu_notifier(struct notifier_block *nb)
73{ 73{
74 int ret; 74 return blocking_notifier_chain_register(&cpu_chain, nb);
75
76 if ((ret = lock_cpu_hotplug_interruptible()) != 0)
77 return ret;
78 ret = notifier_chain_register(&cpu_chain, nb);
79 unlock_cpu_hotplug();
80 return ret;
81} 75}
82EXPORT_SYMBOL(register_cpu_notifier); 76EXPORT_SYMBOL(register_cpu_notifier);
83 77
84void unregister_cpu_notifier(struct notifier_block *nb) 78void unregister_cpu_notifier(struct notifier_block *nb)
85{ 79{
86 lock_cpu_hotplug(); 80 blocking_notifier_chain_unregister(&cpu_chain, nb);
87 notifier_chain_unregister(&cpu_chain, nb);
88 unlock_cpu_hotplug();
89} 81}
90EXPORT_SYMBOL(unregister_cpu_notifier); 82EXPORT_SYMBOL(unregister_cpu_notifier);
91 83
@@ -141,7 +133,7 @@ int cpu_down(unsigned int cpu)
141 goto out; 133 goto out;
142 } 134 }
143 135
144 err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, 136 err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
145 (void *)(long)cpu); 137 (void *)(long)cpu);
146 if (err == NOTIFY_BAD) { 138 if (err == NOTIFY_BAD) {
147 printk("%s: attempt to take down CPU %u failed\n", 139 printk("%s: attempt to take down CPU %u failed\n",
@@ -159,7 +151,7 @@ int cpu_down(unsigned int cpu)
159 p = __stop_machine_run(take_cpu_down, NULL, cpu); 151 p = __stop_machine_run(take_cpu_down, NULL, cpu);
160 if (IS_ERR(p)) { 152 if (IS_ERR(p)) {
161 /* CPU didn't die: tell everyone. Can't complain. */ 153 /* CPU didn't die: tell everyone. Can't complain. */
162 if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, 154 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
163 (void *)(long)cpu) == NOTIFY_BAD) 155 (void *)(long)cpu) == NOTIFY_BAD)
164 BUG(); 156 BUG();
165 157
@@ -182,8 +174,8 @@ int cpu_down(unsigned int cpu)
182 put_cpu(); 174 put_cpu();
183 175
184 /* CPU is completely dead: tell everyone. Too late to complain. */ 176 /* CPU is completely dead: tell everyone. Too late to complain. */
185 if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu) 177 if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
186 == NOTIFY_BAD) 178 (void *)(long)cpu) == NOTIFY_BAD)
187 BUG(); 179 BUG();
188 180
189 check_for_tasks(cpu); 181 check_for_tasks(cpu);
@@ -211,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
211 goto out; 203 goto out;
212 } 204 }
213 205
214 ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); 206 ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
215 if (ret == NOTIFY_BAD) { 207 if (ret == NOTIFY_BAD) {
216 printk("%s: attempt to bring up CPU %u failed\n", 208 printk("%s: attempt to bring up CPU %u failed\n",
217 __FUNCTION__, cpu); 209 __FUNCTION__, cpu);
@@ -226,11 +218,12 @@ int __devinit cpu_up(unsigned int cpu)
226 BUG_ON(!cpu_online(cpu)); 218 BUG_ON(!cpu_online(cpu));
227 219
228 /* Now call notifier in preparation. */ 220 /* Now call notifier in preparation. */
229 notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); 221 blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
230 222
231out_notify: 223out_notify:
232 if (ret != 0) 224 if (ret != 0)
233 notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); 225 blocking_notifier_call_chain(&cpu_chain,
226 CPU_UP_CANCELED, hcpu);
234out: 227out:
235 unlock_cpu_hotplug(); 228 unlock_cpu_hotplug();
236 return ret; 229 return ret;
diff --git a/kernel/exit.c b/kernel/exit.c
index 8037405e136e..a8c7efc7a681 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,6 +31,8 @@
31#include <linux/signal.h> 31#include <linux/signal.h>
32#include <linux/cn_proc.h> 32#include <linux/cn_proc.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/futex.h>
35#include <linux/compat.h>
34 36
35#include <asm/uaccess.h> 37#include <asm/uaccess.h>
36#include <asm/unistd.h> 38#include <asm/unistd.h>
@@ -852,6 +854,12 @@ fastcall NORET_TYPE void do_exit(long code)
852 exit_itimers(tsk->signal); 854 exit_itimers(tsk->signal);
853 acct_process(code); 855 acct_process(code);
854 } 856 }
857 if (unlikely(tsk->robust_list))
858 exit_robust_list(tsk);
859#ifdef CONFIG_COMPAT
860 if (unlikely(tsk->compat_robust_list))
861 compat_exit_robust_list(tsk);
862#endif
855 exit_mm(tsk); 863 exit_mm(tsk);
856 864
857 exit_sem(tsk); 865 exit_sem(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index a02063903aaa..c49bd193b058 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -769,8 +769,7 @@ int unshare_files(void)
769 struct files_struct *files = current->files; 769 struct files_struct *files = current->files;
770 int rc; 770 int rc;
771 771
772 if(!files) 772 BUG_ON(!files);
773 BUG();
774 773
775 /* This can race but the race causes us to copy when we don't 774 /* This can race but the race causes us to copy when we don't
776 need to and drop the copy */ 775 need to and drop the copy */
@@ -848,7 +847,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
848 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL); 847 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
849 sig->it_real_incr.tv64 = 0; 848 sig->it_real_incr.tv64 = 0;
850 sig->real_timer.function = it_real_fn; 849 sig->real_timer.function = it_real_fn;
851 sig->real_timer.data = tsk; 850 sig->tsk = tsk;
852 851
853 sig->it_virt_expires = cputime_zero; 852 sig->it_virt_expires = cputime_zero;
854 sig->it_virt_incr = cputime_zero; 853 sig->it_virt_incr = cputime_zero;
@@ -1062,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags,
1062 * Clear TID on mm_release()? 1061 * Clear TID on mm_release()?
1063 */ 1062 */
1064 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; 1063 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
1065 1064 p->robust_list = NULL;
1065#ifdef CONFIG_COMPAT
1066 p->compat_robust_list = NULL;
1067#endif
1066 /* 1068 /*
1067 * sigaltstack should be cleared when sharing the same VM 1069 * sigaltstack should be cleared when sharing the same VM
1068 */ 1070 */
diff --git a/kernel/futex.c b/kernel/futex.c
index 5efa2f978032..9c9b2b6b22dd 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -8,6 +8,10 @@
8 * Removed page pinning, fix privately mapped COW pages and other cleanups 8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier 9 * (C) Copyright 2003, 2004 Jamie Lokier
10 * 10 *
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
11 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly 15 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
12 * enough at me, Linus for the original (flawed) idea, Matthew 16 * enough at me, Linus for the original (flawed) idea, Matthew
13 * Kirkwood for proof-of-concept implementation. 17 * Kirkwood for proof-of-concept implementation.
@@ -829,6 +833,172 @@ error:
829 goto out; 833 goto out;
830} 834}
831 835
836/*
837 * Support for robust futexes: the kernel cleans up held futexes at
838 * thread exit time.
839 *
840 * Implementation: user-space maintains a per-thread list of locks it
841 * is holding. Upon do_exit(), the kernel carefully walks this list,
842 * and marks all locks that are owned by this thread with the
843 * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is
844 * always manipulated with the lock held, so the list is private and
845 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
846 * field, to allow the kernel to clean up if the thread dies after
847 * acquiring the lock, but just before it could have added itself to
848 * the list. There can only be one such pending lock.
849 */
850
851/**
852 * sys_set_robust_list - set the robust-futex list head of a task
853 * @head: pointer to the list-head
854 * @len: length of the list-head, as userspace expects
855 */
856asmlinkage long
857sys_set_robust_list(struct robust_list_head __user *head,
858 size_t len)
859{
860 /*
861 * The kernel knows only one size for now:
862 */
863 if (unlikely(len != sizeof(*head)))
864 return -EINVAL;
865
866 current->robust_list = head;
867
868 return 0;
869}
870
871/**
872 * sys_get_robust_list - get the robust-futex list head of a task
873 * @pid: pid of the process [zero for current task]
874 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
875 * @len_ptr: pointer to a length field, the kernel fills in the header size
876 */
877asmlinkage long
878sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
879 size_t __user *len_ptr)
880{
881 struct robust_list_head *head;
882 unsigned long ret;
883
884 if (!pid)
885 head = current->robust_list;
886 else {
887 struct task_struct *p;
888
889 ret = -ESRCH;
890 read_lock(&tasklist_lock);
891 p = find_task_by_pid(pid);
892 if (!p)
893 goto err_unlock;
894 ret = -EPERM;
895 if ((current->euid != p->euid) && (current->euid != p->uid) &&
896 !capable(CAP_SYS_PTRACE))
897 goto err_unlock;
898 head = p->robust_list;
899 read_unlock(&tasklist_lock);
900 }
901
902 if (put_user(sizeof(*head), len_ptr))
903 return -EFAULT;
904 return put_user(head, head_ptr);
905
906err_unlock:
907 read_unlock(&tasklist_lock);
908
909 return ret;
910}
911
912/*
913 * Process a futex-list entry, check whether it's owned by the
914 * dying task, and do notification if so:
915 */
916int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
917{
918 u32 uval;
919
920retry:
921 if (get_user(uval, uaddr))
922 return -1;
923
924 if ((uval & FUTEX_TID_MASK) == curr->pid) {
925 /*
926 * Ok, this dying thread is truly holding a futex
927 * of interest. Set the OWNER_DIED bit atomically
928 * via cmpxchg, and if the value had FUTEX_WAITERS
929 * set, wake up a waiter (if any). (We have to do a
930 * futex_wake() even if OWNER_DIED is already set -
931 * to handle the rare but possible case of recursive
932 * thread-death.) The rest of the cleanup is done in
933 * userspace.
934 */
935 if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
936 uval | FUTEX_OWNER_DIED) != uval)
937 goto retry;
938
939 if (uval & FUTEX_WAITERS)
940 futex_wake((unsigned long)uaddr, 1);
941 }
942 return 0;
943}
944
945/*
946 * Walk curr->robust_list (very carefully, it's a userspace list!)
947 * and mark any locks found there dead, and notify any waiters.
948 *
949 * We silently return on any sign of list-walking problem.
950 */
951void exit_robust_list(struct task_struct *curr)
952{
953 struct robust_list_head __user *head = curr->robust_list;
954 struct robust_list __user *entry, *pending;
955 unsigned int limit = ROBUST_LIST_LIMIT;
956 unsigned long futex_offset;
957
958 /*
959 * Fetch the list head (which was registered earlier, via
960 * sys_set_robust_list()):
961 */
962 if (get_user(entry, &head->list.next))
963 return;
964 /*
965 * Fetch the relative futex offset:
966 */
967 if (get_user(futex_offset, &head->futex_offset))
968 return;
969 /*
970 * Fetch any possibly pending lock-add first, and handle it
971 * if it exists:
972 */
973 if (get_user(pending, &head->list_op_pending))
974 return;
975 if (pending)
976 handle_futex_death((void *)pending + futex_offset, curr);
977
978 while (entry != &head->list) {
979 /*
980 * A pending lock might already be on the list, so
981 * dont process it twice:
982 */
983 if (entry != pending)
984 if (handle_futex_death((void *)entry + futex_offset,
985 curr))
986 return;
987 /*
988 * Fetch the next entry in the list:
989 */
990 if (get_user(entry, &entry->next))
991 return;
992 /*
993 * Avoid excessively long or circular lists:
994 */
995 if (!--limit)
996 break;
997
998 cond_resched();
999 }
1000}
1001
832long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, 1002long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
833 unsigned long uaddr2, int val2, int val3) 1003 unsigned long uaddr2, int val2, int val3)
834{ 1004{
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
new file mode 100644
index 000000000000..54274fc85321
--- /dev/null
+++ b/kernel/futex_compat.c
@@ -0,0 +1,142 @@
1/*
2 * linux/kernel/futex_compat.c
3 *
4 * Futex compatibililty routines.
5 *
6 * Copyright 2006, Red Hat, Inc., Ingo Molnar
7 */
8
9#include <linux/linkage.h>
10#include <linux/compat.h>
11#include <linux/futex.h>
12
13#include <asm/uaccess.h>
14
15/*
16 * Walk curr->robust_list (very carefully, it's a userspace list!)
17 * and mark any locks found there dead, and notify any waiters.
18 *
19 * We silently return on any sign of list-walking problem.
20 */
21void compat_exit_robust_list(struct task_struct *curr)
22{
23 struct compat_robust_list_head __user *head = curr->compat_robust_list;
24 struct robust_list __user *entry, *pending;
25 compat_uptr_t uentry, upending;
26 unsigned int limit = ROBUST_LIST_LIMIT;
27 compat_long_t futex_offset;
28
29 /*
30 * Fetch the list head (which was registered earlier, via
31 * sys_set_robust_list()):
32 */
33 if (get_user(uentry, &head->list.next))
34 return;
35 entry = compat_ptr(uentry);
36 /*
37 * Fetch the relative futex offset:
38 */
39 if (get_user(futex_offset, &head->futex_offset))
40 return;
41 /*
42 * Fetch any possibly pending lock-add first, and handle it
43 * if it exists:
44 */
45 if (get_user(upending, &head->list_op_pending))
46 return;
47 pending = compat_ptr(upending);
48 if (upending)
49 handle_futex_death((void *)pending + futex_offset, curr);
50
51 while (compat_ptr(uentry) != &head->list) {
52 /*
53 * A pending lock might already be on the list, so
54 * dont process it twice:
55 */
56 if (entry != pending)
57 if (handle_futex_death((void *)entry + futex_offset,
58 curr))
59 return;
60
61 /*
62 * Fetch the next entry in the list:
63 */
64 if (get_user(uentry, (compat_uptr_t *)&entry->next))
65 return;
66 entry = compat_ptr(uentry);
67 /*
68 * Avoid excessively long or circular lists:
69 */
70 if (!--limit)
71 break;
72
73 cond_resched();
74 }
75}
76
77asmlinkage long
78compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
79 compat_size_t len)
80{
81 if (unlikely(len != sizeof(*head)))
82 return -EINVAL;
83
84 current->compat_robust_list = head;
85
86 return 0;
87}
88
89asmlinkage long
90compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
91 compat_size_t __user *len_ptr)
92{
93 struct compat_robust_list_head *head;
94 unsigned long ret;
95
96 if (!pid)
97 head = current->compat_robust_list;
98 else {
99 struct task_struct *p;
100
101 ret = -ESRCH;
102 read_lock(&tasklist_lock);
103 p = find_task_by_pid(pid);
104 if (!p)
105 goto err_unlock;
106 ret = -EPERM;
107 if ((current->euid != p->euid) && (current->euid != p->uid) &&
108 !capable(CAP_SYS_PTRACE))
109 goto err_unlock;
110 head = p->compat_robust_list;
111 read_unlock(&tasklist_lock);
112 }
113
114 if (put_user(sizeof(*head), len_ptr))
115 return -EFAULT;
116 return put_user(ptr_to_compat(head), head_ptr);
117
118err_unlock:
119 read_unlock(&tasklist_lock);
120
121 return ret;
122}
123
124asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
125 struct compat_timespec __user *utime, u32 __user *uaddr2,
126 u32 val3)
127{
128 struct timespec t;
129 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
130 int val2 = 0;
131
132 if ((op == FUTEX_WAIT) && utime) {
133 if (get_compat_timespec(&t, utime))
134 return -EFAULT;
135 timeout = timespec_to_jiffies(&t) + 1;
136 }
137 if (op >= FUTEX_REQUEUE)
138 val2 = (int) (unsigned long) utime;
139
140 return do_futex((unsigned long)uaddr, op, val, timeout,
141 (unsigned long)uaddr2, val2, val3);
142}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 14bc9cfa6399..0237a556eb1f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -123,6 +123,26 @@ void ktime_get_ts(struct timespec *ts)
123EXPORT_SYMBOL_GPL(ktime_get_ts); 123EXPORT_SYMBOL_GPL(ktime_get_ts);
124 124
125/* 125/*
126 * Get the coarse grained time at the softirq based on xtime and
127 * wall_to_monotonic.
128 */
129static void hrtimer_get_softirq_time(struct hrtimer_base *base)
130{
131 ktime_t xtim, tomono;
132 unsigned long seq;
133
134 do {
135 seq = read_seqbegin(&xtime_lock);
136 xtim = timespec_to_ktime(xtime);
137 tomono = timespec_to_ktime(wall_to_monotonic);
138
139 } while (read_seqretry(&xtime_lock, seq));
140
141 base[CLOCK_REALTIME].softirq_time = xtim;
142 base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono);
143}
144
145/*
126 * Functions and macros which are different for UP/SMP systems are kept in a 146 * Functions and macros which are different for UP/SMP systems are kept in a
127 * single place 147 * single place
128 */ 148 */
@@ -246,7 +266,7 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
246/* 266/*
247 * Divide a ktime value by a nanosecond value 267 * Divide a ktime value by a nanosecond value
248 */ 268 */
249static unsigned long ktime_divns(const ktime_t kt, nsec_t div) 269static unsigned long ktime_divns(const ktime_t kt, s64 div)
250{ 270{
251 u64 dclc, inc, dns; 271 u64 dclc, inc, dns;
252 int sft = 0; 272 int sft = 0;
@@ -281,18 +301,17 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
281 * hrtimer_forward - forward the timer expiry 301 * hrtimer_forward - forward the timer expiry
282 * 302 *
283 * @timer: hrtimer to forward 303 * @timer: hrtimer to forward
304 * @now: forward past this time
284 * @interval: the interval to forward 305 * @interval: the interval to forward
285 * 306 *
286 * Forward the timer expiry so it will expire in the future. 307 * Forward the timer expiry so it will expire in the future.
287 * Returns the number of overruns. 308 * Returns the number of overruns.
288 */ 309 */
289unsigned long 310unsigned long
290hrtimer_forward(struct hrtimer *timer, ktime_t interval) 311hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
291{ 312{
292 unsigned long orun = 1; 313 unsigned long orun = 1;
293 ktime_t delta, now; 314 ktime_t delta;
294
295 now = timer->base->get_time();
296 315
297 delta = ktime_sub(now, timer->expires); 316 delta = ktime_sub(now, timer->expires);
298 317
@@ -303,7 +322,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t interval)
303 interval.tv64 = timer->base->resolution.tv64; 322 interval.tv64 = timer->base->resolution.tv64;
304 323
305 if (unlikely(delta.tv64 >= interval.tv64)) { 324 if (unlikely(delta.tv64 >= interval.tv64)) {
306 nsec_t incr = ktime_to_ns(interval); 325 s64 incr = ktime_to_ns(interval);
307 326
308 orun = ktime_divns(delta, incr); 327 orun = ktime_divns(delta, incr);
309 timer->expires = ktime_add_ns(timer->expires, incr * orun); 328 timer->expires = ktime_add_ns(timer->expires, incr * orun);
@@ -355,8 +374,6 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
355 rb_link_node(&timer->node, parent, link); 374 rb_link_node(&timer->node, parent, link);
356 rb_insert_color(&timer->node, &base->active); 375 rb_insert_color(&timer->node, &base->active);
357 376
358 timer->state = HRTIMER_PENDING;
359
360 if (!base->first || timer->expires.tv64 < 377 if (!base->first || timer->expires.tv64 <
361 rb_entry(base->first, struct hrtimer, node)->expires.tv64) 378 rb_entry(base->first, struct hrtimer, node)->expires.tv64)
362 base->first = &timer->node; 379 base->first = &timer->node;
@@ -376,6 +393,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
376 if (base->first == &timer->node) 393 if (base->first == &timer->node)
377 base->first = rb_next(&timer->node); 394 base->first = rb_next(&timer->node);
378 rb_erase(&timer->node, &base->active); 395 rb_erase(&timer->node, &base->active);
396 timer->node.rb_parent = HRTIMER_INACTIVE;
379} 397}
380 398
381/* 399/*
@@ -386,7 +404,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
386{ 404{
387 if (hrtimer_active(timer)) { 405 if (hrtimer_active(timer)) {
388 __remove_hrtimer(timer, base); 406 __remove_hrtimer(timer, base);
389 timer->state = HRTIMER_INACTIVE;
390 return 1; 407 return 1;
391 } 408 }
392 return 0; 409 return 0;
@@ -560,6 +577,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
560 clock_id = CLOCK_MONOTONIC; 577 clock_id = CLOCK_MONOTONIC;
561 578
562 timer->base = &bases[clock_id]; 579 timer->base = &bases[clock_id];
580 timer->node.rb_parent = HRTIMER_INACTIVE;
563} 581}
564 582
565/** 583/**
@@ -586,48 +604,35 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
586 */ 604 */
587static inline void run_hrtimer_queue(struct hrtimer_base *base) 605static inline void run_hrtimer_queue(struct hrtimer_base *base)
588{ 606{
589 ktime_t now = base->get_time();
590 struct rb_node *node; 607 struct rb_node *node;
591 608
609 if (base->get_softirq_time)
610 base->softirq_time = base->get_softirq_time();
611
592 spin_lock_irq(&base->lock); 612 spin_lock_irq(&base->lock);
593 613
594 while ((node = base->first)) { 614 while ((node = base->first)) {
595 struct hrtimer *timer; 615 struct hrtimer *timer;
596 int (*fn)(void *); 616 int (*fn)(struct hrtimer *);
597 int restart; 617 int restart;
598 void *data;
599 618
600 timer = rb_entry(node, struct hrtimer, node); 619 timer = rb_entry(node, struct hrtimer, node);
601 if (now.tv64 <= timer->expires.tv64) 620 if (base->softirq_time.tv64 <= timer->expires.tv64)
602 break; 621 break;
603 622
604 fn = timer->function; 623 fn = timer->function;
605 data = timer->data;
606 set_curr_timer(base, timer); 624 set_curr_timer(base, timer);
607 timer->state = HRTIMER_RUNNING;
608 __remove_hrtimer(timer, base); 625 __remove_hrtimer(timer, base);
609 spin_unlock_irq(&base->lock); 626 spin_unlock_irq(&base->lock);
610 627
611 /* 628 restart = fn(timer);
612 * fn == NULL is special case for the simplest timer
613 * variant - wake up process and do not restart:
614 */
615 if (!fn) {
616 wake_up_process(data);
617 restart = HRTIMER_NORESTART;
618 } else
619 restart = fn(data);
620 629
621 spin_lock_irq(&base->lock); 630 spin_lock_irq(&base->lock);
622 631
623 /* Another CPU has added back the timer */ 632 if (restart != HRTIMER_NORESTART) {
624 if (timer->state != HRTIMER_RUNNING) 633 BUG_ON(hrtimer_active(timer));
625 continue;
626
627 if (restart == HRTIMER_RESTART)
628 enqueue_hrtimer(timer, base); 634 enqueue_hrtimer(timer, base);
629 else 635 }
630 timer->state = HRTIMER_EXPIRED;
631 } 636 }
632 set_curr_timer(base, NULL); 637 set_curr_timer(base, NULL);
633 spin_unlock_irq(&base->lock); 638 spin_unlock_irq(&base->lock);
@@ -641,6 +646,8 @@ void hrtimer_run_queues(void)
641 struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); 646 struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
642 int i; 647 int i;
643 648
649 hrtimer_get_softirq_time(base);
650
644 for (i = 0; i < MAX_HRTIMER_BASES; i++) 651 for (i = 0; i < MAX_HRTIMER_BASES; i++)
645 run_hrtimer_queue(&base[i]); 652 run_hrtimer_queue(&base[i]);
646} 653}
@@ -649,79 +656,70 @@ void hrtimer_run_queues(void)
649 * Sleep related functions: 656 * Sleep related functions:
650 */ 657 */
651 658
652/** 659struct sleep_hrtimer {
653 * schedule_hrtimer - sleep until timeout 660 struct hrtimer timer;
654 * 661 struct task_struct *task;
655 * @timer: hrtimer variable initialized with the correct clock base 662 int expired;
656 * @mode: timeout value is abs/rel 663};
657 *
658 * Make the current task sleep until @timeout is
659 * elapsed.
660 *
661 * You can set the task state as follows -
662 *
663 * %TASK_UNINTERRUPTIBLE - at least @timeout is guaranteed to
664 * pass before the routine returns. The routine will return 0
665 *
666 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
667 * delivered to the current task. In this case the remaining time
668 * will be returned
669 *
670 * The current task state is guaranteed to be TASK_RUNNING when this
671 * routine returns.
672 */
673static ktime_t __sched
674schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
675{
676 /* fn stays NULL, meaning single-shot wakeup: */
677 timer->data = current;
678 664
679 hrtimer_start(timer, timer->expires, mode); 665static int nanosleep_wakeup(struct hrtimer *timer)
666{
667 struct sleep_hrtimer *t =
668 container_of(timer, struct sleep_hrtimer, timer);
680 669
681 schedule(); 670 t->expired = 1;
682 hrtimer_cancel(timer); 671 wake_up_process(t->task);
683 672
684 /* Return the remaining time: */ 673 return HRTIMER_NORESTART;
685 if (timer->state != HRTIMER_EXPIRED)
686 return ktime_sub(timer->expires, timer->base->get_time());
687 else
688 return (ktime_t) {.tv64 = 0 };
689} 674}
690 675
691static inline ktime_t __sched 676static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode)
692schedule_hrtimer_interruptible(struct hrtimer *timer,
693 const enum hrtimer_mode mode)
694{ 677{
695 set_current_state(TASK_INTERRUPTIBLE); 678 t->timer.function = nanosleep_wakeup;
679 t->task = current;
680 t->expired = 0;
681
682 do {
683 set_current_state(TASK_INTERRUPTIBLE);
684 hrtimer_start(&t->timer, t->timer.expires, mode);
685
686 schedule();
687
688 if (unlikely(!t->expired)) {
689 hrtimer_cancel(&t->timer);
690 mode = HRTIMER_ABS;
691 }
692 } while (!t->expired && !signal_pending(current));
696 693
697 return schedule_hrtimer(timer, mode); 694 return t->expired;
698} 695}
699 696
700static long __sched nanosleep_restart(struct restart_block *restart) 697static long __sched nanosleep_restart(struct restart_block *restart)
701{ 698{
699 struct sleep_hrtimer t;
702 struct timespec __user *rmtp; 700 struct timespec __user *rmtp;
703 struct timespec tu; 701 struct timespec tu;
704 void *rfn_save = restart->fn; 702 ktime_t time;
705 struct hrtimer timer;
706 ktime_t rem;
707 703
708 restart->fn = do_no_restart_syscall; 704 restart->fn = do_no_restart_syscall;
709 705
710 hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS); 706 hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS);
711 707 t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
712 timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
713
714 rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);
715 708
716 if (rem.tv64 <= 0) 709 if (do_nanosleep(&t, HRTIMER_ABS))
717 return 0; 710 return 0;
718 711
719 rmtp = (struct timespec __user *) restart->arg2; 712 rmtp = (struct timespec __user *) restart->arg2;
720 tu = ktime_to_timespec(rem); 713 if (rmtp) {
721 if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) 714 time = ktime_sub(t.timer.expires, t.timer.base->get_time());
722 return -EFAULT; 715 if (time.tv64 <= 0)
716 return 0;
717 tu = ktime_to_timespec(time);
718 if (copy_to_user(rmtp, &tu, sizeof(tu)))
719 return -EFAULT;
720 }
723 721
724 restart->fn = rfn_save; 722 restart->fn = nanosleep_restart;
725 723
726 /* The other values in restart are already filled in */ 724 /* The other values in restart are already filled in */
727 return -ERESTART_RESTARTBLOCK; 725 return -ERESTART_RESTARTBLOCK;
@@ -731,33 +729,34 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
731 const enum hrtimer_mode mode, const clockid_t clockid) 729 const enum hrtimer_mode mode, const clockid_t clockid)
732{ 730{
733 struct restart_block *restart; 731 struct restart_block *restart;
734 struct hrtimer timer; 732 struct sleep_hrtimer t;
735 struct timespec tu; 733 struct timespec tu;
736 ktime_t rem; 734 ktime_t rem;
737 735
738 hrtimer_init(&timer, clockid, mode); 736 hrtimer_init(&t.timer, clockid, mode);
739 737 t.timer.expires = timespec_to_ktime(*rqtp);
740 timer.expires = timespec_to_ktime(*rqtp); 738 if (do_nanosleep(&t, mode))
741
742 rem = schedule_hrtimer_interruptible(&timer, mode);
743 if (rem.tv64 <= 0)
744 return 0; 739 return 0;
745 740
746 /* Absolute timers do not update the rmtp value and restart: */ 741 /* Absolute timers do not update the rmtp value and restart: */
747 if (mode == HRTIMER_ABS) 742 if (mode == HRTIMER_ABS)
748 return -ERESTARTNOHAND; 743 return -ERESTARTNOHAND;
749 744
750 tu = ktime_to_timespec(rem); 745 if (rmtp) {
751 746 rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
752 if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) 747 if (rem.tv64 <= 0)
753 return -EFAULT; 748 return 0;
749 tu = ktime_to_timespec(rem);
750 if (copy_to_user(rmtp, &tu, sizeof(tu)))
751 return -EFAULT;
752 }
754 753
755 restart = &current_thread_info()->restart_block; 754 restart = &current_thread_info()->restart_block;
756 restart->fn = nanosleep_restart; 755 restart->fn = nanosleep_restart;
757 restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF; 756 restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF;
758 restart->arg1 = timer.expires.tv64 >> 32; 757 restart->arg1 = t.timer.expires.tv64 >> 32;
759 restart->arg2 = (unsigned long) rmtp; 758 restart->arg2 = (unsigned long) rmtp;
760 restart->arg3 = (unsigned long) timer.base->index; 759 restart->arg3 = (unsigned long) t.timer.base->index;
761 760
762 return -ERESTART_RESTARTBLOCK; 761 return -ERESTART_RESTARTBLOCK;
763} 762}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6edfcef291e8..ac766ad573e8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -271,6 +271,7 @@ void free_irq(unsigned int irq, void *dev_id)
271 struct irqaction **p; 271 struct irqaction **p;
272 unsigned long flags; 272 unsigned long flags;
273 273
274 WARN_ON(in_interrupt());
274 if (irq >= NR_IRQS) 275 if (irq >= NR_IRQS)
275 return; 276 return;
276 277
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 680e6b70c872..204ed7939e75 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -128,16 +128,16 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
128/* 128/*
129 * The timer is automagically restarted, when interval != 0 129 * The timer is automagically restarted, when interval != 0
130 */ 130 */
131int it_real_fn(void *data) 131int it_real_fn(struct hrtimer *timer)
132{ 132{
133 struct task_struct *tsk = (struct task_struct *) data; 133 struct signal_struct *sig =
134 container_of(timer, struct signal_struct, real_timer);
134 135
135 send_group_sig_info(SIGALRM, SEND_SIG_PRIV, tsk); 136 send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
136
137 if (tsk->signal->it_real_incr.tv64 != 0) {
138 hrtimer_forward(&tsk->signal->real_timer,
139 tsk->signal->it_real_incr);
140 137
138 if (sig->it_real_incr.tv64 != 0) {
139 hrtimer_forward(timer, timer->base->softirq_time,
140 sig->it_real_incr);
141 return HRTIMER_RESTART; 141 return HRTIMER_RESTART;
142 } 142 }
143 return HRTIMER_NORESTART; 143 return HRTIMER_NORESTART;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fb9f753ef60..1156eb0977d0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -323,10 +323,10 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
323} 323}
324 324
325/* 325/*
326 * This function is called from exit_thread or flush_thread when task tk's 326 * This function is called from finish_task_switch when task tk becomes dead,
327 * stack is being recycled so that we can recycle any function-return probe 327 * so that we can recycle any function-return probe instances associated
328 * instances associated with this task. These left over instances represent 328 * with this task. These left over instances represent probed functions
329 * probed functions that have been called but will never return. 329 * that have been called but will never return.
330 */ 330 */
331void __kprobes kprobe_flush_task(struct task_struct *tk) 331void __kprobes kprobe_flush_task(struct task_struct *tk)
332{ 332{
@@ -336,7 +336,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
336 unsigned long flags = 0; 336 unsigned long flags = 0;
337 337
338 spin_lock_irqsave(&kretprobe_lock, flags); 338 spin_lock_irqsave(&kretprobe_lock, flags);
339 head = kretprobe_inst_table_head(current); 339 head = kretprobe_inst_table_head(tk);
340 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 340 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
341 if (ri->task == tk) 341 if (ri->task == tk)
342 recycle_rp_inst(ri); 342 recycle_rp_inst(ri);
diff --git a/kernel/module.c b/kernel/module.c
index ddfe45ac2fd1..bd088a7c1499 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -64,26 +64,17 @@ static DEFINE_SPINLOCK(modlist_lock);
64static DEFINE_MUTEX(module_mutex); 64static DEFINE_MUTEX(module_mutex);
65static LIST_HEAD(modules); 65static LIST_HEAD(modules);
66 66
67static DEFINE_MUTEX(notify_mutex); 67static BLOCKING_NOTIFIER_HEAD(module_notify_list);
68static struct notifier_block * module_notify_list;
69 68
70int register_module_notifier(struct notifier_block * nb) 69int register_module_notifier(struct notifier_block * nb)
71{ 70{
72 int err; 71 return blocking_notifier_chain_register(&module_notify_list, nb);
73 mutex_lock(&notify_mutex);
74 err = notifier_chain_register(&module_notify_list, nb);
75 mutex_unlock(&notify_mutex);
76 return err;
77} 72}
78EXPORT_SYMBOL(register_module_notifier); 73EXPORT_SYMBOL(register_module_notifier);
79 74
80int unregister_module_notifier(struct notifier_block * nb) 75int unregister_module_notifier(struct notifier_block * nb)
81{ 76{
82 int err; 77 return blocking_notifier_chain_unregister(&module_notify_list, nb);
83 mutex_lock(&notify_mutex);
84 err = notifier_chain_unregister(&module_notify_list, nb);
85 mutex_unlock(&notify_mutex);
86 return err;
87} 78}
88EXPORT_SYMBOL(unregister_module_notifier); 79EXPORT_SYMBOL(unregister_module_notifier);
89 80
@@ -136,7 +127,7 @@ extern const unsigned long __start___kcrctab_gpl_future[];
136#ifndef CONFIG_MODVERSIONS 127#ifndef CONFIG_MODVERSIONS
137#define symversion(base, idx) NULL 128#define symversion(base, idx) NULL
138#else 129#else
139#define symversion(base, idx) ((base) ? ((base) + (idx)) : NULL) 130#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
140#endif 131#endif
141 132
142/* lookup symbol in given range of kernel_symbols */ 133/* lookup symbol in given range of kernel_symbols */
@@ -1816,9 +1807,8 @@ sys_init_module(void __user *umod,
1816 /* Drop lock so they can recurse */ 1807 /* Drop lock so they can recurse */
1817 mutex_unlock(&module_mutex); 1808 mutex_unlock(&module_mutex);
1818 1809
1819 mutex_lock(&notify_mutex); 1810 blocking_notifier_call_chain(&module_notify_list,
1820 notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); 1811 MODULE_STATE_COMING, mod);
1821 mutex_unlock(&notify_mutex);
1822 1812
1823 /* Start the module */ 1813 /* Start the module */
1824 if (mod->init != NULL) 1814 if (mod->init != NULL)
diff --git a/kernel/panic.c b/kernel/panic.c
index acd95adddb93..f895c7c01d5b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(pause_on_oops_lock);
29int panic_timeout; 29int panic_timeout;
30EXPORT_SYMBOL(panic_timeout); 30EXPORT_SYMBOL(panic_timeout);
31 31
32struct notifier_block *panic_notifier_list; 32ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
33 33
34EXPORT_SYMBOL(panic_notifier_list); 34EXPORT_SYMBOL(panic_notifier_list);
35 35
@@ -97,7 +97,7 @@ NORET_TYPE void panic(const char * fmt, ...)
97 smp_send_stop(); 97 smp_send_stop();
98#endif 98#endif
99 99
100 notifier_call_chain(&panic_notifier_list, 0, buf); 100 atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
101 101
102 if (!panic_blink) 102 if (!panic_blink)
103 panic_blink = no_blink; 103 panic_blink = no_blink;
diff --git a/kernel/params.c b/kernel/params.c
index 9de637a5c8bc..af43ecdc8d9b 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -31,7 +31,7 @@
31#define DEBUGP(fmt, a...) 31#define DEBUGP(fmt, a...)
32#endif 32#endif
33 33
34static inline int dash2underscore(char c) 34static inline char dash2underscore(char c)
35{ 35{
36 if (c == '-') 36 if (c == '-')
37 return '_'; 37 return '_';
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9944379360b5..ac6dc8744429 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -145,7 +145,7 @@ static int common_timer_set(struct k_itimer *, int,
145 struct itimerspec *, struct itimerspec *); 145 struct itimerspec *, struct itimerspec *);
146static int common_timer_del(struct k_itimer *timer); 146static int common_timer_del(struct k_itimer *timer);
147 147
148static int posix_timer_fn(void *data); 148static int posix_timer_fn(struct hrtimer *data);
149 149
150static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags); 150static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
151 151
@@ -251,15 +251,18 @@ __initcall(init_posix_timers);
251 251
252static void schedule_next_timer(struct k_itimer *timr) 252static void schedule_next_timer(struct k_itimer *timr)
253{ 253{
254 struct hrtimer *timer = &timr->it.real.timer;
255
254 if (timr->it.real.interval.tv64 == 0) 256 if (timr->it.real.interval.tv64 == 0)
255 return; 257 return;
256 258
257 timr->it_overrun += hrtimer_forward(&timr->it.real.timer, 259 timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
258 timr->it.real.interval); 260 timr->it.real.interval);
261
259 timr->it_overrun_last = timr->it_overrun; 262 timr->it_overrun_last = timr->it_overrun;
260 timr->it_overrun = -1; 263 timr->it_overrun = -1;
261 ++timr->it_requeue_pending; 264 ++timr->it_requeue_pending;
262 hrtimer_restart(&timr->it.real.timer); 265 hrtimer_restart(timer);
263} 266}
264 267
265/* 268/*
@@ -331,13 +334,14 @@ EXPORT_SYMBOL_GPL(posix_timer_event);
331 334
332 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. 335 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
333 */ 336 */
334static int posix_timer_fn(void *data) 337static int posix_timer_fn(struct hrtimer *timer)
335{ 338{
336 struct k_itimer *timr = data; 339 struct k_itimer *timr;
337 unsigned long flags; 340 unsigned long flags;
338 int si_private = 0; 341 int si_private = 0;
339 int ret = HRTIMER_NORESTART; 342 int ret = HRTIMER_NORESTART;
340 343
344 timr = container_of(timer, struct k_itimer, it.real.timer);
341 spin_lock_irqsave(&timr->it_lock, flags); 345 spin_lock_irqsave(&timr->it_lock, flags);
342 346
343 if (timr->it.real.interval.tv64 != 0) 347 if (timr->it.real.interval.tv64 != 0)
@@ -351,7 +355,8 @@ static int posix_timer_fn(void *data)
351 */ 355 */
352 if (timr->it.real.interval.tv64 != 0) { 356 if (timr->it.real.interval.tv64 != 0) {
353 timr->it_overrun += 357 timr->it_overrun +=
354 hrtimer_forward(&timr->it.real.timer, 358 hrtimer_forward(timer,
359 timer->base->softirq_time,
355 timr->it.real.interval); 360 timr->it.real.interval);
356 ret = HRTIMER_RESTART; 361 ret = HRTIMER_RESTART;
357 ++timr->it_requeue_pending; 362 ++timr->it_requeue_pending;
@@ -603,38 +608,41 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
603static void 608static void
604common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) 609common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
605{ 610{
606 ktime_t remaining; 611 ktime_t now, remaining, iv;
607 struct hrtimer *timer = &timr->it.real.timer; 612 struct hrtimer *timer = &timr->it.real.timer;
608 613
609 memset(cur_setting, 0, sizeof(struct itimerspec)); 614 memset(cur_setting, 0, sizeof(struct itimerspec));
610 remaining = hrtimer_get_remaining(timer);
611 615
612 /* Time left ? or timer pending */ 616 iv = timr->it.real.interval;
613 if (remaining.tv64 > 0 || hrtimer_active(timer)) 617
614 goto calci;
615 /* interval timer ? */ 618 /* interval timer ? */
616 if (timr->it.real.interval.tv64 == 0) 619 if (iv.tv64)
620 cur_setting->it_interval = ktime_to_timespec(iv);
621 else if (!hrtimer_active(timer) &&
622 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
617 return; 623 return;
624
625 now = timer->base->get_time();
626
618 /* 627 /*
619 * When a requeue is pending or this is a SIGEV_NONE timer 628 * When a requeue is pending or this is a SIGEV_NONE
620 * move the expiry time forward by intervals, so expiry is > 629 * timer move the expiry time forward by intervals, so
621 * now. 630 * expiry is > now.
622 */ 631 */
623 if (timr->it_requeue_pending & REQUEUE_PENDING || 632 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
624 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) { 633 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
625 timr->it_overrun += 634 timr->it_overrun += hrtimer_forward(timer, now, iv);
626 hrtimer_forward(timer, timr->it.real.interval); 635
627 remaining = hrtimer_get_remaining(timer); 636 remaining = ktime_sub(timer->expires, now);
628 }
629 calci:
630 /* interval timer ? */
631 if (timr->it.real.interval.tv64 != 0)
632 cur_setting->it_interval =
633 ktime_to_timespec(timr->it.real.interval);
634 /* Return 0 only, when the timer is expired and not pending */ 637 /* Return 0 only, when the timer is expired and not pending */
635 if (remaining.tv64 <= 0) 638 if (remaining.tv64 <= 0) {
636 cur_setting->it_value.tv_nsec = 1; 639 /*
637 else 640 * A single shot SIGEV_NONE timer must return 0, when
641 * it is expired !
642 */
643 if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
644 cur_setting->it_value.tv_nsec = 1;
645 } else
638 cur_setting->it_value = ktime_to_timespec(remaining); 646 cur_setting->it_value = ktime_to_timespec(remaining);
639} 647}
640 648
@@ -717,7 +725,6 @@ common_timer_set(struct k_itimer *timr, int flags,
717 725
718 mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL; 726 mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
719 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); 727 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
720 timr->it.real.timer.data = timr;
721 timr->it.real.timer.function = posix_timer_fn; 728 timr->it.real.timer.function = posix_timer_fn;
722 729
723 timer->expires = timespec_to_ktime(new_setting->it_value); 730 timer->expires = timespec_to_ktime(new_setting->it_value);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 9177f3f73a6c..044b8e0c1025 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -454,10 +454,11 @@ static int load_image(struct swap_map_handle *handle,
454 nr_pages++; 454 nr_pages++;
455 } 455 }
456 } while (ret > 0); 456 } while (ret > 0);
457 if (!error) 457 if (!error) {
458 printk("\b\b\b\bdone\n"); 458 printk("\b\b\b\bdone\n");
459 if (!snapshot_image_loaded(snapshot)) 459 if (!snapshot_image_loaded(snapshot))
460 error = -ENODATA; 460 error = -ENODATA;
461 }
461 return error; 462 return error;
462} 463}
463 464
diff --git a/kernel/profile.c b/kernel/profile.c
index ad81f799a9b4..5a730fdb1a2c 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -87,72 +87,52 @@ void __init profile_init(void)
87 87
88#ifdef CONFIG_PROFILING 88#ifdef CONFIG_PROFILING
89 89
90static DECLARE_RWSEM(profile_rwsem); 90static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
91static DEFINE_RWLOCK(handoff_lock); 91static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
92static struct notifier_block * task_exit_notifier; 92static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
93static struct notifier_block * task_free_notifier;
94static struct notifier_block * munmap_notifier;
95 93
96void profile_task_exit(struct task_struct * task) 94void profile_task_exit(struct task_struct * task)
97{ 95{
98 down_read(&profile_rwsem); 96 blocking_notifier_call_chain(&task_exit_notifier, 0, task);
99 notifier_call_chain(&task_exit_notifier, 0, task);
100 up_read(&profile_rwsem);
101} 97}
102 98
103int profile_handoff_task(struct task_struct * task) 99int profile_handoff_task(struct task_struct * task)
104{ 100{
105 int ret; 101 int ret;
106 read_lock(&handoff_lock); 102 ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
107 ret = notifier_call_chain(&task_free_notifier, 0, task);
108 read_unlock(&handoff_lock);
109 return (ret == NOTIFY_OK) ? 1 : 0; 103 return (ret == NOTIFY_OK) ? 1 : 0;
110} 104}
111 105
112void profile_munmap(unsigned long addr) 106void profile_munmap(unsigned long addr)
113{ 107{
114 down_read(&profile_rwsem); 108 blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
115 notifier_call_chain(&munmap_notifier, 0, (void *)addr);
116 up_read(&profile_rwsem);
117} 109}
118 110
119int task_handoff_register(struct notifier_block * n) 111int task_handoff_register(struct notifier_block * n)
120{ 112{
121 int err = -EINVAL; 113 return atomic_notifier_chain_register(&task_free_notifier, n);
122
123 write_lock(&handoff_lock);
124 err = notifier_chain_register(&task_free_notifier, n);
125 write_unlock(&handoff_lock);
126 return err;
127} 114}
128 115
129int task_handoff_unregister(struct notifier_block * n) 116int task_handoff_unregister(struct notifier_block * n)
130{ 117{
131 int err = -EINVAL; 118 return atomic_notifier_chain_unregister(&task_free_notifier, n);
132
133 write_lock(&handoff_lock);
134 err = notifier_chain_unregister(&task_free_notifier, n);
135 write_unlock(&handoff_lock);
136 return err;
137} 119}
138 120
139int profile_event_register(enum profile_type type, struct notifier_block * n) 121int profile_event_register(enum profile_type type, struct notifier_block * n)
140{ 122{
141 int err = -EINVAL; 123 int err = -EINVAL;
142 124
143 down_write(&profile_rwsem);
144
145 switch (type) { 125 switch (type) {
146 case PROFILE_TASK_EXIT: 126 case PROFILE_TASK_EXIT:
147 err = notifier_chain_register(&task_exit_notifier, n); 127 err = blocking_notifier_chain_register(
128 &task_exit_notifier, n);
148 break; 129 break;
149 case PROFILE_MUNMAP: 130 case PROFILE_MUNMAP:
150 err = notifier_chain_register(&munmap_notifier, n); 131 err = blocking_notifier_chain_register(
132 &munmap_notifier, n);
151 break; 133 break;
152 } 134 }
153 135
154 up_write(&profile_rwsem);
155
156 return err; 136 return err;
157} 137}
158 138
@@ -161,18 +141,17 @@ int profile_event_unregister(enum profile_type type, struct notifier_block * n)
161{ 141{
162 int err = -EINVAL; 142 int err = -EINVAL;
163 143
164 down_write(&profile_rwsem);
165
166 switch (type) { 144 switch (type) {
167 case PROFILE_TASK_EXIT: 145 case PROFILE_TASK_EXIT:
168 err = notifier_chain_unregister(&task_exit_notifier, n); 146 err = blocking_notifier_chain_unregister(
147 &task_exit_notifier, n);
169 break; 148 break;
170 case PROFILE_MUNMAP: 149 case PROFILE_MUNMAP:
171 err = notifier_chain_unregister(&munmap_notifier, n); 150 err = blocking_notifier_chain_unregister(
151 &munmap_notifier, n);
172 break; 152 break;
173 } 153 }
174 154
175 up_write(&profile_rwsem);
176 return err; 155 return err;
177} 156}
178 157
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b4b362b5baf5..8154e7589d12 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -301,7 +301,7 @@ rcu_torture_printk(char *page)
301 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 301 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
302 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 302 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
303 303
304 for_each_cpu(cpu) { 304 for_each_possible_cpu(cpu) {
305 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 305 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
306 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i]; 306 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
307 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i]; 307 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
@@ -535,7 +535,7 @@ rcu_torture_init(void)
535 atomic_set(&n_rcu_torture_error, 0); 535 atomic_set(&n_rcu_torture_error, 0);
536 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 536 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
537 atomic_set(&rcu_torture_wcount[i], 0); 537 atomic_set(&rcu_torture_wcount[i], 0);
538 for_each_cpu(cpu) { 538 for_each_possible_cpu(cpu) {
539 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 539 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
540 per_cpu(rcu_torture_count, cpu)[i] = 0; 540 per_cpu(rcu_torture_count, cpu)[i] = 0;
541 per_cpu(rcu_torture_batch, cpu)[i] = 0; 541 per_cpu(rcu_torture_batch, cpu)[i] = 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 7ffaabd64f89..a9ecac398bb9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -49,6 +49,7 @@
49#include <linux/syscalls.h> 49#include <linux/syscalls.h>
50#include <linux/times.h> 50#include <linux/times.h>
51#include <linux/acct.h> 51#include <linux/acct.h>
52#include <linux/kprobes.h>
52#include <asm/tlb.h> 53#include <asm/tlb.h>
53 54
54#include <asm/unistd.h> 55#include <asm/unistd.h>
@@ -144,7 +145,8 @@
144 (v1) * (v2_max) / (v1_max) 145 (v1) * (v2_max) / (v1_max)
145 146
146#define DELTA(p) \ 147#define DELTA(p) \
147 (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA) 148 (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
149 INTERACTIVE_DELTA)
148 150
149#define TASK_INTERACTIVE(p) \ 151#define TASK_INTERACTIVE(p) \
150 ((p)->prio <= (p)->static_prio - DELTA(p)) 152 ((p)->prio <= (p)->static_prio - DELTA(p))
@@ -1546,8 +1548,14 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
1546 finish_lock_switch(rq, prev); 1548 finish_lock_switch(rq, prev);
1547 if (mm) 1549 if (mm)
1548 mmdrop(mm); 1550 mmdrop(mm);
1549 if (unlikely(prev_task_flags & PF_DEAD)) 1551 if (unlikely(prev_task_flags & PF_DEAD)) {
1552 /*
1553 * Remove function-return probe instances associated with this
1554 * task and put them back on the free list.
1555 */
1556 kprobe_flush_task(prev);
1550 put_task_struct(prev); 1557 put_task_struct(prev);
1558 }
1551} 1559}
1552 1560
1553/** 1561/**
@@ -1617,7 +1625,7 @@ unsigned long nr_uninterruptible(void)
1617{ 1625{
1618 unsigned long i, sum = 0; 1626 unsigned long i, sum = 0;
1619 1627
1620 for_each_cpu(i) 1628 for_each_possible_cpu(i)
1621 sum += cpu_rq(i)->nr_uninterruptible; 1629 sum += cpu_rq(i)->nr_uninterruptible;
1622 1630
1623 /* 1631 /*
@@ -1634,7 +1642,7 @@ unsigned long long nr_context_switches(void)
1634{ 1642{
1635 unsigned long long i, sum = 0; 1643 unsigned long long i, sum = 0;
1636 1644
1637 for_each_cpu(i) 1645 for_each_possible_cpu(i)
1638 sum += cpu_rq(i)->nr_switches; 1646 sum += cpu_rq(i)->nr_switches;
1639 1647
1640 return sum; 1648 return sum;
@@ -1644,7 +1652,7 @@ unsigned long nr_iowait(void)
1644{ 1652{
1645 unsigned long i, sum = 0; 1653 unsigned long i, sum = 0;
1646 1654
1647 for_each_cpu(i) 1655 for_each_possible_cpu(i)
1648 sum += atomic_read(&cpu_rq(i)->nr_iowait); 1656 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1649 1657
1650 return sum; 1658 return sum;
@@ -2871,13 +2879,11 @@ asmlinkage void __sched schedule(void)
2871 * schedule() atomically, we ignore that path for now. 2879 * schedule() atomically, we ignore that path for now.
2872 * Otherwise, whine if we are scheduling when we should not be. 2880 * Otherwise, whine if we are scheduling when we should not be.
2873 */ 2881 */
2874 if (likely(!current->exit_state)) { 2882 if (unlikely(in_atomic() && !current->exit_state)) {
2875 if (unlikely(in_atomic())) { 2883 printk(KERN_ERR "BUG: scheduling while atomic: "
2876 printk(KERN_ERR "BUG: scheduling while atomic: " 2884 "%s/0x%08x/%d\n",
2877 "%s/0x%08x/%d\n", 2885 current->comm, preempt_count(), current->pid);
2878 current->comm, preempt_count(), current->pid); 2886 dump_stack();
2879 dump_stack();
2880 }
2881 } 2887 }
2882 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 2888 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2883 2889
@@ -5568,11 +5574,31 @@ static int cpu_to_cpu_group(int cpu)
5568} 5574}
5569#endif 5575#endif
5570 5576
5577#ifdef CONFIG_SCHED_MC
5578static DEFINE_PER_CPU(struct sched_domain, core_domains);
5579static struct sched_group sched_group_core[NR_CPUS];
5580#endif
5581
5582#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
5583static int cpu_to_core_group(int cpu)
5584{
5585 return first_cpu(cpu_sibling_map[cpu]);
5586}
5587#elif defined(CONFIG_SCHED_MC)
5588static int cpu_to_core_group(int cpu)
5589{
5590 return cpu;
5591}
5592#endif
5593
5571static DEFINE_PER_CPU(struct sched_domain, phys_domains); 5594static DEFINE_PER_CPU(struct sched_domain, phys_domains);
5572static struct sched_group sched_group_phys[NR_CPUS]; 5595static struct sched_group sched_group_phys[NR_CPUS];
5573static int cpu_to_phys_group(int cpu) 5596static int cpu_to_phys_group(int cpu)
5574{ 5597{
5575#ifdef CONFIG_SCHED_SMT 5598#if defined(CONFIG_SCHED_MC)
5599 cpumask_t mask = cpu_coregroup_map(cpu);
5600 return first_cpu(mask);
5601#elif defined(CONFIG_SCHED_SMT)
5576 return first_cpu(cpu_sibling_map[cpu]); 5602 return first_cpu(cpu_sibling_map[cpu]);
5577#else 5603#else
5578 return cpu; 5604 return cpu;
@@ -5595,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
5595{ 5621{
5596 return cpu_to_node(cpu); 5622 return cpu_to_node(cpu);
5597} 5623}
5624static void init_numa_sched_groups_power(struct sched_group *group_head)
5625{
5626 struct sched_group *sg = group_head;
5627 int j;
5628
5629 if (!sg)
5630 return;
5631next_sg:
5632 for_each_cpu_mask(j, sg->cpumask) {
5633 struct sched_domain *sd;
5634
5635 sd = &per_cpu(phys_domains, j);
5636 if (j != first_cpu(sd->groups->cpumask)) {
5637 /*
5638 * Only add "power" once for each
5639 * physical package.
5640 */
5641 continue;
5642 }
5643
5644 sg->cpu_power += sd->groups->cpu_power;
5645 }
5646 sg = sg->next;
5647 if (sg != group_head)
5648 goto next_sg;
5649}
5598#endif 5650#endif
5599 5651
5600/* 5652/*
@@ -5670,6 +5722,17 @@ void build_sched_domains(const cpumask_t *cpu_map)
5670 sd->parent = p; 5722 sd->parent = p;
5671 sd->groups = &sched_group_phys[group]; 5723 sd->groups = &sched_group_phys[group];
5672 5724
5725#ifdef CONFIG_SCHED_MC
5726 p = sd;
5727 sd = &per_cpu(core_domains, i);
5728 group = cpu_to_core_group(i);
5729 *sd = SD_MC_INIT;
5730 sd->span = cpu_coregroup_map(i);
5731 cpus_and(sd->span, sd->span, *cpu_map);
5732 sd->parent = p;
5733 sd->groups = &sched_group_core[group];
5734#endif
5735
5673#ifdef CONFIG_SCHED_SMT 5736#ifdef CONFIG_SCHED_SMT
5674 p = sd; 5737 p = sd;
5675 sd = &per_cpu(cpu_domains, i); 5738 sd = &per_cpu(cpu_domains, i);
@@ -5695,6 +5758,19 @@ void build_sched_domains(const cpumask_t *cpu_map)
5695 } 5758 }
5696#endif 5759#endif
5697 5760
5761#ifdef CONFIG_SCHED_MC
5762 /* Set up multi-core groups */
5763 for_each_cpu_mask(i, *cpu_map) {
5764 cpumask_t this_core_map = cpu_coregroup_map(i);
5765 cpus_and(this_core_map, this_core_map, *cpu_map);
5766 if (i != first_cpu(this_core_map))
5767 continue;
5768 init_sched_build_groups(sched_group_core, this_core_map,
5769 &cpu_to_core_group);
5770 }
5771#endif
5772
5773
5698 /* Set up physical groups */ 5774 /* Set up physical groups */
5699 for (i = 0; i < MAX_NUMNODES; i++) { 5775 for (i = 0; i < MAX_NUMNODES; i++) {
5700 cpumask_t nodemask = node_to_cpumask(i); 5776 cpumask_t nodemask = node_to_cpumask(i);
@@ -5791,51 +5867,38 @@ void build_sched_domains(const cpumask_t *cpu_map)
5791 power = SCHED_LOAD_SCALE; 5867 power = SCHED_LOAD_SCALE;
5792 sd->groups->cpu_power = power; 5868 sd->groups->cpu_power = power;
5793#endif 5869#endif
5870#ifdef CONFIG_SCHED_MC
5871 sd = &per_cpu(core_domains, i);
5872 power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
5873 * SCHED_LOAD_SCALE / 10;
5874 sd->groups->cpu_power = power;
5875
5876 sd = &per_cpu(phys_domains, i);
5794 5877
5878 /*
5879 * This has to be < 2 * SCHED_LOAD_SCALE
5880 * Lets keep it SCHED_LOAD_SCALE, so that
5881 * while calculating NUMA group's cpu_power
5882 * we can simply do
5883 * numa_group->cpu_power += phys_group->cpu_power;
5884 *
5885 * See "only add power once for each physical pkg"
5886 * comment below
5887 */
5888 sd->groups->cpu_power = SCHED_LOAD_SCALE;
5889#else
5795 sd = &per_cpu(phys_domains, i); 5890 sd = &per_cpu(phys_domains, i);
5796 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * 5891 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5797 (cpus_weight(sd->groups->cpumask)-1) / 10; 5892 (cpus_weight(sd->groups->cpumask)-1) / 10;
5798 sd->groups->cpu_power = power; 5893 sd->groups->cpu_power = power;
5799
5800#ifdef CONFIG_NUMA
5801 sd = &per_cpu(allnodes_domains, i);
5802 if (sd->groups) {
5803 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5804 (cpus_weight(sd->groups->cpumask)-1) / 10;
5805 sd->groups->cpu_power = power;
5806 }
5807#endif 5894#endif
5808 } 5895 }
5809 5896
5810#ifdef CONFIG_NUMA 5897#ifdef CONFIG_NUMA
5811 for (i = 0; i < MAX_NUMNODES; i++) { 5898 for (i = 0; i < MAX_NUMNODES; i++)
5812 struct sched_group *sg = sched_group_nodes[i]; 5899 init_numa_sched_groups_power(sched_group_nodes[i]);
5813 int j;
5814
5815 if (sg == NULL)
5816 continue;
5817next_sg:
5818 for_each_cpu_mask(j, sg->cpumask) {
5819 struct sched_domain *sd;
5820 int power;
5821 5900
5822 sd = &per_cpu(phys_domains, j); 5901 init_numa_sched_groups_power(sched_group_allnodes);
5823 if (j != first_cpu(sd->groups->cpumask)) {
5824 /*
5825 * Only add "power" once for each
5826 * physical package.
5827 */
5828 continue;
5829 }
5830 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5831 (cpus_weight(sd->groups->cpumask)-1) / 10;
5832
5833 sg->cpu_power += power;
5834 }
5835 sg = sg->next;
5836 if (sg != sched_group_nodes[i])
5837 goto next_sg;
5838 }
5839#endif 5902#endif
5840 5903
5841 /* Attach the domains */ 5904 /* Attach the domains */
@@ -5843,6 +5906,8 @@ next_sg:
5843 struct sched_domain *sd; 5906 struct sched_domain *sd;
5844#ifdef CONFIG_SCHED_SMT 5907#ifdef CONFIG_SCHED_SMT
5845 sd = &per_cpu(cpu_domains, i); 5908 sd = &per_cpu(cpu_domains, i);
5909#elif defined(CONFIG_SCHED_MC)
5910 sd = &per_cpu(core_domains, i);
5846#else 5911#else
5847 sd = &per_cpu(phys_domains, i); 5912 sd = &per_cpu(phys_domains, i);
5848#endif 5913#endif
@@ -6015,7 +6080,7 @@ void __init sched_init(void)
6015 runqueue_t *rq; 6080 runqueue_t *rq;
6016 int i, j, k; 6081 int i, j, k;
6017 6082
6018 for_each_cpu(i) { 6083 for_each_possible_cpu(i) {
6019 prio_array_t *array; 6084 prio_array_t *array;
6020 6085
6021 rq = cpu_rq(i); 6086 rq = cpu_rq(i);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9b3d5847ed8..ced91e1ff564 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -152,5 +152,5 @@ __init void spawn_softlockup_task(void)
152 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 152 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
153 register_cpu_notifier(&cpu_nfb); 153 register_cpu_notifier(&cpu_nfb);
154 154
155 notifier_chain_register(&panic_notifier_list, &panic_block); 155 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
156} 156}
diff --git a/kernel/sys.c b/kernel/sys.c
index 38bc73ede2ba..c93d37f71aef 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -95,99 +95,304 @@ int cad_pid = 1;
95 * and the like. 95 * and the like.
96 */ 96 */
97 97
98static struct notifier_block *reboot_notifier_list; 98static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
99static DEFINE_RWLOCK(notifier_lock); 99
100/*
101 * Notifier chain core routines. The exported routines below
102 * are layered on top of these, with appropriate locking added.
103 */
104
105static int notifier_chain_register(struct notifier_block **nl,
106 struct notifier_block *n)
107{
108 while ((*nl) != NULL) {
109 if (n->priority > (*nl)->priority)
110 break;
111 nl = &((*nl)->next);
112 }
113 n->next = *nl;
114 rcu_assign_pointer(*nl, n);
115 return 0;
116}
117
118static int notifier_chain_unregister(struct notifier_block **nl,
119 struct notifier_block *n)
120{
121 while ((*nl) != NULL) {
122 if ((*nl) == n) {
123 rcu_assign_pointer(*nl, n->next);
124 return 0;
125 }
126 nl = &((*nl)->next);
127 }
128 return -ENOENT;
129}
130
131static int __kprobes notifier_call_chain(struct notifier_block **nl,
132 unsigned long val, void *v)
133{
134 int ret = NOTIFY_DONE;
135 struct notifier_block *nb;
136
137 nb = rcu_dereference(*nl);
138 while (nb) {
139 ret = nb->notifier_call(nb, val, v);
140 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
141 break;
142 nb = rcu_dereference(nb->next);
143 }
144 return ret;
145}
146
147/*
148 * Atomic notifier chain routines. Registration and unregistration
149 * use a mutex, and call_chain is synchronized by RCU (no locks).
150 */
100 151
101/** 152/**
102 * notifier_chain_register - Add notifier to a notifier chain 153 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
103 * @list: Pointer to root list pointer 154 * @nh: Pointer to head of the atomic notifier chain
104 * @n: New entry in notifier chain 155 * @n: New entry in notifier chain
105 * 156 *
106 * Adds a notifier to a notifier chain. 157 * Adds a notifier to an atomic notifier chain.
107 * 158 *
108 * Currently always returns zero. 159 * Currently always returns zero.
109 */ 160 */
161
162int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
163 struct notifier_block *n)
164{
165 unsigned long flags;
166 int ret;
167
168 spin_lock_irqsave(&nh->lock, flags);
169 ret = notifier_chain_register(&nh->head, n);
170 spin_unlock_irqrestore(&nh->lock, flags);
171 return ret;
172}
173
174EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
175
176/**
177 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
178 * @nh: Pointer to head of the atomic notifier chain
179 * @n: Entry to remove from notifier chain
180 *
181 * Removes a notifier from an atomic notifier chain.
182 *
183 * Returns zero on success or %-ENOENT on failure.
184 */
185int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
186 struct notifier_block *n)
187{
188 unsigned long flags;
189 int ret;
190
191 spin_lock_irqsave(&nh->lock, flags);
192 ret = notifier_chain_unregister(&nh->head, n);
193 spin_unlock_irqrestore(&nh->lock, flags);
194 synchronize_rcu();
195 return ret;
196}
197
198EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
199
200/**
201 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
202 * @nh: Pointer to head of the atomic notifier chain
203 * @val: Value passed unmodified to notifier function
204 * @v: Pointer passed unmodified to notifier function
205 *
206 * Calls each function in a notifier chain in turn. The functions
207 * run in an atomic context, so they must not block.
208 * This routine uses RCU to synchronize with changes to the chain.
209 *
210 * If the return value of the notifier can be and'ed
211 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
212 * will return immediately, with the return value of
213 * the notifier function which halted execution.
214 * Otherwise the return value is the return value
215 * of the last notifier function called.
216 */
110 217
111int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) 218int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
219 unsigned long val, void *v)
112{ 220{
113 write_lock(&notifier_lock); 221 int ret;
114 while(*list) 222
115 { 223 rcu_read_lock();
116 if(n->priority > (*list)->priority) 224 ret = notifier_call_chain(&nh->head, val, v);
117 break; 225 rcu_read_unlock();
118 list= &((*list)->next); 226 return ret;
119 }
120 n->next = *list;
121 *list=n;
122 write_unlock(&notifier_lock);
123 return 0;
124} 227}
125 228
126EXPORT_SYMBOL(notifier_chain_register); 229EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
230
231/*
232 * Blocking notifier chain routines. All access to the chain is
233 * synchronized by an rwsem.
234 */
127 235
128/** 236/**
129 * notifier_chain_unregister - Remove notifier from a notifier chain 237 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
130 * @nl: Pointer to root list pointer 238 * @nh: Pointer to head of the blocking notifier chain
131 * @n: New entry in notifier chain 239 * @n: New entry in notifier chain
132 * 240 *
133 * Removes a notifier from a notifier chain. 241 * Adds a notifier to a blocking notifier chain.
242 * Must be called in process context.
134 * 243 *
135 * Returns zero on success, or %-ENOENT on failure. 244 * Currently always returns zero.
136 */ 245 */
137 246
138int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) 247int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
248 struct notifier_block *n)
139{ 249{
140 write_lock(&notifier_lock); 250 int ret;
141 while((*nl)!=NULL) 251
142 { 252 /*
143 if((*nl)==n) 253 * This code gets used during boot-up, when task switching is
144 { 254 * not yet working and interrupts must remain disabled. At
145 *nl=n->next; 255 * such times we must not call down_write().
146 write_unlock(&notifier_lock); 256 */
147 return 0; 257 if (unlikely(system_state == SYSTEM_BOOTING))
148 } 258 return notifier_chain_register(&nh->head, n);
149 nl=&((*nl)->next); 259
150 } 260 down_write(&nh->rwsem);
151 write_unlock(&notifier_lock); 261 ret = notifier_chain_register(&nh->head, n);
152 return -ENOENT; 262 up_write(&nh->rwsem);
263 return ret;
153} 264}
154 265
155EXPORT_SYMBOL(notifier_chain_unregister); 266EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
156 267
157/** 268/**
158 * notifier_call_chain - Call functions in a notifier chain 269 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
159 * @n: Pointer to root pointer of notifier chain 270 * @nh: Pointer to head of the blocking notifier chain
271 * @n: Entry to remove from notifier chain
272 *
273 * Removes a notifier from a blocking notifier chain.
274 * Must be called from process context.
275 *
276 * Returns zero on success or %-ENOENT on failure.
277 */
278int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
279 struct notifier_block *n)
280{
281 int ret;
282
283 /*
284 * This code gets used during boot-up, when task switching is
285 * not yet working and interrupts must remain disabled. At
286 * such times we must not call down_write().
287 */
288 if (unlikely(system_state == SYSTEM_BOOTING))
289 return notifier_chain_unregister(&nh->head, n);
290
291 down_write(&nh->rwsem);
292 ret = notifier_chain_unregister(&nh->head, n);
293 up_write(&nh->rwsem);
294 return ret;
295}
296
297EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
298
299/**
300 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
301 * @nh: Pointer to head of the blocking notifier chain
160 * @val: Value passed unmodified to notifier function 302 * @val: Value passed unmodified to notifier function
161 * @v: Pointer passed unmodified to notifier function 303 * @v: Pointer passed unmodified to notifier function
162 * 304 *
163 * Calls each function in a notifier chain in turn. 305 * Calls each function in a notifier chain in turn. The functions
306 * run in a process context, so they are allowed to block.
164 * 307 *
165 * If the return value of the notifier can be and'd 308 * If the return value of the notifier can be and'ed
166 * with %NOTIFY_STOP_MASK, then notifier_call_chain 309 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
167 * will return immediately, with the return value of 310 * will return immediately, with the return value of
168 * the notifier function which halted execution. 311 * the notifier function which halted execution.
169 * Otherwise, the return value is the return value 312 * Otherwise the return value is the return value
170 * of the last notifier function called. 313 * of the last notifier function called.
171 */ 314 */
172 315
173int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) 316int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
317 unsigned long val, void *v)
174{ 318{
175 int ret=NOTIFY_DONE; 319 int ret;
176 struct notifier_block *nb = *n;
177 320
178 while(nb) 321 down_read(&nh->rwsem);
179 { 322 ret = notifier_call_chain(&nh->head, val, v);
180 ret=nb->notifier_call(nb,val,v); 323 up_read(&nh->rwsem);
181 if(ret&NOTIFY_STOP_MASK)
182 {
183 return ret;
184 }
185 nb=nb->next;
186 }
187 return ret; 324 return ret;
188} 325}
189 326
190EXPORT_SYMBOL(notifier_call_chain); 327EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
328
329/*
330 * Raw notifier chain routines. There is no protection;
331 * the caller must provide it. Use at your own risk!
332 */
333
334/**
335 * raw_notifier_chain_register - Add notifier to a raw notifier chain
336 * @nh: Pointer to head of the raw notifier chain
337 * @n: New entry in notifier chain
338 *
339 * Adds a notifier to a raw notifier chain.
340 * All locking must be provided by the caller.
341 *
342 * Currently always returns zero.
343 */
344
345int raw_notifier_chain_register(struct raw_notifier_head *nh,
346 struct notifier_block *n)
347{
348 return notifier_chain_register(&nh->head, n);
349}
350
351EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
352
353/**
354 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
355 * @nh: Pointer to head of the raw notifier chain
356 * @n: Entry to remove from notifier chain
357 *
358 * Removes a notifier from a raw notifier chain.
359 * All locking must be provided by the caller.
360 *
361 * Returns zero on success or %-ENOENT on failure.
362 */
363int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
364 struct notifier_block *n)
365{
366 return notifier_chain_unregister(&nh->head, n);
367}
368
369EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
370
371/**
372 * raw_notifier_call_chain - Call functions in a raw notifier chain
373 * @nh: Pointer to head of the raw notifier chain
374 * @val: Value passed unmodified to notifier function
375 * @v: Pointer passed unmodified to notifier function
376 *
377 * Calls each function in a notifier chain in turn. The functions
378 * run in an undefined context.
379 * All locking must be provided by the caller.
380 *
381 * If the return value of the notifier can be and'ed
382 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
383 * will return immediately, with the return value of
384 * the notifier function which halted execution.
385 * Otherwise the return value is the return value
386 * of the last notifier function called.
387 */
388
389int raw_notifier_call_chain(struct raw_notifier_head *nh,
390 unsigned long val, void *v)
391{
392 return notifier_call_chain(&nh->head, val, v);
393}
394
395EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
191 396
192/** 397/**
193 * register_reboot_notifier - Register function to be called at reboot time 398 * register_reboot_notifier - Register function to be called at reboot time
@@ -196,13 +401,13 @@ EXPORT_SYMBOL(notifier_call_chain);
196 * Registers a function with the list of functions 401 * Registers a function with the list of functions
197 * to be called at reboot time. 402 * to be called at reboot time.
198 * 403 *
199 * Currently always returns zero, as notifier_chain_register 404 * Currently always returns zero, as blocking_notifier_chain_register
200 * always returns zero. 405 * always returns zero.
201 */ 406 */
202 407
203int register_reboot_notifier(struct notifier_block * nb) 408int register_reboot_notifier(struct notifier_block * nb)
204{ 409{
205 return notifier_chain_register(&reboot_notifier_list, nb); 410 return blocking_notifier_chain_register(&reboot_notifier_list, nb);
206} 411}
207 412
208EXPORT_SYMBOL(register_reboot_notifier); 413EXPORT_SYMBOL(register_reboot_notifier);
@@ -219,7 +424,7 @@ EXPORT_SYMBOL(register_reboot_notifier);
219 424
220int unregister_reboot_notifier(struct notifier_block * nb) 425int unregister_reboot_notifier(struct notifier_block * nb)
221{ 426{
222 return notifier_chain_unregister(&reboot_notifier_list, nb); 427 return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
223} 428}
224 429
225EXPORT_SYMBOL(unregister_reboot_notifier); 430EXPORT_SYMBOL(unregister_reboot_notifier);
@@ -380,7 +585,7 @@ EXPORT_SYMBOL_GPL(emergency_restart);
380 585
381void kernel_restart_prepare(char *cmd) 586void kernel_restart_prepare(char *cmd)
382{ 587{
383 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); 588 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
384 system_state = SYSTEM_RESTART; 589 system_state = SYSTEM_RESTART;
385 device_shutdown(); 590 device_shutdown();
386} 591}
@@ -430,7 +635,7 @@ EXPORT_SYMBOL_GPL(kernel_kexec);
430 635
431void kernel_shutdown_prepare(enum system_states state) 636void kernel_shutdown_prepare(enum system_states state)
432{ 637{
433 notifier_call_chain(&reboot_notifier_list, 638 blocking_notifier_call_chain(&reboot_notifier_list,
434 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); 639 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
435 system_state = state; 640 system_state = state;
436 device_shutdown(); 641 device_shutdown();
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1067090db6b1..d82864c4a617 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -42,6 +42,10 @@ cond_syscall(sys_recvmsg);
42cond_syscall(sys_socketcall); 42cond_syscall(sys_socketcall);
43cond_syscall(sys_futex); 43cond_syscall(sys_futex);
44cond_syscall(compat_sys_futex); 44cond_syscall(compat_sys_futex);
45cond_syscall(sys_set_robust_list);
46cond_syscall(compat_sys_set_robust_list);
47cond_syscall(sys_get_robust_list);
48cond_syscall(compat_sys_get_robust_list);
45cond_syscall(sys_epoll_create); 49cond_syscall(sys_epoll_create);
46cond_syscall(sys_epoll_ctl); 50cond_syscall(sys_epoll_ctl);
47cond_syscall(sys_epoll_wait); 51cond_syscall(sys_epoll_wait);
diff --git a/kernel/time.c b/kernel/time.c
index e00a97b77241..ff8e7019c4c4 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -610,7 +610,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
610 * 610 *
611 * Returns the timespec representation of the nsec parameter. 611 * Returns the timespec representation of the nsec parameter.
612 */ 612 */
613struct timespec ns_to_timespec(const nsec_t nsec) 613struct timespec ns_to_timespec(const s64 nsec)
614{ 614{
615 struct timespec ts; 615 struct timespec ts;
616 616
@@ -630,7 +630,7 @@ struct timespec ns_to_timespec(const nsec_t nsec)
630 * 630 *
631 * Returns the timeval representation of the nsec parameter. 631 * Returns the timeval representation of the nsec parameter.
632 */ 632 */
633struct timeval ns_to_timeval(const nsec_t nsec) 633struct timeval ns_to_timeval(const s64 nsec)
634{ 634{
635 struct timespec ts = ns_to_timespec(nsec); 635 struct timespec ts = ns_to_timespec(nsec);
636 struct timeval tv; 636 struct timeval tv;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7e70ab13e191..6e8a60f67c7a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -172,7 +172,7 @@ config DEBUG_IOREMAP
172 172
173config DEBUG_FS 173config DEBUG_FS
174 bool "Debug Filesystem" 174 bool "Debug Filesystem"
175 depends on DEBUG_KERNEL && SYSFS 175 depends on SYSFS
176 help 176 help
177 debugfs is a virtual file system that kernel developers use to put 177 debugfs is a virtual file system that kernel developers use to put
178 debugging files into. Enable this option to be able to read and 178 debugging files into. Enable this option to be able to read and
diff --git a/lib/Makefile b/lib/Makefile
index f827e3c24ec0..b830c9a15541 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
23lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 23lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
24lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o 24lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
25lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 25lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
26lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
26obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 27obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
27obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 28obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
28 29
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 8acab0e176ef..ed2ae3b0cd06 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1,
253} 253}
254EXPORT_SYMBOL(__bitmap_subset); 254EXPORT_SYMBOL(__bitmap_subset);
255 255
256#if BITS_PER_LONG == 32
257int __bitmap_weight(const unsigned long *bitmap, int bits) 256int __bitmap_weight(const unsigned long *bitmap, int bits)
258{ 257{
259 int k, w = 0, lim = bits/BITS_PER_LONG; 258 int k, w = 0, lim = bits/BITS_PER_LONG;
260 259
261 for (k = 0; k < lim; k++) 260 for (k = 0; k < lim; k++)
262 w += hweight32(bitmap[k]); 261 w += hweight_long(bitmap[k]);
263 262
264 if (bits % BITS_PER_LONG) 263 if (bits % BITS_PER_LONG)
265 w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); 264 w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
266 265
267 return w; 266 return w;
268} 267}
269#else
270int __bitmap_weight(const unsigned long *bitmap, int bits)
271{
272 int k, w = 0, lim = bits/BITS_PER_LONG;
273
274 for (k = 0; k < lim; k++)
275 w += hweight64(bitmap[k]);
276
277 if (bits % BITS_PER_LONG)
278 w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
279
280 return w;
281}
282#endif
283EXPORT_SYMBOL(__bitmap_weight); 268EXPORT_SYMBOL(__bitmap_weight);
284 269
285/* 270/*
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index c05b4b19cf6c..bda0d71a2514 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -11,48 +11,171 @@
11 11
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <asm/types.h>
15#include <asm/byteorder.h>
14 16
15int find_next_bit(const unsigned long *addr, int size, int offset) 17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
18
19/**
20 * find_next_bit - find the next set bit in a memory region
21 * @addr: The address to base the search on
22 * @offset: The bitnumber to start searching at
23 * @size: The maximum size to search
24 */
25unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
26 unsigned long offset)
16{ 27{
17 const unsigned long *base; 28 const unsigned long *p = addr + BITOP_WORD(offset);
18 const int NBITS = sizeof(*addr) * 8; 29 unsigned long result = offset & ~(BITS_PER_LONG-1);
19 unsigned long tmp; 30 unsigned long tmp;
20 31
21 base = addr; 32 if (offset >= size)
33 return size;
34 size -= result;
35 offset %= BITS_PER_LONG;
22 if (offset) { 36 if (offset) {
23 int suboffset; 37 tmp = *(p++);
38 tmp &= (~0UL << offset);
39 if (size < BITS_PER_LONG)
40 goto found_first;
41 if (tmp)
42 goto found_middle;
43 size -= BITS_PER_LONG;
44 result += BITS_PER_LONG;
45 }
46 while (size & ~(BITS_PER_LONG-1)) {
47 if ((tmp = *(p++)))
48 goto found_middle;
49 result += BITS_PER_LONG;
50 size -= BITS_PER_LONG;
51 }
52 if (!size)
53 return result;
54 tmp = *p;
24 55
25 addr += offset / NBITS; 56found_first:
57 tmp &= (~0UL >> (BITS_PER_LONG - size));
58 if (tmp == 0UL) /* Are any bits set? */
59 return result + size; /* Nope. */
60found_middle:
61 return result + __ffs(tmp);
62}
26 63
27 suboffset = offset % NBITS; 64EXPORT_SYMBOL(find_next_bit);
28 if (suboffset) {
29 tmp = *addr;
30 tmp >>= suboffset;
31 if (tmp)
32 goto finish;
33 }
34 65
35 addr++; 66/*
67 * This implementation of find_{first,next}_zero_bit was stolen from
68 * Linus' asm-alpha/bitops.h.
69 */
70unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
71 unsigned long offset)
72{
73 const unsigned long *p = addr + BITOP_WORD(offset);
74 unsigned long result = offset & ~(BITS_PER_LONG-1);
75 unsigned long tmp;
76
77 if (offset >= size)
78 return size;
79 size -= result;
80 offset %= BITS_PER_LONG;
81 if (offset) {
82 tmp = *(p++);
83 tmp |= ~0UL >> (BITS_PER_LONG - offset);
84 if (size < BITS_PER_LONG)
85 goto found_first;
86 if (~tmp)
87 goto found_middle;
88 size -= BITS_PER_LONG;
89 result += BITS_PER_LONG;
90 }
91 while (size & ~(BITS_PER_LONG-1)) {
92 if (~(tmp = *(p++)))
93 goto found_middle;
94 result += BITS_PER_LONG;
95 size -= BITS_PER_LONG;
36 } 96 }
97 if (!size)
98 return result;
99 tmp = *p;
100
101found_first:
102 tmp |= ~0UL << size;
103 if (tmp == ~0UL) /* Are any bits zero? */
104 return result + size; /* Nope. */
105found_middle:
106 return result + ffz(tmp);
107}
108
109EXPORT_SYMBOL(find_next_zero_bit);
37 110
38 while ((tmp = *addr) == 0) 111#ifdef __BIG_ENDIAN
39 addr++;
40 112
41 offset = (addr - base) * NBITS; 113/* include/linux/byteorder does not support "unsigned long" type */
114static inline unsigned long ext2_swabp(const unsigned long * x)
115{
116#if BITS_PER_LONG == 64
117 return (unsigned long) __swab64p((u64 *) x);
118#elif BITS_PER_LONG == 32
119 return (unsigned long) __swab32p((u32 *) x);
120#else
121#error BITS_PER_LONG not defined
122#endif
123}
124
125/* include/linux/byteorder doesn't support "unsigned long" type */
126static inline unsigned long ext2_swab(const unsigned long y)
127{
128#if BITS_PER_LONG == 64
129 return (unsigned long) __swab64((u64) y);
130#elif BITS_PER_LONG == 32
131 return (unsigned long) __swab32((u32) y);
132#else
133#error BITS_PER_LONG not defined
134#endif
135}
42 136
43 finish: 137unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
44 /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ 138 long size, unsigned long offset)
45 while (!(tmp & 0xff)) { 139{
46 offset += 8; 140 const unsigned long *p = addr + BITOP_WORD(offset);
47 tmp >>= 8; 141 unsigned long result = offset & ~(BITS_PER_LONG - 1);
142 unsigned long tmp;
143
144 if (offset >= size)
145 return size;
146 size -= result;
147 offset &= (BITS_PER_LONG - 1UL);
148 if (offset) {
149 tmp = ext2_swabp(p++);
150 tmp |= (~0UL >> (BITS_PER_LONG - offset));
151 if (size < BITS_PER_LONG)
152 goto found_first;
153 if (~tmp)
154 goto found_middle;
155 size -= BITS_PER_LONG;
156 result += BITS_PER_LONG;
48 } 157 }
49 158
50 while (!(tmp & 1)) { 159 while (size & ~(BITS_PER_LONG - 1)) {
51 offset++; 160 if (~(tmp = *(p++)))
52 tmp >>= 1; 161 goto found_middle_swap;
162 result += BITS_PER_LONG;
163 size -= BITS_PER_LONG;
53 } 164 }
165 if (!size)
166 return result;
167 tmp = ext2_swabp(p);
168found_first:
169 tmp |= ~0UL << size;
170 if (tmp == ~0UL) /* Are any bits zero? */
171 return result + size; /* Nope. Skip ffz */
172found_middle:
173 return result + ffz(tmp);
54 174
55 return offset; 175found_middle_swap:
176 return result + ffz(ext2_swab(tmp));
56} 177}
57 178
58EXPORT_SYMBOL(find_next_bit); 179EXPORT_SYMBOL(generic_find_next_zero_le_bit);
180
181#endif /* __BIG_ENDIAN */
diff --git a/lib/hweight.c b/lib/hweight.c
new file mode 100644
index 000000000000..438257671708
--- /dev/null
+++ b/lib/hweight.c
@@ -0,0 +1,53 @@
1#include <linux/module.h>
2#include <asm/types.h>
3
4/**
5 * hweightN - returns the hamming weight of a N-bit word
6 * @x: the word to weigh
7 *
8 * The Hamming Weight of a number is the total number of bits set in it.
9 */
10
11unsigned int hweight32(unsigned int w)
12{
13 unsigned int res = w - ((w >> 1) & 0x55555555);
14 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
15 res = (res + (res >> 4)) & 0x0F0F0F0F;
16 res = res + (res >> 8);
17 return (res + (res >> 16)) & 0x000000FF;
18}
19EXPORT_SYMBOL(hweight32);
20
21unsigned int hweight16(unsigned int w)
22{
23 unsigned int res = w - ((w >> 1) & 0x5555);
24 res = (res & 0x3333) + ((res >> 2) & 0x3333);
25 res = (res + (res >> 4)) & 0x0F0F;
26 return (res + (res >> 8)) & 0x00FF;
27}
28EXPORT_SYMBOL(hweight16);
29
30unsigned int hweight8(unsigned int w)
31{
32 unsigned int res = w - ((w >> 1) & 0x55);
33 res = (res & 0x33) + ((res >> 2) & 0x33);
34 return (res + (res >> 4)) & 0x0F;
35}
36EXPORT_SYMBOL(hweight8);
37
38unsigned long hweight64(__u64 w)
39{
40#if BITS_PER_LONG == 32
41 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
42#elif BITS_PER_LONG == 64
43 __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
44 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
45 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
46 res = res + (res >> 8);
47 res = res + (res >> 16);
48 return (res + (res >> 32)) & 0x00000000000000FFul;
49#else
50#error BITS_PER_LONG not defined
51#endif
52}
53EXPORT_SYMBOL(hweight64);
diff --git a/mm/Makefile b/mm/Makefile
index f10c753dce6d..0b8f73f2ed16 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ 10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 page_alloc.o page-writeback.o pdflush.o \ 11 page_alloc.o page-writeback.o pdflush.o \
12 readahead.o swap.o truncate.o vmscan.o \ 12 readahead.o swap.o truncate.o vmscan.o \
13 prio_tree.o util.o $(mmu-y) 13 prio_tree.o util.o mmzone.o $(mmu-y)
14 14
15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
16obj-$(CONFIG_HUGETLBFS) += hugetlb.o 16obj-$(CONFIG_HUGETLBFS) += hugetlb.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index b55bd39fc5dd..d3e3bd2ffcea 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -33,6 +33,7 @@ EXPORT_SYMBOL(max_pfn); /* This is exported so
33 * dma_get_required_mask(), which uses 33 * dma_get_required_mask(), which uses
34 * it, can be an inline function */ 34 * it, can be an inline function */
35 35
36static LIST_HEAD(bdata_list);
36#ifdef CONFIG_CRASH_DUMP 37#ifdef CONFIG_CRASH_DUMP
37/* 38/*
38 * If we have booted due to a crash, max_pfn will be a very low value. We need 39 * If we have booted due to a crash, max_pfn will be a very low value. We need
@@ -52,6 +53,27 @@ unsigned long __init bootmem_bootmap_pages (unsigned long pages)
52 53
53 return mapsize; 54 return mapsize;
54} 55}
56/*
57 * link bdata in order
58 */
59static void link_bootmem(bootmem_data_t *bdata)
60{
61 bootmem_data_t *ent;
62 if (list_empty(&bdata_list)) {
63 list_add(&bdata->list, &bdata_list);
64 return;
65 }
66 /* insert in order */
67 list_for_each_entry(ent, &bdata_list, list) {
68 if (bdata->node_boot_start < ent->node_boot_start) {
69 list_add_tail(&bdata->list, &ent->list);
70 return;
71 }
72 }
73 list_add_tail(&bdata->list, &bdata_list);
74 return;
75}
76
55 77
56/* 78/*
57 * Called once to set up the allocator itself. 79 * Called once to set up the allocator itself.
@@ -62,13 +84,11 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
62 bootmem_data_t *bdata = pgdat->bdata; 84 bootmem_data_t *bdata = pgdat->bdata;
63 unsigned long mapsize = ((end - start)+7)/8; 85 unsigned long mapsize = ((end - start)+7)/8;
64 86
65 pgdat->pgdat_next = pgdat_list;
66 pgdat_list = pgdat;
67
68 mapsize = ALIGN(mapsize, sizeof(long)); 87 mapsize = ALIGN(mapsize, sizeof(long));
69 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); 88 bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
70 bdata->node_boot_start = (start << PAGE_SHIFT); 89 bdata->node_boot_start = (start << PAGE_SHIFT);
71 bdata->node_low_pfn = end; 90 bdata->node_low_pfn = end;
91 link_bootmem(bdata);
72 92
73 /* 93 /*
74 * Initially all pages are reserved - setup_arch() has to 94 * Initially all pages are reserved - setup_arch() has to
@@ -383,12 +403,11 @@ unsigned long __init free_all_bootmem (void)
383 403
384void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) 404void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
385{ 405{
386 pg_data_t *pgdat = pgdat_list; 406 bootmem_data_t *bdata;
387 void *ptr; 407 void *ptr;
388 408
389 for_each_pgdat(pgdat) 409 list_for_each_entry(bdata, &bdata_list, list)
390 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, 410 if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
391 align, goal, 0)))
392 return(ptr); 411 return(ptr);
393 412
394 /* 413 /*
@@ -416,11 +435,11 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigne
416 435
417void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) 436void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
418{ 437{
419 pg_data_t *pgdat = pgdat_list; 438 bootmem_data_t *bdata;
420 void *ptr; 439 void *ptr;
421 440
422 for_each_pgdat(pgdat) 441 list_for_each_entry(bdata, &bdata_list, list)
423 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, 442 if ((ptr = __alloc_bootmem_core(bdata, size,
424 align, goal, LOW32LIMIT))) 443 align, goal, LOW32LIMIT)))
425 return(ptr); 444 return(ptr);
426 445
diff --git a/mm/highmem.c b/mm/highmem.c
index d0ea1eec6a9a..55885f64af40 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
31 31
32static mempool_t *page_pool, *isa_page_pool; 32static mempool_t *page_pool, *isa_page_pool;
33 33
34static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data) 34static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
35{ 35{
36 return alloc_page(gfp_mask | GFP_DMA); 36 return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
37}
38
39static void page_pool_free(void *page, void *data)
40{
41 __free_page(page);
42} 37}
43 38
44/* 39/*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
51 */ 46 */
52#ifdef CONFIG_HIGHMEM 47#ifdef CONFIG_HIGHMEM
53 48
54static void *page_pool_alloc(gfp_t gfp_mask, void *data)
55{
56 return alloc_page(gfp_mask);
57}
58
59static int pkmap_count[LAST_PKMAP]; 49static int pkmap_count[LAST_PKMAP];
60static unsigned int last_pkmap_nr; 50static unsigned int last_pkmap_nr;
61static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); 51static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
229 if (!i.totalhigh) 219 if (!i.totalhigh)
230 return 0; 220 return 0;
231 221
232 page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); 222 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
233 if (!page_pool) 223 if (!page_pool)
234 BUG(); 224 BUG();
235 printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 225 printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
272 if (isa_page_pool) 262 if (isa_page_pool)
273 return 0; 263 return 0;
274 264
275 isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL); 265 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
266 mempool_free_pages, (void *) 0);
276 if (!isa_page_pool) 267 if (!isa_page_pool)
277 BUG(); 268 BUG();
278 269
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
337 bio_put(bio); 328 bio_put(bio);
338} 329}
339 330
340static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) 331static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
341{ 332{
342 if (bio->bi_size) 333 if (bio->bi_size)
343 return 1; 334 return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
384} 375}
385 376
386static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 377static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
387 mempool_t *pool) 378 mempool_t *pool)
388{ 379{
389 struct page *page; 380 struct page *page;
390 struct bio *bio = NULL; 381 struct bio *bio = NULL;
diff --git a/mm/memory.c b/mm/memory.c
index e347e106ca3a..8d8f52569f32 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1071,6 +1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1071 } 1071 }
1072 if (pages) { 1072 if (pages) {
1073 pages[i] = page; 1073 pages[i] = page;
1074
1075 flush_anon_page(page, start);
1074 flush_dcache_page(page); 1076 flush_dcache_page(page);
1075 } 1077 }
1076 if (vmas) 1078 if (vmas)
@@ -2352,10 +2354,8 @@ int make_pages_present(unsigned long addr, unsigned long end)
2352 if (!vma) 2354 if (!vma)
2353 return -1; 2355 return -1;
2354 write = (vma->vm_flags & VM_WRITE) != 0; 2356 write = (vma->vm_flags & VM_WRITE) != 0;
2355 if (addr >= end) 2357 BUG_ON(addr >= end);
2356 BUG(); 2358 BUG_ON(end > vma->vm_end);
2357 if (end > vma->vm_end)
2358 BUG();
2359 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE; 2359 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
2360 ret = get_user_pages(current, current->mm, addr, 2360 ret = get_user_pages(current, current->mm, addr,
2361 len, write, 0, NULL, NULL); 2361 len, write, 0, NULL, NULL);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4f71cfd29c6f..dec8249e972d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -912,7 +912,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
912 /* 912 /*
913 * Check if this process has the right to modify the specified 913 * Check if this process has the right to modify the specified
914 * process. The right exists if the process has administrative 914 * process. The right exists if the process has administrative
915 * capabilities, superuser priviledges or the same 915 * capabilities, superuser privileges or the same
916 * userid as the target process. 916 * userid as the target process.
917 */ 917 */
918 if ((current->euid != task->suid) && (current->euid != task->uid) && 918 if ((current->euid != task->suid) && (current->euid != task->uid) &&
diff --git a/mm/mempool.c b/mm/mempool.c
index f71893ed3543..fe6e05289cc5 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -183,8 +183,8 @@ EXPORT_SYMBOL(mempool_resize);
183 */ 183 */
184void mempool_destroy(mempool_t *pool) 184void mempool_destroy(mempool_t *pool)
185{ 185{
186 if (pool->curr_nr != pool->min_nr) 186 /* Check for outstanding elements */
187 BUG(); /* There were outstanding elements */ 187 BUG_ON(pool->curr_nr != pool->min_nr);
188 free_pool(pool); 188 free_pool(pool);
189} 189}
190EXPORT_SYMBOL(mempool_destroy); 190EXPORT_SYMBOL(mempool_destroy);
@@ -289,3 +289,45 @@ void mempool_free_slab(void *element, void *pool_data)
289 kmem_cache_free(mem, element); 289 kmem_cache_free(mem, element);
290} 290}
291EXPORT_SYMBOL(mempool_free_slab); 291EXPORT_SYMBOL(mempool_free_slab);
292
293/*
294 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
295 * specfied by pool_data
296 */
297void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
298{
299 size_t size = (size_t)(long)pool_data;
300 return kmalloc(size, gfp_mask);
301}
302EXPORT_SYMBOL(mempool_kmalloc);
303
304void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
305{
306 size_t size = (size_t) pool_data;
307 return kzalloc(size, gfp_mask);
308}
309EXPORT_SYMBOL(mempool_kzalloc);
310
311void mempool_kfree(void *element, void *pool_data)
312{
313 kfree(element);
314}
315EXPORT_SYMBOL(mempool_kfree);
316
317/*
318 * A simple mempool-backed page allocator that allocates pages
319 * of the order specified by pool_data.
320 */
321void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
322{
323 int order = (int)(long)pool_data;
324 return alloc_pages(gfp_mask, order);
325}
326EXPORT_SYMBOL(mempool_alloc_pages);
327
328void mempool_free_pages(void *element, void *pool_data)
329{
330 int order = (int)(long)pool_data;
331 __free_pages(element, order);
332}
333EXPORT_SYMBOL(mempool_free_pages);
diff --git a/mm/mmzone.c b/mm/mmzone.c
new file mode 100644
index 000000000000..b022370e612e
--- /dev/null
+++ b/mm/mmzone.c
@@ -0,0 +1,50 @@
1/*
2 * linux/mm/mmzone.c
3 *
4 * management codes for pgdats and zones.
5 */
6
7
8#include <linux/config.h>
9#include <linux/stddef.h>
10#include <linux/mmzone.h>
11#include <linux/module.h>
12
13struct pglist_data *first_online_pgdat(void)
14{
15 return NODE_DATA(first_online_node);
16}
17
18EXPORT_SYMBOL(first_online_pgdat);
19
20struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
21{
22 int nid = next_online_node(pgdat->node_id);
23
24 if (nid == MAX_NUMNODES)
25 return NULL;
26 return NODE_DATA(nid);
27}
28EXPORT_SYMBOL(next_online_pgdat);
29
30
31/*
32 * next_zone - helper magic for for_each_zone()
33 */
34struct zone *next_zone(struct zone *zone)
35{
36 pg_data_t *pgdat = zone->zone_pgdat;
37
38 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
39 zone++;
40 else {
41 pgdat = next_online_pgdat(pgdat);
42 if (pgdat)
43 zone = pgdat->node_zones;
44 else
45 zone = NULL;
46 }
47 return zone;
48}
49EXPORT_SYMBOL(next_zone);
50
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 338a02bb004d..dc523a1f270d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -49,7 +49,6 @@ nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
49EXPORT_SYMBOL(node_online_map); 49EXPORT_SYMBOL(node_online_map);
50nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 50nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
51EXPORT_SYMBOL(node_possible_map); 51EXPORT_SYMBOL(node_possible_map);
52struct pglist_data *pgdat_list __read_mostly;
53unsigned long totalram_pages __read_mostly; 52unsigned long totalram_pages __read_mostly;
54unsigned long totalhigh_pages __read_mostly; 53unsigned long totalhigh_pages __read_mostly;
55long nr_swap_pages; 54long nr_swap_pages;
@@ -1201,7 +1200,7 @@ unsigned int nr_free_highpages (void)
1201 pg_data_t *pgdat; 1200 pg_data_t *pgdat;
1202 unsigned int pages = 0; 1201 unsigned int pages = 0;
1203 1202
1204 for_each_pgdat(pgdat) 1203 for_each_online_pgdat(pgdat)
1205 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1204 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1206 1205
1207 return pages; 1206 return pages;
@@ -1343,7 +1342,7 @@ void get_zone_counts(unsigned long *active,
1343 *active = 0; 1342 *active = 0;
1344 *inactive = 0; 1343 *inactive = 0;
1345 *free = 0; 1344 *free = 0;
1346 for_each_pgdat(pgdat) { 1345 for_each_online_pgdat(pgdat) {
1347 unsigned long l, m, n; 1346 unsigned long l, m, n;
1348 __get_zone_counts(&l, &m, &n, pgdat); 1347 __get_zone_counts(&l, &m, &n, pgdat);
1349 *active += l; 1348 *active += l;
@@ -2042,7 +2041,6 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
2042 zone_wait_table_init(zone, size); 2041 zone_wait_table_init(zone, size);
2043 pgdat->nr_zones = zone_idx(zone) + 1; 2042 pgdat->nr_zones = zone_idx(zone) + 1;
2044 2043
2045 zone->zone_mem_map = pfn_to_page(zone_start_pfn);
2046 zone->zone_start_pfn = zone_start_pfn; 2044 zone->zone_start_pfn = zone_start_pfn;
2047 2045
2048 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2046 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
@@ -2170,8 +2168,9 @@ static void *frag_start(struct seq_file *m, loff_t *pos)
2170{ 2168{
2171 pg_data_t *pgdat; 2169 pg_data_t *pgdat;
2172 loff_t node = *pos; 2170 loff_t node = *pos;
2173 2171 for (pgdat = first_online_pgdat();
2174 for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next) 2172 pgdat && node;
2173 pgdat = next_online_pgdat(pgdat))
2175 --node; 2174 --node;
2176 2175
2177 return pgdat; 2176 return pgdat;
@@ -2182,7 +2181,7 @@ static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
2182 pg_data_t *pgdat = (pg_data_t *)arg; 2181 pg_data_t *pgdat = (pg_data_t *)arg;
2183 2182
2184 (*pos)++; 2183 (*pos)++;
2185 return pgdat->pgdat_next; 2184 return next_online_pgdat(pgdat);
2186} 2185}
2187 2186
2188static void frag_stop(struct seq_file *m, void *arg) 2187static void frag_stop(struct seq_file *m, void *arg)
@@ -2483,7 +2482,7 @@ static void setup_per_zone_lowmem_reserve(void)
2483 struct pglist_data *pgdat; 2482 struct pglist_data *pgdat;
2484 int j, idx; 2483 int j, idx;
2485 2484
2486 for_each_pgdat(pgdat) { 2485 for_each_online_pgdat(pgdat) {
2487 for (j = 0; j < MAX_NR_ZONES; j++) { 2486 for (j = 0; j < MAX_NR_ZONES; j++) {
2488 struct zone *zone = pgdat->node_zones + j; 2487 struct zone *zone = pgdat->node_zones + j;
2489 unsigned long present_pages = zone->present_pages; 2488 unsigned long present_pages = zone->present_pages;
@@ -2745,3 +2744,44 @@ void *__init alloc_large_system_hash(const char *tablename,
2745 2744
2746 return table; 2745 return table;
2747} 2746}
2747
2748#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
2749/*
2750 * pfn <-> page translation. out-of-line version.
2751 * (see asm-generic/memory_model.h)
2752 */
2753#if defined(CONFIG_FLATMEM)
2754struct page *pfn_to_page(unsigned long pfn)
2755{
2756 return mem_map + (pfn - ARCH_PFN_OFFSET);
2757}
2758unsigned long page_to_pfn(struct page *page)
2759{
2760 return (page - mem_map) + ARCH_PFN_OFFSET;
2761}
2762#elif defined(CONFIG_DISCONTIGMEM)
2763struct page *pfn_to_page(unsigned long pfn)
2764{
2765 int nid = arch_pfn_to_nid(pfn);
2766 return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
2767}
2768unsigned long page_to_pfn(struct page *page)
2769{
2770 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
2771 return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
2772}
2773#elif defined(CONFIG_SPARSEMEM)
2774struct page *pfn_to_page(unsigned long pfn)
2775{
2776 return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
2777}
2778
2779unsigned long page_to_pfn(struct page *page)
2780{
2781 long section_id = page_to_section(page);
2782 return page - __section_mem_map_addr(__nr_to_section(section_id));
2783}
2784#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */
2785EXPORT_SYMBOL(pfn_to_page);
2786EXPORT_SYMBOL(page_to_pfn);
2787#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
diff --git a/mm/slab.c b/mm/slab.c
index 681837499d7d..4cbf8bb13557 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size)
3311 * and we have no way of figuring out how to fix the array 3311 * and we have no way of figuring out how to fix the array
3312 * that we have allocated then.... 3312 * that we have allocated then....
3313 */ 3313 */
3314 for_each_cpu(i) { 3314 for_each_possible_cpu(i) {
3315 int node = cpu_to_node(i); 3315 int node = cpu_to_node(i);
3316 3316
3317 if (node_online(node)) 3317 if (node_online(node))
@@ -3398,7 +3398,7 @@ void free_percpu(const void *objp)
3398 /* 3398 /*
3399 * We allocate for all cpus so we cannot use for online cpu here. 3399 * We allocate for all cpus so we cannot use for online cpu here.
3400 */ 3400 */
3401 for_each_cpu(i) 3401 for_each_possible_cpu(i)
3402 kfree(p->ptrs[i]); 3402 kfree(p->ptrs[i]);
3403 kfree(p); 3403 kfree(p);
3404} 3404}
diff --git a/mm/swap.c b/mm/swap.c
index 91b7e2026f69..88895c249bc9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc)
512 512
513 spin_lock(&fbc->lock); 513 spin_lock(&fbc->lock);
514 ret = fbc->count; 514 ret = fbc->count;
515 for_each_cpu(cpu) { 515 for_each_possible_cpu(cpu) {
516 long *pcount = per_cpu_ptr(fbc->counters, cpu); 516 long *pcount = per_cpu_ptr(fbc->counters, cpu);
517 ret += *pcount; 517 ret += *pcount;
518 } 518 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 78865c849f8f..acdf001d6941 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1305,7 +1305,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
1305 1305
1306 current->reclaim_state = &reclaim_state; 1306 current->reclaim_state = &reclaim_state;
1307repeat: 1307repeat:
1308 for_each_pgdat(pgdat) { 1308 for_each_online_pgdat(pgdat) {
1309 unsigned long freed; 1309 unsigned long freed;
1310 1310
1311 freed = balance_pgdat(pgdat, nr_to_free, 0); 1311 freed = balance_pgdat(pgdat, nr_to_free, 0);
@@ -1335,7 +1335,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
1335 cpumask_t mask; 1335 cpumask_t mask;
1336 1336
1337 if (action == CPU_ONLINE) { 1337 if (action == CPU_ONLINE) {
1338 for_each_pgdat(pgdat) { 1338 for_each_online_pgdat(pgdat) {
1339 mask = node_to_cpumask(pgdat->node_id); 1339 mask = node_to_cpumask(pgdat->node_id);
1340 if (any_online_cpu(mask) != NR_CPUS) 1340 if (any_online_cpu(mask) != NR_CPUS)
1341 /* One of our CPUs online: restore mask */ 1341 /* One of our CPUs online: restore mask */
@@ -1351,7 +1351,7 @@ static int __init kswapd_init(void)
1351 pg_data_t *pgdat; 1351 pg_data_t *pgdat;
1352 1352
1353 swap_setup(); 1353 swap_setup();
1354 for_each_pgdat(pgdat) { 1354 for_each_online_pgdat(pgdat) {
1355 pid_t pid; 1355 pid_t pid;
1356 1356
1357 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); 1357 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9106354c781e..a49a6975092d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -73,23 +73,23 @@ DEFINE_RWLOCK(hci_cb_list_lock);
73struct hci_proto *hci_proto[HCI_MAX_PROTO]; 73struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 74
75/* HCI notifiers list */ 75/* HCI notifiers list */
76static struct notifier_block *hci_notifier; 76static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 77
78/* ---- HCI notifications ---- */ 78/* ---- HCI notifications ---- */
79 79
80int hci_register_notifier(struct notifier_block *nb) 80int hci_register_notifier(struct notifier_block *nb)
81{ 81{
82 return notifier_chain_register(&hci_notifier, nb); 82 return atomic_notifier_chain_register(&hci_notifier, nb);
83} 83}
84 84
85int hci_unregister_notifier(struct notifier_block *nb) 85int hci_unregister_notifier(struct notifier_block *nb)
86{ 86{
87 return notifier_chain_unregister(&hci_notifier, nb); 87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
88} 88}
89 89
90static void hci_notify(struct hci_dev *hdev, int event) 90static void hci_notify(struct hci_dev *hdev, int event)
91{ 91{
92 notifier_call_chain(&hci_notifier, event, hdev); 92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
93} 93}
94 94
95/* ---- HCI requests ---- */ 95/* ---- HCI requests ---- */
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e1dc3051222..a3ab11f34153 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex)
193 * Our notifier list 193 * Our notifier list
194 */ 194 */
195 195
196static struct notifier_block *netdev_chain; 196static BLOCKING_NOTIFIER_HEAD(netdev_chain);
197 197
198/* 198/*
199 * Device drivers call our routines to queue packets here. We empty the 199 * Device drivers call our routines to queue packets here. We empty the
@@ -736,7 +736,8 @@ int dev_change_name(struct net_device *dev, char *newname)
736 if (!err) { 736 if (!err) {
737 hlist_del(&dev->name_hlist); 737 hlist_del(&dev->name_hlist);
738 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); 738 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
739 notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev); 739 blocking_notifier_call_chain(&netdev_chain,
740 NETDEV_CHANGENAME, dev);
740 } 741 }
741 742
742 return err; 743 return err;
@@ -750,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
750 */ 751 */
751void netdev_features_change(struct net_device *dev) 752void netdev_features_change(struct net_device *dev)
752{ 753{
753 notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); 754 blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
754} 755}
755EXPORT_SYMBOL(netdev_features_change); 756EXPORT_SYMBOL(netdev_features_change);
756 757
@@ -765,7 +766,8 @@ EXPORT_SYMBOL(netdev_features_change);
765void netdev_state_change(struct net_device *dev) 766void netdev_state_change(struct net_device *dev)
766{ 767{
767 if (dev->flags & IFF_UP) { 768 if (dev->flags & IFF_UP) {
768 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); 769 blocking_notifier_call_chain(&netdev_chain,
770 NETDEV_CHANGE, dev);
769 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 771 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
770 } 772 }
771} 773}
@@ -862,7 +864,7 @@ int dev_open(struct net_device *dev)
862 /* 864 /*
863 * ... and announce new interface. 865 * ... and announce new interface.
864 */ 866 */
865 notifier_call_chain(&netdev_chain, NETDEV_UP, dev); 867 blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
866 } 868 }
867 return ret; 869 return ret;
868} 870}
@@ -885,7 +887,7 @@ int dev_close(struct net_device *dev)
885 * Tell people we are going down, so that they can 887 * Tell people we are going down, so that they can
886 * prepare to death, when device is still operating. 888 * prepare to death, when device is still operating.
887 */ 889 */
888 notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); 890 blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
889 891
890 dev_deactivate(dev); 892 dev_deactivate(dev);
891 893
@@ -922,7 +924,7 @@ int dev_close(struct net_device *dev)
922 /* 924 /*
923 * Tell people we are down 925 * Tell people we are down
924 */ 926 */
925 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); 927 blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
926 928
927 return 0; 929 return 0;
928} 930}
@@ -953,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
953 int err; 955 int err;
954 956
955 rtnl_lock(); 957 rtnl_lock();
956 err = notifier_chain_register(&netdev_chain, nb); 958 err = blocking_notifier_chain_register(&netdev_chain, nb);
957 if (!err) { 959 if (!err) {
958 for (dev = dev_base; dev; dev = dev->next) { 960 for (dev = dev_base; dev; dev = dev->next) {
959 nb->notifier_call(nb, NETDEV_REGISTER, dev); 961 nb->notifier_call(nb, NETDEV_REGISTER, dev);
@@ -981,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
981 int err; 983 int err;
982 984
983 rtnl_lock(); 985 rtnl_lock();
984 err = notifier_chain_unregister(&netdev_chain, nb); 986 err = blocking_notifier_chain_unregister(&netdev_chain, nb);
985 rtnl_unlock(); 987 rtnl_unlock();
986 return err; 988 return err;
987} 989}
@@ -992,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
992 * @v: pointer passed unmodified to notifier function 994 * @v: pointer passed unmodified to notifier function
993 * 995 *
994 * Call all network notifier blocks. Parameters and return value 996 * Call all network notifier blocks. Parameters and return value
995 * are as for notifier_call_chain(). 997 * are as for blocking_notifier_call_chain().
996 */ 998 */
997 999
998int call_netdevice_notifiers(unsigned long val, void *v) 1000int call_netdevice_notifiers(unsigned long val, void *v)
999{ 1001{
1000 return notifier_call_chain(&netdev_chain, val, v); 1002 return blocking_notifier_call_chain(&netdev_chain, val, v);
1001} 1003}
1002 1004
1003/* When > 0 there are consumers of rx skb time stamps */ 1005/* When > 0 there are consumers of rx skb time stamps */
@@ -2242,7 +2244,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2242 if (dev->flags & IFF_UP && 2244 if (dev->flags & IFF_UP &&
2243 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 2245 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2244 IFF_VOLATILE))) 2246 IFF_VOLATILE)))
2245 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); 2247 blocking_notifier_call_chain(&netdev_chain,
2248 NETDEV_CHANGE, dev);
2246 2249
2247 if ((flags ^ dev->gflags) & IFF_PROMISC) { 2250 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2248 int inc = (flags & IFF_PROMISC) ? +1 : -1; 2251 int inc = (flags & IFF_PROMISC) ? +1 : -1;
@@ -2286,8 +2289,8 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
2286 else 2289 else
2287 dev->mtu = new_mtu; 2290 dev->mtu = new_mtu;
2288 if (!err && dev->flags & IFF_UP) 2291 if (!err && dev->flags & IFF_UP)
2289 notifier_call_chain(&netdev_chain, 2292 blocking_notifier_call_chain(&netdev_chain,
2290 NETDEV_CHANGEMTU, dev); 2293 NETDEV_CHANGEMTU, dev);
2291 return err; 2294 return err;
2292} 2295}
2293 2296
@@ -2303,7 +2306,8 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2303 return -ENODEV; 2306 return -ENODEV;
2304 err = dev->set_mac_address(dev, sa); 2307 err = dev->set_mac_address(dev, sa);
2305 if (!err) 2308 if (!err)
2306 notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); 2309 blocking_notifier_call_chain(&netdev_chain,
2310 NETDEV_CHANGEADDR, dev);
2307 return err; 2311 return err;
2308} 2312}
2309 2313
@@ -2359,7 +2363,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2359 return -EINVAL; 2363 return -EINVAL;
2360 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, 2364 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2361 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 2365 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2362 notifier_call_chain(&netdev_chain, 2366 blocking_notifier_call_chain(&netdev_chain,
2363 NETDEV_CHANGEADDR, dev); 2367 NETDEV_CHANGEADDR, dev);
2364 return 0; 2368 return 0;
2365 2369
@@ -2813,7 +2817,7 @@ int register_netdevice(struct net_device *dev)
2813 write_unlock_bh(&dev_base_lock); 2817 write_unlock_bh(&dev_base_lock);
2814 2818
2815 /* Notify protocols, that a new device appeared. */ 2819 /* Notify protocols, that a new device appeared. */
2816 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); 2820 blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2817 2821
2818 /* Finish registration after unlock */ 2822 /* Finish registration after unlock */
2819 net_set_todo(dev); 2823 net_set_todo(dev);
@@ -2892,7 +2896,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
2892 rtnl_lock(); 2896 rtnl_lock();
2893 2897
2894 /* Rebroadcast unregister notification */ 2898 /* Rebroadcast unregister notification */
2895 notifier_call_chain(&netdev_chain, 2899 blocking_notifier_call_chain(&netdev_chain,
2896 NETDEV_UNREGISTER, dev); 2900 NETDEV_UNREGISTER, dev);
2897 2901
2898 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 2902 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
@@ -3148,7 +3152,7 @@ int unregister_netdevice(struct net_device *dev)
3148 /* Notify protocols, that we are about to destroy 3152 /* Notify protocols, that we are about to destroy
3149 this device. They should clean all the things. 3153 this device. They should clean all the things.
3150 */ 3154 */
3151 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); 3155 blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3152 3156
3153 /* 3157 /*
3154 * Flush the multicast chain 3158 * Flush the multicast chain
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 98f0fc923f91..1e44eda1fda9 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -51,7 +51,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
51 51
52 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); 52 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
53 rwlock_init(&queue->syn_wait_lock); 53 rwlock_init(&queue->syn_wait_lock);
54 queue->rskq_accept_head = queue->rskq_accept_head = NULL; 54 queue->rskq_accept_head = NULL;
55 lopt->nr_table_entries = nr_table_entries; 55 lopt->nr_table_entries = nr_table_entries;
56 56
57 write_lock_bh(&queue->syn_wait_lock); 57 write_lock_bh(&queue->syn_wait_lock);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cc7b9d9255ef..d2ae9893ca17 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -68,7 +68,7 @@ __le16 decnet_address = 0;
68 68
69static DEFINE_RWLOCK(dndev_lock); 69static DEFINE_RWLOCK(dndev_lock);
70static struct net_device *decnet_default_device; 70static struct net_device *decnet_default_device;
71static struct notifier_block *dnaddr_chain; 71static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
72 72
73static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); 73static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
74static void dn_dev_delete(struct net_device *dev); 74static void dn_dev_delete(struct net_device *dev);
@@ -446,7 +446,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
446 } 446 }
447 447
448 rtmsg_ifa(RTM_DELADDR, ifa1); 448 rtmsg_ifa(RTM_DELADDR, ifa1);
449 notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); 449 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
450 if (destroy) { 450 if (destroy) {
451 dn_dev_free_ifa(ifa1); 451 dn_dev_free_ifa(ifa1);
452 452
@@ -481,7 +481,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
481 dn_db->ifa_list = ifa; 481 dn_db->ifa_list = ifa;
482 482
483 rtmsg_ifa(RTM_NEWADDR, ifa); 483 rtmsg_ifa(RTM_NEWADDR, ifa);
484 notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); 484 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
485 485
486 return 0; 486 return 0;
487} 487}
@@ -1285,12 +1285,12 @@ void dn_dev_devices_on(void)
1285 1285
1286int register_dnaddr_notifier(struct notifier_block *nb) 1286int register_dnaddr_notifier(struct notifier_block *nb)
1287{ 1287{
1288 return notifier_chain_register(&dnaddr_chain, nb); 1288 return blocking_notifier_chain_register(&dnaddr_chain, nb);
1289} 1289}
1290 1290
1291int unregister_dnaddr_notifier(struct notifier_block *nb) 1291int unregister_dnaddr_notifier(struct notifier_block *nb)
1292{ 1292{
1293 return notifier_chain_unregister(&dnaddr_chain, nb); 1293 return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
1294} 1294}
1295 1295
1296#ifdef CONFIG_PROC_FS 1296#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 44fdf1413e2c..81c2f7885292 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -81,7 +81,7 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
81 81
82static void rtmsg_ifa(int event, struct in_ifaddr *); 82static void rtmsg_ifa(int event, struct in_ifaddr *);
83 83
84static struct notifier_block *inetaddr_chain; 84static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
85static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, 85static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
86 int destroy); 86 int destroy);
87#ifdef CONFIG_SYSCTL 87#ifdef CONFIG_SYSCTL
@@ -267,7 +267,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
267 *ifap1 = ifa->ifa_next; 267 *ifap1 = ifa->ifa_next;
268 268
269 rtmsg_ifa(RTM_DELADDR, ifa); 269 rtmsg_ifa(RTM_DELADDR, ifa);
270 notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa); 270 blocking_notifier_call_chain(&inetaddr_chain,
271 NETDEV_DOWN, ifa);
271 inet_free_ifa(ifa); 272 inet_free_ifa(ifa);
272 } else { 273 } else {
273 promote = ifa; 274 promote = ifa;
@@ -291,7 +292,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
291 So that, this order is correct. 292 So that, this order is correct.
292 */ 293 */
293 rtmsg_ifa(RTM_DELADDR, ifa1); 294 rtmsg_ifa(RTM_DELADDR, ifa1);
294 notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); 295 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
295 296
296 if (promote) { 297 if (promote) {
297 298
@@ -303,7 +304,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
303 304
304 promote->ifa_flags &= ~IFA_F_SECONDARY; 305 promote->ifa_flags &= ~IFA_F_SECONDARY;
305 rtmsg_ifa(RTM_NEWADDR, promote); 306 rtmsg_ifa(RTM_NEWADDR, promote);
306 notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); 307 blocking_notifier_call_chain(&inetaddr_chain,
308 NETDEV_UP, promote);
307 for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) { 309 for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) {
308 if (ifa1->ifa_mask != ifa->ifa_mask || 310 if (ifa1->ifa_mask != ifa->ifa_mask ||
309 !inet_ifa_match(ifa1->ifa_address, ifa)) 311 !inet_ifa_match(ifa1->ifa_address, ifa))
@@ -366,7 +368,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa)
366 Notifier will trigger FIB update, so that 368 Notifier will trigger FIB update, so that
367 listeners of netlink will know about new ifaddr */ 369 listeners of netlink will know about new ifaddr */
368 rtmsg_ifa(RTM_NEWADDR, ifa); 370 rtmsg_ifa(RTM_NEWADDR, ifa);
369 notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); 371 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
370 372
371 return 0; 373 return 0;
372} 374}
@@ -938,12 +940,12 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
938 940
939int register_inetaddr_notifier(struct notifier_block *nb) 941int register_inetaddr_notifier(struct notifier_block *nb)
940{ 942{
941 return notifier_chain_register(&inetaddr_chain, nb); 943 return blocking_notifier_chain_register(&inetaddr_chain, nb);
942} 944}
943 945
944int unregister_inetaddr_notifier(struct notifier_block *nb) 946int unregister_inetaddr_notifier(struct notifier_block *nb)
945{ 947{
946 return notifier_chain_unregister(&inetaddr_chain, nb); 948 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
947} 949}
948 950
949/* Rename ifa_labels for a device name change. Make some effort to preserve existing 951/* Rename ifa_labels for a device name change. Make some effort to preserve existing
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 7f0288b25fa1..f28ec6882162 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -34,6 +34,7 @@
34 34
35#include <linux/netfilter.h> 35#include <linux/netfilter.h>
36#include <linux/netfilter_ipv4.h> 36#include <linux/netfilter_ipv4.h>
37#include <linux/mutex.h>
37 38
38#include <net/ip.h> 39#include <net/ip.h>
39#include <net/route.h> 40#include <net/route.h>
@@ -44,7 +45,7 @@
44#include <net/ip_vs.h> 45#include <net/ip_vs.h>
45 46
46/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ 47/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
47static DECLARE_MUTEX(__ip_vs_mutex); 48static DEFINE_MUTEX(__ip_vs_mutex);
48 49
49/* lock for service table */ 50/* lock for service table */
50static DEFINE_RWLOCK(__ip_vs_svc_lock); 51static DEFINE_RWLOCK(__ip_vs_svc_lock);
@@ -1950,7 +1951,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1950 /* increase the module use count */ 1951 /* increase the module use count */
1951 ip_vs_use_count_inc(); 1952 ip_vs_use_count_inc();
1952 1953
1953 if (down_interruptible(&__ip_vs_mutex)) { 1954 if (mutex_lock_interruptible(&__ip_vs_mutex)) {
1954 ret = -ERESTARTSYS; 1955 ret = -ERESTARTSYS;
1955 goto out_dec; 1956 goto out_dec;
1956 } 1957 }
@@ -2041,7 +2042,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2041 ip_vs_service_put(svc); 2042 ip_vs_service_put(svc);
2042 2043
2043 out_unlock: 2044 out_unlock:
2044 up(&__ip_vs_mutex); 2045 mutex_unlock(&__ip_vs_mutex);
2045 out_dec: 2046 out_dec:
2046 /* decrease the module use count */ 2047 /* decrease the module use count */
2047 ip_vs_use_count_dec(); 2048 ip_vs_use_count_dec();
@@ -2211,7 +2212,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2211 if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) 2212 if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
2212 return -EFAULT; 2213 return -EFAULT;
2213 2214
2214 if (down_interruptible(&__ip_vs_mutex)) 2215 if (mutex_lock_interruptible(&__ip_vs_mutex))
2215 return -ERESTARTSYS; 2216 return -ERESTARTSYS;
2216 2217
2217 switch (cmd) { 2218 switch (cmd) {
@@ -2330,7 +2331,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2330 } 2331 }
2331 2332
2332 out: 2333 out:
2333 up(&__ip_vs_mutex); 2334 mutex_unlock(&__ip_vs_mutex);
2334 return ret; 2335 return ret;
2335} 2336}
2336 2337
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 9e34034729a6..ceaabc18202b 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -80,8 +80,8 @@ static int ip_conntrack_vmalloc;
80static unsigned int ip_conntrack_next_id; 80static unsigned int ip_conntrack_next_id;
81static unsigned int ip_conntrack_expect_next_id; 81static unsigned int ip_conntrack_expect_next_id;
82#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS 82#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
83struct notifier_block *ip_conntrack_chain; 83ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
84struct notifier_block *ip_conntrack_expect_chain; 84ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
85 85
86DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache); 86DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
87 87
@@ -92,7 +92,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
92{ 92{
93 DEBUGP("ecache: delivering events for %p\n", ecache->ct); 93 DEBUGP("ecache: delivering events for %p\n", ecache->ct);
94 if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events) 94 if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
95 notifier_call_chain(&ip_conntrack_chain, ecache->events, 95 atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
96 ecache->ct); 96 ecache->ct);
97 ecache->events = 0; 97 ecache->events = 0;
98 ip_conntrack_put(ecache->ct); 98 ip_conntrack_put(ecache->ct);
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index dc1521c5aa81..ba5e23505e88 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -40,6 +40,7 @@
40 40
41/* FIXME: this is just for IP_NF_ASSERT */ 41/* FIXME: this is just for IP_NF_ASSERT */
42#include <linux/netfilter_ipv4/ip_conntrack.h> 42#include <linux/netfilter_ipv4/ip_conntrack.h>
43#include <linux/mutex.h>
43 44
44MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
45MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 46MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -92,7 +93,7 @@ struct ipt_hashlimit_htable {
92}; 93};
93 94
94static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 95static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
95static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */ 96static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
96static HLIST_HEAD(hashlimit_htables); 97static HLIST_HEAD(hashlimit_htables);
97static kmem_cache_t *hashlimit_cachep __read_mostly; 98static kmem_cache_t *hashlimit_cachep __read_mostly;
98 99
@@ -542,13 +543,13 @@ hashlimit_checkentry(const char *tablename,
542 * call vmalloc, and that can sleep. And we cannot just re-search 543 * call vmalloc, and that can sleep. And we cannot just re-search
543 * the list of htable's in htable_create(), since then we would 544 * the list of htable's in htable_create(), since then we would
544 * create duplicate proc files. -HW */ 545 * create duplicate proc files. -HW */
545 down(&hlimit_mutex); 546 mutex_lock(&hlimit_mutex);
546 r->hinfo = htable_find_get(r->name); 547 r->hinfo = htable_find_get(r->name);
547 if (!r->hinfo && (htable_create(r) != 0)) { 548 if (!r->hinfo && (htable_create(r) != 0)) {
548 up(&hlimit_mutex); 549 mutex_unlock(&hlimit_mutex);
549 return 0; 550 return 0;
550 } 551 }
551 up(&hlimit_mutex); 552 mutex_unlock(&hlimit_mutex);
552 553
553 /* Ugly hack: For SMP, we only want to use one set */ 554 /* Ugly hack: For SMP, we only want to use one set */
554 r->u.master = r; 555 r->u.master = r;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 01c62a0d3742..445006ee4522 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -143,7 +143,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
143 struct prefix_info *pinfo); 143 struct prefix_info *pinfo);
144static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev); 144static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev);
145 145
146static struct notifier_block *inet6addr_chain; 146static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
147 147
148struct ipv6_devconf ipv6_devconf = { 148struct ipv6_devconf ipv6_devconf = {
149 .forwarding = 0, 149 .forwarding = 0,
@@ -593,7 +593,7 @@ out2:
593 read_unlock_bh(&addrconf_lock); 593 read_unlock_bh(&addrconf_lock);
594 594
595 if (likely(err == 0)) 595 if (likely(err == 0))
596 notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); 596 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
597 else { 597 else {
598 kfree(ifa); 598 kfree(ifa);
599 ifa = ERR_PTR(err); 599 ifa = ERR_PTR(err);
@@ -688,7 +688,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
688 688
689 ipv6_ifa_notify(RTM_DELADDR, ifp); 689 ipv6_ifa_notify(RTM_DELADDR, ifp);
690 690
691 notifier_call_chain(&inet6addr_chain,NETDEV_DOWN,ifp); 691 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
692 692
693 addrconf_del_timer(ifp); 693 addrconf_del_timer(ifp);
694 694
@@ -3767,12 +3767,12 @@ static void addrconf_sysctl_unregister(struct ipv6_devconf *p)
3767 3767
3768int register_inet6addr_notifier(struct notifier_block *nb) 3768int register_inet6addr_notifier(struct notifier_block *nb)
3769{ 3769{
3770 return notifier_chain_register(&inet6addr_chain, nb); 3770 return atomic_notifier_chain_register(&inet6addr_chain, nb);
3771} 3771}
3772 3772
3773int unregister_inet6addr_notifier(struct notifier_block *nb) 3773int unregister_inet6addr_notifier(struct notifier_block *nb)
3774{ 3774{
3775 return notifier_chain_unregister(&inet6addr_chain,nb); 3775 return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
3776} 3776}
3777 3777
3778/* 3778/*
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 028b636687ec..d4cfec3f414e 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -228,6 +228,9 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
228 228
229 t->id.proto = IPPROTO_IPV6; 229 t->id.proto = IPPROTO_IPV6;
230 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr); 230 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr);
231 if (!t->id.spi)
232 goto error;
233
231 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr)); 234 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr));
232 memcpy(&t->sel, &x->sel, sizeof(t->sel)); 235 memcpy(&t->sel, &x->sel, sizeof(t->sel));
233 t->props.family = AF_INET6; 236 t->props.family = AF_INET6;
@@ -243,7 +246,9 @@ out:
243 return t; 246 return t;
244 247
245error: 248error:
249 t->km.state = XFRM_STATE_DEAD;
246 xfrm_state_put(t); 250 xfrm_state_put(t);
251 t = NULL;
247 goto out; 252 goto out;
248} 253}
249 254
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 759445648667..627b11342233 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1302,7 +1302,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1302 if (sk->sk_state != TCP_ESTABLISHED) 1302 if (sk->sk_state != TCP_ESTABLISHED)
1303 return -ENOTCONN; 1303 return -ENOTCONN;
1304 1304
1305 /* Check that we don't send out to big frames */ 1305 /* Check that we don't send out too big frames */
1306 if (len > self->max_data_size) { 1306 if (len > self->max_data_size) {
1307 IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", 1307 IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
1308 __FUNCTION__, len, self->max_data_size); 1308 __FUNCTION__, len, self->max_data_size);
@@ -1546,7 +1546,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
1546 IRDA_ASSERT(self != NULL, return -1;); 1546 IRDA_ASSERT(self != NULL, return -1;);
1547 1547
1548 /* 1548 /*
1549 * Check that we don't send out to big frames. This is an unreliable 1549 * Check that we don't send out too big frames. This is an unreliable
1550 * service, so we have no fragmentation and no coalescence 1550 * service, so we have no fragmentation and no coalescence
1551 */ 1551 */
1552 if (len > self->max_data_size) { 1552 if (len > self->max_data_size) {
@@ -1642,7 +1642,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
1642 } 1642 }
1643 1643
1644 /* 1644 /*
1645 * Check that we don't send out to big frames. This is an unreliable 1645 * Check that we don't send out too big frames. This is an unreliable
1646 * service, so we have no fragmentation and no coalescence 1646 * service, so we have no fragmentation and no coalescence
1647 */ 1647 */
1648 if (len > self->max_data_size) { 1648 if (len > self->max_data_size) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0ae281d9bfc3..56389c83557c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -90,8 +90,8 @@ static int nf_conntrack_vmalloc;
90static unsigned int nf_conntrack_next_id; 90static unsigned int nf_conntrack_next_id;
91static unsigned int nf_conntrack_expect_next_id; 91static unsigned int nf_conntrack_expect_next_id;
92#ifdef CONFIG_NF_CONNTRACK_EVENTS 92#ifdef CONFIG_NF_CONNTRACK_EVENTS
93struct notifier_block *nf_conntrack_chain; 93ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
94struct notifier_block *nf_conntrack_expect_chain; 94ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
95 95
96DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 96DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
97 97
@@ -103,7 +103,7 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
103 DEBUGP("ecache: delivering events for %p\n", ecache->ct); 103 DEBUGP("ecache: delivering events for %p\n", ecache->ct);
104 if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) 104 if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
105 && ecache->events) 105 && ecache->events)
106 notifier_call_chain(&nf_conntrack_chain, ecache->events, 106 atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
107 ecache->ct); 107 ecache->ct);
108 108
109 ecache->events = 0; 109 ecache->events = 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d00a9034cb5f..2a233ffcf618 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -123,7 +123,7 @@ static void netlink_destroy_callback(struct netlink_callback *cb);
123static DEFINE_RWLOCK(nl_table_lock); 123static DEFINE_RWLOCK(nl_table_lock);
124static atomic_t nl_table_users = ATOMIC_INIT(0); 124static atomic_t nl_table_users = ATOMIC_INIT(0);
125 125
126static struct notifier_block *netlink_chain; 126static ATOMIC_NOTIFIER_HEAD(netlink_chain);
127 127
128static u32 netlink_group_mask(u32 group) 128static u32 netlink_group_mask(u32 group)
129{ 129{
@@ -469,7 +469,8 @@ static int netlink_release(struct socket *sock)
469 .protocol = sk->sk_protocol, 469 .protocol = sk->sk_protocol,
470 .pid = nlk->pid, 470 .pid = nlk->pid,
471 }; 471 };
472 notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); 472 atomic_notifier_call_chain(&netlink_chain,
473 NETLINK_URELEASE, &n);
473 } 474 }
474 475
475 if (nlk->module) 476 if (nlk->module)
@@ -1695,12 +1696,12 @@ static struct file_operations netlink_seq_fops = {
1695 1696
1696int netlink_register_notifier(struct notifier_block *nb) 1697int netlink_register_notifier(struct notifier_block *nb)
1697{ 1698{
1698 return notifier_chain_register(&netlink_chain, nb); 1699 return atomic_notifier_chain_register(&netlink_chain, nb);
1699} 1700}
1700 1701
1701int netlink_unregister_notifier(struct notifier_block *nb) 1702int netlink_unregister_notifier(struct notifier_block *nb)
1702{ 1703{
1703 return notifier_chain_unregister(&netlink_chain, nb); 1704 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1704} 1705}
1705 1706
1706static const struct proto_ops netlink_ops = { 1707static const struct proto_ops netlink_ops = {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 43e72419c868..f329b72578f5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -13,26 +13,27 @@
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/mutex.h>
16#include <net/sock.h> 17#include <net/sock.h>
17#include <net/genetlink.h> 18#include <net/genetlink.h>
18 19
19struct sock *genl_sock = NULL; 20struct sock *genl_sock = NULL;
20 21
21static DECLARE_MUTEX(genl_sem); /* serialization of message processing */ 22static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
22 23
23static void genl_lock(void) 24static void genl_lock(void)
24{ 25{
25 down(&genl_sem); 26 mutex_lock(&genl_mutex);
26} 27}
27 28
28static int genl_trylock(void) 29static int genl_trylock(void)
29{ 30{
30 return down_trylock(&genl_sem); 31 return !mutex_trylock(&genl_mutex);
31} 32}
32 33
33static void genl_unlock(void) 34static void genl_unlock(void)
34{ 35{
35 up(&genl_sem); 36 mutex_unlock(&genl_mutex);
36 37
37 if (genl_sock && genl_sock->sk_receive_queue.qlen) 38 if (genl_sock && genl_sock->sk_receive_queue.qlen)
38 genl_sock->sk_data_ready(genl_sock, 0); 39 genl_sock->sk_data_ready(genl_sock, 0);
diff --git a/net/nonet.c b/net/nonet.c
index 1230f0ae832e..92e76640c7cd 100644
--- a/net/nonet.c
+++ b/net/nonet.c
@@ -19,7 +19,7 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
19 return -ENXIO; 19 return -ENXIO;
20} 20}
21 21
22struct file_operations bad_sock_fops = { 22const struct file_operations bad_sock_fops = {
23 .owner = THIS_MODULE, 23 .owner = THIS_MODULE,
24 .open = sock_no_open, 24 .open = sock_no_open,
25}; 25};
diff --git a/net/socket.c b/net/socket.c
index 5211ba270375..fcd77eac0ccf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -539,7 +539,7 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
539 return -ENXIO; 539 return -ENXIO;
540} 540}
541 541
542struct file_operations bad_sock_fops = { 542const struct file_operations bad_sock_fops = {
543 .owner = THIS_MODULE, 543 .owner = THIS_MODULE,
544 .open = sock_no_open, 544 .open = sock_no_open,
545}; 545};
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 23632d84d8d7..4d7eb9e704da 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -78,7 +78,8 @@ struct rsi {
78 78
79static struct cache_head *rsi_table[RSI_HASHMAX]; 79static struct cache_head *rsi_table[RSI_HASHMAX];
80static struct cache_detail rsi_cache; 80static struct cache_detail rsi_cache;
81static struct rsi *rsi_lookup(struct rsi *item, int set); 81static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
82static struct rsi *rsi_lookup(struct rsi *item);
82 83
83static void rsi_free(struct rsi *rsii) 84static void rsi_free(struct rsi *rsii)
84{ 85{
@@ -88,13 +89,11 @@ static void rsi_free(struct rsi *rsii)
88 kfree(rsii->out_token.data); 89 kfree(rsii->out_token.data);
89} 90}
90 91
91static void rsi_put(struct cache_head *item, struct cache_detail *cd) 92static void rsi_put(struct kref *ref)
92{ 93{
93 struct rsi *rsii = container_of(item, struct rsi, h); 94 struct rsi *rsii = container_of(ref, struct rsi, h.ref);
94 if (cache_put(item, cd)) { 95 rsi_free(rsii);
95 rsi_free(rsii); 96 kfree(rsii);
96 kfree(rsii);
97 }
98} 97}
99 98
100static inline int rsi_hash(struct rsi *item) 99static inline int rsi_hash(struct rsi *item)
@@ -103,8 +102,10 @@ static inline int rsi_hash(struct rsi *item)
103 ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS); 102 ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
104} 103}
105 104
106static inline int rsi_match(struct rsi *item, struct rsi *tmp) 105static int rsi_match(struct cache_head *a, struct cache_head *b)
107{ 106{
107 struct rsi *item = container_of(a, struct rsi, h);
108 struct rsi *tmp = container_of(b, struct rsi, h);
108 return netobj_equal(&item->in_handle, &tmp->in_handle) 109 return netobj_equal(&item->in_handle, &tmp->in_handle)
109 && netobj_equal(&item->in_token, &tmp->in_token); 110 && netobj_equal(&item->in_token, &tmp->in_token);
110} 111}
@@ -125,8 +126,11 @@ static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
125 return dup_to_netobj(dst, src->data, src->len); 126 return dup_to_netobj(dst, src->data, src->len);
126} 127}
127 128
128static inline void rsi_init(struct rsi *new, struct rsi *item) 129static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
129{ 130{
131 struct rsi *new = container_of(cnew, struct rsi, h);
132 struct rsi *item = container_of(citem, struct rsi, h);
133
130 new->out_handle.data = NULL; 134 new->out_handle.data = NULL;
131 new->out_handle.len = 0; 135 new->out_handle.len = 0;
132 new->out_token.data = NULL; 136 new->out_token.data = NULL;
@@ -141,8 +145,11 @@ static inline void rsi_init(struct rsi *new, struct rsi *item)
141 item->in_token.data = NULL; 145 item->in_token.data = NULL;
142} 146}
143 147
144static inline void rsi_update(struct rsi *new, struct rsi *item) 148static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
145{ 149{
150 struct rsi *new = container_of(cnew, struct rsi, h);
151 struct rsi *item = container_of(citem, struct rsi, h);
152
146 BUG_ON(new->out_handle.data || new->out_token.data); 153 BUG_ON(new->out_handle.data || new->out_token.data);
147 new->out_handle.len = item->out_handle.len; 154 new->out_handle.len = item->out_handle.len;
148 item->out_handle.len = 0; 155 item->out_handle.len = 0;
@@ -157,6 +164,15 @@ static inline void rsi_update(struct rsi *new, struct rsi *item)
157 new->minor_status = item->minor_status; 164 new->minor_status = item->minor_status;
158} 165}
159 166
167static struct cache_head *rsi_alloc(void)
168{
169 struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
170 if (rsii)
171 return &rsii->h;
172 else
173 return NULL;
174}
175
160static void rsi_request(struct cache_detail *cd, 176static void rsi_request(struct cache_detail *cd,
161 struct cache_head *h, 177 struct cache_head *h,
162 char **bpp, int *blen) 178 char **bpp, int *blen)
@@ -198,6 +214,10 @@ static int rsi_parse(struct cache_detail *cd,
198 if (dup_to_netobj(&rsii.in_token, buf, len)) 214 if (dup_to_netobj(&rsii.in_token, buf, len))
199 goto out; 215 goto out;
200 216
217 rsip = rsi_lookup(&rsii);
218 if (!rsip)
219 goto out;
220
201 rsii.h.flags = 0; 221 rsii.h.flags = 0;
202 /* expiry */ 222 /* expiry */
203 expiry = get_expiry(&mesg); 223 expiry = get_expiry(&mesg);
@@ -240,12 +260,14 @@ static int rsi_parse(struct cache_detail *cd,
240 goto out; 260 goto out;
241 } 261 }
242 rsii.h.expiry_time = expiry; 262 rsii.h.expiry_time = expiry;
243 rsip = rsi_lookup(&rsii, 1); 263 rsip = rsi_update(&rsii, rsip);
244 status = 0; 264 status = 0;
245out: 265out:
246 rsi_free(&rsii); 266 rsi_free(&rsii);
247 if (rsip) 267 if (rsip)
248 rsi_put(&rsip->h, &rsi_cache); 268 cache_put(&rsip->h, &rsi_cache);
269 else
270 status = -ENOMEM;
249 return status; 271 return status;
250} 272}
251 273
@@ -257,9 +279,37 @@ static struct cache_detail rsi_cache = {
257 .cache_put = rsi_put, 279 .cache_put = rsi_put,
258 .cache_request = rsi_request, 280 .cache_request = rsi_request,
259 .cache_parse = rsi_parse, 281 .cache_parse = rsi_parse,
282 .match = rsi_match,
283 .init = rsi_init,
284 .update = update_rsi,
285 .alloc = rsi_alloc,
260}; 286};
261 287
262static DefineSimpleCacheLookup(rsi, 0) 288static struct rsi *rsi_lookup(struct rsi *item)
289{
290 struct cache_head *ch;
291 int hash = rsi_hash(item);
292
293 ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
294 if (ch)
295 return container_of(ch, struct rsi, h);
296 else
297 return NULL;
298}
299
300static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
301{
302 struct cache_head *ch;
303 int hash = rsi_hash(new);
304
305 ch = sunrpc_cache_update(&rsi_cache, &new->h,
306 &old->h, hash);
307 if (ch)
308 return container_of(ch, struct rsi, h);
309 else
310 return NULL;
311}
312
263 313
264/* 314/*
265 * The rpcsec_context cache is used to store a context that is 315 * The rpcsec_context cache is used to store a context that is
@@ -293,7 +343,8 @@ struct rsc {
293 343
294static struct cache_head *rsc_table[RSC_HASHMAX]; 344static struct cache_head *rsc_table[RSC_HASHMAX];
295static struct cache_detail rsc_cache; 345static struct cache_detail rsc_cache;
296static struct rsc *rsc_lookup(struct rsc *item, int set); 346static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
347static struct rsc *rsc_lookup(struct rsc *item);
297 348
298static void rsc_free(struct rsc *rsci) 349static void rsc_free(struct rsc *rsci)
299{ 350{
@@ -304,14 +355,12 @@ static void rsc_free(struct rsc *rsci)
304 put_group_info(rsci->cred.cr_group_info); 355 put_group_info(rsci->cred.cr_group_info);
305} 356}
306 357
307static void rsc_put(struct cache_head *item, struct cache_detail *cd) 358static void rsc_put(struct kref *ref)
308{ 359{
309 struct rsc *rsci = container_of(item, struct rsc, h); 360 struct rsc *rsci = container_of(ref, struct rsc, h.ref);
310 361
311 if (cache_put(item, cd)) { 362 rsc_free(rsci);
312 rsc_free(rsci); 363 kfree(rsci);
313 kfree(rsci);
314 }
315} 364}
316 365
317static inline int 366static inline int
@@ -320,15 +369,21 @@ rsc_hash(struct rsc *rsci)
320 return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS); 369 return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
321} 370}
322 371
323static inline int 372static int
324rsc_match(struct rsc *new, struct rsc *tmp) 373rsc_match(struct cache_head *a, struct cache_head *b)
325{ 374{
375 struct rsc *new = container_of(a, struct rsc, h);
376 struct rsc *tmp = container_of(b, struct rsc, h);
377
326 return netobj_equal(&new->handle, &tmp->handle); 378 return netobj_equal(&new->handle, &tmp->handle);
327} 379}
328 380
329static inline void 381static void
330rsc_init(struct rsc *new, struct rsc *tmp) 382rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
331{ 383{
384 struct rsc *new = container_of(cnew, struct rsc, h);
385 struct rsc *tmp = container_of(ctmp, struct rsc, h);
386
332 new->handle.len = tmp->handle.len; 387 new->handle.len = tmp->handle.len;
333 tmp->handle.len = 0; 388 tmp->handle.len = 0;
334 new->handle.data = tmp->handle.data; 389 new->handle.data = tmp->handle.data;
@@ -337,9 +392,12 @@ rsc_init(struct rsc *new, struct rsc *tmp)
337 new->cred.cr_group_info = NULL; 392 new->cred.cr_group_info = NULL;
338} 393}
339 394
340static inline void 395static void
341rsc_update(struct rsc *new, struct rsc *tmp) 396update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
342{ 397{
398 struct rsc *new = container_of(cnew, struct rsc, h);
399 struct rsc *tmp = container_of(ctmp, struct rsc, h);
400
343 new->mechctx = tmp->mechctx; 401 new->mechctx = tmp->mechctx;
344 tmp->mechctx = NULL; 402 tmp->mechctx = NULL;
345 memset(&new->seqdata, 0, sizeof(new->seqdata)); 403 memset(&new->seqdata, 0, sizeof(new->seqdata));
@@ -348,6 +406,16 @@ rsc_update(struct rsc *new, struct rsc *tmp)
348 tmp->cred.cr_group_info = NULL; 406 tmp->cred.cr_group_info = NULL;
349} 407}
350 408
409static struct cache_head *
410rsc_alloc(void)
411{
412 struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
413 if (rsci)
414 return &rsci->h;
415 else
416 return NULL;
417}
418
351static int rsc_parse(struct cache_detail *cd, 419static int rsc_parse(struct cache_detail *cd,
352 char *mesg, int mlen) 420 char *mesg, int mlen)
353{ 421{
@@ -373,6 +441,10 @@ static int rsc_parse(struct cache_detail *cd,
373 if (expiry == 0) 441 if (expiry == 0)
374 goto out; 442 goto out;
375 443
444 rscp = rsc_lookup(&rsci);
445 if (!rscp)
446 goto out;
447
376 /* uid, or NEGATIVE */ 448 /* uid, or NEGATIVE */
377 rv = get_int(&mesg, &rsci.cred.cr_uid); 449 rv = get_int(&mesg, &rsci.cred.cr_uid);
378 if (rv == -EINVAL) 450 if (rv == -EINVAL)
@@ -428,12 +500,14 @@ static int rsc_parse(struct cache_detail *cd,
428 gss_mech_put(gm); 500 gss_mech_put(gm);
429 } 501 }
430 rsci.h.expiry_time = expiry; 502 rsci.h.expiry_time = expiry;
431 rscp = rsc_lookup(&rsci, 1); 503 rscp = rsc_update(&rsci, rscp);
432 status = 0; 504 status = 0;
433out: 505out:
434 rsc_free(&rsci); 506 rsc_free(&rsci);
435 if (rscp) 507 if (rscp)
436 rsc_put(&rscp->h, &rsc_cache); 508 cache_put(&rscp->h, &rsc_cache);
509 else
510 status = -ENOMEM;
437 return status; 511 return status;
438} 512}
439 513
@@ -444,9 +518,37 @@ static struct cache_detail rsc_cache = {
444 .name = "auth.rpcsec.context", 518 .name = "auth.rpcsec.context",
445 .cache_put = rsc_put, 519 .cache_put = rsc_put,
446 .cache_parse = rsc_parse, 520 .cache_parse = rsc_parse,
521 .match = rsc_match,
522 .init = rsc_init,
523 .update = update_rsc,
524 .alloc = rsc_alloc,
447}; 525};
448 526
449static DefineSimpleCacheLookup(rsc, 0); 527static struct rsc *rsc_lookup(struct rsc *item)
528{
529 struct cache_head *ch;
530 int hash = rsc_hash(item);
531
532 ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
533 if (ch)
534 return container_of(ch, struct rsc, h);
535 else
536 return NULL;
537}
538
539static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
540{
541 struct cache_head *ch;
542 int hash = rsc_hash(new);
543
544 ch = sunrpc_cache_update(&rsc_cache, &new->h,
545 &old->h, hash);
546 if (ch)
547 return container_of(ch, struct rsc, h);
548 else
549 return NULL;
550}
551
450 552
451static struct rsc * 553static struct rsc *
452gss_svc_searchbyctx(struct xdr_netobj *handle) 554gss_svc_searchbyctx(struct xdr_netobj *handle)
@@ -457,7 +559,7 @@ gss_svc_searchbyctx(struct xdr_netobj *handle)
457 memset(&rsci, 0, sizeof(rsci)); 559 memset(&rsci, 0, sizeof(rsci));
458 if (dup_to_netobj(&rsci.handle, handle->data, handle->len)) 560 if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
459 return NULL; 561 return NULL;
460 found = rsc_lookup(&rsci, 0); 562 found = rsc_lookup(&rsci);
461 rsc_free(&rsci); 563 rsc_free(&rsci);
462 if (!found) 564 if (!found)
463 return NULL; 565 return NULL;
@@ -645,6 +747,8 @@ find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
645 return auth_domain_find(name); 747 return auth_domain_find(name);
646} 748}
647 749
750static struct auth_ops svcauthops_gss;
751
648int 752int
649svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) 753svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
650{ 754{
@@ -655,20 +759,18 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
655 new = kmalloc(sizeof(*new), GFP_KERNEL); 759 new = kmalloc(sizeof(*new), GFP_KERNEL);
656 if (!new) 760 if (!new)
657 goto out; 761 goto out;
658 cache_init(&new->h.h); 762 kref_init(&new->h.ref);
659 new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL); 763 new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
660 if (!new->h.name) 764 if (!new->h.name)
661 goto out_free_dom; 765 goto out_free_dom;
662 strcpy(new->h.name, name); 766 strcpy(new->h.name, name);
663 new->h.flavour = RPC_AUTH_GSS; 767 new->h.flavour = &svcauthops_gss;
664 new->pseudoflavor = pseudoflavor; 768 new->pseudoflavor = pseudoflavor;
665 new->h.h.expiry_time = NEVER;
666 769
667 test = auth_domain_lookup(&new->h, 1); 770 test = auth_domain_lookup(name, &new->h);
668 if (test == &new->h) { 771 if (test != &new->h) { /* XXX Duplicate registration? */
669 BUG_ON(atomic_dec_and_test(&new->h.h.refcnt));
670 } else { /* XXX Duplicate registration? */
671 auth_domain_put(&new->h); 772 auth_domain_put(&new->h);
773 /* dangling ref-count... */
672 goto out; 774 goto out;
673 } 775 }
674 return 0; 776 return 0;
@@ -895,7 +997,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
895 goto drop; 997 goto drop;
896 } 998 }
897 999
898 rsip = rsi_lookup(&rsikey, 0); 1000 rsip = rsi_lookup(&rsikey);
899 rsi_free(&rsikey); 1001 rsi_free(&rsikey);
900 if (!rsip) { 1002 if (!rsip) {
901 goto drop; 1003 goto drop;
@@ -970,7 +1072,7 @@ drop:
970 ret = SVC_DROP; 1072 ret = SVC_DROP;
971out: 1073out:
972 if (rsci) 1074 if (rsci)
973 rsc_put(&rsci->h, &rsc_cache); 1075 cache_put(&rsci->h, &rsc_cache);
974 return ret; 1076 return ret;
975} 1077}
976 1078
@@ -1062,7 +1164,7 @@ out_err:
1062 put_group_info(rqstp->rq_cred.cr_group_info); 1164 put_group_info(rqstp->rq_cred.cr_group_info);
1063 rqstp->rq_cred.cr_group_info = NULL; 1165 rqstp->rq_cred.cr_group_info = NULL;
1064 if (gsd->rsci) 1166 if (gsd->rsci)
1065 rsc_put(&gsd->rsci->h, &rsc_cache); 1167 cache_put(&gsd->rsci->h, &rsc_cache);
1066 gsd->rsci = NULL; 1168 gsd->rsci = NULL;
1067 1169
1068 return stat; 1170 return stat;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 0acccfeeb284..3ac4193a78ed 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,16 +37,138 @@
37static void cache_defer_req(struct cache_req *req, struct cache_head *item); 37static void cache_defer_req(struct cache_req *req, struct cache_head *item);
38static void cache_revisit_request(struct cache_head *item); 38static void cache_revisit_request(struct cache_head *item);
39 39
40void cache_init(struct cache_head *h) 40static void cache_init(struct cache_head *h)
41{ 41{
42 time_t now = get_seconds(); 42 time_t now = get_seconds();
43 h->next = NULL; 43 h->next = NULL;
44 h->flags = 0; 44 h->flags = 0;
45 atomic_set(&h->refcnt, 1); 45 kref_init(&h->ref);
46 h->expiry_time = now + CACHE_NEW_EXPIRY; 46 h->expiry_time = now + CACHE_NEW_EXPIRY;
47 h->last_refresh = now; 47 h->last_refresh = now;
48} 48}
49 49
50struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
51 struct cache_head *key, int hash)
52{
53 struct cache_head **head, **hp;
54 struct cache_head *new = NULL;
55
56 head = &detail->hash_table[hash];
57
58 read_lock(&detail->hash_lock);
59
60 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
61 struct cache_head *tmp = *hp;
62 if (detail->match(tmp, key)) {
63 cache_get(tmp);
64 read_unlock(&detail->hash_lock);
65 return tmp;
66 }
67 }
68 read_unlock(&detail->hash_lock);
69 /* Didn't find anything, insert an empty entry */
70
71 new = detail->alloc();
72 if (!new)
73 return NULL;
74 cache_init(new);
75
76 write_lock(&detail->hash_lock);
77
78 /* check if entry appeared while we slept */
79 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
80 struct cache_head *tmp = *hp;
81 if (detail->match(tmp, key)) {
82 cache_get(tmp);
83 write_unlock(&detail->hash_lock);
84 cache_put(new, detail);
85 return tmp;
86 }
87 }
88 detail->init(new, key);
89 new->next = *head;
90 *head = new;
91 detail->entries++;
92 cache_get(new);
93 write_unlock(&detail->hash_lock);
94
95 return new;
96}
97EXPORT_SYMBOL(sunrpc_cache_lookup);
98
99
100static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
101
102static int cache_fresh_locked(struct cache_head *head, time_t expiry)
103{
104 head->expiry_time = expiry;
105 head->last_refresh = get_seconds();
106 return !test_and_set_bit(CACHE_VALID, &head->flags);
107}
108
109static void cache_fresh_unlocked(struct cache_head *head,
110 struct cache_detail *detail, int new)
111{
112 if (new)
113 cache_revisit_request(head);
114 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
115 cache_revisit_request(head);
116 queue_loose(detail, head);
117 }
118}
119
120struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
121 struct cache_head *new, struct cache_head *old, int hash)
122{
123 /* The 'old' entry is to be replaced by 'new'.
124 * If 'old' is not VALID, we update it directly,
125 * otherwise we need to replace it
126 */
127 struct cache_head **head;
128 struct cache_head *tmp;
129 int is_new;
130
131 if (!test_bit(CACHE_VALID, &old->flags)) {
132 write_lock(&detail->hash_lock);
133 if (!test_bit(CACHE_VALID, &old->flags)) {
134 if (test_bit(CACHE_NEGATIVE, &new->flags))
135 set_bit(CACHE_NEGATIVE, &old->flags);
136 else
137 detail->update(old, new);
138 is_new = cache_fresh_locked(old, new->expiry_time);
139 write_unlock(&detail->hash_lock);
140 cache_fresh_unlocked(old, detail, is_new);
141 return old;
142 }
143 write_unlock(&detail->hash_lock);
144 }
145 /* We need to insert a new entry */
146 tmp = detail->alloc();
147 if (!tmp) {
148 cache_put(old, detail);
149 return NULL;
150 }
151 cache_init(tmp);
152 detail->init(tmp, old);
153 head = &detail->hash_table[hash];
154
155 write_lock(&detail->hash_lock);
156 if (test_bit(CACHE_NEGATIVE, &new->flags))
157 set_bit(CACHE_NEGATIVE, &tmp->flags);
158 else
159 detail->update(tmp, new);
160 tmp->next = *head;
161 *head = tmp;
162 cache_get(tmp);
163 is_new = cache_fresh_locked(tmp, new->expiry_time);
164 cache_fresh_locked(old, 0);
165 write_unlock(&detail->hash_lock);
166 cache_fresh_unlocked(tmp, detail, is_new);
167 cache_fresh_unlocked(old, detail, 0);
168 cache_put(old, detail);
169 return tmp;
170}
171EXPORT_SYMBOL(sunrpc_cache_update);
50 172
51static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); 173static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
52/* 174/*
@@ -94,7 +216,8 @@ int cache_check(struct cache_detail *detail,
94 clear_bit(CACHE_PENDING, &h->flags); 216 clear_bit(CACHE_PENDING, &h->flags);
95 if (rv == -EAGAIN) { 217 if (rv == -EAGAIN) {
96 set_bit(CACHE_NEGATIVE, &h->flags); 218 set_bit(CACHE_NEGATIVE, &h->flags);
97 cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY); 219 cache_fresh_unlocked(h, detail,
220 cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
98 rv = -ENOENT; 221 rv = -ENOENT;
99 } 222 }
100 break; 223 break;
@@ -110,25 +233,11 @@ int cache_check(struct cache_detail *detail,
110 if (rv == -EAGAIN) 233 if (rv == -EAGAIN)
111 cache_defer_req(rqstp, h); 234 cache_defer_req(rqstp, h);
112 235
113 if (rv && h) 236 if (rv)
114 detail->cache_put(h, detail); 237 cache_put(h, detail);
115 return rv; 238 return rv;
116} 239}
117 240
118static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
119
120void cache_fresh(struct cache_detail *detail,
121 struct cache_head *head, time_t expiry)
122{
123
124 head->expiry_time = expiry;
125 head->last_refresh = get_seconds();
126 if (!test_and_set_bit(CACHE_VALID, &head->flags))
127 cache_revisit_request(head);
128 if (test_and_clear_bit(CACHE_PENDING, &head->flags))
129 queue_loose(detail, head);
130}
131
132/* 241/*
133 * caches need to be periodically cleaned. 242 * caches need to be periodically cleaned.
134 * For this we maintain a list of cache_detail and 243 * For this we maintain a list of cache_detail and
@@ -322,7 +431,7 @@ static int cache_clean(void)
322 if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) 431 if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
323 queue_loose(current_detail, ch); 432 queue_loose(current_detail, ch);
324 433
325 if (atomic_read(&ch->refcnt) == 1) 434 if (atomic_read(&ch->ref.refcount) == 1)
326 break; 435 break;
327 } 436 }
328 if (ch) { 437 if (ch) {
@@ -337,7 +446,7 @@ static int cache_clean(void)
337 current_index ++; 446 current_index ++;
338 spin_unlock(&cache_list_lock); 447 spin_unlock(&cache_list_lock);
339 if (ch) 448 if (ch)
340 d->cache_put(ch, d); 449 cache_put(ch, d);
341 } else 450 } else
342 spin_unlock(&cache_list_lock); 451 spin_unlock(&cache_list_lock);
343 452
@@ -453,7 +562,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
453 /* there was one too many */ 562 /* there was one too many */
454 dreq->revisit(dreq, 1); 563 dreq->revisit(dreq, 1);
455 } 564 }
456 if (test_bit(CACHE_VALID, &item->flags)) { 565 if (!test_bit(CACHE_PENDING, &item->flags)) {
457 /* must have just been validated... */ 566 /* must have just been validated... */
458 cache_revisit_request(item); 567 cache_revisit_request(item);
459 } 568 }
@@ -614,7 +723,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
614 !test_bit(CACHE_PENDING, &rq->item->flags)) { 723 !test_bit(CACHE_PENDING, &rq->item->flags)) {
615 list_del(&rq->q.list); 724 list_del(&rq->q.list);
616 spin_unlock(&queue_lock); 725 spin_unlock(&queue_lock);
617 cd->cache_put(rq->item, cd); 726 cache_put(rq->item, cd);
618 kfree(rq->buf); 727 kfree(rq->buf);
619 kfree(rq); 728 kfree(rq);
620 } else 729 } else
@@ -794,10 +903,10 @@ static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
794 if (cr->item != ch) 903 if (cr->item != ch)
795 continue; 904 continue;
796 if (cr->readers != 0) 905 if (cr->readers != 0)
797 break; 906 continue;
798 list_del(&cr->q.list); 907 list_del(&cr->q.list);
799 spin_unlock(&queue_lock); 908 spin_unlock(&queue_lock);
800 detail->cache_put(cr->item, detail); 909 cache_put(cr->item, detail);
801 kfree(cr->buf); 910 kfree(cr->buf);
802 kfree(cr); 911 kfree(cr);
803 return; 912 return;
@@ -1082,8 +1191,8 @@ static int c_show(struct seq_file *m, void *p)
1082 return cd->cache_show(m, cd, NULL); 1191 return cd->cache_show(m, cd, NULL);
1083 1192
1084 ifdebug(CACHE) 1193 ifdebug(CACHE)
1085 seq_printf(m, "# expiry=%ld refcnt=%d\n", 1194 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1086 cp->expiry_time, atomic_read(&cp->refcnt)); 1195 cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
1087 cache_get(cp); 1196 cache_get(cp);
1088 if (cache_check(cd, cp, NULL)) 1197 if (cache_check(cd, cp, NULL))
1089 /* cache_check does a cache_put on failure */ 1198 /* cache_check does a cache_put on failure */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index aa4158be9900..cc673dd8433f 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -395,7 +395,7 @@ enum {
395 */ 395 */
396struct rpc_filelist { 396struct rpc_filelist {
397 char *name; 397 char *name;
398 struct file_operations *i_fop; 398 const struct file_operations *i_fop;
399 int mode; 399 int mode;
400}; 400};
401 401
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b9969b91a9f7..5c3eee768504 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1167,16 +1167,12 @@ rpc_init_mempool(void)
1167 NULL, NULL); 1167 NULL, NULL);
1168 if (!rpc_buffer_slabp) 1168 if (!rpc_buffer_slabp)
1169 goto err_nomem; 1169 goto err_nomem;
1170 rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE, 1170 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1171 mempool_alloc_slab, 1171 rpc_task_slabp);
1172 mempool_free_slab,
1173 rpc_task_slabp);
1174 if (!rpc_task_mempool) 1172 if (!rpc_task_mempool)
1175 goto err_nomem; 1173 goto err_nomem;
1176 rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE, 1174 rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1177 mempool_alloc_slab, 1175 rpc_buffer_slabp);
1178 mempool_free_slab,
1179 rpc_buffer_slabp);
1180 if (!rpc_buffer_mempool) 1176 if (!rpc_buffer_mempool)
1181 goto err_nomem; 1177 goto err_nomem;
1182 return 0; 1178 return 0;
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 790941e8af4d..dea529666d69 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(rpc_print_iostats);
225 * Register/unregister RPC proc files 225 * Register/unregister RPC proc files
226 */ 226 */
227static inline struct proc_dir_entry * 227static inline struct proc_dir_entry *
228do_register(const char *name, void *data, struct file_operations *fops) 228do_register(const char *name, void *data, const struct file_operations *fops)
229{ 229{
230 struct proc_dir_entry *ent; 230 struct proc_dir_entry *ent;
231 231
@@ -253,7 +253,7 @@ rpc_proc_unregister(const char *name)
253} 253}
254 254
255struct proc_dir_entry * 255struct proc_dir_entry *
256svc_proc_register(struct svc_stat *statp, struct file_operations *fops) 256svc_proc_register(struct svc_stat *statp, const struct file_operations *fops)
257{ 257{
258 return do_register(statp->program->pg_name, statp, fops); 258 return do_register(statp->program->pg_name, statp, fops);
259} 259}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 9f7373203592..769114f0f886 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -105,8 +105,6 @@ EXPORT_SYMBOL(auth_unix_lookup);
105EXPORT_SYMBOL(cache_check); 105EXPORT_SYMBOL(cache_check);
106EXPORT_SYMBOL(cache_flush); 106EXPORT_SYMBOL(cache_flush);
107EXPORT_SYMBOL(cache_purge); 107EXPORT_SYMBOL(cache_purge);
108EXPORT_SYMBOL(cache_fresh);
109EXPORT_SYMBOL(cache_init);
110EXPORT_SYMBOL(cache_register); 108EXPORT_SYMBOL(cache_register);
111EXPORT_SYMBOL(cache_unregister); 109EXPORT_SYMBOL(cache_unregister);
112EXPORT_SYMBOL(qword_add); 110EXPORT_SYMBOL(qword_add);
@@ -142,6 +140,7 @@ EXPORT_SYMBOL(nlm_debug);
142 140
143extern int register_rpc_pipefs(void); 141extern int register_rpc_pipefs(void);
144extern void unregister_rpc_pipefs(void); 142extern void unregister_rpc_pipefs(void);
143extern struct cache_detail ip_map_cache;
145 144
146static int __init 145static int __init
147init_sunrpc(void) 146init_sunrpc(void)
@@ -158,7 +157,6 @@ init_sunrpc(void)
158#ifdef CONFIG_PROC_FS 157#ifdef CONFIG_PROC_FS
159 rpc_proc_init(); 158 rpc_proc_init();
160#endif 159#endif
161 cache_register(&auth_domain_cache);
162 cache_register(&ip_map_cache); 160 cache_register(&ip_map_cache);
163out: 161out:
164 return err; 162 return err;
@@ -169,8 +167,6 @@ cleanup_sunrpc(void)
169{ 167{
170 unregister_rpc_pipefs(); 168 unregister_rpc_pipefs();
171 rpc_destroy_mempool(); 169 rpc_destroy_mempool();
172 if (cache_unregister(&auth_domain_cache))
173 printk(KERN_ERR "sunrpc: failed to unregister auth_domain cache\n");
174 if (cache_unregister(&ip_map_cache)) 170 if (cache_unregister(&ip_map_cache))
175 printk(KERN_ERR "sunrpc: failed to unregister ip_map cache\n"); 171 printk(KERN_ERR "sunrpc: failed to unregister ip_map cache\n");
176#ifdef RPC_DEBUG 172#ifdef RPC_DEBUG
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index dda4f0c63511..5b28c6176806 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -106,112 +106,56 @@ svc_auth_unregister(rpc_authflavor_t flavor)
106EXPORT_SYMBOL(svc_auth_unregister); 106EXPORT_SYMBOL(svc_auth_unregister);
107 107
108/************************************************** 108/**************************************************
109 * cache for domain name to auth_domain 109 * 'auth_domains' are stored in a hash table indexed by name.
110 * Entries are only added by flavours which will normally 110 * When the last reference to an 'auth_domain' is dropped,
111 * have a structure that 'inherits' from auth_domain. 111 * the object is unhashed and freed.
112 * e.g. when an IP -> domainname is given to auth_unix, 112 * If auth_domain_lookup fails to find an entry, it will return
113 * and the domain name doesn't exist, it will create a 113 * it's second argument 'new'. If this is non-null, it will
114 * auth_unix_domain and add it to this hash table. 114 * have been atomically linked into the table.
115 * If it finds the name does exist, but isn't AUTH_UNIX,
116 * it will complain.
117 */ 115 */
118 116
119/*
120 * Auth auth_domain cache is somewhat different to other caches,
121 * largely because the entries are possibly of different types:
122 * each auth flavour has it's own type.
123 * One consequence of this that DefineCacheLookup cannot
124 * allocate a new structure as it cannot know the size.
125 * Notice that the "INIT" code fragment is quite different
126 * from other caches. When auth_domain_lookup might be
127 * creating a new domain, the new domain is passed in
128 * complete and it is used as-is rather than being copied into
129 * another structure.
130 */
131#define DN_HASHBITS 6 117#define DN_HASHBITS 6
132#define DN_HASHMAX (1<<DN_HASHBITS) 118#define DN_HASHMAX (1<<DN_HASHBITS)
133#define DN_HASHMASK (DN_HASHMAX-1) 119#define DN_HASHMASK (DN_HASHMAX-1)
134 120
135static struct cache_head *auth_domain_table[DN_HASHMAX]; 121static struct hlist_head auth_domain_table[DN_HASHMAX];
136 122static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED;
137static void auth_domain_drop(struct cache_head *item, struct cache_detail *cd)
138{
139 struct auth_domain *dom = container_of(item, struct auth_domain, h);
140 if (cache_put(item,cd))
141 authtab[dom->flavour]->domain_release(dom);
142}
143
144
145struct cache_detail auth_domain_cache = {
146 .owner = THIS_MODULE,
147 .hash_size = DN_HASHMAX,
148 .hash_table = auth_domain_table,
149 .name = "auth.domain",
150 .cache_put = auth_domain_drop,
151};
152 123
153void auth_domain_put(struct auth_domain *dom) 124void auth_domain_put(struct auth_domain *dom)
154{ 125{
155 auth_domain_drop(&dom->h, &auth_domain_cache); 126 if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
156} 127 hlist_del(&dom->hash);
157 128 dom->flavour->domain_release(dom);
158static inline int auth_domain_hash(struct auth_domain *item) 129 }
159{
160 return hash_str(item->name, DN_HASHBITS);
161}
162static inline int auth_domain_match(struct auth_domain *tmp, struct auth_domain *item)
163{
164 return strcmp(tmp->name, item->name) == 0;
165} 130}
166 131
167struct auth_domain * 132struct auth_domain *
168auth_domain_lookup(struct auth_domain *item, int set) 133auth_domain_lookup(char *name, struct auth_domain *new)
169{ 134{
170 struct auth_domain *tmp = NULL; 135 struct auth_domain *hp;
171 struct cache_head **hp, **head; 136 struct hlist_head *head;
172 head = &auth_domain_cache.hash_table[auth_domain_hash(item)]; 137 struct hlist_node *np;
173 138
174 if (set) 139 head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
175 write_lock(&auth_domain_cache.hash_lock); 140
176 else 141 spin_lock(&auth_domain_lock);
177 read_lock(&auth_domain_cache.hash_lock); 142
178 for (hp=head; *hp != NULL; hp = &tmp->h.next) { 143 hlist_for_each_entry(hp, np, head, hash) {
179 tmp = container_of(*hp, struct auth_domain, h); 144 if (strcmp(hp->name, name)==0) {
180 if (!auth_domain_match(tmp, item)) 145 kref_get(&hp->ref);
181 continue; 146 spin_unlock(&auth_domain_lock);
182 if (!set) { 147 return hp;
183 cache_get(&tmp->h);
184 goto out_noset;
185 } 148 }
186 *hp = tmp->h.next;
187 tmp->h.next = NULL;
188 auth_domain_drop(&tmp->h, &auth_domain_cache);
189 goto out_set;
190 } 149 }
191 /* Didn't find anything */ 150 if (new) {
192 if (!set) 151 hlist_add_head(&new->hash, head);
193 goto out_nada; 152 kref_get(&new->ref);
194 auth_domain_cache.entries++; 153 }
195out_set: 154 spin_unlock(&auth_domain_lock);
196 item->h.next = *head; 155 return new;
197 *head = &item->h;
198 cache_get(&item->h);
199 write_unlock(&auth_domain_cache.hash_lock);
200 cache_fresh(&auth_domain_cache, &item->h, item->h.expiry_time);
201 cache_get(&item->h);
202 return item;
203out_nada:
204 tmp = NULL;
205out_noset:
206 read_unlock(&auth_domain_cache.hash_lock);
207 return tmp;
208} 156}
209 157
210struct auth_domain *auth_domain_find(char *name) 158struct auth_domain *auth_domain_find(char *name)
211{ 159{
212 struct auth_domain *rv, ad; 160 return auth_domain_lookup(name, NULL);
213
214 ad.name = name;
215 rv = auth_domain_lookup(&ad, 0);
216 return rv;
217} 161}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 3e6c694bbad1..7e5707e2d6b6 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -27,41 +27,35 @@ struct unix_domain {
27 /* other stuff later */ 27 /* other stuff later */
28}; 28};
29 29
30extern struct auth_ops svcauth_unix;
31
30struct auth_domain *unix_domain_find(char *name) 32struct auth_domain *unix_domain_find(char *name)
31{ 33{
32 struct auth_domain *rv, ud; 34 struct auth_domain *rv;
33 struct unix_domain *new; 35 struct unix_domain *new = NULL;
34 36
35 ud.name = name; 37 rv = auth_domain_lookup(name, NULL);
36 38 while(1) {
37 rv = auth_domain_lookup(&ud, 0); 39 if (rv) {
38 40 if (new && rv != &new->h)
39 foundit: 41 auth_domain_put(&new->h);
40 if (rv && rv->flavour != RPC_AUTH_UNIX) { 42
41 auth_domain_put(rv); 43 if (rv->flavour != &svcauth_unix) {
42 return NULL; 44 auth_domain_put(rv);
43 } 45 return NULL;
44 if (rv) 46 }
45 return rv; 47 return rv;
46 48 }
47 new = kmalloc(sizeof(*new), GFP_KERNEL); 49
48 if (new == NULL) 50 new = kmalloc(sizeof(*new), GFP_KERNEL);
49 return NULL; 51 if (new == NULL)
50 cache_init(&new->h.h); 52 return NULL;
51 new->h.name = kstrdup(name, GFP_KERNEL); 53 kref_init(&new->h.ref);
52 new->h.flavour = RPC_AUTH_UNIX; 54 new->h.name = kstrdup(name, GFP_KERNEL);
53 new->addr_changes = 0; 55 new->h.flavour = &svcauth_unix;
54 new->h.h.expiry_time = NEVER; 56 new->addr_changes = 0;
55 57 rv = auth_domain_lookup(name, &new->h);
56 rv = auth_domain_lookup(&new->h, 2);
57 if (rv == &new->h) {
58 if (atomic_dec_and_test(&new->h.h.refcnt)) BUG();
59 } else {
60 auth_domain_put(&new->h);
61 goto foundit;
62 } 58 }
63
64 return rv;
65} 59}
66 60
67static void svcauth_unix_domain_release(struct auth_domain *dom) 61static void svcauth_unix_domain_release(struct auth_domain *dom)
@@ -90,15 +84,15 @@ struct ip_map {
90}; 84};
91static struct cache_head *ip_table[IP_HASHMAX]; 85static struct cache_head *ip_table[IP_HASHMAX];
92 86
93static void ip_map_put(struct cache_head *item, struct cache_detail *cd) 87static void ip_map_put(struct kref *kref)
94{ 88{
89 struct cache_head *item = container_of(kref, struct cache_head, ref);
95 struct ip_map *im = container_of(item, struct ip_map,h); 90 struct ip_map *im = container_of(item, struct ip_map,h);
96 if (cache_put(item, cd)) { 91
97 if (test_bit(CACHE_VALID, &item->flags) && 92 if (test_bit(CACHE_VALID, &item->flags) &&
98 !test_bit(CACHE_NEGATIVE, &item->flags)) 93 !test_bit(CACHE_NEGATIVE, &item->flags))
99 auth_domain_put(&im->m_client->h); 94 auth_domain_put(&im->m_client->h);
100 kfree(im); 95 kfree(im);
101 }
102} 96}
103 97
104#if IP_HASHBITS == 8 98#if IP_HASHBITS == 8
@@ -112,28 +106,38 @@ static inline int hash_ip(unsigned long ip)
112 return (hash ^ (hash>>8)) & 0xff; 106 return (hash ^ (hash>>8)) & 0xff;
113} 107}
114#endif 108#endif
115 109static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
116static inline int ip_map_hash(struct ip_map *item)
117{
118 return hash_str(item->m_class, IP_HASHBITS) ^
119 hash_ip((unsigned long)item->m_addr.s_addr);
120}
121static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp)
122{ 110{
123 return strcmp(tmp->m_class, item->m_class) == 0 111 struct ip_map *orig = container_of(corig, struct ip_map, h);
124 && tmp->m_addr.s_addr == item->m_addr.s_addr; 112 struct ip_map *new = container_of(cnew, struct ip_map, h);
113 return strcmp(orig->m_class, new->m_class) == 0
114 && orig->m_addr.s_addr == new->m_addr.s_addr;
125} 115}
126static inline void ip_map_init(struct ip_map *new, struct ip_map *item) 116static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
127{ 117{
118 struct ip_map *new = container_of(cnew, struct ip_map, h);
119 struct ip_map *item = container_of(citem, struct ip_map, h);
120
128 strcpy(new->m_class, item->m_class); 121 strcpy(new->m_class, item->m_class);
129 new->m_addr.s_addr = item->m_addr.s_addr; 122 new->m_addr.s_addr = item->m_addr.s_addr;
130} 123}
131static inline void ip_map_update(struct ip_map *new, struct ip_map *item) 124static void update(struct cache_head *cnew, struct cache_head *citem)
132{ 125{
133 cache_get(&item->m_client->h.h); 126 struct ip_map *new = container_of(cnew, struct ip_map, h);
127 struct ip_map *item = container_of(citem, struct ip_map, h);
128
129 kref_get(&item->m_client->h.ref);
134 new->m_client = item->m_client; 130 new->m_client = item->m_client;
135 new->m_add_change = item->m_add_change; 131 new->m_add_change = item->m_add_change;
136} 132}
133static struct cache_head *ip_map_alloc(void)
134{
135 struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
136 if (i)
137 return &i->h;
138 else
139 return NULL;
140}
137 141
138static void ip_map_request(struct cache_detail *cd, 142static void ip_map_request(struct cache_detail *cd,
139 struct cache_head *h, 143 struct cache_head *h,
@@ -154,7 +158,8 @@ static void ip_map_request(struct cache_detail *cd,
154 (*bpp)[-1] = '\n'; 158 (*bpp)[-1] = '\n';
155} 159}
156 160
157static struct ip_map *ip_map_lookup(struct ip_map *, int); 161static struct ip_map *ip_map_lookup(char *class, struct in_addr addr);
162static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
158 163
159static int ip_map_parse(struct cache_detail *cd, 164static int ip_map_parse(struct cache_detail *cd,
160 char *mesg, int mlen) 165 char *mesg, int mlen)
@@ -166,7 +171,11 @@ static int ip_map_parse(struct cache_detail *cd,
166 int len; 171 int len;
167 int b1,b2,b3,b4; 172 int b1,b2,b3,b4;
168 char c; 173 char c;
169 struct ip_map ipm, *ipmp; 174 char class[8];
175 struct in_addr addr;
176 int err;
177
178 struct ip_map *ipmp;
170 struct auth_domain *dom; 179 struct auth_domain *dom;
171 time_t expiry; 180 time_t expiry;
172 181
@@ -175,7 +184,7 @@ static int ip_map_parse(struct cache_detail *cd,
175 mesg[mlen-1] = 0; 184 mesg[mlen-1] = 0;
176 185
177 /* class */ 186 /* class */
178 len = qword_get(&mesg, ipm.m_class, sizeof(ipm.m_class)); 187 len = qword_get(&mesg, class, sizeof(class));
179 if (len <= 0) return -EINVAL; 188 if (len <= 0) return -EINVAL;
180 189
181 /* ip address */ 190 /* ip address */
@@ -200,25 +209,22 @@ static int ip_map_parse(struct cache_detail *cd,
200 } else 209 } else
201 dom = NULL; 210 dom = NULL;
202 211
203 ipm.m_addr.s_addr = 212 addr.s_addr =
204 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); 213 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
205 ipm.h.flags = 0; 214
206 if (dom) { 215 ipmp = ip_map_lookup(class,addr);
207 ipm.m_client = container_of(dom, struct unix_domain, h); 216 if (ipmp) {
208 ipm.m_add_change = ipm.m_client->addr_changes; 217 err = ip_map_update(ipmp,
218 container_of(dom, struct unix_domain, h),
219 expiry);
209 } else 220 } else
210 set_bit(CACHE_NEGATIVE, &ipm.h.flags); 221 err = -ENOMEM;
211 ipm.h.expiry_time = expiry;
212 222
213 ipmp = ip_map_lookup(&ipm, 1);
214 if (ipmp)
215 ip_map_put(&ipmp->h, &ip_map_cache);
216 if (dom) 223 if (dom)
217 auth_domain_put(dom); 224 auth_domain_put(dom);
218 if (!ipmp) 225
219 return -ENOMEM;
220 cache_flush(); 226 cache_flush();
221 return 0; 227 return err;
222} 228}
223 229
224static int ip_map_show(struct seq_file *m, 230static int ip_map_show(struct seq_file *m,
@@ -262,32 +268,70 @@ struct cache_detail ip_map_cache = {
262 .cache_request = ip_map_request, 268 .cache_request = ip_map_request,
263 .cache_parse = ip_map_parse, 269 .cache_parse = ip_map_parse,
264 .cache_show = ip_map_show, 270 .cache_show = ip_map_show,
271 .match = ip_map_match,
272 .init = ip_map_init,
273 .update = update,
274 .alloc = ip_map_alloc,
265}; 275};
266 276
267static DefineSimpleCacheLookup(ip_map, 0) 277static struct ip_map *ip_map_lookup(char *class, struct in_addr addr)
278{
279 struct ip_map ip;
280 struct cache_head *ch;
281
282 strcpy(ip.m_class, class);
283 ip.m_addr = addr;
284 ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
285 hash_str(class, IP_HASHBITS) ^
286 hash_ip((unsigned long)addr.s_addr));
287
288 if (ch)
289 return container_of(ch, struct ip_map, h);
290 else
291 return NULL;
292}
268 293
294static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
295{
296 struct ip_map ip;
297 struct cache_head *ch;
298
299 ip.m_client = udom;
300 ip.h.flags = 0;
301 if (!udom)
302 set_bit(CACHE_NEGATIVE, &ip.h.flags);
303 else {
304 ip.m_add_change = udom->addr_changes;
305 /* if this is from the legacy set_client system call,
306 * we need m_add_change to be one higher
307 */
308 if (expiry == NEVER)
309 ip.m_add_change++;
310 }
311 ip.h.expiry_time = expiry;
312 ch = sunrpc_cache_update(&ip_map_cache,
313 &ip.h, &ipm->h,
314 hash_str(ipm->m_class, IP_HASHBITS) ^
315 hash_ip((unsigned long)ipm->m_addr.s_addr));
316 if (!ch)
317 return -ENOMEM;
318 cache_put(ch, &ip_map_cache);
319 return 0;
320}
269 321
270int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom) 322int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
271{ 323{
272 struct unix_domain *udom; 324 struct unix_domain *udom;
273 struct ip_map ip, *ipmp; 325 struct ip_map *ipmp;
274 326
275 if (dom->flavour != RPC_AUTH_UNIX) 327 if (dom->flavour != &svcauth_unix)
276 return -EINVAL; 328 return -EINVAL;
277 udom = container_of(dom, struct unix_domain, h); 329 udom = container_of(dom, struct unix_domain, h);
278 strcpy(ip.m_class, "nfsd"); 330 ipmp = ip_map_lookup("nfsd", addr);
279 ip.m_addr = addr;
280 ip.m_client = udom;
281 ip.m_add_change = udom->addr_changes+1;
282 ip.h.flags = 0;
283 ip.h.expiry_time = NEVER;
284
285 ipmp = ip_map_lookup(&ip, 1);
286 331
287 if (ipmp) { 332 if (ipmp)
288 ip_map_put(&ipmp->h, &ip_map_cache); 333 return ip_map_update(ipmp, udom, NEVER);
289 return 0; 334 else
290 } else
291 return -ENOMEM; 335 return -ENOMEM;
292} 336}
293 337
@@ -295,7 +339,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
295{ 339{
296 struct unix_domain *udom; 340 struct unix_domain *udom;
297 341
298 if (dom->flavour != RPC_AUTH_UNIX) 342 if (dom->flavour != &svcauth_unix)
299 return -EINVAL; 343 return -EINVAL;
300 udom = container_of(dom, struct unix_domain, h); 344 udom = container_of(dom, struct unix_domain, h);
301 udom->addr_changes++; 345 udom->addr_changes++;
@@ -310,7 +354,7 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr)
310 strcpy(key.m_class, "nfsd"); 354 strcpy(key.m_class, "nfsd");
311 key.m_addr = addr; 355 key.m_addr = addr;
312 356
313 ipm = ip_map_lookup(&key, 0); 357 ipm = ip_map_lookup("nfsd", addr);
314 358
315 if (!ipm) 359 if (!ipm)
316 return NULL; 360 return NULL;
@@ -323,31 +367,28 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr)
323 rv = NULL; 367 rv = NULL;
324 } else { 368 } else {
325 rv = &ipm->m_client->h; 369 rv = &ipm->m_client->h;
326 cache_get(&rv->h); 370 kref_get(&rv->ref);
327 } 371 }
328 ip_map_put(&ipm->h, &ip_map_cache); 372 cache_put(&ipm->h, &ip_map_cache);
329 return rv; 373 return rv;
330} 374}
331 375
332void svcauth_unix_purge(void) 376void svcauth_unix_purge(void)
333{ 377{
334 cache_purge(&ip_map_cache); 378 cache_purge(&ip_map_cache);
335 cache_purge(&auth_domain_cache);
336} 379}
337 380
338static int 381static int
339svcauth_unix_set_client(struct svc_rqst *rqstp) 382svcauth_unix_set_client(struct svc_rqst *rqstp)
340{ 383{
341 struct ip_map key, *ipm; 384 struct ip_map *ipm;
342 385
343 rqstp->rq_client = NULL; 386 rqstp->rq_client = NULL;
344 if (rqstp->rq_proc == 0) 387 if (rqstp->rq_proc == 0)
345 return SVC_OK; 388 return SVC_OK;
346 389
347 strcpy(key.m_class, rqstp->rq_server->sv_program->pg_class); 390 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
348 key.m_addr = rqstp->rq_addr.sin_addr; 391 rqstp->rq_addr.sin_addr);
349
350 ipm = ip_map_lookup(&key, 0);
351 392
352 if (ipm == NULL) 393 if (ipm == NULL)
353 return SVC_DENIED; 394 return SVC_DENIED;
@@ -361,8 +402,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
361 return SVC_DENIED; 402 return SVC_DENIED;
362 case 0: 403 case 0:
363 rqstp->rq_client = &ipm->m_client->h; 404 rqstp->rq_client = &ipm->m_client->h;
364 cache_get(&rqstp->rq_client->h); 405 kref_get(&rqstp->rq_client->ref);
365 ip_map_put(&ipm->h, &ip_map_cache); 406 cache_put(&ipm->h, &ip_map_cache);
366 break; 407 break;
367 } 408 }
368 return SVC_OK; 409 return SVC_OK;
diff --git a/sound/core/init.c b/sound/core/init.c
index ad68761abba1..5bb8a8b23d51 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -223,7 +223,8 @@ int snd_card_disconnect(struct snd_card *card)
223 struct snd_monitor_file *mfile; 223 struct snd_monitor_file *mfile;
224 struct file *file; 224 struct file *file;
225 struct snd_shutdown_f_ops *s_f_ops; 225 struct snd_shutdown_f_ops *s_f_ops;
226 struct file_operations *f_ops, *old_f_ops; 226 struct file_operations *f_ops;
227 const struct file_operations *old_f_ops;
227 int err; 228 int err;
228 229
229 spin_lock(&card->files_lock); 230 spin_lock(&card->files_lock);
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6b7a36774298..87b47c9564f7 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -631,7 +631,8 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
631 return -EINVAL; 631 return -EINVAL;
632 } 632 }
633 if (params->buffer_size != runtime->buffer_size) { 633 if (params->buffer_size != runtime->buffer_size) {
634 if ((newbuf = (char *) kmalloc(params->buffer_size, GFP_KERNEL)) == NULL) 634 newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
635 if (!newbuf)
635 return -ENOMEM; 636 return -ENOMEM;
636 kfree(runtime->buffer); 637 kfree(runtime->buffer);
637 runtime->buffer = newbuf; 638 runtime->buffer = newbuf;
@@ -657,7 +658,8 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
657 return -EINVAL; 658 return -EINVAL;
658 } 659 }
659 if (params->buffer_size != runtime->buffer_size) { 660 if (params->buffer_size != runtime->buffer_size) {
660 if ((newbuf = (char *) kmalloc(params->buffer_size, GFP_KERNEL)) == NULL) 661 newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
662 if (!newbuf)
661 return -ENOMEM; 663 return -ENOMEM;
662 kfree(runtime->buffer); 664 kfree(runtime->buffer);
663 runtime->buffer = newbuf; 665 runtime->buffer = newbuf;
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 4d28e5212611..108e430b5036 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -137,7 +137,7 @@ static int snd_open(struct inode *inode, struct file *file)
137{ 137{
138 unsigned int minor = iminor(inode); 138 unsigned int minor = iminor(inode);
139 struct snd_minor *mptr = NULL; 139 struct snd_minor *mptr = NULL;
140 struct file_operations *old_fops; 140 const struct file_operations *old_fops;
141 int err = 0; 141 int err = 0;
142 142
143 if (minor >= ARRAY_SIZE(snd_minors)) 143 if (minor >= ARRAY_SIZE(snd_minors))
@@ -240,7 +240,7 @@ static int snd_kernel_minor(int type, struct snd_card *card, int dev)
240 * Retrurns zero if successful, or a negative error code on failure. 240 * Retrurns zero if successful, or a negative error code on failure.
241 */ 241 */
242int snd_register_device(int type, struct snd_card *card, int dev, 242int snd_register_device(int type, struct snd_card *card, int dev,
243 struct file_operations *f_ops, void *private_data, 243 const struct file_operations *f_ops, void *private_data,
244 const char *name) 244 const char *name)
245{ 245{
246 int minor; 246 int minor;
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 4023d3b406de..9055c6de9587 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -95,7 +95,7 @@ static int snd_oss_kernel_minor(int type, struct snd_card *card, int dev)
95} 95}
96 96
97int snd_register_oss_device(int type, struct snd_card *card, int dev, 97int snd_register_oss_device(int type, struct snd_card *card, int dev,
98 struct file_operations *f_ops, void *private_data, 98 const struct file_operations *f_ops, void *private_data,
99 const char *name) 99 const char *name)
100{ 100{
101 int minor = snd_oss_kernel_minor(type, card, dev); 101 int minor = snd_oss_kernel_minor(type, card, dev);
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index 9d10d79e27af..9ea3059a7064 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -59,7 +59,8 @@ module_param_array(irq, int, NULL, 0444);
59MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device."); 59MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device.");
60 60
61static struct platform_device *platform_devices[SNDRV_CARDS]; 61static struct platform_device *platform_devices[SNDRV_CARDS];
62static int pnp_registered = 0; 62static int pnp_registered;
63static unsigned int snd_mpu401_devices;
63 64
64static int snd_mpu401_create(int dev, struct snd_card **rcard) 65static int snd_mpu401_create(int dev, struct snd_card **rcard)
65{ 66{
@@ -197,6 +198,7 @@ static int __devinit snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
197 } 198 }
198 snd_card_set_dev(card, &pnp_dev->dev); 199 snd_card_set_dev(card, &pnp_dev->dev);
199 pnp_set_drvdata(pnp_dev, card); 200 pnp_set_drvdata(pnp_dev, card);
201 snd_mpu401_devices++;
200 ++dev; 202 ++dev;
201 return 0; 203 return 0;
202 } 204 }
@@ -234,12 +236,11 @@ static void __init_or_module snd_mpu401_unregister_all(void)
234 236
235static int __init alsa_card_mpu401_init(void) 237static int __init alsa_card_mpu401_init(void)
236{ 238{
237 int i, err, devices; 239 int i, err;
238 240
239 if ((err = platform_driver_register(&snd_mpu401_driver)) < 0) 241 if ((err = platform_driver_register(&snd_mpu401_driver)) < 0)
240 return err; 242 return err;
241 243
242 devices = 0;
243 for (i = 0; i < SNDRV_CARDS; i++) { 244 for (i = 0; i < SNDRV_CARDS; i++) {
244 struct platform_device *device; 245 struct platform_device *device;
245 if (! enable[i]) 246 if (! enable[i])
@@ -255,14 +256,13 @@ static int __init alsa_card_mpu401_init(void)
255 goto errout; 256 goto errout;
256 } 257 }
257 platform_devices[i] = device; 258 platform_devices[i] = device;
258 devices++; 259 snd_mpu401_devices++;
259 } 260 }
260 if ((err = pnp_register_driver(&snd_mpu401_pnp_driver)) >= 0) { 261 err = pnp_register_driver(&snd_mpu401_pnp_driver);
262 if (!err)
261 pnp_registered = 1; 263 pnp_registered = 1;
262 devices += err;
263 }
264 264
265 if (!devices) { 265 if (!snd_mpu401_devices) {
266#ifdef MODULE 266#ifdef MODULE
267 printk(KERN_ERR "MPU-401 device not found or device busy\n"); 267 printk(KERN_ERR "MPU-401 device not found or device busy\n");
268#endif 268#endif
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index 7051f7798ed7..31f299aed281 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -262,6 +262,8 @@ static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard
262 return 0; 262 return 0;
263} 263}
264 264
265static unsigned int __devinitdata ad1816a_devices;
266
265static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card, 267static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card,
266 const struct pnp_card_device_id *id) 268 const struct pnp_card_device_id *id)
267{ 269{
@@ -275,6 +277,7 @@ static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card,
275 if (res < 0) 277 if (res < 0)
276 return res; 278 return res;
277 dev++; 279 dev++;
280 ad1816a_devices++;
278 return 0; 281 return 0;
279 } 282 }
280 return -ENODEV; 283 return -ENODEV;
@@ -297,10 +300,13 @@ static struct pnp_card_driver ad1816a_pnpc_driver = {
297 300
298static int __init alsa_card_ad1816a_init(void) 301static int __init alsa_card_ad1816a_init(void)
299{ 302{
300 int cards; 303 int err;
304
305 err = pnp_register_card_driver(&ad1816a_pnpc_driver);
306 if (err)
307 return err;
301 308
302 cards = pnp_register_card_driver(&ad1816a_pnpc_driver); 309 if (!ad1816a_devices) {
303 if (cards <= 0) {
304 pnp_unregister_card_driver(&ad1816a_pnpc_driver); 310 pnp_unregister_card_driver(&ad1816a_pnpc_driver);
305#ifdef MODULE 311#ifdef MODULE
306 printk(KERN_ERR "no AD1816A based soundcards found.\n"); 312 printk(KERN_ERR "no AD1816A based soundcards found.\n");
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 9b77c17b3f66..a52bd8a14c9b 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -199,7 +199,7 @@ static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
199 return 0; 199 return 0;
200} 200}
201 201
202static int __init snd_card_als100_probe(int dev, 202static int __devinit snd_card_als100_probe(int dev,
203 struct pnp_card_link *pcard, 203 struct pnp_card_link *pcard,
204 const struct pnp_card_device_id *pid) 204 const struct pnp_card_device_id *pid)
205{ 205{
@@ -281,6 +281,8 @@ static int __init snd_card_als100_probe(int dev,
281 return 0; 281 return 0;
282} 282}
283 283
284static unsigned int __devinitdata als100_devices;
285
284static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card, 286static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card,
285 const struct pnp_card_device_id *id) 287 const struct pnp_card_device_id *id)
286{ 288{
@@ -294,6 +296,7 @@ static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card,
294 if (res < 0) 296 if (res < 0)
295 return res; 297 return res;
296 dev++; 298 dev++;
299 als100_devices++;
297 return 0; 300 return 0;
298 } 301 }
299 return -ENODEV; 302 return -ENODEV;
@@ -345,10 +348,13 @@ static struct pnp_card_driver als100_pnpc_driver = {
345 348
346static int __init alsa_card_als100_init(void) 349static int __init alsa_card_als100_init(void)
347{ 350{
348 int cards; 351 int err;
352
353 err = pnp_register_card_driver(&als100_pnpc_driver);
354 if (err)
355 return err;
349 356
350 cards = pnp_register_card_driver(&als100_pnpc_driver); 357 if (!als100_devices) {
351 if (cards <= 0) {
352 pnp_unregister_card_driver(&als100_pnpc_driver); 358 pnp_unregister_card_driver(&als100_pnpc_driver);
353#ifdef MODULE 359#ifdef MODULE
354 snd_printk(KERN_ERR "no ALS100 based soundcards found\n"); 360 snd_printk(KERN_ERR "no ALS100 based soundcards found\n");
diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c
index a530691bf4f7..15e59283aac6 100644
--- a/sound/isa/azt2320.c
+++ b/sound/isa/azt2320.c
@@ -310,6 +310,8 @@ static int __devinit snd_card_azt2320_probe(int dev,
310 return 0; 310 return 0;
311} 311}
312 312
313static unsigned int __devinitdata azt2320_devices;
314
313static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card, 315static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card,
314 const struct pnp_card_device_id *id) 316 const struct pnp_card_device_id *id)
315{ 317{
@@ -323,6 +325,7 @@ static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card,
323 if (res < 0) 325 if (res < 0)
324 return res; 326 return res;
325 dev++; 327 dev++;
328 azt2320_devices++;
326 return 0; 329 return 0;
327 } 330 }
328 return -ENODEV; 331 return -ENODEV;
@@ -372,10 +375,13 @@ static struct pnp_card_driver azt2320_pnpc_driver = {
372 375
373static int __init alsa_card_azt2320_init(void) 376static int __init alsa_card_azt2320_init(void)
374{ 377{
375 int cards; 378 int err;
379
380 err = pnp_register_card_driver(&azt2320_pnpc_driver);
381 if (err)
382 return err;
376 383
377 cards = pnp_register_card_driver(&azt2320_pnpc_driver); 384 if (!azt2320_devices) {
378 if (cards <= 0) {
379 pnp_unregister_card_driver(&azt2320_pnpc_driver); 385 pnp_unregister_card_driver(&azt2320_pnpc_driver);
380#ifdef MODULE 386#ifdef MODULE
381 snd_printk(KERN_ERR "no AZT2320 based soundcards found\n"); 387 snd_printk(KERN_ERR "no AZT2320 based soundcards found\n");
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index fd9bb2575de8..fa63048a8b9d 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -175,7 +175,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_cmi8330_pnpids);
175#endif 175#endif
176 176
177 177
178static struct ad1848_mix_elem snd_cmi8330_controls[] __initdata = { 178static struct ad1848_mix_elem snd_cmi8330_controls[] __devinitdata = {
179AD1848_DOUBLE("Master Playback Volume", 0, CMI8330_MASTVOL, CMI8330_MASTVOL, 4, 0, 15, 0), 179AD1848_DOUBLE("Master Playback Volume", 0, CMI8330_MASTVOL, CMI8330_MASTVOL, 4, 0, 15, 0),
180AD1848_SINGLE("Loud Playback Switch", 0, CMI8330_MUTEMUX, 6, 1, 1), 180AD1848_SINGLE("Loud Playback Switch", 0, CMI8330_MUTEMUX, 6, 1, 1),
181AD1848_DOUBLE("PCM Playback Switch", 0, AD1848_LEFT_OUTPUT, AD1848_RIGHT_OUTPUT, 7, 7, 1, 1), 181AD1848_DOUBLE("PCM Playback Switch", 0, AD1848_LEFT_OUTPUT, AD1848_RIGHT_OUTPUT, 7, 7, 1, 1),
@@ -204,7 +204,7 @@ AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",PLAYBACK,SWITCH), 0, CMI8330_MUTEMU
204}; 204};
205 205
206#ifdef ENABLE_SB_MIXER 206#ifdef ENABLE_SB_MIXER
207static struct sbmix_elem cmi8330_sb_mixers[] __initdata = { 207static struct sbmix_elem cmi8330_sb_mixers[] __devinitdata = {
208SB_DOUBLE("SB Master Playback Volume", SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31), 208SB_DOUBLE("SB Master Playback Volume", SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31),
209SB_DOUBLE("Tone Control - Bass", SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15), 209SB_DOUBLE("Tone Control - Bass", SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15),
210SB_DOUBLE("Tone Control - Treble", SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15), 210SB_DOUBLE("Tone Control - Treble", SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15),
@@ -222,7 +222,7 @@ SB_DOUBLE("SB Playback Volume", SB_DSP4_OGAIN_DEV, (SB_DSP4_OGAIN_DEV + 1), 6, 6
222SB_SINGLE("SB Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1), 222SB_SINGLE("SB Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1),
223}; 223};
224 224
225static unsigned char cmi8330_sb_init_values[][2] __initdata = { 225static unsigned char cmi8330_sb_init_values[][2] __devinitdata = {
226 { SB_DSP4_MASTER_DEV + 0, 0 }, 226 { SB_DSP4_MASTER_DEV + 0, 0 },
227 { SB_DSP4_MASTER_DEV + 1, 0 }, 227 { SB_DSP4_MASTER_DEV + 1, 0 },
228 { SB_DSP4_PCM_DEV + 0, 0 }, 228 { SB_DSP4_PCM_DEV + 0, 0 },
@@ -545,7 +545,7 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
545 return snd_card_register(card); 545 return snd_card_register(card);
546} 546}
547 547
548static int __init snd_cmi8330_nonpnp_probe(struct platform_device *pdev) 548static int __devinit snd_cmi8330_nonpnp_probe(struct platform_device *pdev)
549{ 549{
550 struct snd_card *card; 550 struct snd_card *card;
551 int err; 551 int err;
@@ -607,6 +607,8 @@ static struct platform_driver snd_cmi8330_driver = {
607 607
608 608
609#ifdef CONFIG_PNP 609#ifdef CONFIG_PNP
610static unsigned int __devinitdata cmi8330_pnp_devices;
611
610static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard, 612static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
611 const struct pnp_card_device_id *pid) 613 const struct pnp_card_device_id *pid)
612{ 614{
@@ -636,6 +638,7 @@ static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
636 } 638 }
637 pnp_set_card_drvdata(pcard, card); 639 pnp_set_card_drvdata(pcard, card);
638 dev++; 640 dev++;
641 cmi8330_pnp_devices++;
639 return 0; 642 return 0;
640} 643}
641 644
@@ -706,9 +709,9 @@ static int __init alsa_card_cmi8330_init(void)
706 709
707#ifdef CONFIG_PNP 710#ifdef CONFIG_PNP
708 err = pnp_register_card_driver(&cmi8330_pnpc_driver); 711 err = pnp_register_card_driver(&cmi8330_pnpc_driver);
709 if (err >= 0) { 712 if (!err) {
710 pnp_registered = 1; 713 pnp_registered = 1;
711 cards += err; 714 cards += cmi8330_pnp_devices;
712 } 715 }
713#endif 716#endif
714 717
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 4060918e0327..382bb17ef49f 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -133,6 +133,7 @@ static int pnpc_registered;
133static int pnp_registered; 133static int pnp_registered;
134#endif 134#endif
135#endif /* CONFIG_PNP */ 135#endif /* CONFIG_PNP */
136static unsigned int snd_cs423x_devices;
136 137
137struct snd_card_cs4236 { 138struct snd_card_cs4236 {
138 struct snd_cs4231 *chip; 139 struct snd_cs4231 *chip;
@@ -564,7 +565,7 @@ static int __init snd_cs423x_nonpnp_probe(struct platform_device *pdev)
564 snd_card_free(card); 565 snd_card_free(card);
565 return err; 566 return err;
566 } 567 }
567 568
568 platform_set_drvdata(pdev, card); 569 platform_set_drvdata(pdev, card);
569 return 0; 570 return 0;
570} 571}
@@ -650,6 +651,7 @@ static int __devinit snd_cs4232_pnpbios_detect(struct pnp_dev *pdev,
650 } 651 }
651 pnp_set_drvdata(pdev, card); 652 pnp_set_drvdata(pdev, card);
652 dev++; 653 dev++;
654 snd_cs423x_devices++;
653 return 0; 655 return 0;
654} 656}
655 657
@@ -713,6 +715,7 @@ static int __devinit snd_cs423x_pnpc_detect(struct pnp_card_link *pcard,
713 } 715 }
714 pnp_set_card_drvdata(pcard, card); 716 pnp_set_card_drvdata(pcard, card);
715 dev++; 717 dev++;
718 snd_cs423x_devices++;
716 return 0; 719 return 0;
717} 720}
718 721
@@ -721,7 +724,7 @@ static void __devexit snd_cs423x_pnpc_remove(struct pnp_card_link * pcard)
721 snd_card_free(pnp_get_card_drvdata(pcard)); 724 snd_card_free(pnp_get_card_drvdata(pcard));
722 pnp_set_card_drvdata(pcard, NULL); 725 pnp_set_card_drvdata(pcard, NULL);
723} 726}
724 727
725#ifdef CONFIG_PM 728#ifdef CONFIG_PM
726static int snd_cs423x_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state) 729static int snd_cs423x_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state)
727{ 730{
@@ -766,7 +769,7 @@ static void __init_or_module snd_cs423x_unregister_all(void)
766 769
767static int __init alsa_card_cs423x_init(void) 770static int __init alsa_card_cs423x_init(void)
768{ 771{
769 int i, err, cards = 0; 772 int i, err;
770 773
771 if ((err = platform_driver_register(&cs423x_nonpnp_driver)) < 0) 774 if ((err = platform_driver_register(&cs423x_nonpnp_driver)) < 0)
772 return err; 775 return err;
@@ -782,24 +785,20 @@ static int __init alsa_card_cs423x_init(void)
782 goto errout; 785 goto errout;
783 } 786 }
784 platform_devices[i] = device; 787 platform_devices[i] = device;
785 cards++; 788 snd_cs423x_devices++;
786 } 789 }
787#ifdef CONFIG_PNP 790#ifdef CONFIG_PNP
788#ifdef CS4232 791#ifdef CS4232
789 i = pnp_register_driver(&cs4232_pnp_driver); 792 err = pnp_register_driver(&cs4232_pnp_driver);
790 if (i >= 0) { 793 if (!err)
791 pnp_registered = 1; 794 pnp_registered = 1;
792 cards += i;
793 }
794#endif 795#endif
795 i = pnp_register_card_driver(&cs423x_pnpc_driver); 796 err = pnp_register_card_driver(&cs423x_pnpc_driver);
796 if (i >= 0) { 797 if (!err)
797 pnpc_registered = 1; 798 pnpc_registered = 1;
798 cards += i;
799 }
800#endif /* CONFIG_PNP */ 799#endif /* CONFIG_PNP */
801 800
802 if (!cards) { 801 if (!snd_cs423x_devices) {
803#ifdef MODULE 802#ifdef MODULE
804 printk(KERN_ERR IDENT " soundcard not found or device busy\n"); 803 printk(KERN_ERR IDENT " soundcard not found or device busy\n");
805#endif 804#endif
diff --git a/sound/isa/dt019x.c b/sound/isa/dt019x.c
index 50e7bc5ef561..0acb4e5da47f 100644
--- a/sound/isa/dt019x.c
+++ b/sound/isa/dt019x.c
@@ -272,6 +272,8 @@ static int __devinit snd_card_dt019x_probe(int dev, struct pnp_card_link *pcard,
272 return 0; 272 return 0;
273} 273}
274 274
275static unsigned int __devinitdata dt019x_devices;
276
275static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card, 277static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card,
276 const struct pnp_card_device_id *pid) 278 const struct pnp_card_device_id *pid)
277{ 279{
@@ -285,6 +287,7 @@ static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card,
285 if (res < 0) 287 if (res < 0)
286 return res; 288 return res;
287 dev++; 289 dev++;
290 dt019x_devices++;
288 return 0; 291 return 0;
289 } 292 }
290 return -ENODEV; 293 return -ENODEV;
@@ -336,10 +339,13 @@ static struct pnp_card_driver dt019x_pnpc_driver = {
336 339
337static int __init alsa_card_dt019x_init(void) 340static int __init alsa_card_dt019x_init(void)
338{ 341{
339 int cards = 0; 342 int err;
343
344 err = pnp_register_card_driver(&dt019x_pnpc_driver);
345 if (err)
346 return err;
340 347
341 cards = pnp_register_card_driver(&dt019x_pnpc_driver); 348 if (!dt019x_devices) {
342 if (cards <= 0) {
343 pnp_unregister_card_driver(&dt019x_pnpc_driver); 349 pnp_unregister_card_driver(&dt019x_pnpc_driver);
344#ifdef MODULE 350#ifdef MODULE
345 snd_printk(KERN_ERR "no DT-019X / ALS-007 based soundcards found\n"); 351 snd_printk(KERN_ERR "no DT-019X / ALS-007 based soundcards found\n");
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 721955d26194..9fbc185b4cc2 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -2204,7 +2204,7 @@ static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev)
2204 return snd_card_register(card); 2204 return snd_card_register(card);
2205} 2205}
2206 2206
2207static int __init snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devptr) 2207static int __devinit snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devptr)
2208{ 2208{
2209 struct snd_card *card; 2209 struct snd_card *card;
2210 int err; 2210 int err;
@@ -2221,7 +2221,7 @@ static int __init snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devp
2221 return 0; 2221 return 0;
2222} 2222}
2223 2223
2224static int __init snd_es18xx_nonpnp_probe(struct platform_device *pdev) 2224static int __devinit snd_es18xx_nonpnp_probe(struct platform_device *pdev)
2225{ 2225{
2226 int dev = pdev->id; 2226 int dev = pdev->id;
2227 int err; 2227 int err;
@@ -2297,6 +2297,8 @@ static struct platform_driver snd_es18xx_nonpnp_driver = {
2297 2297
2298 2298
2299#ifdef CONFIG_PNP 2299#ifdef CONFIG_PNP
2300static unsigned int __devinitdata es18xx_pnp_devices;
2301
2300static int __devinit snd_audiodrive_pnp_detect(struct pnp_card_link *pcard, 2302static int __devinit snd_audiodrive_pnp_detect(struct pnp_card_link *pcard,
2301 const struct pnp_card_device_id *pid) 2303 const struct pnp_card_device_id *pid)
2302{ 2304{
@@ -2327,6 +2329,7 @@ static int __devinit snd_audiodrive_pnp_detect(struct pnp_card_link *pcard,
2327 2329
2328 pnp_set_card_drvdata(pcard, card); 2330 pnp_set_card_drvdata(pcard, card);
2329 dev++; 2331 dev++;
2332 es18xx_pnp_devices++;
2330 return 0; 2333 return 0;
2331} 2334}
2332 2335
@@ -2397,10 +2400,10 @@ static int __init alsa_card_es18xx_init(void)
2397 } 2400 }
2398 2401
2399#ifdef CONFIG_PNP 2402#ifdef CONFIG_PNP
2400 i = pnp_register_card_driver(&es18xx_pnpc_driver); 2403 err = pnp_register_card_driver(&es18xx_pnpc_driver);
2401 if (i >= 0) { 2404 if (!err) {
2402 pnp_registered = 1; 2405 pnp_registered = 1;
2403 cards += i; 2406 cards += es18xx_pnp_devices;
2404 } 2407 }
2405#endif 2408#endif
2406 2409
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 2cacd0fa6871..de71b7a99c83 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -791,7 +791,7 @@ static int __devinit snd_interwave_probe(struct snd_card *card, int dev)
791 return 0; 791 return 0;
792} 792}
793 793
794static int __init snd_interwave_nonpnp_probe1(int dev, struct platform_device *devptr) 794static int __devinit snd_interwave_nonpnp_probe1(int dev, struct platform_device *devptr)
795{ 795{
796 struct snd_card *card; 796 struct snd_card *card;
797 int err; 797 int err;
@@ -809,7 +809,7 @@ static int __init snd_interwave_nonpnp_probe1(int dev, struct platform_device *d
809 return 0; 809 return 0;
810} 810}
811 811
812static int __init snd_interwave_nonpnp_probe(struct platform_device *pdev) 812static int __devinit snd_interwave_nonpnp_probe(struct platform_device *pdev)
813{ 813{
814 int dev = pdev->id; 814 int dev = pdev->id;
815 int err; 815 int err;
@@ -867,6 +867,7 @@ static struct platform_driver snd_interwave_driver = {
867}; 867};
868 868
869#ifdef CONFIG_PNP 869#ifdef CONFIG_PNP
870static unsigned int __devinitdata interwave_pnp_devices;
870 871
871static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard, 872static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard,
872 const struct pnp_card_device_id *pid) 873 const struct pnp_card_device_id *pid)
@@ -897,6 +898,7 @@ static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard,
897 } 898 }
898 pnp_set_card_drvdata(pcard, card); 899 pnp_set_card_drvdata(pcard, card);
899 dev++; 900 dev++;
901 interwave_pnp_devices++;
900 return 0; 902 return 0;
901} 903}
902 904
@@ -954,10 +956,10 @@ static int __init alsa_card_interwave_init(void)
954 } 956 }
955 957
956 /* ISA PnP cards */ 958 /* ISA PnP cards */
957 i = pnp_register_card_driver(&interwave_pnpc_driver); 959 err = pnp_register_card_driver(&interwave_pnpc_driver);
958 if (i >= 0) { 960 if (!err) {
959 pnp_registered = 1; 961 pnp_registered = 1;
960 cards += i; 962 cards += interwave_pnp_devices;;
961 } 963 }
962 964
963 if (!cards) { 965 if (!cards) {
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 56fcd8a946a4..c906e205d7d5 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -95,6 +95,7 @@ static struct platform_device *platform_devices[SNDRV_CARDS];
95static int pnp_registered; 95static int pnp_registered;
96static int pnpc_registered; 96static int pnpc_registered;
97#endif 97#endif
98static unsigned int snd_opl3sa2_devices;
98 99
99/* control ports */ 100/* control ports */
100#define OPL3SA2_PM_CTRL 0x01 101#define OPL3SA2_PM_CTRL 0x01
@@ -760,6 +761,7 @@ static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
760 } 761 }
761 pnp_set_drvdata(pdev, card); 762 pnp_set_drvdata(pdev, card);
762 dev++; 763 dev++;
764 snd_opl3sa2_devices++;
763 return 0; 765 return 0;
764} 766}
765 767
@@ -826,6 +828,7 @@ static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
826 } 828 }
827 pnp_set_card_drvdata(pcard, card); 829 pnp_set_card_drvdata(pcard, card);
828 dev++; 830 dev++;
831 snd_opl3sa2_devices++;
829 return 0; 832 return 0;
830} 833}
831 834
@@ -944,7 +947,7 @@ static void __init_or_module snd_opl3sa2_unregister_all(void)
944 947
945static int __init alsa_card_opl3sa2_init(void) 948static int __init alsa_card_opl3sa2_init(void)
946{ 949{
947 int i, err, cards = 0; 950 int i, err;
948 951
949 if ((err = platform_driver_register(&snd_opl3sa2_nonpnp_driver)) < 0) 952 if ((err = platform_driver_register(&snd_opl3sa2_nonpnp_driver)) < 0)
950 return err; 953 return err;
@@ -964,23 +967,19 @@ static int __init alsa_card_opl3sa2_init(void)
964 goto errout; 967 goto errout;
965 } 968 }
966 platform_devices[i] = device; 969 platform_devices[i] = device;
967 cards++; 970 snd_opl3sa2_devices++;
968 } 971 }
969 972
970#ifdef CONFIG_PNP 973#ifdef CONFIG_PNP
971 err = pnp_register_driver(&opl3sa2_pnp_driver); 974 err = pnp_register_driver(&opl3sa2_pnp_driver);
972 if (err >= 0) { 975 if (!err)
973 pnp_registered = 1; 976 pnp_registered = 1;
974 cards += err;
975 }
976 err = pnp_register_card_driver(&opl3sa2_pnpc_driver); 977 err = pnp_register_card_driver(&opl3sa2_pnpc_driver);
977 if (err >= 0) { 978 if (!err)
978 pnpc_registered = 1; 979 pnpc_registered = 1;
979 cards += err;
980 }
981#endif 980#endif
982 981
983 if (!cards) { 982 if (!snd_opl3sa2_devices) {
984#ifdef MODULE 983#ifdef MODULE
985 snd_printk(KERN_ERR "Yamaha OPL3-SA soundcard not found or device busy\n"); 984 snd_printk(KERN_ERR "Yamaha OPL3-SA soundcard not found or device busy\n");
986#endif 985#endif
diff --git a/sound/isa/sb/es968.c b/sound/isa/sb/es968.c
index 9da80bfa3027..d4d65b84265a 100644
--- a/sound/isa/sb/es968.c
+++ b/sound/isa/sb/es968.c
@@ -124,7 +124,7 @@ static int __devinit snd_card_es968_pnp(int dev, struct snd_card_es968 *acard,
124 return 0; 124 return 0;
125} 125}
126 126
127static int __init snd_card_es968_probe(int dev, 127static int __devinit snd_card_es968_probe(int dev,
128 struct pnp_card_link *pcard, 128 struct pnp_card_link *pcard,
129 const struct pnp_card_device_id *pid) 129 const struct pnp_card_device_id *pid)
130{ 130{
@@ -182,6 +182,8 @@ static int __init snd_card_es968_probe(int dev,
182 return 0; 182 return 0;
183} 183}
184 184
185static unsigned int __devinitdata es968_devices;
186
185static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card, 187static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card,
186 const struct pnp_card_device_id *id) 188 const struct pnp_card_device_id *id)
187{ 189{
@@ -195,6 +197,7 @@ static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card,
195 if (res < 0) 197 if (res < 0)
196 return res; 198 return res;
197 dev++; 199 dev++;
200 es968_devices++;
198 return 0; 201 return 0;
199 } 202 }
200 return -ENODEV; 203 return -ENODEV;
@@ -246,8 +249,11 @@ static struct pnp_card_driver es968_pnpc_driver = {
246 249
247static int __init alsa_card_es968_init(void) 250static int __init alsa_card_es968_init(void)
248{ 251{
249 int cards = pnp_register_card_driver(&es968_pnpc_driver); 252 int err = pnp_register_card_driver(&es968_pnpc_driver);
250 if (cards <= 0) { 253 if (err)
254 return err;
255
256 if (!es968_devices) {
251 pnp_unregister_card_driver(&es968_pnpc_driver); 257 pnp_unregister_card_driver(&es968_pnpc_driver);
252#ifdef MODULE 258#ifdef MODULE
253 snd_printk(KERN_ERR "no ES968 based soundcards found\n"); 259 snd_printk(KERN_ERR "no ES968 based soundcards found\n");
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 5737ab76160c..21ea65925a9e 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -369,7 +369,7 @@ static struct snd_card *snd_sb16_card_new(int dev)
369 return card; 369 return card;
370} 370}
371 371
372static int __init snd_sb16_probe(struct snd_card *card, int dev) 372static int __devinit snd_sb16_probe(struct snd_card *card, int dev)
373{ 373{
374 int xirq, xdma8, xdma16; 374 int xirq, xdma8, xdma16;
375 struct snd_sb *chip; 375 struct snd_sb *chip;
@@ -518,7 +518,7 @@ static int snd_sb16_resume(struct snd_card *card)
518} 518}
519#endif 519#endif
520 520
521static int __init snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr) 521static int __devinit snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr)
522{ 522{
523 struct snd_card_sb16 *acard; 523 struct snd_card_sb16 *acard;
524 struct snd_card *card; 524 struct snd_card *card;
@@ -548,7 +548,7 @@ static int __init snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr
548} 548}
549 549
550 550
551static int __init snd_sb16_nonpnp_probe(struct platform_device *pdev) 551static int __devinit snd_sb16_nonpnp_probe(struct platform_device *pdev)
552{ 552{
553 int dev = pdev->id; 553 int dev = pdev->id;
554 int err; 554 int err;
@@ -629,6 +629,7 @@ static struct platform_driver snd_sb16_nonpnp_driver = {
629 629
630 630
631#ifdef CONFIG_PNP 631#ifdef CONFIG_PNP
632static unsigned int __devinitdata sb16_pnp_devices;
632 633
633static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard, 634static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard,
634 const struct pnp_card_device_id *pid) 635 const struct pnp_card_device_id *pid)
@@ -651,6 +652,7 @@ static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard,
651 } 652 }
652 pnp_set_card_drvdata(pcard, card); 653 pnp_set_card_drvdata(pcard, card);
653 dev++; 654 dev++;
655 sb16_pnp_devices++;
654 return 0; 656 return 0;
655 } 657 }
656 658
@@ -727,10 +729,10 @@ static int __init alsa_card_sb16_init(void)
727 } 729 }
728#ifdef CONFIG_PNP 730#ifdef CONFIG_PNP
729 /* PnP cards at last */ 731 /* PnP cards at last */
730 i = pnp_register_card_driver(&sb16_pnpc_driver); 732 err = pnp_register_card_driver(&sb16_pnpc_driver);
731 if (i >= 0) { 733 if (!err) {
732 pnp_registered = 1; 734 pnp_registered = 1;
733 cards += i; 735 cards += sb16_pnp_devices;
734 } 736 }
735#endif 737#endif
736 738
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 29bba8cc3ef3..48e5552d3444 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -1255,7 +1255,7 @@ static int __devinit create_sscape(int dev, struct snd_card **rcardp)
1255} 1255}
1256 1256
1257 1257
1258static int __init snd_sscape_probe(struct platform_device *pdev) 1258static int __devinit snd_sscape_probe(struct platform_device *pdev)
1259{ 1259{
1260 int dev = pdev->id; 1260 int dev = pdev->id;
1261 struct snd_card *card; 1261 struct snd_card *card;
@@ -1469,7 +1469,7 @@ static int __init sscape_init(void)
1469 if (ret < 0) 1469 if (ret < 0)
1470 return ret; 1470 return ret;
1471#ifdef CONFIG_PNP 1471#ifdef CONFIG_PNP
1472 if (pnp_register_card_driver(&sscape_pnpc_driver) >= 0) 1472 if (pnp_register_card_driver(&sscape_pnpc_driver) == 0)
1473 pnp_registered = 1; 1473 pnp_registered = 1;
1474#endif 1474#endif
1475 return 0; 1475 return 0;
diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c
index c0115bf9065e..2f13cd5d4dcb 100644
--- a/sound/isa/wavefront/wavefront.c
+++ b/sound/isa/wavefront/wavefront.c
@@ -589,7 +589,7 @@ snd_wavefront_probe (struct snd_card *card, int dev)
589 return snd_card_register(card); 589 return snd_card_register(card);
590} 590}
591 591
592static int __init snd_wavefront_nonpnp_probe(struct platform_device *pdev) 592static int __devinit snd_wavefront_nonpnp_probe(struct platform_device *pdev)
593{ 593{
594 int dev = pdev->id; 594 int dev = pdev->id;
595 struct snd_card *card; 595 struct snd_card *card;
@@ -637,6 +637,7 @@ static struct platform_driver snd_wavefront_driver = {
637 637
638 638
639#ifdef CONFIG_PNP 639#ifdef CONFIG_PNP
640static unsigned int __devinitdata wavefront_pnp_devices;
640 641
641static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard, 642static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
642 const struct pnp_card_device_id *pid) 643 const struct pnp_card_device_id *pid)
@@ -670,6 +671,7 @@ static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
670 671
671 pnp_set_card_drvdata(pcard, card); 672 pnp_set_card_drvdata(pcard, card);
672 dev++; 673 dev++;
674 wavefront_pnp_devices++;
673 return 0; 675 return 0;
674} 676}
675 677
@@ -729,10 +731,10 @@ static int __init alsa_card_wavefront_init(void)
729 } 731 }
730 732
731#ifdef CONFIG_PNP 733#ifdef CONFIG_PNP
732 i = pnp_register_card_driver(&wavefront_pnpc_driver); 734 err = pnp_register_card_driver(&wavefront_pnpc_driver);
733 if (i >= 0) { 735 if (!err) {
734 pnp_registered = 1; 736 pnp_registered = 1;
735 cards += i; 737 cards += wavefront_pnp_devices;
736 } 738 }
737#endif 739#endif
738 740
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c
index 1fbd5137f6d7..de60a059ff5f 100644
--- a/sound/oss/cmpci.c
+++ b/sound/oss/cmpci.c
@@ -1713,7 +1713,7 @@ static int mixer_ioctl(struct cm_state *s, unsigned int cmd, unsigned long arg)
1713 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */ 1713 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
1714 if (get_user(val, p)) 1714 if (get_user(val, p))
1715 return -EFAULT; 1715 return -EFAULT;
1716 i = generic_hweight32(val); 1716 i = hweight32(val);
1717 for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) { 1717 for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
1718 if (!(val & (1 << i))) 1718 if (!(val & (1 << i)))
1719 continue; 1719 continue;
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
index 7c59e2d4003a..c7f86f09c28d 100644
--- a/sound/oss/cs4232.c
+++ b/sound/oss/cs4232.c
@@ -360,6 +360,8 @@ static int __initdata synthio = -1;
360static int __initdata synthirq = -1; 360static int __initdata synthirq = -1;
361static int __initdata isapnp = 1; 361static int __initdata isapnp = 1;
362 362
363static unsigned int cs4232_devices;
364
363MODULE_DESCRIPTION("CS4232 based soundcard driver"); 365MODULE_DESCRIPTION("CS4232 based soundcard driver");
364MODULE_AUTHOR("Hannu Savolainen, Paul Barton-Davis"); 366MODULE_AUTHOR("Hannu Savolainen, Paul Barton-Davis");
365MODULE_LICENSE("GPL"); 367MODULE_LICENSE("GPL");
@@ -421,6 +423,7 @@ static int cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev
421 return -ENODEV; 423 return -ENODEV;
422 } 424 }
423 pnp_set_drvdata(dev,isapnpcfg); 425 pnp_set_drvdata(dev,isapnpcfg);
426 cs4232_devices++;
424 return 0; 427 return 0;
425} 428}
426 429
@@ -455,10 +458,11 @@ static int __init init_cs4232(void)
455#endif 458#endif
456 cfg.irq = -1; 459 cfg.irq = -1;
457 460
458 if (isapnp && 461 if (isapnp) {
459 (pnp_register_driver(&cs4232_driver) > 0) 462 pnp_register_driver(&cs4232_driver);
460 ) 463 if (cs4232_devices)
461 return 0; 464 return 0;
465 }
462 466
463 if(io==-1||irq==-1||dma==-1) 467 if(io==-1||irq==-1||dma==-1)
464 { 468 {
@@ -503,7 +507,8 @@ static int __init setup_cs4232(char *str)
503 int ints[7]; 507 int ints[7];
504 508
505 /* If we have isapnp cards, no need for options */ 509 /* If we have isapnp cards, no need for options */
506 if (pnp_register_driver(&cs4232_driver) > 0) 510 pnp_register_driver(&cs4232_driver);
511 if (cs4232_devices)
507 return 1; 512 return 1;
508 513
509 str = get_options(str, ARRAY_SIZE(ints), ints); 514 str = get_options(str, ARRAY_SIZE(ints), ints);
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c
index 6ba8d6f45fe8..3bbc8105e9f1 100644
--- a/sound/oss/dmasound/dmasound_awacs.c
+++ b/sound/oss/dmasound/dmasound_awacs.c
@@ -2798,7 +2798,7 @@ __init setup_beep(void)
2798 DBDMA_ALIGN(beep_dbdma_cmd_space); 2798 DBDMA_ALIGN(beep_dbdma_cmd_space);
2799 /* set up emergency dbdma cmd */ 2799 /* set up emergency dbdma cmd */
2800 emergency_dbdma_cmd = beep_dbdma_cmd+1 ; 2800 emergency_dbdma_cmd = beep_dbdma_cmd+1 ;
2801 beep_buf = (short *) kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); 2801 beep_buf = kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL);
2802 if (beep_buf == NULL) { 2802 if (beep_buf == NULL) {
2803 printk(KERN_ERR "dmasound_pmac: no memory for beep buffer\n"); 2803 printk(KERN_ERR "dmasound_pmac: no memory for beep buffer\n");
2804 kfree(beep_dbdma_cmd_space) ; 2804 kfree(beep_dbdma_cmd_space) ;
diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c
index 959a96794dba..25ae8e4a488d 100644
--- a/sound/oss/emu10k1/midi.c
+++ b/sound/oss/emu10k1/midi.c
@@ -65,7 +65,8 @@ static int midiin_add_buffer(struct emu10k1_mididevice *midi_dev, struct midi_hd
65 65
66 init_midi_hdr(midihdr); 66 init_midi_hdr(midihdr);
67 67
68 if ((midihdr->data = (u8 *) kmalloc(MIDIIN_BUFLEN, GFP_KERNEL)) == NULL) { 68 midihdr->data = kmalloc(MIDIIN_BUFLEN, GFP_KERNEL);
69 if (!midihdr->data) {
69 ERROR(); 70 ERROR();
70 kfree(midihdr); 71 kfree(midihdr);
71 return -1; 72 return -1;
@@ -334,7 +335,8 @@ static ssize_t emu10k1_midi_write(struct file *file, const char __user *buffer,
334 midihdr->bytesrecorded = 0; 335 midihdr->bytesrecorded = 0;
335 midihdr->flags = 0; 336 midihdr->flags = 0;
336 337
337 if ((midihdr->data = (u8 *) kmalloc(count, GFP_KERNEL)) == NULL) { 338 midihdr->data = kmalloc(count, GFP_KERNEL);
339 if (!midihdr->data) {
338 ERROR(); 340 ERROR();
339 kfree(midihdr); 341 kfree(midihdr);
340 return -EINVAL; 342 return -EINVAL;
@@ -545,7 +547,8 @@ int emu10k1_seq_midi_out(int dev, unsigned char midi_byte)
545 midihdr->bytesrecorded = 0; 547 midihdr->bytesrecorded = 0;
546 midihdr->flags = 0; 548 midihdr->flags = 0;
547 549
548 if ((midihdr->data = (u8 *) kmalloc(1, GFP_KERNEL)) == NULL) { 550 midihdr->data = kmalloc(1, GFP_KERNEL);
551 if (!midihdr->data) {
549 ERROR(); 552 ERROR();
550 kfree(midihdr); 553 kfree(midihdr);
551 return -EINVAL; 554 return -EINVAL;
diff --git a/sound/oss/esssolo1.c b/sound/oss/esssolo1.c
index 78d3e29ce968..6861563d7525 100644
--- a/sound/oss/esssolo1.c
+++ b/sound/oss/esssolo1.c
@@ -2348,7 +2348,7 @@ static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device
2348 /* Recording requires 24-bit DMA, so attempt to set dma mask 2348 /* Recording requires 24-bit DMA, so attempt to set dma mask
2349 * to 24 bits first, then 32 bits (playback only) if that fails. 2349 * to 24 bits first, then 32 bits (playback only) if that fails.
2350 */ 2350 */
2351 if (pci_set_dma_mask(pcidev, 0x00ffffff) && 2351 if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK) &&
2352 pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) { 2352 pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
2353 printk(KERN_WARNING "solo1: architecture does not support 24bit or 32bit PCI busmaster DMA\n"); 2353 printk(KERN_WARNING "solo1: architecture does not support 24bit or 32bit PCI busmaster DMA\n");
2354 return -ENODEV; 2354 return -ENODEV;
diff --git a/sound/oss/maestro3.c b/sound/oss/maestro3.c
index 66044aff2586..4a5e4237a110 100644
--- a/sound/oss/maestro3.c
+++ b/sound/oss/maestro3.c
@@ -2582,15 +2582,9 @@ static int alloc_dsp_suspendmem(struct m3_card *card)
2582 2582
2583 return 0; 2583 return 0;
2584} 2584}
2585static void free_dsp_suspendmem(struct m3_card *card)
2586{
2587 if(card->suspend_mem)
2588 vfree(card->suspend_mem);
2589}
2590 2585
2591#else 2586#else
2592#define alloc_dsp_suspendmem(args...) 0 2587#define alloc_dsp_suspendmem(args...) 0
2593#define free_dsp_suspendmem(args...)
2594#endif 2588#endif
2595 2589
2596/* 2590/*
@@ -2717,7 +2711,7 @@ out:
2717 if(ret) { 2711 if(ret) {
2718 if(card->iobase) 2712 if(card->iobase)
2719 release_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); 2713 release_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0));
2720 free_dsp_suspendmem(card); 2714 vfree(card->suspend_mem);
2721 if(card->ac97) { 2715 if(card->ac97) {
2722 unregister_sound_mixer(card->ac97->dev_mixer); 2716 unregister_sound_mixer(card->ac97->dev_mixer);
2723 kfree(card->ac97); 2717 kfree(card->ac97);
@@ -2760,7 +2754,7 @@ static void m3_remove(struct pci_dev *pci_dev)
2760 } 2754 }
2761 2755
2762 release_region(card->iobase, 256); 2756 release_region(card->iobase, 256);
2763 free_dsp_suspendmem(card); 2757 vfree(card->suspend_mem);
2764 kfree(card); 2758 kfree(card);
2765 } 2759 }
2766 devs = NULL; 2760 devs = NULL;
diff --git a/sound/oss/msnd.c b/sound/oss/msnd.c
index a7ad2b0a2ac0..5dbfc0f9c3c7 100644
--- a/sound/oss/msnd.c
+++ b/sound/oss/msnd.c
@@ -95,10 +95,8 @@ void msnd_fifo_init(msnd_fifo *f)
95 95
96void msnd_fifo_free(msnd_fifo *f) 96void msnd_fifo_free(msnd_fifo *f)
97{ 97{
98 if (f->data) { 98 vfree(f->data);
99 vfree(f->data); 99 f->data = NULL;
100 f->data = NULL;
101 }
102} 100}
103 101
104int msnd_fifo_alloc(msnd_fifo *f, size_t n) 102int msnd_fifo_alloc(msnd_fifo *f, size_t n)
diff --git a/sound/oss/sb_card.c b/sound/oss/sb_card.c
index 680b82e15298..4708cbdc3149 100644
--- a/sound/oss/sb_card.c
+++ b/sound/oss/sb_card.c
@@ -52,6 +52,7 @@ static int __initdata sm_games = 0; /* Logitech soundman games? */
52static struct sb_card_config *legacy = NULL; 52static struct sb_card_config *legacy = NULL;
53 53
54#ifdef CONFIG_PNP 54#ifdef CONFIG_PNP
55static int pnp_registered;
55static int __initdata pnp = 1; 56static int __initdata pnp = 1;
56/* 57/*
57static int __initdata uart401 = 0; 58static int __initdata uart401 = 0;
@@ -133,7 +134,7 @@ static void sb_unload(struct sb_card_config *scc)
133} 134}
134 135
135/* Register legacy card with OSS subsystem */ 136/* Register legacy card with OSS subsystem */
136static int sb_init_legacy(void) 137static int __init sb_init_legacy(void)
137{ 138{
138 struct sb_module_options sbmo = {0}; 139 struct sb_module_options sbmo = {0};
139 140
@@ -234,6 +235,8 @@ static void sb_dev2cfg(struct pnp_dev *dev, struct sb_card_config *scc)
234 } 235 }
235} 236}
236 237
238static unsigned int sb_pnp_devices;
239
237/* Probe callback function for the PnP API */ 240/* Probe callback function for the PnP API */
238static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device_id *card_id) 241static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device_id *card_id)
239{ 242{
@@ -264,6 +267,7 @@ static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device
264 scc->conf.dma, scc->conf.dma2); 267 scc->conf.dma, scc->conf.dma2);
265 268
266 pnp_set_card_drvdata(card, scc); 269 pnp_set_card_drvdata(card, scc);
270 sb_pnp_devices++;
267 271
268 return sb_register_oss(scc, &sbmo); 272 return sb_register_oss(scc, &sbmo);
269} 273}
@@ -289,6 +293,14 @@ static struct pnp_card_driver sb_pnp_driver = {
289MODULE_DEVICE_TABLE(pnp_card, sb_pnp_card_table); 293MODULE_DEVICE_TABLE(pnp_card, sb_pnp_card_table);
290#endif /* CONFIG_PNP */ 294#endif /* CONFIG_PNP */
291 295
296static void __init_or_module sb_unregister_all(void)
297{
298#ifdef CONFIG_PNP
299 if (pnp_registered)
300 pnp_unregister_card_driver(&sb_pnp_driver);
301#endif
302}
303
292static int __init sb_init(void) 304static int __init sb_init(void)
293{ 305{
294 int lres = 0; 306 int lres = 0;
@@ -307,17 +319,18 @@ static int __init sb_init(void)
307 319
308#ifdef CONFIG_PNP 320#ifdef CONFIG_PNP
309 if(pnp) { 321 if(pnp) {
310 pres = pnp_register_card_driver(&sb_pnp_driver); 322 int err = pnp_register_card_driver(&sb_pnp_driver);
323 if (!err)
324 pnp_registered = 1;
325 pres = sb_pnp_devices;
311 } 326 }
312#endif 327#endif
313 printk(KERN_INFO "sb: Init: Done\n"); 328 printk(KERN_INFO "sb: Init: Done\n");
314 329
315 /* If either PnP or Legacy registered a card then return 330 /* If either PnP or Legacy registered a card then return
316 * success */ 331 * success */
317 if (pres <= 0 && lres <= 0) { 332 if (pres == 0 && lres <= 0) {
318#ifdef CONFIG_PNP 333 sb_unregister_all();
319 pnp_unregister_card_driver(&sb_pnp_driver);
320#endif
321 return -ENODEV; 334 return -ENODEV;
322 } 335 }
323 return 0; 336 return 0;
@@ -333,14 +346,10 @@ static void __exit sb_exit(void)
333 sb_unload(legacy); 346 sb_unload(legacy);
334 } 347 }
335 348
336#ifdef CONFIG_PNP 349 sb_unregister_all();
337 pnp_unregister_card_driver(&sb_pnp_driver);
338#endif
339 350
340 if (smw_free) { 351 vfree(smw_free);
341 vfree(smw_free); 352 smw_free = NULL;
342 smw_free = NULL;
343 }
344} 353}
345 354
346module_init(sb_init); 355module_init(sb_init);
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index 347cd79c2502..6815c30e0bc1 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -1671,14 +1671,7 @@ void sequencer_init(void)
1671 1671
1672void sequencer_unload(void) 1672void sequencer_unload(void)
1673{ 1673{
1674 if(queue) 1674 vfree(queue);
1675 { 1675 vfree(iqueue);
1676 vfree(queue); 1676 queue = iqueue = NULL;
1677 queue=NULL;
1678 }
1679 if(iqueue)
1680 {
1681 vfree(iqueue);
1682 iqueue=NULL;
1683 }
1684} 1677}
diff --git a/sound/oss/sh_dac_audio.c b/sound/oss/sh_dac_audio.c
index 8a9917c919c2..3f7427cd195a 100644
--- a/sound/oss/sh_dac_audio.c
+++ b/sound/oss/sh_dac_audio.c
@@ -289,7 +289,7 @@ static int __init dac_audio_init(void)
289 289
290 in_use = 0; 290 in_use = 0;
291 291
292 data_buffer = (char *)kmalloc(BUFFER_SIZE, GFP_KERNEL); 292 data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
293 if (data_buffer == NULL) 293 if (data_buffer == NULL)
294 return -ENOMEM; 294 return -ENOMEM;
295 295
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c
index 69a4b8778b51..42bd276cfc39 100644
--- a/sound/oss/sonicvibes.c
+++ b/sound/oss/sonicvibes.c
@@ -116,6 +116,7 @@
116#include <linux/spinlock.h> 116#include <linux/spinlock.h>
117#include <linux/smp_lock.h> 117#include <linux/smp_lock.h>
118#include <linux/gameport.h> 118#include <linux/gameport.h>
119#include <linux/dma-mapping.h>
119#include <linux/mutex.h> 120#include <linux/mutex.h>
120 121
121 122
@@ -407,24 +408,6 @@ static inline unsigned ld2(unsigned int x)
407 return r; 408 return r;
408} 409}
409 410
410/*
411 * hweightN: returns the hamming weight (i.e. the number
412 * of bits set) of a N-bit word
413 */
414
415#ifdef hweight32
416#undef hweight32
417#endif
418
419static inline unsigned int hweight32(unsigned int w)
420{
421 unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
422 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
423 res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
424 res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
425 return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
426}
427
428/* --------------------------------------------------------------------- */ 411/* --------------------------------------------------------------------- */
429 412
430/* 413/*
@@ -2553,7 +2536,7 @@ static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id
2553 return -ENODEV; 2536 return -ENODEV;
2554 if (pcidev->irq == 0) 2537 if (pcidev->irq == 0)
2555 return -ENODEV; 2538 return -ENODEV;
2556 if (pci_set_dma_mask(pcidev, 0x00ffffff)) { 2539 if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK)) {
2557 printk(KERN_WARNING "sonicvibes: architecture does not support 24bit PCI busmaster DMA\n"); 2540 printk(KERN_WARNING "sonicvibes: architecture does not support 24bit PCI busmaster DMA\n");
2558 return -ENODEV; 2541 return -ENODEV;
2559 } 2542 }
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index b372e88e857f..5f140c7586b3 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -248,27 +248,6 @@ typedef struct lithium {
248} lithium_t; 248} lithium_t;
249 249
250/* 250/*
251 * li_create initializes the lithium_t structure and sets up vm mappings
252 * to access the registers.
253 * Returns 0 on success, -errno on failure.
254 */
255
256static int __init li_create(lithium_t *lith, unsigned long baseaddr)
257{
258 static void li_destroy(lithium_t *);
259
260 spin_lock_init(&lith->lock);
261 lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE);
262 lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE);
263 lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE);
264 if (!lith->page0 || !lith->page1 || !lith->page2) {
265 li_destroy(lith);
266 return -ENOMEM;
267 }
268 return 0;
269}
270
271/*
272 * li_destroy destroys the lithium_t structure and vm mappings. 251 * li_destroy destroys the lithium_t structure and vm mappings.
273 */ 252 */
274 253
@@ -289,6 +268,25 @@ static void li_destroy(lithium_t *lith)
289} 268}
290 269
291/* 270/*
271 * li_create initializes the lithium_t structure and sets up vm mappings
272 * to access the registers.
273 * Returns 0 on success, -errno on failure.
274 */
275
276static int __init li_create(lithium_t *lith, unsigned long baseaddr)
277{
278 spin_lock_init(&lith->lock);
279 lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE);
280 lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE);
281 lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE);
282 if (!lith->page0 || !lith->page1 || !lith->page2) {
283 li_destroy(lith);
284 return -ENOMEM;
285 }
286 return 0;
287}
288
289/*
292 * basic register accessors - read/write long/byte 290 * basic register accessors - read/write long/byte
293 */ 291 */
294 292
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 2aa5a7fdb6e0..c6c8333acc62 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -39,6 +39,7 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/compiler.h> 40#include <linux/compiler.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/dma-mapping.h>
42 43
43#include <sound/driver.h> 44#include <sound/driver.h>
44#include <sound/core.h> 45#include <sound/core.h>
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index e264136e8fb4..fc92b6896c24 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/dma-mapping.h>
36#include <sound/core.h> 37#include <sound/core.h>
37#include <sound/pcm.h> 38#include <sound/pcm.h>
38#include <sound/info.h> 39#include <sound/info.h>
@@ -2220,8 +2221,8 @@ static int __devinit snd_ali_create(struct snd_card *card,
2220 if ((err = pci_enable_device(pci)) < 0) 2221 if ((err = pci_enable_device(pci)) < 0)
2221 return err; 2222 return err;
2222 /* check, if we can restrict PCI DMA transfers to 31 bits */ 2223 /* check, if we can restrict PCI DMA transfers to 31 bits */
2223 if (pci_set_dma_mask(pci, 0x7fffffff) < 0 || 2224 if (pci_set_dma_mask(pci, DMA_31BIT_MASK) < 0 ||
2224 pci_set_consistent_dma_mask(pci, 0x7fffffff) < 0) { 2225 pci_set_consistent_dma_mask(pci, DMA_31BIT_MASK) < 0) {
2225 snd_printk(KERN_ERR "architecture does not support 31bit PCI busmaster DMA\n"); 2226 snd_printk(KERN_ERR "architecture does not support 31bit PCI busmaster DMA\n");
2226 pci_disable_device(pci); 2227 pci_disable_device(pci);
2227 return -ENXIO; 2228 return -ENXIO;
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 7b2ff5f4672e..100d8127a411 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -70,6 +70,7 @@
70#include <linux/slab.h> 70#include <linux/slab.h>
71#include <linux/gameport.h> 71#include <linux/gameport.h>
72#include <linux/moduleparam.h> 72#include <linux/moduleparam.h>
73#include <linux/dma-mapping.h>
73#include <sound/core.h> 74#include <sound/core.h>
74#include <sound/pcm.h> 75#include <sound/pcm.h>
75#include <sound/rawmidi.h> 76#include <sound/rawmidi.h>
@@ -688,8 +689,8 @@ static int __devinit snd_card_als4000_probe(struct pci_dev *pci,
688 return err; 689 return err;
689 } 690 }
690 /* check, if we can restrict PCI DMA transfers to 24 bits */ 691 /* check, if we can restrict PCI DMA transfers to 24 bits */
691 if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || 692 if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 ||
692 pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { 693 pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) {
693 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); 694 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n");
694 pci_disable_device(pci); 695 pci_disable_device(pci);
695 return -ENXIO; 696 return -ENXIO;
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index e077eb3fbe2f..680077e1e057 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -104,6 +104,7 @@
104#include <linux/slab.h> 104#include <linux/slab.h>
105#include <linux/gameport.h> 105#include <linux/gameport.h>
106#include <linux/moduleparam.h> 106#include <linux/moduleparam.h>
107#include <linux/dma-mapping.h>
107#include <sound/core.h> 108#include <sound/core.h>
108#include <sound/control.h> 109#include <sound/control.h>
109#include <sound/pcm.h> 110#include <sound/pcm.h>
@@ -1669,8 +1670,8 @@ snd_azf3328_create(struct snd_card *card,
1669 chip->irq = -1; 1670 chip->irq = -1;
1670 1671
1671 /* check if we can restrict PCI DMA transfers to 24 bits */ 1672 /* check if we can restrict PCI DMA transfers to 24 bits */
1672 if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || 1673 if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 ||
1673 pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { 1674 pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) {
1674 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); 1675 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n");
1675 err = -ENXIO; 1676 err = -ENXIO;
1676 goto out_err; 1677 goto out_err;
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 2208dbd48be9..3e332f398162 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -36,6 +36,7 @@
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
39#include <linux/dma-mapping.h>
39#include <sound/core.h> 40#include <sound/core.h>
40#include <sound/initval.h> 41#include <sound/initval.h>
41#include <sound/pcm.h> 42#include <sound/pcm.h>
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 0d556b09ad04..4d62fe439177 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -55,6 +55,7 @@
55#include <linux/gameport.h> 55#include <linux/gameport.h>
56#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
57#include <linux/delay.h> 57#include <linux/delay.h>
58#include <linux/dma-mapping.h>
58#include <sound/core.h> 59#include <sound/core.h>
59#include <sound/control.h> 60#include <sound/control.h>
60#include <sound/pcm.h> 61#include <sound/pcm.h>
@@ -1517,8 +1518,8 @@ static int __devinit snd_es1938_create(struct snd_card *card,
1517 if ((err = pci_enable_device(pci)) < 0) 1518 if ((err = pci_enable_device(pci)) < 0)
1518 return err; 1519 return err;
1519 /* check, if we can restrict PCI DMA transfers to 24 bits */ 1520 /* check, if we can restrict PCI DMA transfers to 24 bits */
1520 if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || 1521 if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 ||
1521 pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { 1522 pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) {
1522 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); 1523 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n");
1523 pci_disable_device(pci); 1524 pci_disable_device(pci);
1524 return -ENXIO; 1525 return -ENXIO;
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index dd465a186e11..e3ad17f53c29 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -104,6 +104,7 @@
104#include <linux/slab.h> 104#include <linux/slab.h>
105#include <linux/gameport.h> 105#include <linux/gameport.h>
106#include <linux/moduleparam.h> 106#include <linux/moduleparam.h>
107#include <linux/dma-mapping.h>
107#include <linux/mutex.h> 108#include <linux/mutex.h>
108 109
109#include <sound/core.h> 110#include <sound/core.h>
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 672e198317e1..b88eeba2f5d1 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -56,7 +56,9 @@
56#include <linux/dma-mapping.h> 56#include <linux/dma-mapping.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/moduleparam.h> 58#include <linux/moduleparam.h>
59#include <linux/dma-mapping.h>
59#include <linux/mutex.h> 60#include <linux/mutex.h>
61
60#include <sound/core.h> 62#include <sound/core.h>
61#include <sound/cs8427.h> 63#include <sound/cs8427.h>
62#include <sound/info.h> 64#include <sound/info.h>
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 8bc084956c28..44393e190929 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -41,6 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <linux/dma-mapping.h>
44#include <sound/core.h> 45#include <sound/core.h>
45#include <sound/info.h> 46#include <sound/info.h>
46#include <sound/control.h> 47#include <sound/control.h>
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 43ee3b2b948f..b5a095052d4c 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -28,6 +28,8 @@
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/dma-mapping.h>
32
31#include <sound/core.h> 33#include <sound/core.h>
32#include <sound/initval.h> 34#include <sound/initval.h>
33#include <sound/info.h> 35#include <sound/info.h>
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index f679779d96e3..35875c8aa299 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <linux/dma-mapping.h>
33 34
34#include <sound/core.h> 35#include <sound/core.h>
35#include <sound/initval.h> 36#include <sound/initval.h>
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 0cbef5fe6c63..ab78544bf042 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -313,7 +313,7 @@ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int chann
313} 313}
314 314
315/* 315/*
316 * SPDIF I/O capabilites (half-duplex mode) 316 * SPDIF I/O capabilities (half-duplex mode)
317 */ 317 */
318static struct snd_pcm_hardware snd_rme32_spdif_info = { 318static struct snd_pcm_hardware snd_rme32_spdif_info = {
319 .info = (SNDRV_PCM_INFO_MMAP_IOMEM | 319 .info = (SNDRV_PCM_INFO_MMAP_IOMEM |
@@ -339,7 +339,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_info = {
339}; 339};
340 340
341/* 341/*
342 * ADAT I/O capabilites (half-duplex mode) 342 * ADAT I/O capabilities (half-duplex mode)
343 */ 343 */
344static struct snd_pcm_hardware snd_rme32_adat_info = 344static struct snd_pcm_hardware snd_rme32_adat_info =
345{ 345{
@@ -364,7 +364,7 @@ static struct snd_pcm_hardware snd_rme32_adat_info =
364}; 364};
365 365
366/* 366/*
367 * SPDIF I/O capabilites (full-duplex mode) 367 * SPDIF I/O capabilities (full-duplex mode)
368 */ 368 */
369static struct snd_pcm_hardware snd_rme32_spdif_fd_info = { 369static struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
370 .info = (SNDRV_PCM_INFO_MMAP | 370 .info = (SNDRV_PCM_INFO_MMAP |
@@ -390,7 +390,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
390}; 390};
391 391
392/* 392/*
393 * ADAT I/O capabilites (full-duplex mode) 393 * ADAT I/O capabilities (full-duplex mode)
394 */ 394 */
395static struct snd_pcm_hardware snd_rme32_adat_fd_info = 395static struct snd_pcm_hardware snd_rme32_adat_fd_info =
396{ 396{
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 0e694b011dcc..6c2a9f4a7659 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -359,7 +359,7 @@ snd_rme96_capture_copy(struct snd_pcm_substream *substream,
359} 359}
360 360
361/* 361/*
362 * Digital output capabilites (S/PDIF) 362 * Digital output capabilities (S/PDIF)
363 */ 363 */
364static struct snd_pcm_hardware snd_rme96_playback_spdif_info = 364static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
365{ 365{
@@ -388,7 +388,7 @@ static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
388}; 388};
389 389
390/* 390/*
391 * Digital input capabilites (S/PDIF) 391 * Digital input capabilities (S/PDIF)
392 */ 392 */
393static struct snd_pcm_hardware snd_rme96_capture_spdif_info = 393static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
394{ 394{
@@ -417,7 +417,7 @@ static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
417}; 417};
418 418
419/* 419/*
420 * Digital output capabilites (ADAT) 420 * Digital output capabilities (ADAT)
421 */ 421 */
422static struct snd_pcm_hardware snd_rme96_playback_adat_info = 422static struct snd_pcm_hardware snd_rme96_playback_adat_info =
423{ 423{
@@ -442,7 +442,7 @@ static struct snd_pcm_hardware snd_rme96_playback_adat_info =
442}; 442};
443 443
444/* 444/*
445 * Digital input capabilites (ADAT) 445 * Digital input capabilities (ADAT)
446 */ 446 */
447static struct snd_pcm_hardware snd_rme96_capture_adat_info = 447static struct snd_pcm_hardware snd_rme96_capture_adat_info =
448{ 448{
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 980b9cd689dd..b5538efd146b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -2256,7 +2256,7 @@ static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm
2256 } 2256 }
2257 2257
2258 /* Channel playback mixer as default control 2258 /* Channel playback mixer as default control
2259 Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, thats to big for any alsamixer 2259 Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, thats too big for any alsamixer
2260 they are accesible via special IOCTL on hwdep 2260 they are accesible via special IOCTL on hwdep
2261 and the mixer 2dimensional mixer control */ 2261 and the mixer 2dimensional mixer control */
2262 2262
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7bbea3738b8a..2d66a09fe5ee 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -30,6 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/gameport.h> 31#include <linux/gameport.h>
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/dma-mapping.h>
33 34
34#include <sound/core.h> 35#include <sound/core.h>
35#include <sound/pcm.h> 36#include <sound/pcm.h>
@@ -1227,8 +1228,8 @@ static int __devinit snd_sonicvibes_create(struct snd_card *card,
1227 if ((err = pci_enable_device(pci)) < 0) 1228 if ((err = pci_enable_device(pci)) < 0)
1228 return err; 1229 return err;
1229 /* check, if we can restrict PCI DMA transfers to 24 bits */ 1230 /* check, if we can restrict PCI DMA transfers to 24 bits */
1230 if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || 1231 if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 ||
1231 pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { 1232 pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) {
1232 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); 1233 snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n");
1233 pci_disable_device(pci); 1234 pci_disable_device(pci);
1234 return -ENXIO; 1235 return -ENXIO;
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 83b7d8aba9e6..52178b8ad49d 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/gameport.h> 37#include <linux/gameport.h>
38#include <linux/dma-mapping.h>
38 39
39#include <sound/core.h> 40#include <sound/core.h>
40#include <sound/info.h> 41#include <sound/info.h>
@@ -3554,8 +3555,8 @@ int __devinit snd_trident_create(struct snd_card *card,
3554 if ((err = pci_enable_device(pci)) < 0) 3555 if ((err = pci_enable_device(pci)) < 0)
3555 return err; 3556 return err;
3556 /* check, if we can restrict PCI DMA transfers to 30 bits */ 3557 /* check, if we can restrict PCI DMA transfers to 30 bits */
3557 if (pci_set_dma_mask(pci, 0x3fffffff) < 0 || 3558 if (pci_set_dma_mask(pci, DMA_30BIT_MASK) < 0 ||
3558 pci_set_consistent_dma_mask(pci, 0x3fffffff) < 0) { 3559 pci_set_consistent_dma_mask(pci, DMA_30BIT_MASK) < 0) {
3559 snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n"); 3560 snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n");
3560 pci_disable_device(pci); 3561 pci_disable_device(pci);
3561 return -ENXIO; 3562 return -ENXIO;
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 394b53e20cb8..6f849720aef3 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -53,7 +53,7 @@
53struct sound_unit 53struct sound_unit
54{ 54{
55 int unit_minor; 55 int unit_minor;
56 struct file_operations *unit_fops; 56 const struct file_operations *unit_fops;
57 struct sound_unit *next; 57 struct sound_unit *next;
58 char name[32]; 58 char name[32];
59}; 59};
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(sound_class);
73 * join into it. Called with the lock asserted 73 * join into it. Called with the lock asserted
74 */ 74 */
75 75
76static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, struct file_operations *fops, int index, int low, int top) 76static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, const struct file_operations *fops, int index, int low, int top)
77{ 77{
78 int n=low; 78 int n=low;
79 79
@@ -153,7 +153,7 @@ static DEFINE_SPINLOCK(sound_loader_lock);
153 * list. Acquires locks as needed 153 * list. Acquires locks as needed
154 */ 154 */
155 155
156static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev) 156static int sound_insert_unit(struct sound_unit **list, const struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev)
157{ 157{
158 struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL); 158 struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL);
159 int r; 159 int r;
@@ -237,7 +237,7 @@ static struct sound_unit *chains[SOUND_STEP];
237 * a negative error code is returned. 237 * a negative error code is returned.
238 */ 238 */
239 239
240int register_sound_special_device(struct file_operations *fops, int unit, 240int register_sound_special_device(const struct file_operations *fops, int unit,
241 struct device *dev) 241 struct device *dev)
242{ 242{
243 const int chain = unit % SOUND_STEP; 243 const int chain = unit % SOUND_STEP;
@@ -301,7 +301,7 @@ int register_sound_special_device(struct file_operations *fops, int unit,
301 301
302EXPORT_SYMBOL(register_sound_special_device); 302EXPORT_SYMBOL(register_sound_special_device);
303 303
304int register_sound_special(struct file_operations *fops, int unit) 304int register_sound_special(const struct file_operations *fops, int unit)
305{ 305{
306 return register_sound_special_device(fops, unit, NULL); 306 return register_sound_special_device(fops, unit, NULL);
307} 307}
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(register_sound_special);
318 * number is returned, on failure a negative error code is returned. 318 * number is returned, on failure a negative error code is returned.
319 */ 319 */
320 320
321int register_sound_mixer(struct file_operations *fops, int dev) 321int register_sound_mixer(const struct file_operations *fops, int dev)
322{ 322{
323 return sound_insert_unit(&chains[0], fops, dev, 0, 128, 323 return sound_insert_unit(&chains[0], fops, dev, 0, 128,
324 "mixer", S_IRUSR | S_IWUSR, NULL); 324 "mixer", S_IRUSR | S_IWUSR, NULL);
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(register_sound_mixer);
336 * number is returned, on failure a negative error code is returned. 336 * number is returned, on failure a negative error code is returned.
337 */ 337 */
338 338
339int register_sound_midi(struct file_operations *fops, int dev) 339int register_sound_midi(const struct file_operations *fops, int dev)
340{ 340{
341 return sound_insert_unit(&chains[2], fops, dev, 2, 130, 341 return sound_insert_unit(&chains[2], fops, dev, 2, 130,
342 "midi", S_IRUSR | S_IWUSR, NULL); 342 "midi", S_IRUSR | S_IWUSR, NULL);
@@ -362,7 +362,7 @@ EXPORT_SYMBOL(register_sound_midi);
362 * and will always allocate them as a matching pair - eg dsp3/audio3 362 * and will always allocate them as a matching pair - eg dsp3/audio3
363 */ 363 */
364 364
365int register_sound_dsp(struct file_operations *fops, int dev) 365int register_sound_dsp(const struct file_operations *fops, int dev)
366{ 366{
367 return sound_insert_unit(&chains[3], fops, dev, 3, 131, 367 return sound_insert_unit(&chains[3], fops, dev, 3, 131,
368 "dsp", S_IWUSR | S_IRUSR, NULL); 368 "dsp", S_IWUSR | S_IRUSR, NULL);
@@ -381,7 +381,7 @@ EXPORT_SYMBOL(register_sound_dsp);
381 */ 381 */
382 382
383 383
384int register_sound_synth(struct file_operations *fops, int dev) 384int register_sound_synth(const struct file_operations *fops, int dev)
385{ 385{
386 return sound_insert_unit(&chains[9], fops, dev, 9, 137, 386 return sound_insert_unit(&chains[9], fops, dev, 9, 137,
387 "synth", S_IRUSR | S_IWUSR, NULL); 387 "synth", S_IRUSR | S_IWUSR, NULL);
@@ -501,7 +501,7 @@ int soundcore_open(struct inode *inode, struct file *file)
501 int chain; 501 int chain;
502 int unit = iminor(inode); 502 int unit = iminor(inode);
503 struct sound_unit *s; 503 struct sound_unit *s;
504 struct file_operations *new_fops = NULL; 504 const struct file_operations *new_fops = NULL;
505 505
506 chain=unit&0x0F; 506 chain=unit&0x0F;
507 if(chain==4 || chain==5) /* dsp/audio/dsp16 */ 507 if(chain==4 || chain==5) /* dsp/audio/dsp16 */
@@ -540,7 +540,7 @@ int soundcore_open(struct inode *inode, struct file *file)
540 * switching ->f_op in the first place. 540 * switching ->f_op in the first place.
541 */ 541 */
542 int err = 0; 542 int err = 0;
543 struct file_operations *old_fops = file->f_op; 543 const struct file_operations *old_fops = file->f_op;
544 file->f_op = new_fops; 544 file->f_op = new_fops;
545 spin_unlock(&sound_loader_lock); 545 spin_unlock(&sound_loader_lock);
546 if(file->f_op->open) 546 if(file->f_op->open)
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 315855082fe1..fe67a92e2a1a 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -404,7 +404,7 @@ static void usX2Y_usbpcm_subs_startup(struct snd_usX2Y_substream *subs)
404 struct usX2Ydev * usX2Y = subs->usX2Y; 404 struct usX2Ydev * usX2Y = subs->usX2Y;
405 usX2Y->prepare_subs = subs; 405 usX2Y->prepare_subs = subs;
406 subs->urb[0]->start_frame = -1; 406 subs->urb[0]->start_frame = -1;
407 smp_wmb(); // Make shure above modifications are seen by i_usX2Y_subs_startup() 407 smp_wmb(); // Make sure above modifications are seen by i_usX2Y_subs_startup()
408 usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup); 408 usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup);
409} 409}
410 410