-rw-r--r--  CREDITS | 44
-rw-r--r--  Documentation/ABI/removed/devfs | 2
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 4
-rw-r--r--  Documentation/MSI-HOWTO.txt | 6
-rw-r--r--  Documentation/SubmitChecklist | 4
-rw-r--r--  Documentation/SubmittingPatches | 3
-rw-r--r--  Documentation/arm/Interrupts | 2
-rw-r--r--  Documentation/arm/Samsung-S3C24XX/H1940.txt | 4
-rw-r--r--  Documentation/auxdisplay/cfag12864b | 6
-rw-r--r--  Documentation/binfmt_misc.txt | 2
-rw-r--r--  Documentation/block/ioprio.txt | 8
-rw-r--r--  Documentation/cpu-freq/cpufreq-stats.txt | 2
-rw-r--r--  Documentation/cpu-hotplug.txt | 9
-rw-r--r--  Documentation/crypto/api-intro.txt | 6
-rw-r--r--  Documentation/device-mapper/delay.txt | 26
-rw-r--r--  Documentation/driver-model/platform.txt | 4
-rw-r--r--  Documentation/dvb/README.dvb-usb | 2
-rw-r--r--  Documentation/dvb/contributors.txt | 2
-rw-r--r--  Documentation/fb/arkfb.txt | 68
-rw-r--r--  Documentation/fb/aty128fb.txt | 4
-rw-r--r--  Documentation/fb/framebuffer.txt | 16
-rw-r--r--  Documentation/fb/imacfb.txt | 2
-rw-r--r--  Documentation/fb/sstfb.txt | 4
-rw-r--r--  Documentation/fb/vt8623fb.txt | 64
-rw-r--r--  Documentation/filesystems/Locking | 6
-rw-r--r--  Documentation/filesystems/hpfs.txt | 2
-rw-r--r--  Documentation/filesystems/ntfs.txt | 2
-rw-r--r--  Documentation/filesystems/proc.txt | 2
-rw-r--r--  Documentation/filesystems/relay.txt | 2
-rw-r--r--  Documentation/filesystems/xip.txt | 2
-rw-r--r--  Documentation/fujitsu/frv/gdbstub.txt | 2
-rw-r--r--  Documentation/hwmon/adm1026 | 2
-rw-r--r--  Documentation/hwmon/gl518sm | 2
-rw-r--r--  Documentation/hwmon/lm83 | 2
-rw-r--r--  Documentation/hwmon/sis5595 | 2
-rw-r--r--  Documentation/hwmon/via686a | 2
-rw-r--r--  Documentation/hwmon/w83792d | 2
-rw-r--r--  Documentation/i2c/busses/i2c-i810 | 2
-rw-r--r--  Documentation/i2c/busses/i2c-sis96x | 2
-rw-r--r--  Documentation/i2c/busses/i2c-via | 2
-rw-r--r--  Documentation/i2c/busses/i2c-viapro | 2
-rw-r--r--  Documentation/i2c/i2c-protocol | 2
-rw-r--r--  Documentation/i2o/README | 4
-rw-r--r--  Documentation/input/atarikbd.txt | 4
-rw-r--r--  Documentation/input/xpad.txt | 6
-rw-r--r--  Documentation/isdn/CREDITS | 4
-rw-r--r--  Documentation/isdn/README | 2
-rw-r--r--  Documentation/isdn/README.icn | 4
-rw-r--r--  Documentation/java.txt | 2
-rw-r--r--  Documentation/kernel-docs.txt | 2
-rw-r--r--  Documentation/m68k/README.buddha | 2
-rw-r--r--  Documentation/magic-number.txt | 2
-rw-r--r--  Documentation/md.txt | 72
-rw-r--r--  Documentation/netlabel/introduction.txt | 2
-rw-r--r--  Documentation/networking/6pack.txt | 2
-rw-r--r--  Documentation/networking/NAPI_HOWTO.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  Documentation/networking/slicecom.hun | 2
-rw-r--r--  Documentation/networking/slicecom.txt | 4
-rw-r--r--  Documentation/networking/tms380tr.txt | 42
-rw-r--r--  Documentation/networking/udplite.txt | 2
-rw-r--r--  Documentation/networking/wan-router.txt | 4
-rw-r--r--  Documentation/pci.txt | 2
-rw-r--r--  Documentation/pcieaer-howto.txt | 2
-rw-r--r--  Documentation/pnp.txt | 2
-rw-r--r--  Documentation/power/swsusp.txt | 2
-rw-r--r--  Documentation/power/userland-swsusp.txt | 26
-rw-r--r--  Documentation/powerpc/booting-without-of.txt | 4
-rw-r--r--  Documentation/s390/Debugging390.txt | 2
-rw-r--r--  Documentation/scsi/aha152x.txt | 2
-rw-r--r--  Documentation/scsi/aic7xxx.txt | 2
-rw-r--r--  Documentation/scsi/aic7xxx_old.txt | 2
-rw-r--r--  Documentation/scsi/ncr53c8xx.txt | 2
-rw-r--r--  Documentation/scsi/st.txt | 2
-rw-r--r--  Documentation/scsi/sym53c8xx_2.txt | 2
-rw-r--r--  Documentation/scsi/tmscsim.txt | 2
-rw-r--r--  Documentation/sonypi.txt | 2
-rw-r--r--  Documentation/sound/oss/mwave | 2
-rw-r--r--  Documentation/sysctl/kernel.txt | 4
-rw-r--r--  Documentation/usb/CREDITS | 2
-rw-r--r--  Documentation/usb/usb-serial.txt | 6
-rw-r--r--  Documentation/video4linux/README.pvrusb2 | 2
-rw-r--r--  Documentation/video4linux/Zoran | 2
-rw-r--r--  Documentation/video4linux/meye.txt | 2
-rw-r--r--  Documentation/video4linux/ov511.txt | 4
-rw-r--r--  Documentation/vm/slabinfo.c | 426
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/Makefile | 5
-rw-r--r--  arch/arm/kernel/head-nommu.S | 2
-rw-r--r--  arch/arm/kernel/head.S | 2
-rw-r--r--  arch/arm/kernel/init_task.c | 2
-rw-r--r--  arch/arm/kernel/module.c | 4
-rw-r--r--  arch/arm/kernel/smp.c | 4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 12
-rw-r--r--  arch/arm/mach-at91/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap1/Kconfig | 7
-rw-r--r--  arch/arm/mach-omap1/Makefile | 1
-rw-r--r--  arch/arm/mach-omap1/board-fsample.c | 2
-rw-r--r--  arch/arm/mach-omap1/board-h3.c | 2
-rw-r--r--  arch/arm/mach-omap1/board-innovator.c | 2
-rw-r--r--  arch/arm/mach-omap1/board-perseus2.c | 2
-rw-r--r--  arch/arm/mach-omap1/devices.c | 71
-rw-r--r--  arch/arm/mach-omap1/io.c | 4
-rw-r--r--  arch/arm/mach-omap1/mailbox.c | 206
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/board-h4.c | 14
-rw-r--r--  arch/arm/mach-omap2/devices.c | 66
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 12
-rw-r--r--  arch/arm/mach-omap2/io.c | 21
-rw-r--r--  arch/arm/mach-omap2/mailbox.c | 318
-rw-r--r--  arch/arm/mach-s3c2410/sleep.S | 2
-rw-r--r--  arch/arm/mm/Kconfig | 32
-rw-r--r--  arch/arm/mm/Makefile | 3
-rw-r--r--  arch/arm/mm/abort-ev7.S | 32
-rw-r--r--  arch/arm/mm/cache-v7.S | 253
-rw-r--r--  arch/arm/mm/context.c | 17
-rw-r--r--  arch/arm/mm/proc-macros.S | 12
-rw-r--r--  arch/arm/mm/proc-v7.S | 262
-rw-r--r--  arch/arm/plat-omap/Kconfig | 13
-rw-r--r--  arch/arm/plat-omap/Makefile | 5
-rw-r--r--  arch/arm/plat-omap/clock.c | 37
-rw-r--r--  arch/arm/plat-omap/common.c | 8
-rw-r--r--  arch/arm/plat-omap/debug-leds.c | 314
-rw-r--r--  arch/arm/plat-omap/devices.c | 74
-rw-r--r--  arch/arm/plat-omap/dma.c | 25
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 2
-rw-r--r--  arch/arm/plat-omap/fb.c | 305
-rw-r--r--  arch/arm/plat-omap/gpio.c | 3
-rw-r--r--  arch/arm/plat-omap/mailbox.c | 509
-rw-r--r--  arch/arm/plat-omap/mailbox.h | 100
-rw-r--r--  arch/arm/plat-omap/sram.c | 56
-rw-r--r--  arch/arm/plat-omap/usb.c | 199
-rw-r--r--  arch/arm/plat-s3c24xx/sleep.S | 2
-rw-r--r--  arch/avr32/Makefile | 2
-rw-r--r--  arch/avr32/kernel/process.c | 6
-rw-r--r--  arch/avr32/kernel/ptrace.c | 2
-rw-r--r--  arch/avr32/kernel/syscall_table.S | 1
-rw-r--r--  arch/avr32/kernel/traps.c | 2
-rw-r--r--  arch/avr32/kernel/vmlinux.lds.c | 2
-rw-r--r--  arch/avr32/mach-at32ap/clock.c | 2
-rw-r--r--  arch/avr32/mm/dma-coherent.c | 12
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c | 2
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 6
-rw-r--r--  arch/cris/arch-v32/drivers/Kconfig | 2
-rw-r--r--  arch/frv/Kconfig | 6
-rw-r--r--  arch/frv/kernel/process.c | 4
-rw-r--r--  arch/frv/mm/pgalloc.c | 22
-rw-r--r--  arch/h8300/Kconfig.debug | 4
-rw-r--r--  arch/h8300/kernel/asm-offsets.c | 2
-rw-r--r--  arch/i386/Kconfig | 4
-rw-r--r--  arch/i386/Kconfig.cpu | 4
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/therm_throt.c | 4
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c | 6
-rw-r--r--  arch/i386/kernel/cpuid.c | 2
-rw-r--r--  arch/i386/kernel/microcode.c | 59
-rw-r--r--  arch/i386/kernel/msr.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 2
-rw-r--r--  arch/i386/mach-generic/probe.c | 2
-rw-r--r--  arch/i386/mach-voyager/voyager_basic.c | 4
-rw-r--r--  arch/i386/pci/init.c | 2
-rw-r--r--  arch/ia64/Kconfig | 2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 39
-rw-r--r--  arch/ia64/ia32/ia32_signal.c | 65
-rw-r--r--  arch/ia64/kernel/entry.S | 30
-rw-r--r--  arch/ia64/kernel/err_inject.c | 2
-rw-r--r--  arch/ia64/kernel/iosapic.c | 2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 27
-rw-r--r--  arch/ia64/kernel/mca.c | 2
-rw-r--r--  arch/ia64/kernel/palinfo.c | 2
-rw-r--r--  arch/ia64/kernel/process.c | 8
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 11
-rw-r--r--  arch/ia64/kernel/salinfo.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 2
-rw-r--r--  arch/ia64/kernel/sigframe.h | 2
-rw-r--r--  arch/ia64/kernel/signal.c | 71
-rw-r--r--  arch/ia64/kernel/smp.c | 68
-rw-r--r--  arch/ia64/kernel/topology.c | 2
-rw-r--r--  arch/ia64/kernel/traps.c | 6
-rw-r--r--  arch/ia64/kernel/unwind.c | 9
-rw-r--r--  arch/ia64/mm/tlb.c | 6
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 58
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c | 65
-rw-r--r--  arch/m68knommu/Kconfig.debug | 2
-rw-r--r--  arch/m68knommu/kernel/asm-offsets.c | 2
-rw-r--r--  arch/mips/Kconfig | 6
-rw-r--r--  arch/mips/Makefile | 2
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 2
-rw-r--r--  arch/mips/kernel/smtc.c | 2
-rw-r--r--  arch/mips/pci/fixup-sb1250.c | 4
-rw-r--r--  arch/parisc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 24
-rw-r--r--  arch/powerpc/Kconfig.debug | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 18
-rw-r--r--  arch/powerpc/kernel/lparmap.c | 3
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 2
-rw-r--r--  arch/powerpc/mm/Makefile | 1
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 5
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 142
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 548
-rw-r--r--  arch/powerpc/mm/init_64.c | 17
-rw-r--r--  arch/powerpc/mm/mem.c | 25
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c | 10
-rw-r--r--  arch/powerpc/mm/numa.c | 3
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 2
-rw-r--r--  arch/powerpc/mm/slb.c | 11
-rw-r--r--  arch/powerpc/mm/slb_low.S | 52
-rw-r--r--  arch/powerpc/mm/slice.c | 633
-rw-r--r--  arch/powerpc/mm/tlb_32.c | 4
-rw-r--r--  arch/powerpc/mm/tlb_64.c | 12
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 2
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_smp.c | 2
-rw-r--r--  arch/powerpc/platforms/8xx/mpc86xads_setup.c | 2
-rw-r--r--  arch/powerpc/platforms/8xx/mpc885ads_setup.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 15
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 80
-rw-r--r--  arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | 181
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c | 28
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 87
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 14
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 2
-rw-r--r--  arch/ppc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/ppc/platforms/mpc866ads_setup.c | 2
-rw-r--r--  arch/ppc/syslib/ipic.c | 2
-rw-r--r--  arch/s390/appldata/appldata_base.c | 2
-rw-r--r--  arch/s390/crypto/Kconfig | 2
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 2
-rw-r--r--  arch/s390/kernel/smp.c | 2
-rw-r--r--  arch/sh/Kconfig | 29
-rw-r--r--  arch/sh/boards/landisk/setup.c | 1
-rw-r--r--  arch/sh/boards/se/7751/setup.c | 2
-rw-r--r--  arch/sh/drivers/Makefile | 3
-rw-r--r--  arch/sh/drivers/dma/Kconfig | 20
-rw-r--r--  arch/sh/drivers/dma/Makefile | 4
-rw-r--r--  arch/sh/drivers/dma/dmabrg.c | 196
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile | 5
-rw-r--r--  arch/sh/kernel/cpu/sh2a/opcode_helper.c | 55
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 13
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 6
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S | 62
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 3
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh73180.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 2
-rw-r--r--  arch/sh/kernel/process.c | 16
-rw-r--r--  arch/sh/kernel/setup.c | 2
-rw-r--r--  arch/sh/kernel/sh_ksyms.c | 2
-rw-r--r--  arch/sh/kernel/signal.c | 13
-rw-r--r--  arch/sh/kernel/stacktrace.c | 2
-rw-r--r--  arch/sh/kernel/syscalls.S | 1
-rw-r--r--  arch/sh/kernel/time.c | 172
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 182
-rw-r--r--  arch/sh/kernel/traps.c | 20
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c | 2
-rw-r--r--  arch/sh/lib/delay.c | 5
-rw-r--r--  arch/sh/mm/Kconfig | 7
-rw-r--r--  arch/sh/mm/fault.c | 2
-rw-r--r--  arch/sh/mm/init.c | 2
-rw-r--r--  arch/sparc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/sparc64/kernel/traps.c | 1
-rw-r--r--  arch/um/Kconfig | 16
-rw-r--r--  arch/um/Kconfig.scsi | 58
-rw-r--r--  arch/um/include/sysdep-i386/archsetjmp.h | 2
-rw-r--r--  arch/um/include/sysdep-x86_64/archsetjmp.h | 2
-rw-r--r--  arch/um/kernel/skas/process.c | 9
-rw-r--r--  arch/um/os-Linux/process.c | 2
-rw-r--r--  arch/um/os-Linux/skas/mem.c | 2
-rw-r--r--  arch/um/os-Linux/skas/process.c | 2
-rw-r--r--  arch/v850/kernel/asm-offsets.c | 2
-rw-r--r--  arch/v850/kernel/entry.S | 2
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 2
-rw-r--r--  arch/x86_64/kernel/irq.c | 2
-rw-r--r--  arch/x86_64/kernel/mce.c | 2
-rw-r--r--  arch/x86_64/kernel/mce_amd.c | 2
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 2
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c | 2
-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 2
-rw-r--r--  block/as-iosched.c | 2
-rw-r--r--  block/genhd.c | 53
-rw-r--r--  block/ll_rw_blk.c | 11
-rw-r--r--  crypto/Kconfig | 2
-rw-r--r--  drivers/acpi/sleep/main.c | 67
-rw-r--r--  drivers/acpi/sleep/proc.c | 2
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/libata-core.c | 8
-rw-r--r--  drivers/base/devres.c | 32
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/base/topology.c | 3
-rw-r--r--  drivers/block/loop.c | 6
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/block/rd.c | 2
-rw-r--r--  drivers/char/Kconfig | 3
-rw-r--r--  drivers/char/drm/drm_dma.c | 2
-rw-r--r--  drivers/char/drm/drm_vm.c | 2
-rw-r--r--  drivers/char/drm/r300_reg.h | 2
-rw-r--r--  drivers/char/genrtc.c | 4
-rw-r--r--  drivers/char/hw_random/Kconfig | 14
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 156
-rw-r--r--  drivers/char/mmtimer.c | 4
-rw-r--r--  drivers/char/pcmcia/Kconfig | 1
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 46
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 7
-rw-r--r--  drivers/char/tpm/Kconfig | 2
-rw-r--r--  drivers/char/tty_io.c | 22
-rw-r--r--  drivers/cpufreq/cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r--  drivers/crypto/Kconfig | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 2
-rw-r--r--  drivers/i2c/chips/tps65010.c | 2
-rw-r--r--  drivers/ide/pci/siimage.c | 2
-rw-r--r--  drivers/ieee1394/nodemgr.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 6
-rw-r--r--  drivers/isdn/capi/Kconfig | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasync.h | 2
-rw-r--r--  drivers/isdn/hisax/hfc_usb.c | 4
-rw-r--r--  drivers/kvm/kvm_main.c | 3
-rw-r--r--  drivers/leds/leds-h1940.c | 2
-rw-r--r--  drivers/macintosh/Kconfig | 2
-rw-r--r--  drivers/mca/mca-bus.c | 28
-rw-r--r--  drivers/mca/mca-driver.c | 13
-rw-r--r--  drivers/md/Kconfig | 9
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/dm-bio-list.h | 26
-rw-r--r--  drivers/md/dm-crypt.c | 91
-rw-r--r--  drivers/md/dm-delay.c | 383
-rw-r--r--  drivers/md/dm-exception-store.c | 54
-rw-r--r--  drivers/md/dm-hw-handler.h | 1
-rw-r--r--  drivers/md/dm-io.c | 232
-rw-r--r--  drivers/md/dm-io.h | 83
-rw-r--r--  drivers/md/dm-log.c | 77
-rw-r--r--  drivers/md/dm-mpath.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 187
-rw-r--r--  drivers/md/dm-table.c | 10
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/kcopyd.c | 28
-rw-r--r--  drivers/md/md.c | 186
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/media/dvb/dvb-usb/dvb-usb-remote.c | 2
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.c | 2
-rw-r--r--  drivers/media/dvb/frontends/dib7000p.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda10021.c | 2
-rw-r--r--  drivers/media/dvb/frontends/ves1x93.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-i2c.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-video.c | 2
-rw-r--r--  drivers/media/video/pwc/philips.txt | 6
-rw-r--r--  drivers/media/video/usbvideo/vicam.c | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 4
-rw-r--r--  drivers/mtd/chips/Kconfig | 40
-rw-r--r--  drivers/mtd/chips/Makefile | 4
-rw-r--r--  drivers/mtd/chips/amd_flash.c | 1396
-rw-r--r--  drivers/mtd/chips/jedec.c | 935
-rw-r--r--  drivers/mtd/chips/sharp.c | 601
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 18
-rw-r--r--  drivers/mtd/maps/Makefile | 2
-rw-r--r--  drivers/mtd/maps/arctic-mtd.c | 145
-rw-r--r--  drivers/mtd/maps/beech-mtd.c | 122
-rw-r--r--  drivers/mtd/maps/nettel.c | 2
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 2
-rw-r--r--  drivers/mtd/mtdpart.c | 1
-rw-r--r--  drivers/mtd/nand/Kconfig | 21
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/at91_nand.c | 10
-rw-r--r--  drivers/mtd/nand/cafe_ecc.c | 1381
-rw-r--r--  drivers/mtd/nand/cafe_nand.c (renamed from drivers/mtd/nand/cafe.c) | 137
-rw-r--r--  drivers/mtd/nand/nand_base.c | 11
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 150
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 2
-rw-r--r--  drivers/net/3c509.c | 5
-rw-r--r--  drivers/net/3c59x.c | 2
-rw-r--r--  drivers/net/atp.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/eepro.c | 2
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/epic100.c | 10
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/irda/donauboe.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r--  drivers/net/meth.h | 2
-rw-r--r--  drivers/net/natsemi.c | 1
-rw-r--r--  drivers/net/ne2k-pci.c | 3
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/sundance.c | 3
-rw-r--r--  drivers/net/tg3.c | 11
-rw-r--r--  drivers/net/tg3.h | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 2
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/wireless/airport.c | 2
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 2
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 4
-rw-r--r--  drivers/net/wireless/wavelan_cs.p.h | 2
-rw-r--r--  drivers/net/yellowfin.c | 1
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 2
-rw-r--r--  drivers/rtc/rtc-sh.c | 8
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/net/qeth_main.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r--  drivers/sbus/char/bpp.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 2
-rw-r--r--  drivers/scsi/aic94xx/Makefile | 2
-rw-r--r--  drivers/scsi/dc395x.c | 6
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/spi/atmel_spi.c | 5
-rw-r--r--  drivers/usb/atm/usbatm.c | 2
-rw-r--r--  drivers/usb/misc/auerswald.c | 2
-rw-r--r--  drivers/usb/net/usbnet.h | 2
-rw-r--r--  drivers/usb/serial/Kconfig | 2
-rw-r--r--  drivers/usb/serial/aircable.c | 4
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 4
-rw-r--r--  drivers/video/Kconfig | 28
-rw-r--r--  drivers/video/Makefile | 2
-rw-r--r--  drivers/video/arkfb.c | 1200
-rw-r--r--  drivers/video/console/softcursor.c | 2
-rw-r--r--  drivers/video/fbmem.c | 4
-rw-r--r--  drivers/video/i810/i810_main.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_Ti3026.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_accel.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_base.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_misc.c | 2
-rw-r--r--  drivers/video/nvidia/nv_hw.c | 7
-rw-r--r--  drivers/video/nvidia/nvidia.c | 1
-rw-r--r--  drivers/video/s3fb.c | 19
-rw-r--r--  drivers/video/skeletonfb.c | 2
-rw-r--r--  drivers/video/svgalib.c | 17
-rw-r--r--  drivers/video/vt8623fb.c | 927
-rw-r--r--  fs/Kconfig | 4
-rw-r--r--  fs/affs/file.c | 6
-rw-r--r--  fs/afs/Makefile | 3
-rw-r--r--  fs/afs/afs_fs.h | 2
-rw-r--r--  fs/afs/callback.c | 9
-rw-r--r--  fs/afs/dir.c | 19
-rw-r--r--  fs/afs/file.c | 107
-rw-r--r--  fs/afs/fsclient.c | 369
-rw-r--r--  fs/afs/inode.c | 70
-rw-r--r--  fs/afs/internal.h | 95
-rw-r--r--  fs/afs/main.c | 2
-rw-r--r--  fs/afs/misc.c | 1
-rw-r--r--  fs/afs/mntpt.c | 5
-rw-r--r--  fs/afs/rxrpc.c | 80
-rw-r--r--  fs/afs/security.c | 12
-rw-r--r--  fs/afs/server.c | 3
-rw-r--r--  fs/afs/super.c | 5
-rw-r--r--  fs/afs/vnode.c | 121
-rw-r--r--  fs/afs/write.c | 835
-rw-r--r--  fs/aio.c | 7
-rw-r--r--  fs/binfmt_misc.c | 13
-rw-r--r--  fs/buffer.c | 58
-rw-r--r--  fs/configfs/file.c | 33
-rw-r--r--  fs/direct-io.c | 10
-rw-r--r--  fs/ext3/inode.c | 12
-rw-r--r--  fs/jbd/checkpoint.c | 2
-rw-r--r--  fs/jbd/recovery.c | 2
-rw-r--r--  fs/jbd/revoke.c | 2
-rw-r--r--  fs/jbd/transaction.c | 2
-rw-r--r--  fs/jbd2/checkpoint.c | 2
-rw-r--r--  fs/jbd2/recovery.c | 2
-rw-r--r--  fs/jbd2/revoke.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 2
-rw-r--r--  fs/jffs2/histo_mips.h | 2
-rw-r--r--  fs/jffs2/readinode.c | 78
-rw-r--r--  fs/jffs2/wbuf.c | 5
-rw-r--r--  fs/jfs/jfs_dmap.c | 2
-rw-r--r--  fs/jfs/jfs_imap.c | 4
-rw-r--r--  fs/jfs/jfs_logmgr.c | 2
-rw-r--r--  fs/libfs.c | 5
-rw-r--r--  fs/mpage.c | 15
-rw-r--r--  fs/namei.c | 8
-rw-r--r--  fs/nfs/dir.c | 37
-rw-r--r--  fs/nfs/getroot.c | 1
-rw-r--r--  fs/nfs/idmap.c | 4
-rw-r--r--  fs/nfs/inode.c | 3
-rw-r--r--  fs/nfs/nfs2xdr.c | 1
-rw-r--r--  fs/nfs/nfs4xdr.c | 11
-rw-r--r--  fs/nfs/pagelist.c | 7
-rw-r--r--  fs/nfsd/Makefile | 1
-rw-r--r--  fs/nfsd/export.c | 14
-rw-r--r--  fs/nfsd/nfs3proc.c | 2
-rw-r--r--  fs/nfsd/nfs3xdr.c | 71
-rw-r--r--  fs/nfsd/nfs4acl.c | 17
-rw-r--r--  fs/nfsd/nfs4state.c | 2
-rw-r--r--  fs/nfsd/nfsfh.c | 56
-rw-r--r--  fs/nfsd/nfsproc.c | 2
-rw-r--r--  fs/nfsd/nfsxdr.c | 53
-rw-r--r--  fs/reiserfs/file.c | 39
-rw-r--r--  fs/reiserfs/inode.c | 13
-rw-r--r--  fs/reiserfs/journal.c | 4
-rw-r--r--  fs/select.c | 4
-rw-r--r--  fs/sysfs/file.c | 33
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  fs/xfs/xfs_mount.c | 3
-rw-r--r--  include/asm-alpha/smp.h | 1
-rw-r--r--  include/asm-alpha/thread_info.h | 8
-rw-r--r--  include/asm-arm/arch-at91/at91rm9200.h | 181
-rw-r--r--  include/asm-arm/arch-at91/at91sam9260.h | 9
-rw-r--r--  include/asm-arm/arch-at91/at91sam9261.h | 191
-rw-r--r--  include/asm-arm/arch-at91/at91sam9263.h | 8
-rw-r--r--  include/asm-arm/arch-at91/board.h | 2
-rw-r--r--  include/asm-arm/arch-at91/cpu.h | 6
-rw-r--r--  include/asm-arm/arch-imx/imx-regs.h | 2
-rw-r--r--  include/asm-arm/arch-integrator/platform.h | 2
-rw-r--r--  include/asm-arm/arch-iop32x/glantank.h | 2
-rw-r--r--  include/asm-arm/arch-iop32x/n2100.h | 2
-rw-r--r--  include/asm-arm/arch-omap/aic23.h | 4
-rw-r--r--  include/asm-arm/arch-omap/board-apollon.h | 9
-rw-r--r--  include/asm-arm/arch-omap/board-h4.h | 3
-rw-r--r--  include/asm-arm/arch-omap/board.h | 43
-rw-r--r--  include/asm-arm/arch-omap/dma.h | 2
-rw-r--r--  include/asm-arm/arch-omap/dsp.h | 250
-rw-r--r--  include/asm-arm/arch-omap/dsp_common.h | 32
-rw-r--r--  include/asm-arm/arch-omap/gpio-switch.h | 54
-rw-r--r--  include/asm-arm/arch-omap/gpio.h | 2
-rw-r--r--  include/asm-arm/arch-omap/gpmc.h | 2
-rw-r--r--  include/asm-arm/arch-omap/hardware.h | 9
-rw-r--r--  include/asm-arm/arch-omap/hwa742.h | 12
-rw-r--r--  include/asm-arm/arch-omap/io.h | 11
-rw-r--r--  include/asm-arm/arch-omap/irqs.h | 20
-rw-r--r--  include/asm-arm/arch-omap/lcd_lph8923.h | 14
-rw-r--r--  include/asm-arm/arch-omap/lcd_mipid.h | 24
-rw-r--r--  include/asm-arm/arch-omap/led.h | 24
-rw-r--r--  include/asm-arm/arch-omap/mailbox.h | 73
-rw-r--r--  include/asm-arm/arch-omap/mcspi.h | 1
-rw-r--r--  include/asm-arm/arch-omap/memory.h | 13
-rw-r--r--  include/asm-arm/arch-omap/menelaus.h | 17
-rw-r--r--  include/asm-arm/arch-omap/omap16xx.h | 12
-rw-r--r--  include/asm-arm/arch-omap/omap24xx.h | 9
-rw-r--r--  include/asm-arm/arch-omap/omapfb.h | 188
-rw-r--r--  include/asm-arm/arch-omap/sram.h | 3
-rw-r--r--  include/asm-arm/arch-omap/usb.h | 40
-rw-r--r--  include/asm-arm/arch-s3c2410/regs-power.h | 2
-rw-r--r--  include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h | 2
-rw-r--r--  include/asm-arm/arch-s3c2410/regs-watchdog.h | 2
-rw-r--r--  include/asm-arm/arch-s3c2410/udc.h | 2
-rw-r--r--  include/asm-arm/cacheflush.h | 40
-rw-r--r--  include/asm-arm/dma-mapping.h | 2
-rw-r--r--  include/asm-arm/glue.h | 9
-rw-r--r--  include/asm-arm/mmu_context.h | 8
-rw-r--r--  include/asm-arm/proc-fns.h | 8
-rw-r--r--  include/asm-arm/system.h | 7
-rw-r--r--  include/asm-arm26/io.h | 2
-rw-r--r--  include/asm-arm26/memory.h | 4
-rw-r--r--  include/asm-arm26/setup.h | 2
-rw-r--r--  include/asm-avr32/arch-at32ap/cpu.h | 33
-rw-r--r--  include/asm-avr32/setup.h | 2
-rw-r--r--  include/asm-avr32/unistd.h | 4
-rw-r--r--  include/asm-blackfin/processor.h | 6
-rw-r--r--  include/asm-blackfin/system.h | 4
-rw-r--r--  include/asm-frv/tlb.h | 4
-rw-r--r--  include/asm-generic/bitops/atomic.h | 2
-rw-r--r--  include/asm-i386/bitops.h | 2
-rw-r--r--  include/asm-i386/boot.h | 2
-rw-r--r--  include/asm-i386/mmzone.h | 6
-rw-r--r--  include/asm-i386/msr.h | 56
-rw-r--r--  include/asm-i386/paravirt.h | 5
-rw-r--r--  include/asm-i386/smp.h | 37
-rw-r--r--  include/asm-i386/sync_bitops.h | 2
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-ia64/hw_irq.h | 1
-rw-r--r--  include/asm-ia64/iosapic.h | 2
-rw-r--r--  include/asm-ia64/smp.h | 6
-rw-r--r--  include/asm-ia64/sn/sn_sal.h | 1
-rw-r--r--  include/asm-ia64/thread_info.h | 6
-rw-r--r--  include/asm-ia64/tlbflush.h | 11
-rw-r--r--  include/asm-ia64/unistd.h | 5
-rw-r--r--  include/asm-m32r/smp.h | 6
-rw-r--r--  include/asm-m32r/system.h | 2
-rw-r--r--  include/asm-m68k/atarihw.h | 2
-rw-r--r--  include/asm-m68k/atariints.h | 2
-rw-r--r--  include/asm-m68k/thread_info.h | 6
-rw-r--r--  include/asm-mips/bootinfo.h | 2
-rw-r--r--  include/asm-mips/system.h | 2
-rw-r--r--  include/asm-parisc/compat.h | 2
-rw-r--r--  include/asm-powerpc/mmu-hash64.h | 11
-rw-r--r--  include/asm-powerpc/paca.h | 2
-rw-r--r--  include/asm-powerpc/page_64.h | 86
-rw-r--r--  include/asm-powerpc/pgalloc-64.h | 31
-rw-r--r--  include/asm-powerpc/pgtable-4k.h | 6
-rw-r--r--  include/asm-powerpc/pgtable-64k.h | 7
-rw-r--r--  include/asm-powerpc/ppc-pci.h | 2
-rw-r--r--  include/asm-powerpc/smp.h | 1
-rw-r--r--  include/asm-powerpc/spu_csa.h | 10
-rw-r--r--  include/asm-ppc/hydra.h | 2
-rw-r--r--  include/asm-s390/smp.h | 1
-rw-r--r--  include/asm-sh/bug.h | 4
-rw-r--r--  include/asm-sh/cpu-features.h | 1
-rw-r--r--  include/asm-sh/cpu-sh3/dma.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma-sh7780.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma.h | 2
-rw-r--r--  include/asm-sh/dmabrg.h | 23
-rw-r--r--  include/asm-sh/edosk7705.h | 2
-rw-r--r--  include/asm-sh/kdebug.h | 25
-rw-r--r--  include/asm-sh/pgalloc.h | 44
-rw-r--r--  include/asm-sh/snapgear.h | 2
-rw-r--r--  include/asm-sh/system.h | 9
-rw-r--r--  include/asm-sh/timer.h | 25
-rw-r--r--  include/asm-sh/unistd.h | 5
-rw-r--r--  include/asm-sparc/smp.h | 1
-rw-r--r--  include/asm-sparc64/smp.h | 1
-rw-r--r--  include/asm-um/required-features.h | 9
-rw-r--r--  include/asm-um/smp.h | 4
-rw-r--r--  include/asm-x86_64/smp.h | 14
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/asm-xtensa/platform-iss/simcall.h | 2
-rw-r--r--  include/linux/aio.h | 3
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/clocksource.h | 3
-rw-r--r--  include/linux/compat.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/compiler-gcc3.h | 6
-rw-r--r--  include/linux/compiler-gcc4.h | 3
-rw-r--r--  include/linux/compiler.h | 21
-rw-r--r--  include/linux/ext3_fs_i.h | 2
-rw-r--r--  include/linux/ext4_fs_i.h | 2
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/futex.h | 42
-rw-r--r--  include/linux/generic_acl.h | 2
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/gfp.h | 6
-rw-r--r--  include/linux/highmem.h | 27
-rw-r--r--  include/linux/i2c-algo-bit.h | 2
-rw-r--r--  include/linux/i2c-algo-pcf.h | 2
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/irda.h | 2
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/ktime.h | 6
-rw-r--r--  include/linux/mca.h | 2
-rw-r--r--  include/linux/meye.h | 2
-rw-r--r--  include/linux/mmzone.h | 3
-rw-r--r--  include/linux/module.h | 3
-rw-r--r--  include/linux/mount.h | 2
-rw-r--r--  include/linux/mtd/mtd.h | 7
-rw-r--r--  include/linux/mtd/nand.h | 16
-rw-r--r--  include/linux/mutex.h | 5
-rw-r--r--  include/linux/nfs4_acl.h | 1
-rw-r--r--  include/linux/notifier.h | 66
-rw-r--r--  include/linux/pm.h | 31
-rw-r--r--  include/linux/radix-tree.h | 4
-rw-r--r--  include/linux/raid/md_k.h | 1
-rw-r--r--  include/linux/relay.h | 3
-rw-r--r--  include/linux/rslib.h | 4
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/signal.h | 125
-rw-r--r--  include/linux/smp.h | 1
-rw-r--r--  include/linux/sonypi.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 19
-rw-r--r--  include/linux/sunrpc/svcsock.h | 3
-rw-r--r--  include/linux/suspend.h | 35
-rw-r--r--  include/linux/svga.h | 2
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/linux/vmstat.h | 3
-rw-r--r--  include/linux/workqueue.h | 95
-rw-r--r--  include/net/irda/af_irda.h | 2
-rw-r--r--  include/net/irda/irda.h | 2
-rw-r--r--  include/net/irda/iriap.h | 2
-rw-r--r--  include/net/irda/iriap_event.h | 2
-rw-r--r--  include/net/irda/irias_object.h | 2
-rw-r--r--  include/net/irda/irlan_client.h | 2
-rw-r--r--  include/net/irda/irlan_common.h | 2
-rw-r--r--  include/net/irda/irlan_eth.h | 2
-rw-r--r--  include/net/irda/irlan_event.h | 2
-rw-r--r--  include/net/irda/irlan_filter.h | 2
-rw-r--r--  include/net/irda/irlan_provider.h | 2
-rw-r--r--  include/net/irda/irlap.h | 2
-rw-r--r--  include/net/irda/irlmp.h | 2
-rw-r--r--  include/net/irda/irlmp_event.h | 2
-rw-r--r--  include/net/irda/irlmp_frame.h | 2
-rw-r--r--  include/net/irda/irmod.h | 2
-rw-r--r--  include/net/irda/irqueue.h | 2
-rw-r--r--  include/net/irda/irttp.h | 2
-rw-r--r--  include/net/irda/parameters.h | 2
-rw-r--r--  include/net/irda/timer.h | 2
-rw-r--r--  include/net/irda/wrapper.h | 2
-rw-r--r--  init/Kconfig | 30
-rw-r--r--  init/do_mounts.c | 7
-rw-r--r--  init/main.c | 5
-rw-r--r--  kernel/Kconfig.preempt | 4
-rw-r--r--  kernel/configs.c | 15
-rw-r--r--  kernel/cpu.c | 66
-rw-r--r--  kernel/cpuset.c | 7
-rw-r--r--  kernel/exit.c | 18
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/futex.c | 988
-rw-r--r--  kernel/futex_compat.c | 22
-rw-r--r--  kernel/hrtimer.c | 2
-rw-r--r--  kernel/irq/handle.c | 1
-rw-r--r--  kernel/kmod.c | 6
-rw-r--r--  kernel/kthread.c | 113
-rw-r--r--  kernel/module.c | 10
-rw-r--r--  kernel/mutex.c | 8
-rw-r--r--  kernel/power/disk.c | 195
-rw-r--r--  kernel/power/main.c | 42
-rw-r--r--  kernel/power/power.h | 7
-rw-r--r--  kernel/power/snapshot.c | 14
-rw-r--r--  kernel/power/user.c | 13
-rw-r--r--  kernel/profile.c | 4
-rw-r--r--  kernel/rcupdate.c | 2
-rw-r--r--  kernel/relay.c | 37
-rw-r--r--  kernel/rtmutex.c | 41
-rw-r--r--  kernel/rtmutex_common.h | 34
-rw-r--r--  kernel/sched.c | 38
-rw-r--r--  kernel/signal.c | 140
-rw-r--r--  kernel/softirq.c | 4
-rw-r--r--  kernel/softlockup.c | 4
-rw-r--r--  kernel/sys.c | 98
-rw-r--r--  kernel/sysctl.c | 12
-rw-r--r--  kernel/time/clocksource.c | 51
-rw-r--r--  kernel/time/timer_list.c | 25
-rw-r--r--  kernel/timer.c | 14
-rw-r--r--  kernel/wait.c | 2
-rw-r--r--  kernel/workqueue.c | 783
-rw-r--r--  lib/radix-tree.c | 2
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 84
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/filemap.c | 8
-rw-r--r--  mm/filemap_xip.c | 7
-rw-r--r--  mm/hugetlb.c | 33
-rw-r--r--  mm/mmap.c | 2
-rw-r--r--  mm/page_alloc.c | 50
-rw-r--r--  mm/slab.c | 41
-rw-r--r--  mm/slub.c | 1050
-rw-r--r--  mm/swap.c | 2
-rw-r--r--  mm/truncate.c | 3
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  mm/vmstat.c | 95
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/flow.c | 2
-rw-r--r--  net/decnet/af_decnet.c | 2
-rw-r--r--  net/ipv4/Kconfig | 4
-rw-r--r--  net/ipv4/cipso_ipv4.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/netfilter/Kconfig | 2
-rw-r--r--  net/iucv/iucv.c | 6
-rw-r--r--  net/llc/af_llc.c | 2
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/sctp/chunk.c | 2
-rw-r--r--  net/sctp/socket.c | 4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 15
-rw-r--r--  net/sunrpc/rpc_pipe.c | 9
-rw-r--r--  net/sunrpc/sched.c | 26
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 6
-rw-r--r--  net/sunrpc/svc.c | 2
-rw-r--r--  net/sunrpc/svcauth.c | 2
-rw-r--r--  net/sunrpc/svcauth_unix.c | 10
-rw-r--r--  net/sunrpc/svcsock.c | 34
-rw-r--r--  scripts/basic/docproc.c | 2
-rwxr-xr-x  scripts/kernel-doc | 7
-rw-r--r--  scripts/mod/modpost.c | 1
-rw-r--r--  security/selinux/Kconfig | 2
-rw-r--r--  sound/core/Kconfig | 2
-rw-r--r--  sound/oss/es1371.c | 2
-rw-r--r--  sound/oss/pas2_pcm.c | 2
-rw-r--r--  sound/oss/trident.c | 4
-rw-r--r--  sound/pci/ac97/ac97_codec.c | 2
-rw-r--r--  sound/pci/ice1712/delta.h | 4
-rw-r--r--  sound/pci/mixart/mixart.c | 2
-rw-r--r--  sound/soc/pxa/pxa2xx-ac97.h | 2
-rw-r--r--  sound/soc/pxa/pxa2xx-i2s.h | 2
-rw-r--r--  sound/sparc/dbri.c | 2
781 files changed, 15570 insertions, 10764 deletions
diff --git a/CREDITS b/CREDITS
index 80e241304b8e..6829e91a88d4 100644
--- a/CREDITS
+++ b/CREDITS
@@ -380,7 +380,7 @@ S: FutureTV Labs Ltd
 S: Brunswick House, 61-69 Newmarket Rd, Cambridge CB5 8EG
 S: United Kingdom
 
 N: Thomas Bogendörfer
 E: tsbogend@alpha.franken.de
 D: PCnet32 driver, SONIC driver, JAZZ_ESP driver
 D: newport abscon driver, g364 framebuffer driver
@@ -400,7 +400,7 @@ W: http://math-www.uni-paderborn.de/~axel/
 D: Configuration help text support
 D: Linux CD and Support Giveaway List
 
 N: Erik Inge Bolsø
 E: knan@mo.himolde.no
 D: Misc kernel hacks
 
@@ -428,7 +428,7 @@ D: Various fixes (mostly networking)
 S: Montreal, Quebec
 S: Canada
 
 N: Zoltán Böszörményi
 E: zboszor@mail.externet.hu
 D: MTRR emulation with Cyrix style ARR registers, Athlon MTRR support
 
@@ -1029,11 +1029,11 @@ D: Future Domain TMC-16x0 SCSI driver (author)
 D: APM driver (early port)
 D: DRM drivers (author of several)
 
 N: János Farkas
 E: chexum@shadow.banki.hu
 D: romfs, various (mostly networking) fixes
 P: 1024/F81FB2E1 41 B7 E4 E6 3E D4 A6 71 6D 9C F3 9F F2 BF DF 6E
 S: Madarász Viktor utca 25
 S: 1131 Budapest
 S: Hungary
 
@@ -1044,10 +1044,10 @@ D: UDF filesystem
 S: (ask for current address)
 S: USA
 
 N: Jürgen Fischer
-E: fischer@norbit.de (=?iso-8859-1?q?J=FCrgen?= Fischer)
+E: fischer@norbit.de
 D: Author of Adaptec AHA-152x SCSI driver
 S: Schulstraße 18
 S: 26506 Norden
 S: Germany
 
@@ -1113,7 +1113,7 @@ E: fuganti@netbank.com.br
 D: random kernel hacker, ZF MachZ Watchdog driver
 S: Conectiva S.A.
 S: R. Tocantins, 89 - Cristo Rei
 S: 80050-430 - Curitiba - Paraná
 S: Brazil
 
 N: Kumar Gala
@@ -1258,12 +1258,12 @@ S: 44 St. Joseph Street, Suite 506
 S: Toronto, Ontario, M4Y 2W4
 S: Canada
 
 N: Richard Günther
 E: rguenth@tat.physik.uni-tuebingen.de
 W: http://www.tat.physik.uni-tuebingen.de/~rguenth
 P: 2048/2E829319 2F 83 FC 93 E9 E4 19 E2 93 7A 32 42 45 37 23 57
 D: binfmt_misc
 S: 72074 Tübingen
 S: Germany
 
@@ -1287,7 +1287,7 @@ N: Bruno Haible
 E: haible@ma2s2.mathematik.uni-karlsruhe.de
 D: SysV FS, shm swapping, memory management fixes
 S: 17 rue Danton
 S: F - 94270 Le Kremlin-Bicêtre
 S: France
 
 N: Greg Hankins
@@ -1701,7 +1701,7 @@ S: Czech Republic
 N: Jakob Kemi
 E: jakob.kemi@telia.com
 D: V4L W9966 Webcam driver
 S: Forsbyvägen 33
 S: 74143 Knivsta
 S: Sweden
 
@@ -2065,7 +2065,7 @@ D: misc. kernel hacking and debugging
 S: Cambridge, MA 02139
 S: USA
 
 N: Martin von Löwis
 E: loewis@informatik.hu-berlin.de
 D: script binary format
 D: NTFS driver
@@ -2142,7 +2142,7 @@ S: PO BOX 220, HFX. CENTRAL
 S: Halifax, Nova Scotia
 S: Canada B3J 3C8
 
 N: Kai Mäkisara
 E: Kai.Makisara@kolumbus.fi
 D: SCSI Tape Driver
 
@@ -2785,10 +2785,10 @@ N: Juan Quintela
 E: quintela@fi.udc.es
 D: Memory Management hacking
 S: LFCIA
 S: Departamento de Computación
 S: Universidade da Coruña
 S: E-15071
 S: A Coruña
 S: Spain
 
 N: Augusto Cesar Radtke
@@ -2939,7 +2939,7 @@ E: aris@cathedrallabs.org
 D: Support for EtherExpress 10 ISA (i82595) in eepro driver
 D: User level driver support for input
 S: R. Jose Serrato, 130 - Santa Candida
 S: 82640-320 - Curitiba - Paraná
 S: Brazil
 
 N: Alessandro Rubini
@@ -3345,15 +3345,15 @@ P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
 D: rcutorture maintainer
 D: lock annotations, finding and fixing lock bugs
 
 N: Winfried Trümper
 E: winni@xpilot.org
 W: http://www.shop.de/~winni/
 D: German HOWTO, Crash-Kurs Linux (German, 100 comprehensive pages)
 D: CD-Writing HOWTO, various mini-HOWTOs
 D: One-week tutorials on Linux twice a year (free of charge)
 D: Linux-Workshop Köln (aka LUG Cologne, Germany), Installfests
 S: Tacitusstr. 6
 S: D-50968 Köln
 
 N: Tsu-Sheng Tsao
 E: tsusheng@scf.usc.edu
3359E: tsusheng@scf.usc.edu 3359E: tsusheng@scf.usc.edu
diff --git a/Documentation/ABI/removed/devfs b/Documentation/ABI/removed/devfs
index 8195c4e0d0a1..8ffd28bf6598 100644
--- a/Documentation/ABI/removed/devfs
+++ b/Documentation/ABI/removed/devfs
@@ -6,7 +6,7 @@ Description:
 	races, contains a naming policy within the kernel that is
 	against the LSB, and can be replaced by using udev.
 	The files fs/devfs/*, include/linux/devfs_fs*.h were removed,
-	along with the the assorted devfs function calls throughout the
+	along with the assorted devfs function calls throughout the
 	kernel tree.
 
 Users:
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a2b2b4d187c5..38f88b6ae405 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -84,6 +84,10 @@ X!Iinclude/linux/kobject.h
 !Ekernel/rcupdate.c
   </sect1>
 
+  <sect1><title>Device Resource Management</title>
+!Edrivers/base/devres.c
+  </sect1>
+
 </chapter>
 
 <chapter id="adt">
diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt
index d389388c733e..0d8240774fca 100644
--- a/Documentation/MSI-HOWTO.txt
+++ b/Documentation/MSI-HOWTO.txt
@@ -480,8 +480,8 @@ The PCI stack provides 3 possible levels of MSI disabling:
 
 6.1. Disabling MSI on a single device
 
-Under some circumstances, it might be required to disable MSI on a
-single device, It may be achived by either not calling pci_enable_msi()
+Under some circumstances it might be required to disable MSI on a
+single device. This may be achieved by either not calling pci_enable_msi()
 or all, or setting the pci_dev->no_msi flag before (most of the time
 in a quirk).
 
@@ -492,7 +492,7 @@ being able to route MSI between busses. In this case, MSI have to be
 disabled on all devices behind this bridge. It is achieves by setting
 the PCI_BUS_FLAGS_NO_MSI flag in the pci_bus->bus_flags of the bridge
 subordinate bus. There is no need to set the same flag on bridges that
-are below the broken brigde. When pci_enable_msi() is called to enable
+are below the broken bridge. When pci_enable_msi() is called to enable
 MSI on a device, pci_msi_supported() takes care of checking the NO_MSI
 flag in all parent busses of the device.
 
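
For illustration of the per-device mechanism described in the hunk above, a
quirk that sets pci_dev->no_msi might look like the following minimal sketch.
The vendor and device IDs here are placeholders, not a real broken device.

    #include <linux/pci.h>

    /* Hypothetical IDs -- substitute the actual broken device. */
    #define EXAMPLE_VENDOR_ID 0xabcd
    #define EXAMPLE_DEVICE_ID 0x1234

    static void __devinit quirk_disable_msi(struct pci_dev *dev)
    {
    	/* pci_enable_msi() checks this flag and will refuse to enable MSI */
    	dev->no_msi = 1;
    }
    DECLARE_PCI_FIXUP_FINAL(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
    			quirk_disable_msi);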
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index bd23dc0bc0c7..6491b2c45dd4 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -80,3 +80,7 @@ kernel patches.
 23: Tested after it has been merged into the -mm patchset to make sure
     that it still works with all of the other queued patches and various
     changes in the VM, VFS, and other subsystems.
+
+24: Avoid whitespace damage such as indenting with spaces or whitespace
+    at the end of lines. You can test this by feeding the patch to
+    "git apply --check --whitespace=error-all"
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index b0d0043f7c46..a417b25fb1aa 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -363,7 +363,8 @@ area or subsystem of the kernel is being patched.
363The "summary phrase" in the email's Subject should concisely 363The "summary phrase" in the email's Subject should concisely
364describe the patch which that email contains. The "summary 364describe the patch which that email contains. The "summary
365phrase" should not be a filename. Do not use the same "summary 365phrase" should not be a filename. Do not use the same "summary
366phrase" for every patch in a whole patch series. 366phrase" for every patch in a whole patch series (where a "patch
367series" is an ordered sequence of multiple, related patches).
367 368
368Bear in mind that the "summary phrase" of your email becomes 369Bear in mind that the "summary phrase" of your email becomes
369a globally-unique identifier for that patch. It propagates 370a globally-unique identifier for that patch. It propagates
diff --git a/Documentation/arm/Interrupts b/Documentation/arm/Interrupts
index 72c93de8cd4e..0d3dbf1099bc 100644
--- a/Documentation/arm/Interrupts
+++ b/Documentation/arm/Interrupts
@@ -149,7 +149,7 @@ So, what's changed?
 
 3. set_GPIO_IRQ_edge() is obsolete, and should be replaced by set_irq_type.
 
-4. Direct access to SA1111 INTPOL is depreciated. Use set_irq_type instead.
+4. Direct access to SA1111 INTPOL is deprecated. Use set_irq_type instead.
 
 5. A handler is expected to perform any necessary acknowledgement of the
    parent IRQ via the correct chip specific function. For instance, if
diff --git a/Documentation/arm/Samsung-S3C24XX/H1940.txt b/Documentation/arm/Samsung-S3C24XX/H1940.txt
index d6b1de92b111..f4a7b22c8664 100644
--- a/Documentation/arm/Samsung-S3C24XX/H1940.txt
+++ b/Documentation/arm/Samsung-S3C24XX/H1940.txt
@@ -23,7 +23,7 @@ Support
 
   http://handhelds.org/moin/moin.cgi/HpIpaqH1940
 
 Herbert Pötzl pages:
 
   http://vserver.13thfloor.at/H1940/
 
@@ -32,7 +32,7 @@ Maintainers
 -----------
 
  This project is being maintained and developed by a variety
  of people, including Ben Dooks, Arnaud Patard, and Herbert Pötzl.
 
  Thanks to the many others who have also provided support.
 
diff --git a/Documentation/auxdisplay/cfag12864b b/Documentation/auxdisplay/cfag12864b
index 3572b98f45b8..b714183d4125 100644
--- a/Documentation/auxdisplay/cfag12864b
+++ b/Documentation/auxdisplay/cfag12864b
@@ -78,9 +78,9 @@ Select (17)------------------------------(16) Data / Instruction
 Ground (18)---[GND] [+5v]---(19) LED +
 Ground (19)---[GND]
 Ground (20)---[GND] E A Values:
-Ground (21)---[GND] [GND]---[P1]---(18) Vee · R = Resistor = 22 ohm
-Ground (22)---[GND] | · P1 = Preset = 10 Kohm
-Ground (23)---[GND] ---- S ------( 3) V0 · P2 = Preset = 1 Kohm
+Ground (21)---[GND] [GND]---[P1]---(18) Vee - R = Resistor = 22 ohm
+Ground (22)---[GND] | - P1 = Preset = 10 Kohm
+Ground (23)---[GND] ---- S ------( 3) V0 - P2 = Preset = 1 Kohm
 Ground (24)---[GND] | |
 Ground (25)---[GND] [GND]---[P2]---[R]---(20) LED -
 
diff --git a/Documentation/binfmt_misc.txt b/Documentation/binfmt_misc.txt
index d097f09ee15a..f609ebf9c78f 100644
--- a/Documentation/binfmt_misc.txt
+++ b/Documentation/binfmt_misc.txt
@@ -113,4 +113,4 @@ cause unexpected behaviour and can be a security hazard.
 There is a web page about binfmt_misc at
 http://www.tat.physik.uni-tuebingen.de/~rguenth/linux/binfmt_misc.html
 
 Richard Günther <rguenth@tat.physik.uni-tuebingen.de>
diff --git a/Documentation/block/ioprio.txt b/Documentation/block/ioprio.txt
index 96ccf681075e..1b930ef5a079 100644
--- a/Documentation/block/ioprio.txt
+++ b/Documentation/block/ioprio.txt
@@ -6,10 +6,10 @@ Intro
 -----
 
 With the introduction of cfq v3 (aka cfq-ts or time sliced cfq), basic io
-priorities is supported for reads on files. This enables users to io nice
-processes or process groups, similar to what has been possible to cpu
-scheduling for ages. This document mainly details the current possibilites
-with cfq, other io schedulers do not support io priorities so far.
+priorities are supported for reads on files. This enables users to io nice
+processes or process groups, similar to what has been possible with cpu
+scheduling for ages. This document mainly details the current possibilities
+with cfq; other io schedulers do not support io priorities thus far.
 
 Scheduling classes
 ------------------
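
To make the feature documented above concrete: a process can change its own
io priority through the ioprio_set syscall. This sketch hard-codes the i386
syscall number and the ioprio constants that ioprio.txt's own ionice tool
defines; both vary by architecture, so treat the values as illustrative.

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define __NR_ioprio_set 289   /* i386; differs per architecture */
    #define IOPRIO_CLASS_BE 2
    #define IOPRIO_CLASS_SHIFT 13
    #define IOPRIO_WHO_PROCESS 1

    int main(void)
    {
    	/* best-effort class, priority level 4, for the current process */
    	int ioprio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4;

    	if (syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) == -1)
    		perror("ioprio_set");
    	return 0;
    }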
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index 53d62c1e1c94..fc647492e940 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -17,7 +17,7 @@ Contents
 
 1. Introduction
 
-cpufreq-stats is a driver that provices CPU frequency statistics for each CPU.
+cpufreq-stats is a driver that provides CPU frequency statistics for each CPU.
 These statistics are provided in /sysfs as a bunch of read_only interfaces. This
 interface (when configured) will appear in a separate directory under cpufreq
 in /sysfs (<sysfs root>/devices/system/cpu/cpuX/cpufreq/stats/) for each CPU.
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index cc60d29b954c..b6d24c22274b 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -217,14 +217,17 @@ Q: What happens when a CPU is being logically offlined?
 A: The following happen, listed in no particular order :-)
 
 - A notification is sent to in-kernel registered modules by sending an event
-  CPU_DOWN_PREPARE
+  CPU_DOWN_PREPARE or CPU_DOWN_PREPARE_FROZEN, depending on whether or not the
+  CPU is being offlined while tasks are frozen due to a suspend operation in
+  progress
 - All process is migrated away from this outgoing CPU to a new CPU
 - All interrupts targeted to this CPU is migrated to a new CPU
 - timers/bottom half/task lets are also migrated to a new CPU
 - Once all services are migrated, kernel calls an arch specific routine
   __cpu_disable() to perform arch specific cleanup.
 - Once this is successful, an event for successful cleanup is sent by an event
-  CPU_DEAD.
+  CPU_DEAD (or CPU_DEAD_FROZEN if tasks are frozen due to a suspend while the
+  CPU is being offlined).
 
     "It is expected that each service cleans up when the CPU_DOWN_PREPARE
     notifier is called, when CPU_DEAD is called its expected there is nothing
@@ -242,9 +245,11 @@ A: This is what you would need in your kernel code to receive notifications.
 
     switch (action) {
     case CPU_ONLINE:
+    case CPU_ONLINE_FROZEN:
         foobar_online_action(cpu);
         break;
     case CPU_DEAD:
+    case CPU_DEAD_FROZEN:
         foobar_dead_action(cpu);
         break;
     }
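
For context, the switch statement above is cpu-hotplug.txt's sample notifier.
A fuller sketch of how such a callback is packaged and registered, following
the kernel's CPU notifier API of this era ("foobar" and its two handlers are
the document's placeholders, stubbed here so the sketch is self-contained):

    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    static void foobar_online_action(unsigned int cpu) { /* bring up per-cpu state */ }
    static void foobar_dead_action(unsigned int cpu)   { /* tear down per-cpu state */ }

    static int foobar_cpu_callback(struct notifier_block *nfb,
                                   unsigned long action, void *hcpu)
    {
    	unsigned int cpu = (unsigned long)hcpu;

    	switch (action) {
    	case CPU_ONLINE:
    	case CPU_ONLINE_FROZEN:
    		foobar_online_action(cpu);
    		break;
    	case CPU_DEAD:
    	case CPU_DEAD_FROZEN:
    		foobar_dead_action(cpu);
    		break;
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block foobar_cpu_notifier = {
    	.notifier_call = foobar_cpu_callback,
    };

    static int __init foobar_init(void)
    {
    	register_cpu_notifier(&foobar_cpu_notifier);
    	return 0;
    }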
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index 9b84b805ab75..a2ac6d294793 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -177,7 +177,7 @@ Portions of this API were derived from the following projects:
 and;
 
   Nettle (http://www.lysator.liu.se/~nisse/nettle/)
     Niels Möller
 
 Original developers of the crypto algorithms:
 
@@ -200,8 +200,8 @@ SHA1 algorithm contributors:
 
 DES algorithm contributors:
   Raimar Falke
   Gisle Sælensminde
   Niels Möller
 
 Blowfish algorithm contributors:
   Herbert Valerio Riedel
diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
new file mode 100644
index 000000000000..15adc55359e5
--- /dev/null
+++ b/Documentation/device-mapper/delay.txt
@@ -0,0 +1,26 @@
+dm-delay
+========
+
+Device-Mapper's "delay" target delays reads and/or writes
+and maps them to different devices.
+
+Parameters:
+    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
+
+With separate write parameters, the first set is only used for reads.
+Delays are specified in milliseconds.
+
+Example scripts
+===============
+[[
+#!/bin/sh
+# Create device delaying rw operation for 500ms
+echo "0 `blockdev --getsize $1` delay $1 0 500" | dmsetup create delayed
+]]
+
+[[
+#!/bin/sh
+# Create device delaying only write operation for 500ms and
+# splitting reads and writes to different devices $1 $2
+echo "0 `blockdev --getsize $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+]]
diff --git a/Documentation/driver-model/platform.txt b/Documentation/driver-model/platform.txt
index f7c9262b2dc8..19c4a6e13676 100644
--- a/Documentation/driver-model/platform.txt
+++ b/Documentation/driver-model/platform.txt
@@ -16,7 +16,7 @@ host bridges to peripheral buses, and most controllers integrated
 into system-on-chip platforms. What they usually have in common
 is direct addressing from a CPU bus. Rarely, a platform_device will
 be connected through a segment of some other kind of bus; but its
-registers will still be directly addressible.
+registers will still be directly addressable.
 
 Platform devices are given a name, used in driver binding, and a
 list of resources such as addresses and IRQs.
@@ -125,7 +125,7 @@ three different ways to find such a match:
   usually register later during booting, or by module loading.
 
 - Registering a driver using platform_driver_probe() works just like
-  using platform_driver_register(), except that the the driver won't
+  using platform_driver_register(), except that the driver won't
   be probed later if another device registers. (Which is OK, since
   this interface is only for use with non-hotpluggable devices.)
 
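
To illustrate the distinction the hunk above touches on: platform_driver_probe()
takes the probe routine as a separate argument precisely so that it can live in
discardable __init memory. A minimal sketch, with an illustrative driver name:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
    	return 0;	/* claim the device */
    }

    static struct platform_driver example_driver = {
    	.driver = {
    		.name = "example",
    	},
    };

    static int __init example_init(void)
    {
    	/* probe() is passed separately so it can stay in __init memory */
    	return platform_driver_probe(&example_driver, example_probe);
    }
    module_init(example_init);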
diff --git a/Documentation/dvb/README.dvb-usb b/Documentation/dvb/README.dvb-usb
index 46b78b7331c2..bf2a9cdfe7bb 100644
--- a/Documentation/dvb/README.dvb-usb
+++ b/Documentation/dvb/README.dvb-usb
@@ -228,5 +228,5 @@ Patches, comments and suggestions are very very welcome.
 
    Ulf Hermenau for helping me out with traditional chinese.
 
    André Smoktun and Christian Frömmel for supporting me with
    hardware and listening to my problems very patiently.
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 4c33cced5f65..4865addebe1c 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -66,7 +66,7 @@ Michael Dreher <michael@5dot1.de>
 Andreas 'randy' Weinberger
   for the support of the Fujitsu-Siemens Activy budget DVB-S
 
 Kenneth Aafløy <ke-aa@frisurf.no>
   for adding support for Typhoon DVB-S budget card
 
 Ernst Peinlich <e.peinlich@inode.at>
diff --git a/Documentation/fb/arkfb.txt b/Documentation/fb/arkfb.txt
new file mode 100644
index 000000000000..e8487a9d6a05
--- /dev/null
+++ b/Documentation/fb/arkfb.txt
@@ -0,0 +1,68 @@
+
+	arkfb - fbdev driver for ARK Logic chips
+	========================================
+
+
+Supported Hardware
+==================
+
+	ARK 2000PV chip
+	ICS 5342 ramdac
+
+	- only BIOS initialized VGA devices supported
+	- probably not working on big endian
+
+
+Supported Features
+==================
+
+	*  4 bpp pseudocolor modes (with 18bit palette, two variants)
+	*  8 bpp pseudocolor mode (with 18bit palette)
+	* 16 bpp truecolor modes (RGB 555 and RGB 565)
+	* 24 bpp truecolor mode (RGB 888)
+	* 32 bpp truecolor mode (RGB 888)
+	* text mode (activated by bpp = 0)
+	* doublescan mode variant (not available in text mode)
+	* panning in both directions
+	* suspend/resume support
+
+Text mode is supported even in higher resolutions, but there is limitation to
+lower pixclocks (i got maximum about 70 MHz, it is dependent on specific
+hardware). This limitation is not enforced by driver. Text mode supports 8bit
+wide fonts only (hardware limitation) and 16bit tall fonts (driver
+limitation). Unfortunately character attributes (like color) in text mode are
+broken for unknown reason, so its usefulness is limited.
+
+There are two 4 bpp modes. First mode (selected if nonstd == 0) is mode with
+packed pixels, high nibble first. Second mode (selected if nonstd == 1) is mode
+with interleaved planes (1 byte interleave), MSB first. Both modes support
+8bit wide fonts only (driver limitation).
+
+Suspend/resume works on systems that initialize video card during resume and
+if device is active (for example used by fbcon).
+
+
+Missing Features
+================
+(alias TODO list)
+
+	* secondary (not initialized by BIOS) device support
+	* big endian support
+	* DPMS support
+	* MMIO support
+	* interlaced mode variant
+	* support for fontwidths != 8 in 4 bpp modes
+	* support for fontheight != 16 in text mode
+	* hardware cursor
+	* vsync synchronization
+	* feature connector support
+	* acceleration support (8514-like 2D)
+
+
+Known bugs
+==========
+
+	* character attributes (and cursor) in text mode are broken
+
+--
+Ondrej Zajicek <santiago@crfreenet.org>
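Aside: the nonstd selector for the two 4 bpp modes described above is set
through the ordinary fbdev ioctls; a hedged userspace sketch, not part of the
patch (device path and values are illustrative).

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fb.h>

    int main(void)
    {
        struct fb_var_screeninfo var;
        int fd = open("/dev/fb0", O_RDWR);

        if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0) {
            perror("fb0");
            return 1;
        }
        var.bits_per_pixel = 4;  /* 4 bpp pseudocolor */
        var.nonstd = 1;          /* 1 = interleaved planes, 0 = packed pixels */
        if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
            perror("FBIOPUT_VSCREENINFO");
        close(fd);
        return 0;
    }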
diff --git a/Documentation/fb/aty128fb.txt b/Documentation/fb/aty128fb.txt
index 069262fb619d..b605204fcfe1 100644
--- a/Documentation/fb/aty128fb.txt
+++ b/Documentation/fb/aty128fb.txt
@@ -54,8 +54,8 @@ Accepted options:
 
 noaccel  - do not use acceleration engine. It is default.
 accel    - use acceleration engine. Not finished.
-vmode:x  - chooses PowerMacintosh video mode <x>. Depreciated.
-cmode:x  - chooses PowerMacintosh colour mode <x>. Depreciated.
+vmode:x  - chooses PowerMacintosh video mode <x>. Deprecated.
+cmode:x  - chooses PowerMacintosh colour mode <x>. Deprecated.
 <XxX@X>  - selects startup videomode. See modedb.txt for detailed
 	   explanation. Default is 640x480x8bpp.
 
diff --git a/Documentation/fb/framebuffer.txt b/Documentation/fb/framebuffer.txt
index 610e7801207b..b3e3a0356839 100644
--- a/Documentation/fb/framebuffer.txt
+++ b/Documentation/fb/framebuffer.txt
@@ -215,11 +215,11 @@ vertical retrace time is the sum of the upper margin, the lower margin and the
 vsync length.
 
  +----------+---------------------------------------------+----------+-------+
- |          |                  ^                          |          |       |
+ |          |                  ↑                          |          |       |
  |          |                  |upper_margin              |          |       |
- |          |                  ¥                          |          |       |
+ |          |                  ↓                          |          |       |
  +----------###############################################----------+-------+
- |          #                  ^                          #          |       |
+ |          #                  ↑                          #          |       |
  |          #                  |                          #          |       |
  |          #                  |                          #          |       |
  |          #                  |                          #          |       |
@@ -238,15 +238,15 @@ vsync length.
 |          #                  |                          #          |       |
 |          #                  |                          #          |       |
 |          #                  |                          #          |       |
-|          #                  ¥                          #          |       |
+|          #                  ↓                          #          |       |
 +----------###############################################----------+-------+
-|          |                  ^                          |          |       |
+|          |                  ↑                          |          |       |
 |          |                  |lower_margin              |          |       |
-|          |                  ¥                          |          |       |
+|          |                  ↓                          |          |       |
 +----------+---------------------------------------------+----------+-------+
-|          |                  ^                          |          |       |
+|          |                  ↑                          |          |       |
 |          |                  |vsync_len                 |          |       |
-|          |                  ¥                          |          |       |
+|          |                  ↓                          |          |       |
 +----------+---------------------------------------------+----------+-------+
 
 The frame buffer device expects all horizontal timings in number of dotclocks
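Aside: the margins and sync lengths in this diagram map onto fields of
struct fb_var_screeninfo; a sketch with invented VGA-style numbers, not taken
from the document.

    #include <linux/fb.h>

    /* Illustrative 640x480 timing only; the values are made up. */
    static void example_timings(struct fb_var_screeninfo *var)
    {
        var->xres         = 640;
        var->yres         = 480;
        var->pixclock     = 39722;  /* picoseconds per dotclock */
        var->left_margin  = 48;     /* horizontal, in dotclocks */
        var->right_margin = 16;
        var->hsync_len    = 96;
        var->upper_margin = 33;     /* vertical, in scanlines */
        var->lower_margin = 10;
        var->vsync_len    = 2;
    }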
diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/imacfb.txt
index 759028545a7e..316ec9bb7deb 100644
--- a/Documentation/fb/imacfb.txt
+++ b/Documentation/fb/imacfb.txt
@@ -17,7 +17,7 @@ How to use it?
 ==============
 
 Imacfb does not have any kind of autodetection of your machine.
-You have to add the fillowing kernel parameters in your elilo.conf:
+You have to add the following kernel parameters in your elilo.conf:
 	Macbook :
 		video=imacfb:macbook
 	MacMini :
diff --git a/Documentation/fb/sstfb.txt b/Documentation/fb/sstfb.txt
index df27f5bf15db..550ca775a4cb 100644
--- a/Documentation/fb/sstfb.txt
+++ b/Documentation/fb/sstfb.txt
@@ -2,9 +2,9 @@
 Introduction
 
 	  This is a frame buffer device driver for 3dfx' Voodoo Graphics
-	(aka voodoo 1, aka sst1) and Voodoo² (aka Voodoo 2, aka CVG) based
+	(aka voodoo 1, aka sst1) and Voodoo² (aka Voodoo 2, aka CVG) based
 	video boards. It's highly experimental code, but is guaranteed to work
-	on my computer, with my "Maxi Gamer 3D" and "Maxi Gamer 3d²" boards,
+	on my computer, with my "Maxi Gamer 3D" and "Maxi Gamer 3d²" boards,
 	and with me "between chair and keyboard". Some people tested other
 	combinations and it seems that it works.
 	The main page is located at <http://sstfb.sourceforge.net>, and if
diff --git a/Documentation/fb/vt8623fb.txt b/Documentation/fb/vt8623fb.txt
new file mode 100644
index 000000000000..f654576c56b7
--- /dev/null
+++ b/Documentation/fb/vt8623fb.txt
@@ -0,0 +1,64 @@
+
+	vt8623fb - fbdev driver for graphics core in VIA VT8623 chipset
+	===============================================================
+
+
+Supported Hardware
+==================
+
+	VIA VT8623 [CLE266] chipset and its graphics core
+		(known as CastleRock or Unichrome)
+
+I tested vt8623fb on VIA EPIA ML-6000
+
+
+Supported Features
+==================
+
+	*  4 bpp pseudocolor modes (with 18bit palette, two variants)
+	*  8 bpp pseudocolor mode (with 18bit palette)
+	* 16 bpp truecolor mode (RGB 565)
+	* 32 bpp truecolor mode (RGB 888)
+	* text mode (activated by bpp = 0)
+	* doublescan mode variant (not available in text mode)
+	* panning in both directions
+	* suspend/resume support
+	* DPMS support
+
+Text mode is supported even in higher resolutions, but there is limitation to
+lower pixclocks (maximum about 100 MHz). This limitation is not enforced by
+driver. Text mode supports 8bit wide fonts only (hardware limitation) and
+16bit tall fonts (driver limitation).
+
+There are two 4 bpp modes. First mode (selected if nonstd == 0) is mode with
+packed pixels, high nibble first. Second mode (selected if nonstd == 1) is mode
+with interleaved planes (1 byte interleave), MSB first. Both modes support
+8bit wide fonts only (driver limitation).
+
+Suspend/resume works on systems that initialize video card during resume and
+if device is active (for example used by fbcon).
+
+
+Missing Features
+================
+(alias TODO list)
+
+	* secondary (not initialized by BIOS) device support
+	* MMIO support
+	* interlaced mode variant
+	* support for fontwidths != 8 in 4 bpp modes
+	* support for fontheight != 16 in text mode
+	* hardware cursor
+	* video overlay support
+	* vsync synchronization
+	* acceleration support (8514-like 2D, busmaster transfers)
+
+
+Known bugs
+==========
+
+	* cursor disable in text mode doesn't work
+
+
+--
+Ondrej Zajicek <santiago@crfreenet.org>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 59c14159cc47..d866551be037 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -54,7 +54,7 @@ ata *);
 
 locking rules:
 	all may block, none have BKL
-		i_sem(inode)
+		i_mutex(inode)
 lookup:		yes
 create:		yes
 link:		yes (both)
@@ -74,7 +74,7 @@ setxattr: yes
 getxattr:	no
 listxattr:	no
 removexattr:	yes
-	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_sem on
+	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
 	->truncate() is never called directly - it's a callback, not a
@@ -461,7 +461,7 @@ doesn't take the BKL.
 ->read on directories probably must go away - we should just enforce -EISDIR
 in sys_read() and friends.
 
-->fsync() has i_sem on inode.
+->fsync() has i_mutex on inode.
 
 --------------------------- dquot_operations -------------------------------
 prototypes:
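Aside: a rough sketch of what the i_mutex rule above means for ->unlink(),
not part of the patch; this simplifies what the VFS actually does (permission
checks and lock-class annotations omitted).

    #include <linux/fs.h>
    #include <linux/mutex.h>

    /*
     * ->unlink() is called with i_mutex held on both the parent
     * directory and the victim inode. (Real VFS code uses
     * mutex_lock_nested() with I_MUTEX_PARENT / I_MUTEX_CHILD.)
     */
    static int unlink_locked(struct inode *dir, struct dentry *victim)
    {
        int err;

        mutex_lock(&dir->i_mutex);
        mutex_lock(&victim->d_inode->i_mutex);
        err = dir->i_op->unlink(dir, victim);
        mutex_unlock(&victim->d_inode->i_mutex);
        mutex_unlock(&dir->i_mutex);
        return err;
    }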
diff --git a/Documentation/filesystems/hpfs.txt b/Documentation/filesystems/hpfs.txt
index 38aba03efc5e..fa45c3baed98 100644
--- a/Documentation/filesystems/hpfs.txt
+++ b/Documentation/filesystems/hpfs.txt
@@ -290,7 +290,7 @@ History
 2.07 More fixes for Warp Server. Now it really works
 2.08 Creating new files is not so slow on large disks
      An attempt to sync deleted file does not generate filesystem error
-2.09 Fixed error on extremly fragmented files
+2.09 Fixed error on extremely fragmented files
 
 
  vim: set textwidth=80:
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index 81779068b09b..8ee10ec88293 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -349,7 +349,7 @@ end of the line.
 Note the "Should sync?" parameter "nosync" means that the two mirrors are
 already in sync which will be the case on a clean shutdown of Windows. If the
 mirrors are not clean, you can specify the "sync" option instead of "nosync"
-and the Device-Mapper driver will then copy the entirey of the "Source Device"
+and the Device-Mapper driver will then copy the entirety of the "Source Device"
 to the "Target Device" or if you specified multipled target devices to all of
 them.
 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 4f3e84c520a5..8756a07f4dc3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -229,7 +229,7 @@ Table 1-3: Kernel info in /proc
  mounts      Mounted filesystems
  net         Networking info (see text)
  partitions  Table of partitions known to the system
- pci         Depreciated info of PCI bus (new way -> /proc/bus/pci/,
+ pci         Deprecated info of PCI bus (new way -> /proc/bus/pci/,
              decoupled by lspci (2.4)
  rtc         Real time clock
  scsi        SCSI info (see text)
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
index 7fbb6ffe5769..18d23f9a18c7 100644
--- a/Documentation/filesystems/relay.txt
+++ b/Documentation/filesystems/relay.txt
@@ -351,7 +351,7 @@ If the current buffer is full, i.e. all sub-buffers remain unconsumed,
 the callback returns 0 to indicate that the buffer switch should not
 occur yet, i.e. until the consumer has had a chance to read the
 current set of ready sub-buffers. For the relay_buf_full() function
-to make sense, the consumer is reponsible for notifying the relay
+to make sense, the consumer is responsible for notifying the relay
 interface when sub-buffers have been consumed via
 relay_subbufs_consumed(). Any subsequent attempts to write into the
 buffer will again invoke the subbuf_start() callback with the same
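Aside: a sketch of the no-overwrite subbuf_start() callback this passage
describes, not part of the patch; it assumes the standard relay callback
signature, and the zero reserve size is illustrative.

    static int subbuf_start(struct rchan_buf *buf, void *subbuf,
                            void *prev_subbuf, size_t prev_padding)
    {
        if (relay_buf_full(buf))
            return 0;   /* wait for relay_subbufs_consumed() */

        subbuf_start_reserve(buf, 0);   /* no per-sub-buffer header */
        return 1;       /* go ahead with the buffer switch */
    }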
diff --git a/Documentation/filesystems/xip.txt b/Documentation/filesystems/xip.txt
index 6c0cef10eb4d..3cc4010521a0 100644
--- a/Documentation/filesystems/xip.txt
+++ b/Documentation/filesystems/xip.txt
@@ -19,7 +19,7 @@ completely. With execute-in-place, read&write type operations are performed
 directly from/to the memory backed storage device. For file mappings, the
 storage device itself is mapped directly into userspace.
 
-This implementation was initialy written for shared memory segments between
+This implementation was initially written for shared memory segments between
 different virtual machines on s390 hardware to allow multiple machines to
 share the same binaries and libraries.
 
diff --git a/Documentation/fujitsu/frv/gdbstub.txt b/Documentation/fujitsu/frv/gdbstub.txt
index 9304fb36ae8a..b92bfd902a4e 100644
--- a/Documentation/fujitsu/frv/gdbstub.txt
+++ b/Documentation/fujitsu/frv/gdbstub.txt
@@ -126,5 +126,5 @@ GDB stub and the debugger:
 
 Furthermore, the GDB stub will intercept a number of exceptions automatically
 if they are caused by kernel execution. It will also intercept BUG() macro
-invokation.
+invocation.
 
diff --git a/Documentation/hwmon/adm1026 b/Documentation/hwmon/adm1026
index 473c689d7924..f4327db2307e 100644
--- a/Documentation/hwmon/adm1026
+++ b/Documentation/hwmon/adm1026
@@ -80,7 +80,7 @@ temperature sensor inputs. Both the PWM output and the DAC output can be
 used to control fan speed. Usually only one of these two outputs will be
 used. Write the minimum PWM or DAC value to the appropriate control
 register. Then set the low temperature limit in the tmin values for each
-temperature sensor. The range of control is fixed at 20 °C, and the
+temperature sensor. The range of control is fixed at 20 °C, and the
 largest difference between current and tmin of the temperature sensors sets
 the control output. See the datasheet for several example circuits for
 controlling fan speed with the PWM and DAC outputs. The fan speed sensors
diff --git a/Documentation/hwmon/gl518sm b/Documentation/hwmon/gl518sm
index ce0881883bca..229f8b789185 100644
--- a/Documentation/hwmon/gl518sm
+++ b/Documentation/hwmon/gl518sm
@@ -13,7 +13,7 @@ Supported chips:
 
 Authors:
         Frodo Looijaard <frodol@dds.nl>,
-        Kyösti Mälkki <kmalkki@cc.hut.fi>
+        Kyösti Mälkki <kmalkki@cc.hut.fi>
         Hong-Gunn Chew <hglinux@gunnet.org>
         Jean Delvare <khali@linux-fr.org>
 
diff --git a/Documentation/hwmon/lm83 b/Documentation/hwmon/lm83
index f7aad1489cb0..a04d1fe9269c 100644
--- a/Documentation/hwmon/lm83
+++ b/Documentation/hwmon/lm83
@@ -45,7 +45,7 @@ Unconfirmed motherboards:
 The LM82 is confirmed to have been found on most AMD Geode reference
 designs and test platforms.
 
-The driver has been successfully tested by Magnus Forsström, who I'd
+The driver has been successfully tested by Magnus Forsström, who I'd
 like to thank here. More testers will be of course welcome.
 
 The fact that the LM83 is only scarcely used can be easily explained.
diff --git a/Documentation/hwmon/sis5595 b/Documentation/hwmon/sis5595
index b7ae36b8cdf5..4f8877a34f37 100644
--- a/Documentation/hwmon/sis5595
+++ b/Documentation/hwmon/sis5595
@@ -8,7 +8,7 @@ Supported chips:
   Datasheet: Publicly available at the Silicon Integrated Systems Corp. site.
 
 Authors:
-        Kyösti Mälkki <kmalkki@cc.hut.fi>,
+        Kyösti Mälkki <kmalkki@cc.hut.fi>,
         Mark D. Studebaker <mdsxyz123@yahoo.com>,
         Aurelien Jarno <aurelien@aurel32.net> 2.6 port
 
diff --git a/Documentation/hwmon/via686a b/Documentation/hwmon/via686a
index a936fb3824b2..d651b25f7519 100644
--- a/Documentation/hwmon/via686a
+++ b/Documentation/hwmon/via686a
@@ -8,7 +8,7 @@ Supported chips:
   Datasheet: On request through web form (http://www.via.com.tw/en/support/datasheets/)
 
 Authors:
-        Kyösti Mälkki <kmalkki@cc.hut.fi>,
+        Kyösti Mälkki <kmalkki@cc.hut.fi>,
         Mark D. Studebaker <mdsxyz123@yahoo.com>
         Bob Dougherty <bobd@stanford.edu>
         (Some conversion-factor data were contributed by
diff --git a/Documentation/hwmon/w83792d b/Documentation/hwmon/w83792d
index 8171c285bb55..14a668ed8aaa 100644
--- a/Documentation/hwmon/w83792d
+++ b/Documentation/hwmon/w83792d
@@ -107,7 +107,7 @@ Known problems:
       by CR[0x49h].
   - The function of vid and vrm has not been finished, because I'm NOT
     very familiar with them. Adding support is welcome.
-  - The function of chassis open detection needs more tests.
+  - The function of chassis open detection needs more tests.
   - If you have ASUS server board and chip was not found: Then you will
     need to upgrade to latest (or beta) BIOS. If it does not help please
     contact us.
diff --git a/Documentation/i2c/busses/i2c-i810 b/Documentation/i2c/busses/i2c-i810
index 83c3b9743c3c..778210ee1583 100644
--- a/Documentation/i2c/busses/i2c-i810
+++ b/Documentation/i2c/busses/i2c-i810
@@ -7,7 +7,7 @@ Supported adapters:
 Authors:
 	Frodo Looijaard <frodol@dds.nl>,
 	Philip Edelbrock <phil@netroedge.com>,
-	Kyösti Mälkki <kmalkki@cc.hut.fi>,
+	Kyösti Mälkki <kmalkki@cc.hut.fi>,
 	Ralph Metzler <rjkm@thp.uni-koeln.de>,
 	Mark D. Studebaker <mdsxyz123@yahoo.com>
 
diff --git a/Documentation/i2c/busses/i2c-sis96x b/Documentation/i2c/busses/i2c-sis96x
index 08d7b2dac69a..266481fd26e2 100644
--- a/Documentation/i2c/busses/i2c-sis96x
+++ b/Documentation/i2c/busses/i2c-sis96x
@@ -60,7 +60,7 @@ Mark D. Studebaker <mdsxyz123@yahoo.com>
 	- design hints and bug fixes
 Alexander Maylsh <amalysh@web.de>
 	- ditto, plus an important datasheet... almost the one I really wanted
-Hans-Günter Lütke Uphues <hg_lu@t-online.de>
+Hans-Günter Lütke Uphues <hg_lu@t-online.de>
 	- patch for SiS735
 Robert Zwerus <arzie@dds.nl>
 	- testing for SiS645DX
diff --git a/Documentation/i2c/busses/i2c-via b/Documentation/i2c/busses/i2c-via
index 55edfe1a640b..343870661ac3 100644
--- a/Documentation/i2c/busses/i2c-via
+++ b/Documentation/i2c/busses/i2c-via
@@ -4,7 +4,7 @@ Supported adapters:
   * VIA Technologies, InC. VT82C586B
     Datasheet: Publicly available at the VIA website
 
-Author: Kyösti Mälkki <kmalkki@cc.hut.fi>
+Author: Kyösti Mälkki <kmalkki@cc.hut.fi>
 
 Description
 -----------
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 775f489e86f6..06b4be3ef6d8 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -17,7 +17,7 @@ Supported adapters:
     Datasheet: available on request and under NDA from VIA
 
 Authors:
-	Kyösti Mälkki <kmalkki@cc.hut.fi>,
+	Kyösti Mälkki <kmalkki@cc.hut.fi>,
 	Mark D. Studebaker <mdsxyz123@yahoo.com>,
 	Jean Delvare <khali@linux-fr.org>
 
diff --git a/Documentation/i2c/i2c-protocol b/Documentation/i2c/i2c-protocol
index b4022c914210..579b92d5f3a3 100644
--- a/Documentation/i2c/i2c-protocol
+++ b/Documentation/i2c/i2c-protocol
@@ -68,7 +68,7 @@ We have found some I2C devices that needs the following modifications:
 
 Flags I2C_M_IGNORE_NAK
     Normally message is interrupted immediately if there is [NA] from the
-    client. Setting this flag treats any [NA] as [A], and all of
+    client. Setting this flag treats any [NA] as [A], and all of
     message is sent.
     These messages may still fail to SCL lo->hi timeout.
 
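Aside: a hedged sketch, not part of the patch, of sending one byte with
I2C_M_IGNORE_NAK set; the client address and payload are invented.

    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int write_byte_ignoring_nak(struct i2c_adapter *adap, u8 val)
    {
        struct i2c_msg msg = {
            .addr  = 0x50,                  /* example 7-bit address */
            .flags = I2C_M_IGNORE_NAK,      /* treat [NA] as [A] */
            .len   = 1,
            .buf   = &val,
        };

        /* i2c_transfer() returns the number of messages transferred. */
        return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
    }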
diff --git a/Documentation/i2o/README b/Documentation/i2o/README
index 9aa6ddb446eb..0ebf58c73f54 100644
--- a/Documentation/i2o/README
+++ b/Documentation/i2o/README
@@ -30,13 +30,13 @@ Juha Sievanen, University of Helsinki Finland
 	Bug fixes
 	Core code extensions
 
-Auvo Häkkinen, University of Helsinki Finland
+Auvo Häkkinen, University of Helsinki Finland
 	LAN OSM code
 	/Proc interface to LAN class
 	Bug fixes
 	Core code extensions
 
-Taneli Vähäkangas, University of Helsinki Finland
+Taneli Vähäkangas, University of Helsinki Finland
 	Fixes to i2o_config
 
 CREDITS
diff --git a/Documentation/input/atarikbd.txt b/Documentation/input/atarikbd.txt
index 668f4d0d97d6..ab050621e20f 100644
--- a/Documentation/input/atarikbd.txt
+++ b/Documentation/input/atarikbd.txt
@@ -179,9 +179,9 @@ reporting mode for joystick 1, with both buttons being logically assigned to
 the mouse. After any joystick command, the ikbd assumes that joysticks are
 connected to both Joystick0 and Joystick1. Any mouse command (except MOUSE
 DISABLE) then causes port 0 to again be scanned as if it were a mouse, and
-both buttons are logically connected to it. If a mouse diable command is
+both buttons are logically connected to it. If a mouse disable command is
 received while port 0 is presumed to be a mouse, the button is logically
-assigned to Joystick1 ( until the mouse is reenabled by another mouse command).
+assigned to Joystick1 (until the mouse is reenabled by another mouse command).
 
 9. ikbd Command Set
 
diff --git a/Documentation/input/xpad.txt b/Documentation/input/xpad.txt
index 5427bdf225ed..aae0d404c566 100644
--- a/Documentation/input/xpad.txt
+++ b/Documentation/input/xpad.txt
@@ -65,15 +65,15 @@ of buttons, see section 0.3 - Unknown Controllers
 I've tested this with Stepmania, and it works quite well.
 
 
-0.3 Unkown Controllers
+0.3 Unknown Controllers
 ----------------------
-If you have an unkown xbox controller, it should work just fine with
+If you have an unknown xbox controller, it should work just fine with
 the default settings.
 
 HOWEVER if you have an unknown dance pad not listed below, it will not
 work UNLESS you set "dpad_to_buttons" to 1 in the module configuration.
 
-PLEASE if you have an unkown controller, email Dom <binary1230@yahoo.com> with
+PLEASE, if you have an unknown controller, email Dom <binary1230@yahoo.com> with
 a dump from /proc/bus/usb and a description of the pad (manufacturer, country,
 whether it is a dance pad or normal controller) so that we can add your pad
 to the list of supported devices, ensuring that it will work out of the
diff --git a/Documentation/isdn/CREDITS b/Documentation/isdn/CREDITS
index e1b3023efaa8..7c17c837064f 100644
--- a/Documentation/isdn/CREDITS
+++ b/Documentation/isdn/CREDITS
@@ -2,7 +2,7 @@
 I want to thank all who contributed to this project and especially to:
 (in alphabetical order)
 
-Thomas Bogendörfer (tsbogend@bigbug.franken.de)
+Thomas Bogendörfer (tsbogend@bigbug.franken.de)
   Tester, lots of bugfixes and hints.
 
 Alan Cox (alan@redhat.com)
@@ -11,7 +11,7 @@ Alan Cox (alan@redhat.com)
 Henner Eisen (eis@baty.hanse.de)
   For X.25 implementation.
 
-Volker Götz (volker@oops.franken.de)
+Volker Götz (volker@oops.franken.de)
   For contribution of man-pages, the imontty-tool and a perfect
   maintaining of the mailing-list at hub-wue.
 
diff --git a/Documentation/isdn/README b/Documentation/isdn/README
index 761595243931..6783437f21c2 100644
--- a/Documentation/isdn/README
+++ b/Documentation/isdn/README
@@ -402,7 +402,7 @@ README for the ISDN-subsystem
     the script tools/tcltk/isdnmon. You can add actions for line-status
     changes. See the comments at the beginning of the script for how to
     do that. There are other tty-based tools in the tools-subdirectory
-    contributed by Michael Knigge (imon), Volker Götz (imontty) and
+    contributed by Michael Knigge (imon), Volker Götz (imontty) and
     Andreas Kool (isdnmon).
 
  l) For initial testing, you can set the verbose-level to 2 (default: 0).
diff --git a/Documentation/isdn/README.icn b/Documentation/isdn/README.icn
index a5f55eadb3ca..13f833d4e910 100644
--- a/Documentation/isdn/README.icn
+++ b/Documentation/isdn/README.icn
@@ -3,8 +3,8 @@ $Id: README.icn,v 1.7 2000/08/06 09:22:51 armin Exp $
 You can get the ICN-ISDN-card from:
 
 Thinking Objects Software GmbH
-Versbacher Röthe 159
-97078 Würzburg
+Versbacher Röthe 159
+97078 Würzburg
 Tel: +49 931 2877950
 Fax: +49 931 2877951
 
diff --git a/Documentation/java.txt b/Documentation/java.txt
index c768dc63b34e..3cce3fbb6644 100644
--- a/Documentation/java.txt
+++ b/Documentation/java.txt
@@ -390,7 +390,7 @@ the execution bit, then just do
 
 
 originally by Brian A. Lantz, brian@lantz.com
-heavily edited for binfmt_misc by Richard Günther
+heavily edited for binfmt_misc by Richard Günther
 new scripts by Colin J. Watson <cjw44@cam.ac.uk>
 added executable Jar file support by Kurt Huwig <kurt@iku-netz.de>
 
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index c68dafeda7a7..d9e3b199929b 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -236,7 +236,7 @@
 
      * Title: "Design and Implementation of the Second Extended
        Filesystem"
-       Author: Rémy Card, Theodore Ts'o, Stephen Tweedie.
+       Author: Rémy Card, Theodore Ts'o, Stephen Tweedie.
        URL: http://web.mit.edu/tytso/www/linux/ext2intro.html
        Keywords: ext2, linux fs history, inode, directory, link, devices,
        VFS, physical structure, performance, benchmarks, ext2fs library,
diff --git a/Documentation/m68k/README.buddha b/Documentation/m68k/README.buddha
index ef484a719bb9..3ea9827ba3c7 100644
--- a/Documentation/m68k/README.buddha
+++ b/Documentation/m68k/README.buddha
@@ -204,7 +204,7 @@ always shows a "no IRQ here" on the Buddha, and accesses to
 the third IDE port are going into data's Nirwana on the
 Buddha.
 
-			Jens Schönfeld february 19th, 1997
+			Jens Schönfeld february 19th, 1997
 			updated may 27th, 1997
 			eMail: sysop@nostlgic.tng.oche.de
 
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
index 0e740c812d12..bd450e797558 100644
--- a/Documentation/magic-number.txt
+++ b/Documentation/magic-number.txt
@@ -129,7 +129,7 @@ SAVEKMSG_MAGIC1 0x53415645 savekmsg arch/*/amiga/config.c
 GDA_MAGIC             0x58464552  gda               include/asm-mips64/sn/gda.h
 RED_MAGIC1            0x5a2cf071  (any)             mm/slab.c
 STL_PORTMAGIC         0x5a7182c9  stlport           include/linux/stallion.h
-EEPROM_MAGIC_VALUE    0X5ab478d2  lanai_dev         drivers/atm/lanai.c
+EEPROM_MAGIC_VALUE    0x5ab478d2  lanai_dev         drivers/atm/lanai.c
 HDLCDRV_MAGIC         0x5ac6e778  hdlcdrv_state     include/linux/hdlcdrv.h
 EPCA_MAGIC            0x5c6df104  channel           include/linux/epca.h
 PCXX_MAGIC            0x5c6df104  channel           drivers/char/pcxx.h
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 2202f5dc8ac2..5818628207b5 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -178,6 +178,21 @@ All md devices contain:
      The size should be at least PAGE_SIZE (4k) and should be a power
      of 2. This can only be set while assembling an array
 
+  layout
+     The "layout" for the array for the particular level. This is
+     simply a number that is interpretted differently by different
+     levels. It can be written while assembling an array.
+
+  reshape_position
+     This is either "none" or a sector number within the devices of
+     the array where "reshape" is up to. If this is set, the three
+     attributes mentioned above (raid_disks, chunk_size, layout) can
+     potentially have 2 values, an old and a new value. If these
+     values differ, reading the attribute returns
+        new (old)
+     and writing will effect the 'new' value, leaving the 'old'
+     unchanged.
+
   component_size
      For arrays with data redundancy (i.e. not raid0, linear, faulty,
      multipath), all components must be the same size - or at least
@@ -193,11 +208,6 @@ All md devices contain:
      1.2 (newer format in varying locations) or "none" indicating that
      the kernel isn't managing metadata at all.
 
-  layout
-     The "layout" for the array for the particular level. This is
-     simply a number that is interpretted differently by different
-     levels. It can be written while assembling an array.
-
   resync_start
      The point at which resync should start. If no resync is needed,
      this will be a very large number. At array creation it will
@@ -259,29 +269,6 @@ All md devices contain:
      like active, but no writes have been seen for a while (safe_mode_delay).
 
 
-  sync_speed_min
-  sync_speed_max
-     This are similar to /proc/sys/dev/raid/speed_limit_{min,max}
-     however they only apply to the particular array.
-     If no value has been written to these, of if the word 'system'
-     is written, then the system-wide value is used. If a value,
-     in kibibytes-per-second is written, then it is used.
-     When the files are read, they show the currently active value
-     followed by "(local)" or "(system)" depending on whether it is
-     a locally set or system-wide value.
-
-  sync_completed
-     This shows the number of sectors that have been completed of
-     whatever the current sync_action is, followed by the number of
-     sectors in total that could need to be processed. The two
-     numbers are separated by a '/' thus effectively showing one
-     value, a fraction of the process that is complete.
-
-  sync_speed
-     This shows the current actual speed, in K/sec, of the current
-     sync_action. It is averaged over the last 30 seconds.
-
-
 As component devices are added to an md array, they appear in the 'md'
 directory as new directories named
    dev-XXX
@@ -412,6 +399,35 @@ also have
      Note that the numbers are 'bit' numbers, not 'block' numbers.
      They should be scaled by the bitmap_chunksize.
 
+  sync_speed_min
+  sync_speed_max
+     This are similar to /proc/sys/dev/raid/speed_limit_{min,max}
+     however they only apply to the particular array.
+     If no value has been written to these, of if the word 'system'
+     is written, then the system-wide value is used. If a value,
+     in kibibytes-per-second is written, then it is used.
+     When the files are read, they show the currently active value
+     followed by "(local)" or "(system)" depending on whether it is
+     a locally set or system-wide value.
+
+  sync_completed
+     This shows the number of sectors that have been completed of
+     whatever the current sync_action is, followed by the number of
+     sectors in total that could need to be processed. The two
+     numbers are separated by a '/' thus effectively showing one
+     value, a fraction of the process that is complete.
+
+  sync_speed
+     This shows the current actual speed, in K/sec, of the current
+     sync_action. It is averaged over the last 30 seconds.
+
+  suspend_lo
+  suspend_hi
+     The two values, given as numbers of sectors, indicate a range
+     within the array where IO will be blocked. This is currently
+     only supported for raid4/5/6.
+
+
 Each active md device may also have attributes specific to the
 personality module that manages it.
 These are specific to the implementation of the module and could
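Aside: a hedged userspace sketch, not part of the patch, of the local/system
convention these attributes use; "md0" and the 50000 KiB/s value are invented.

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/block/md0/md/sync_speed_min", "r+");
        char buf[64];

        if (!f)
            return 1;
        fputs("50000\n", f);    /* kibibytes per second; sets a "local" value */
        rewind(f);              /* reposition before reading back */
        if (fgets(buf, sizeof(buf), f))
            printf("sync_speed_min: %s", buf);  /* e.g. "50000 (local)" */
        fclose(f);
        return 0;
    }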
diff --git a/Documentation/netlabel/introduction.txt b/Documentation/netlabel/introduction.txt
index a4ffba1694c8..5ecd8d1dcf4e 100644
--- a/Documentation/netlabel/introduction.txt
+++ b/Documentation/netlabel/introduction.txt
@@ -30,7 +30,7 @@ The communication layer exists to allow NetLabel configuration and monitoring
 from user space. The NetLabel communication layer uses a message based
 protocol built on top of the Generic NETLINK transport mechanism. The exact
 formatting of these NetLabel messages as well as the Generic NETLINK family
-names can be found in the the 'net/netlabel/' directory as comments in the
+names can be found in the 'net/netlabel/' directory as comments in the
 header files as well as in 'include/net/netlabel.h'.
 
  * Security Module API
diff --git a/Documentation/networking/6pack.txt b/Documentation/networking/6pack.txt
index 48ed2b711bd2..d0777a1200e1 100644
--- a/Documentation/networking/6pack.txt
+++ b/Documentation/networking/6pack.txt
@@ -1,6 +1,6 @@
 This is the 6pack-mini-HOWTO, written by
 
-Andreas Könsgen DG3KQ
+Andreas Könsgen DG3KQ
 Internet: ajk@iehk.rwth-aachen.de
 AMPR-net: dg3kq@db0pra.ampr.org
 AX.25:    dg3kq@db0ach.#nrw.deu.eu
diff --git a/Documentation/networking/NAPI_HOWTO.txt b/Documentation/networking/NAPI_HOWTO.txt
index fb8dc6422a52..7907435a661c 100644
--- a/Documentation/networking/NAPI_HOWTO.txt
+++ b/Documentation/networking/NAPI_HOWTO.txt
@@ -160,7 +160,7 @@ on current cpu. This primitive is called by dev->poll(), when
 it completes its work. The device cannot be out of poll list at this
 call, if it is then clearly it is a BUG(). You'll know ;->
 
-All these above nethods are used below. So keep reading for clarity.
+All of the above methods are used below, so keep reading for clarity.
 
 Device driver changes to be made when porting NAPI
 ==================================================
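Aside: a sketch, not part of the patch, of the old-style dev->poll() shape
these primitives imply; the my_* helpers are hypothetical driver internals.

    #include <linux/netdevice.h>

    /* Hypothetical driver internals, declared here for the sketch only. */
    extern int  my_rx(struct net_device *dev, int limit);
    extern int  my_rx_ring_empty(struct net_device *dev);
    extern void my_enable_rx_interrupts(struct net_device *dev);

    static int my_poll(struct net_device *dev, int *budget)
    {
        int work = my_rx(dev, min(*budget, dev->quota));

        *budget    -= work;
        dev->quota -= work;

        if (my_rx_ring_empty(dev)) {
            netif_rx_complete(dev);     /* leave the poll list */
            my_enable_rx_interrupts(dev);
            return 0;                   /* done for now */
        }
        return 1;                       /* more work remains */
    }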
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 5a232d946be3..db0cd5169581 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -13,7 +13,7 @@ You can find the latest version of this document at
 
 Please send me your comments to
 
-    Ulisses Alonso Camaró <uaca@i.hate.spam.alumni.uv.es>
+    Ulisses Alonso Camaró <uaca@i.hate.spam.alumni.uv.es>
 
 -------------------------------------------------------------------------------
 + Why use PACKET_MMAP
diff --git a/Documentation/networking/slicecom.hun b/Documentation/networking/slicecom.hun
index 5acf1918694a..bed2f045e550 100644
--- a/Documentation/networking/slicecom.hun
+++ b/Documentation/networking/slicecom.hun
@@ -1,7 +1,7 @@
 
 SliceCOM adapter felhasznaloi dokumentacioja - 0.51 verziohoz
 
-Bartók István <bartoki@itc.hu>
+Bartók István <bartoki@itc.hu>
 Utolso modositas: Wed Aug 29 17:26:58 CEST 2001
 
 -----------------------------------------------------------------
diff --git a/Documentation/networking/slicecom.txt b/Documentation/networking/slicecom.txt
index 32d3b916afad..c82c0cf981b4 100644
--- a/Documentation/networking/slicecom.txt
+++ b/Documentation/networking/slicecom.txt
@@ -1,9 +1,9 @@
 
 SliceCOM adapter user's documentation - for the 0.51 driver version
 
-Written by Bartók István <bartoki@itc.hu>
+Written by Bartók István <bartoki@itc.hu>
 
-English translation: Lakatos György <gyuri@itc.hu>
+English translation: Lakatos György <gyuri@itc.hu>
 Mon Dec 11 15:28:42 CET 2000
 
 Last modified: Wed Aug 29 17:25:37 CEST 2001
diff --git a/Documentation/networking/tms380tr.txt b/Documentation/networking/tms380tr.txt
index c169a57bc925..1f73e13058df 100644
--- a/Documentation/networking/tms380tr.txt
+++ b/Documentation/networking/tms380tr.txt
@@ -71,24 +71,24 @@ Below find attached the setting for the SK NET TR 4/16 ISA adapters
               CHAPTER 1     LOCATION OF DIP-SWITCH
 ==============================================================
 
-UÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄ¿
-þUÄÄÄÄÄÄ¿ UÄÄÄÄÄ¿ UÄÄÄ¿ þ
-þAÄÄÄÄÄÄU W1 AÄÄÄÄÄU UÄÄÄÄ¿ þ þ þ
-þUÄÄÄÄÄÄ¿ þ þ þ þ UÄÄÅ¿
-þAÄÄÄÄÄÄU UÄÄÄÄÄÄÄÄÄÄÄ¿ AÄÄÄÄU þ þ þ þþ
-þUÄÄÄÄÄÄ¿ þ þ UÄÄÄ¿ AÄÄÄU AÄÄÅU
-þAÄÄÄÄÄÄU þ TMS380C26 þ þ þ þ
-þUÄÄÄÄÄÄ¿ þ þ AÄÄÄU AÄ¿
-þAÄÄÄÄÄÄU þ þ þ þ
-þ AÄÄÄÄÄÄÄÄÄÄÄU þ þ
-þ þ þ
-þ AÄU
-þ þ
-þ þ
-þ þ
-þ þ
-AÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄU
- AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU
+UÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄ¿
+þUÄÄÄÄÄÄ¿ UÄÄÄÄÄ¿ UÄÄÄ¿ þ
+þAÄÄÄÄÄÄU W1 AÄÄÄÄÄU UÄÄÄÄ¿ þ þ þ
+þUÄÄÄÄÄÄ¿ þ þ þ þ UÄÄÅ¿
+þAÄÄÄÄÄÄU UÄÄÄÄÄÄÄÄÄÄÄ¿ AÄÄÄÄU þ þ þ þþ
+þUÄÄÄÄÄÄ¿ þ þ UÄÄÄ¿ AÄÄÄU AÄÄÅU
+þAÄÄÄÄÄÄU þ TMS380C26 þ þ þ þ
+þUÄÄÄÄÄÄ¿ þ þ AÄÄÄU AÄ¿
+þAÄÄÄÄÄÄU þ þ þ þ
+þ AÄÄÄÄÄÄÄÄÄÄÄU þ þ
+þ þ þ
+þ AÄU
+þ þ
+þ þ
+þ þ
+þ þ
+AÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄU
+ AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU
 
 ==============================================================
               CHAPTER 2     DEFAULT SETTINGS
@@ -108,9 +108,9 @@ AÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄU
               CHAPTER 3     DIP SWITCH W1 DESCRIPTION
 ==============================================================
 
-UÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄ¿ ON
-þ 1 þ 2 þ 3 þ 4 þ 5 þ 6 þ 7 þ 8 þ
-AÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄU OFF
+UÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄ¿ ON
+þ 1 þ 2 þ 3 þ 4 þ 5 þ 6 þ 7 þ 8 þ
+AÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄU OFF
 |AD | BootROM Addr. | I/O |
 +-+-+-------+-------+-----+-----+
 | | |
diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt
index dd6f46b83dab..6be09ba24a36 100644
--- a/Documentation/networking/udplite.txt
+++ b/Documentation/networking/udplite.txt
@@ -139,7 +139,7 @@
 3) Disabling the Checksum Computation
 
    On both sender and receiver, checksumming will always be performed
-   and can not be disabled using SO_NO_CHECK. Thus
+   and cannot be disabled using SO_NO_CHECK. Thus
 
        setsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK, ... );
 
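Aside: since SO_NO_CHECK cannot disable checksumming, the sanctioned
alternative is partial checksum coverage; a hedged sketch, not part of the
patch (constants guarded in case the libc headers of the day lack them).

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IPPROTO_UDPLITE
    #define IPPROTO_UDPLITE    136  /* per RFC 3828 */
    #endif
    #ifndef SOL_UDPLITE
    #define SOL_UDPLITE        136
    #endif
    #ifndef UDPLITE_SEND_CSCOV
    #define UDPLITE_SEND_CSCOV 10
    #endif

    int make_udplite_socket(void)
    {
        int cov = 20;   /* checksum covers only the first 20 octets */
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

        if (fd >= 0)
            setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV,
                       &cov, sizeof(cov));
        return fd;
    }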
diff --git a/Documentation/networking/wan-router.txt b/Documentation/networking/wan-router.txt
index 07dd6d9930a1..bc2ab419a74a 100644
--- a/Documentation/networking/wan-router.txt
+++ b/Documentation/networking/wan-router.txt
@@ -335,7 +335,7 @@ REVISION HISTORY
 				creating applications using BiSync
 				streaming.
 
-2.0.5   Aug 04, 1999	CHDLC initializatin bug fix.
+2.0.5   Aug 04, 1999	CHDLC initialization bug fix.
 			PPP interrupt driven driver:
 			Fix to the PPP line hangup problem.
 			New PPP firmware
@@ -372,7 +372,7 @@ REVISION HISTORY
 			o cfgft1 GUI csu/dsu configurator
 			o wancfg GUI configuration file
 			  configurator.
-			o Architectual directory changes.
+			o Architectural directory changes.
 
 beta-2.1.4	Jul 2000	o Dynamic interface configuration:
 			  Network interfaces reflect the state
diff --git a/Documentation/pci.txt b/Documentation/pci.txt
index e2c9d0a0c43d..d38261b67905 100644
--- a/Documentation/pci.txt
+++ b/Documentation/pci.txt
@@ -373,7 +373,7 @@ E.g. clearing pending interrupts.
 
 3.6 Register IRQ handler
 ~~~~~~~~~~~~~~~~~~~~~~~~
-While calling request_irq() is the the last step described here,
+While calling request_irq() is the last step described here,
 this is often just another intermediate step to initialize a device.
 This step can often be deferred until the device is opened for use.
 
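Aside: a hedged sketch, not part of the patch, of this step for a shared PCI
interrupt line; struct foo_dev and foo_irq_pending() are hypothetical.

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    struct foo_dev;                                 /* hypothetical */
    extern int foo_irq_pending(struct foo_dev *);   /* hypothetical */

    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
        struct foo_dev *foo = dev_id;

        if (!foo_irq_pending(foo))
            return IRQ_NONE;    /* shared line, not our interrupt */
        /* ... acknowledge and handle the event ... */
        return IRQ_HANDLED;
    }

    /* Typically called from the device's open() rather than probe(). */
    static int foo_request_irq(struct pci_dev *pdev, struct foo_dev *foo)
    {
        return request_irq(pdev->irq, foo_interrupt, IRQF_SHARED,
                           "foo", foo);
    }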
diff --git a/Documentation/pcieaer-howto.txt b/Documentation/pcieaer-howto.txt
index 16c251230c82..d5da86170106 100644
--- a/Documentation/pcieaer-howto.txt
+++ b/Documentation/pcieaer-howto.txt
@@ -13,7 +13,7 @@ Reporting (AER) driver and provides information on how to use it, as
 well as how to enable the drivers of endpoint devices to conform with
 PCI Express AER driver.
 
-1.2 Copyright © Intel Corporation 2006.
+1.2 Copyright © Intel Corporation 2006.
 
 1.3 What is the PCI Express AER Driver?
 
diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt
index 28037aa1846c..481faf515d53 100644
--- a/Documentation/pnp.txt
+++ b/Documentation/pnp.txt
@@ -140,7 +140,7 @@ Plug and Play but it is planned to be in the near future.
 Requirements for a Linux PnP protocol:
 1.) the protocol must use EISA IDs
 2.) the protocol must inform the PnP Layer of a devices current configuration
-- the ability to set resources is optional but prefered.
+- the ability to set resources is optional but preferred.
 
 The following are PnP protocol related functions:
 
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index c55bd5079b90..5b8d6953f05e 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -48,7 +48,7 @@ before suspend (it is limited to 500 MB by default).
 
 Article about goals and implementation of Software Suspend for Linux
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Author: G‚ábor Kuti
+Author: Gábor Kuti
 Last revised: 2003-10-20 by Pavel Machek
 
 Idea and goals to achieve
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 000556c932e9..e00c6cf09e85 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -93,21 +93,23 @@ SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
 	to resume the system from RAM if there's enough battery power or restore
 	its state on the basis of the saved suspend image otherwise)
 
-SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and
-	pmops->finish methods (the in-kernel swsusp knows these as the "platform
-	method") which are needed on many machines to (among others) speed up
-	the resume by letting the BIOS skip some steps or to let the system
-	recognise the correct state of the hardware after the resume (in
-	particular on many machines this ensures that unplugged AC
-	adapters get correctly detected and that kacpid does not run wild after
-	the resume). The last ioctl() argument can take one of the three
-	values, defined in kernel/power/power.h:
+SNAPSHOT_PMOPS - enable the usage of the hibernation_ops->prepare,
+	hibernate_ops->enter and hibernation_ops->finish methods (the in-kernel
+	swsusp knows these as the "platform method") which are needed on many
+	machines to (among others) speed up the resume by letting the BIOS skip
+	some steps or to let the system recognise the correct state of the
+	hardware after the resume (in particular on many machines this ensures
+	that unplugged AC adapters get correctly detected and that kacpid does
+	not run wild after the resume). The last ioctl() argument can take one
+	of the three values, defined in kernel/power/power.h:
 	PMOPS_PREPARE - make the kernel carry out the
-		pm_ops->prepare(PM_SUSPEND_DISK) operation
+		hibernation_ops->prepare() operation
 	PMOPS_ENTER - make the kernel power off the system by calling
-		pm_ops->enter(PM_SUSPEND_DISK)
+		hibernation_ops->enter()
 	PMOPS_FINISH - make the kernel carry out the
-		pm_ops->finish(PM_SUSPEND_DISK) operation
+		hibernation_ops->finish() operation
+	Note that the actual constants are misnamed because they surface
+	internal kernel implementation details that have changed.
 
 The device's read() operation can be used to transfer the snapshot image from
 the kernel. It has the following limitations:
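Aside: a hedged userspace sketch, not part of the patch, of issuing
SNAPSHOT_PMOPS; the constant definitions mirror kernel/power/power.h of this
era, which is not an exported header, so double-check them against your tree.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Assumed values; verify against kernel/power/power.h. */
    #define SNAPSHOT_IOC_MAGIC  '3'
    #define SNAPSHOT_PMOPS      _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
    #define PMOPS_PREPARE       1
    #define PMOPS_ENTER         2
    #define PMOPS_FINISH        3

    int platform_enter(void)
    {
        int fd = open("/dev/snapshot", O_RDONLY);
        int err;

        if (fd < 0)
            return -1;
        err = ioctl(fd, SNAPSHOT_PMOPS, PMOPS_ENTER);  /* power off */
        close(fd);
        return err;
    }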
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index d4bfae75c946..b49ce169a63a 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1444,7 +1444,7 @@ platforms are moved over to use the flattened-device-tree model.
     Basically, it is a bus of devices, that could act more or less
     as a complete entity (UCC, USB etc ). All of them should be siblings on
     the "root" qe node, using the common properties from there.
-    The description below applies to the the qe of MPC8360 and
+    The description below applies to the qe of MPC8360 and
     more nodes and properties would be extended in the future.
 
     i) Root QE device
@@ -1633,7 +1633,7 @@ platforms are moved over to use the flattened-device-tree model.
     - assignment : function number of the pin according to the Pin Assignment
       tables in User Manual. Each pin can have up to 4 possible functions in
       QE and two options for CPM.
-    - has_irq : indicates if the pin is used as source of exteral
+    - has_irq : indicates if the pin is used as source of external
       interrupts.
 
     Example:
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 0993969609cf..d30a281c570f 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -2209,7 +2209,7 @@ Breakpoint 2 at 0x4d87a4: file top.c, line 2609.
 #3  0x5167e6 in readline_internal_char () at readline.c:454
 #4  0x5168ee in readline_internal_charloop () at readline.c:507
 #5  0x51692c in readline_internal () at readline.c:521
-#6  0x5164fe in readline (prompt=0x7ffff810 "\177ÿøx\177ÿ÷Ø\177ÿøxÀ")
+#6  0x5164fe in readline (prompt=0x7ffff810 "\177ÿøx\177ÿ÷Ø\177ÿøxÀ")
     at readline.c:349
 #7  0x4d7a8a in command_line_input (prrompt=0x564420 "(gdb) ", repeat=1,
     annotation_suffix=0x4d6b44 "prompt") at top.c:2091
diff --git a/Documentation/scsi/aha152x.txt b/Documentation/scsi/aha152x.txt
index 2ce022cec9be..29ce6d87e451 100644
--- a/Documentation/scsi/aha152x.txt
+++ b/Documentation/scsi/aha152x.txt
@@ -1,7 +1,7 @@
 $Id: README.aha152x,v 1.2 1999/12/25 15:32:30 fischer Exp fischer $
 Adaptec AHA-1520/1522 SCSI driver for Linux (aha152x)
 
-Copyright 1993-1999 Jürgen Fischer <fischer@norbit.de>
+Copyright 1993-1999 Jürgen Fischer <fischer@norbit.de>
 TC1550 patches by Luuk van Dijk (ldz@xs4all.nl)
 
 
diff --git a/Documentation/scsi/aic7xxx.txt b/Documentation/scsi/aic7xxx.txt
index 9b894f116d95..5f34d2ba69b4 100644
--- a/Documentation/scsi/aic7xxx.txt
+++ b/Documentation/scsi/aic7xxx.txt
@@ -40,7 +40,7 @@ The following information is available in this file:
  2. Multi-function Twin Channel Device - Two controllers on one chip.
  3. Command Channel Secondary DMA Engine - Allows scatter gather list
     and SCB prefetch.
- 4. 64 Byte SCB Support - Allows disconnected, unttagged request table
+ 4. 64 Byte SCB Support - Allows disconnected, untagged request table
     for all possible target/lun combinations.
  5. Block Move Instruction Support - Doubles the speed of certain
     sequencer operations.
diff --git a/Documentation/scsi/aic7xxx_old.txt b/Documentation/scsi/aic7xxx_old.txt
index 05667e7308d4..7bd210ab45a1 100644
--- a/Documentation/scsi/aic7xxx_old.txt
+++ b/Documentation/scsi/aic7xxx_old.txt
@@ -356,7 +356,7 @@ linux-1.1.x and fairly stable since linux-1.2.x, and are also in FreeBSD
     or enable Tagged Command Queueing (TCQ) on specific devices.  As of
     driver version 5.1.11, TCQ is now either on or off by default
     according to the setting you choose during the make config process.
-    In order to en/disable TCQ for certian devices at boot time, a user
+    In order to en/disable TCQ for certain devices at boot time, a user
     may use this boot param.  The driver will then parse this message out
     and en/disable the specific device entries that are present based upon
     the value given.  The param line is parsed in the following manner:
diff --git a/Documentation/scsi/ncr53c8xx.txt b/Documentation/scsi/ncr53c8xx.txt
index 88ef88b949f7..39d409a8efe5 100644
--- a/Documentation/scsi/ncr53c8xx.txt
+++ b/Documentation/scsi/ncr53c8xx.txt
@@ -1260,7 +1260,7 @@ then the request of the IRQ obviously will not succeed for all the drivers.
 15.1 Problem tracking

 Most SCSI problems are due to a non conformant SCSI bus or to buggy
-devices. If infortunately you have SCSI problems, you can check the
+devices. If unfortunately you have SCSI problems, you can check the
 following things:

 - SCSI bus cables
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 3c12422f7f41..b7be95b5bd24 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -1,5 +1,5 @@
 This file contains brief information about the SCSI tape driver.
-The driver is currently maintained by Kai Mäkisara (email
+The driver is currently maintained by Kai Mäkisara (email
 Kai.Makisara@kolumbus.fi)

 Last modified: Mon Mar  7 21:14:44 2005 by kai.makisara
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.txt
index 2c1745a9df00..3d9f06bb3d00 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.txt
@@ -587,7 +587,7 @@ devices, ... may cause a SCSI signal to be wrong when te driver reads it.
 15.1 Problem tracking

 Most SCSI problems are due to a non conformant SCSI bus or too buggy
-devices. If infortunately you have SCSI problems, you can check the
+devices. If unfortunately you have SCSI problems, you can check the
 following things:

 - SCSI bus cables
diff --git a/Documentation/scsi/tmscsim.txt b/Documentation/scsi/tmscsim.txt
index 8b2168aa4fc7..61c0531e044a 100644
--- a/Documentation/scsi/tmscsim.txt
+++ b/Documentation/scsi/tmscsim.txt
@@ -426,7 +426,7 @@ Thanks to Linus Torvalds, Alan Cox, the FSF people, the XFree86 team and
 all the others for the wonderful OS and software.
 Thanks to C.L. Huang and Philip Giang (Tekram) for the initial driver
 release and support.
-Thanks to Doug Ledford, Gérard Roudier for support with SCSI coding.
+Thanks to Doug Ledford, Gérard Roudier for support with SCSI coding.
 Thanks to a lot of people (espec. Chiaki Ishikawa, Andreas Haumer, Hubert
 Tonneau) for intensively testing the driver (and even risking data loss
 doing this during early revisions).
diff --git a/Documentation/sonypi.txt b/Documentation/sonypi.txt
index c1237a925505..4857acfc50f1 100644
--- a/Documentation/sonypi.txt
+++ b/Documentation/sonypi.txt
@@ -1,7 +1,7 @@
 Sony Programmable I/O Control Device Driver Readme
 --------------------------------------------------
 	Copyright (C) 2001-2004 Stelian Pop <stelian@popies.net>
-	Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+	Copyright (C) 2001-2002 Alcôve <www.alcove.com>
 	Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
 	Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
 	Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
diff --git a/Documentation/sound/oss/mwave b/Documentation/sound/oss/mwave
index 858334bb46b0..5fbcb1609275 100644
--- a/Documentation/sound/oss/mwave
+++ b/Documentation/sound/oss/mwave
@@ -163,7 +163,7 @@ OR the Default= line COULD be
 Default=SBPRO

 Reboot to Windows 95 and choose Linux. When booted, use sndconfig to configure
-the sound modules and voilà - ThinkPad sound with Linux.
+the sound modules and voilà - ThinkPad sound with Linux.

 Now the gotchas - you can either have CD sound OR Mixers but not both. That's a
 problem with the SB1.5 (CD sound) or SBPRO (Mixers) settings. No one knows why
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 5922e84d9133..111fd28727ec 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -221,14 +221,14 @@ Controls the kernel's behaviour when an oops or BUG is encountered.

 0: try to continue operation

-1: panic immediatly.  If the `panic' sysctl is also non-zero then the
+1: panic immediately.  If the `panic' sysctl is also non-zero then the
    machine will be rebooted.

 ==============================================================

 pid_max:

-PID allocation wrap value.  When the kenrel's next PID value
+PID allocation wrap value.  When the kernel's next PID value
 reaches this value, it wraps back to a minimum PID value.
 PIDs of value pid_max or larger are not allocated.

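Note: both knobs touched in this hunk are plain integer sysctls. A minimal userspace sketch of setting panic_on_oops the way sysctl(8) would — assuming only the standard C library; the helper name write_sysctl is illustrative, not anything from the kernel tree:

	#include <stdio.h>

	/* Illustrative helper: set an integer sysctl via its procfs file. */
	static int write_sysctl(const char *path, int value)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;	/* no such sysctl, or no permission */
		fprintf(f, "%d\n", value);
		return fclose(f);
	}

	int main(void)
	{
		/* 1 = panic immediately on oops; reboots if `panic' is also non-zero */
		return write_sysctl("/proc/sys/kernel/panic_on_oops", 1);
	}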
diff --git a/Documentation/usb/CREDITS b/Documentation/usb/CREDITS
index 27a721635f92..67c59cdc9959 100644
--- a/Documentation/usb/CREDITS
+++ b/Documentation/usb/CREDITS
@@ -65,7 +65,7 @@ THANKS file in Inaky's driver):
        will sell keyboards to some of the 3 million (at least)
        Linux users.

-      - Many thanks to ing büro h doran [http://www.ibhdoran.com]!
+      - Many thanks to ing büro h doran [http://www.ibhdoran.com]!
        It was almost impossible to get a PC backplate USB connector
        for the motherboard here at Europe (mine, home-made, was
        quite lousy :). Now I know where to acquire nice USB stuff!
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index b18e86a22506..5b635ae84944 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -45,9 +45,9 @@ ConnectTech WhiteHEAT 4 port converter
   Connect Tech's Support Department at support@connecttech.com


-HandSpring Visor, Palm USB, and Clié USB driver
+HandSpring Visor, Palm USB, and Clié USB driver

-  This driver works with all HandSpring USB, Palm USB, and Sony Clié USB
+  This driver works with all HandSpring USB, Palm USB, and Sony Clié USB
   devices.

   Only when the device tries to connect to the host, will the device show
@@ -69,7 +69,7 @@ HandSpring Visor, Palm USB, and Clié USB driver
   the port to use for the HotSync transfer. The "Generic" port can be used
   for other device communication, such as a PPP link.

-  For some Sony Clié devices, /dev/ttyUSB0 must be used to talk to the
+  For some Sony Clié devices, /dev/ttyUSB0 must be used to talk to the
   device.  This is true for all OS version 3.5 devices, and most devices
   that have had a flash upgrade to a newer version of the OS.  See the
   kernel system log for information on which is the correct port to use.
diff --git a/Documentation/video4linux/README.pvrusb2 b/Documentation/video4linux/README.pvrusb2
index a4b7ae800866..a747200fe67c 100644
--- a/Documentation/video4linux/README.pvrusb2
+++ b/Documentation/video4linux/README.pvrusb2
@@ -8,7 +8,7 @@ Background:

   This driver is intended for the "Hauppauge WinTV PVR USB 2.0", which
   is a USB 2.0 hosted TV Tuner.  This driver is a work in progress.
-  Its history started with the reverse-engineering effort by Björn
+  Its history started with the reverse-engineering effort by Björn
   Danielsson <pvrusb2@dax.nu> whose web page can be found here:

     http://pvrusb2.dax.nu/
diff --git a/Documentation/video4linux/Zoran b/Documentation/video4linux/Zoran
index 85c575ac4fb9..295462b2317a 100644
--- a/Documentation/video4linux/Zoran
+++ b/Documentation/video4linux/Zoran
@@ -242,7 +242,7 @@ can generate: PAL , NTSC , SECAM

 Conexant bt866 TV encoder
 is used in AVS6EYES, and
-can generate: NTSC/PAL, PAL-M, PAL-N
+can generate: NTSC/PAL, PAL-M, PAL-N

 The adv717x, should be able to produce PAL N. But you find nothing PAL N
 specific in the registers. Seem that you have to reuse a other standard
diff --git a/Documentation/video4linux/meye.txt b/Documentation/video4linux/meye.txt
index 5e51c59bf2b0..bf3af5fe558f 100644
--- a/Documentation/video4linux/meye.txt
+++ b/Documentation/video4linux/meye.txt
@@ -1,7 +1,7 @@
 Vaio Picturebook Motion Eye Camera Driver Readme
 ------------------------------------------------
 	Copyright (C) 2001-2004 Stelian Pop <stelian@popies.net>
-	Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+	Copyright (C) 2001-2002 Alcôve <www.alcove.com>
 	Copyright (C) 2000 Andrew Tridgell <tridge@samba.org>

 This driver enable the use of video4linux compatible applications with the
diff --git a/Documentation/video4linux/ov511.txt b/Documentation/video4linux/ov511.txt
index 79af610d4ba5..b3326b167ada 100644
--- a/Documentation/video4linux/ov511.txt
+++ b/Documentation/video4linux/ov511.txt
@@ -195,11 +195,11 @@ MODULE PARAMETERS:
 NAME:  bandingfilter
 TYPE:  integer (Boolean)
 DEFAULT: 0 (off)
-DESC:  Enables the sensor´s banding filter exposure algorithm. This reduces
+DESC:  Enables the sensor´s banding filter exposure algorithm. This reduces
        or stabilizes the "banding" caused by some artificial light sources
        (especially fluorescent). You might have to set lightfreq correctly for
        this to work right. As an added bonus, this sometimes makes it
-       possible to capture your monitor´s output.
+       possible to capture your monitor´s output.

 NAME:  fastset
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 41710ccf3a29..686a8e04a4f3 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -16,6 +16,7 @@
 #include <stdarg.h>
 #include <getopt.h>
 #include <regex.h>
+#include <errno.h>

 #define MAX_SLABS 500
 #define MAX_ALIASES 500
@@ -41,12 +42,15 @@ struct aliasinfo {
 } aliasinfo[MAX_ALIASES];

 int slabs = 0;
+int actual_slabs = 0;
 int aliases = 0;
 int alias_targets = 0;
 int highest_node = 0;

 char buffer[4096];

+int show_empty = 0;
+int show_report = 0;
 int show_alias = 0;
 int show_slab = 0;
 int skip_zero = 1;
@@ -59,6 +63,15 @@ int show_inverted = 0;
 int show_single_ref = 0;
 int show_totals = 0;
 int sort_size = 0;
+int set_debug = 0;
+int show_ops = 0;
+
+/* Debug options */
+int sanity = 0;
+int redzone = 0;
+int poison = 0;
+int tracking = 0;
+int tracing = 0;

 int page_size;

@@ -76,20 +89,33 @@ void fatal(const char *x, ...)

 void usage(void)
 {
-	printf("slabinfo [-ahnpvtsz] [slab-regexp]\n"
+	printf("slabinfo 5/7/2007. (c) 2007 sgi. clameter@sgi.com\n\n"
+		"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
 		"-a|--aliases           Show aliases\n"
+		"-d<options>|--debug=<options> Set/Clear Debug options\n"
+		"-e|--empty             Show empty slabs\n"
+		"-f|--first-alias       Show first alias\n"
 		"-h|--help              Show usage information\n"
+		"-i|--inverted          Inverted list\n"
+		"-l|--slabs             Show slabs\n"
 		"-n|--numa              Show NUMA information\n"
+		"-o|--ops               Show kmem_cache_ops\n"
 		"-s|--shrink            Shrink slabs\n"
-		"-v|--validate          Validate slabs\n"
+		"-r|--report            Detailed report on single slabs\n"
+		"-S|--Size              Sort by size\n"
 		"-t|--tracking          Show alloc/free information\n"
 		"-T|--Totals            Show summary information\n"
-		"-l|--slabs             Show slabs\n"
-		"-S|--Size              Sort by size\n"
+		"-v|--validate          Validate slabs\n"
 		"-z|--zero              Include empty slabs\n"
-		"-f|--first-alias       Show first alias\n"
-		"-i|--inverted          Inverted list\n"
 		"-1|--1ref              Single reference\n"
+		"\nValid debug options (FZPUT may be combined)\n"
+		"a / A          Switch on all debug options (=FZUP)\n"
+		"-              Switch off all debug options\n"
+		"f / F          Sanity Checks (SLAB_DEBUG_FREE)\n"
+		"z / Z          Redzoning\n"
+		"p / P          Poisoning\n"
+		"u / U          Tracking\n"
+		"t / T          Tracing\n"
 	);
 }

@@ -143,11 +169,10 @@ unsigned long get_obj_and_str(char *name, char **x)
 void set_obj(struct slabinfo *s, char *name, int n)
 {
 	char x[100];
+	FILE *f;

 	sprintf(x, "%s/%s", s->name, name);
-
-	FILE *f = fopen(x, "w");
-
+	f = fopen(x, "w");
 	if (!f)
 		fatal("Cannot write to %s\n", x);

@@ -155,6 +180,26 @@ void set_obj(struct slabinfo *s, char *name, int n)
 	fclose(f);
 }

+unsigned long read_slab_obj(struct slabinfo *s, char *name)
+{
+	char x[100];
+	FILE *f;
+	int l;
+
+	sprintf(x, "%s/%s", s->name, name);
+	f = fopen(x, "r");
+	if (!f) {
+		buffer[0] = 0;
+		l = 0;
+	} else {
+		l = fread(buffer, 1, sizeof(buffer), f);
+		buffer[l] = 0;
+		fclose(f);
+	}
+	return l;
+}
+
+
 /*
  * Put a size string together
  */
@@ -226,7 +271,7 @@ int line = 0;

 void first_line(void)
 {
-	printf("Name                 Objects   Objsize   Space "
+	printf("Name                   Objects Objsize    Space "
 		"Slabs/Part/Cpu  O/S O %%Fr %%Ef Flg\n");
 }

@@ -246,10 +291,7 @@ struct aliasinfo *find_one_alias(struct slabinfo *find)
 			return best;
 		}
 	}
-	if (best)
-		return best;
-	fatal("Cannot find alias for %s\n", find->name);
-	return NULL;
+	return best;
 }

 unsigned long slab_size(struct slabinfo *s)
@@ -257,6 +299,126 @@ unsigned long slab_size(struct slabinfo *s)
 	return s->slabs * (page_size << s->order);
 }

+void slab_numa(struct slabinfo *s, int mode)
+{
+	int node;
+
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (!highest_node) {
+		printf("\n%s: No NUMA information available.\n", s->name);
+		return;
+	}
+
+	if (skip_zero && !s->slabs)
+		return;
+
+	if (!line) {
+		printf("\n%-21s:", mode ? "NUMA nodes" : "Slab");
+		for(node = 0; node <= highest_node; node++)
+			printf(" %4d", node);
+		printf("\n----------------------");
+		for(node = 0; node <= highest_node; node++)
+			printf("-----");
+		printf("\n");
+	}
+	printf("%-21s ", mode ? "All slabs" : s->name);
+	for(node = 0; node <= highest_node; node++) {
+		char b[20];
+
+		store_size(b, s->numa[node]);
+		printf(" %4s", b);
+	}
+	printf("\n");
+	if (mode) {
+		printf("%-21s ", "Partial slabs");
+		for(node = 0; node <= highest_node; node++) {
+			char b[20];
+
+			store_size(b, s->numa_partial[node]);
+			printf(" %4s", b);
+		}
+		printf("\n");
+	}
+	line++;
+}
+
+void show_tracking(struct slabinfo *s)
+{
+	printf("\n%s: Kernel object allocation\n", s->name);
+	printf("-----------------------------------------------------------------------\n");
+	if (read_slab_obj(s, "alloc_calls"))
+		printf(buffer);
+	else
+		printf("No Data\n");
+
+	printf("\n%s: Kernel object freeing\n", s->name);
+	printf("------------------------------------------------------------------------\n");
+	if (read_slab_obj(s, "free_calls"))
+		printf(buffer);
+	else
+		printf("No Data\n");
+
+}
+
+void ops(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (read_slab_obj(s, "ops")) {
+		printf("\n%s: kmem_cache operations\n", s->name);
+		printf("--------------------------------------------\n");
+		printf(buffer);
+	} else
+		printf("\n%s has no kmem_cache operations\n", s->name);
+}
+
+const char *onoff(int x)
+{
+	if (x)
+		return "On ";
+	return "Off";
+}
+
+void report(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+	printf("\nSlabcache: %-20s  Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order);
+	if (s->hwcache_align)
+		printf("** Hardware cacheline aligned\n");
+	if (s->cache_dma)
+		printf("** Memory is allocated in a special DMA zone\n");
+	if (s->destroy_by_rcu)
+		printf("** Slabs are destroyed via RCU\n");
+	if (s->reclaim_account)
+		printf("** Reclaim accounting active\n");
+
+	printf("\nSizes (bytes)     Slabs              Debug                Memory\n");
+	printf("------------------------------------------------------------------------\n");
+	printf("Object : %7d  Total  : %7ld   Sanity Checks : %s  Total: %7ld\n",
+			s->object_size, s->slabs, onoff(s->sanity_checks),
+			s->slabs * (page_size << s->order));
+	printf("SlabObj: %7d  Full   : %7ld   Redzoning     : %s  Used : %7ld\n",
+			s->slab_size, s->slabs - s->partial - s->cpu_slabs,
+			onoff(s->red_zone), s->objects * s->object_size);
+	printf("SlabSiz: %7d  Partial: %7ld   Poisoning     : %s  Loss : %7ld\n",
+			page_size << s->order, s->partial, onoff(s->poison),
+			s->slabs * (page_size << s->order) - s->objects * s->object_size);
+	printf("Loss   : %7d  CpuSlab: %7d   Tracking      : %s  Lalig: %7ld\n",
+			s->slab_size - s->object_size, s->cpu_slabs, onoff(s->store_user),
+			(s->slab_size - s->object_size) * s->objects);
+	printf("Align  : %7d  Objects: %7d   Tracing       : %s  Lpadd: %7ld\n",
+			s->align, s->objs_per_slab, onoff(s->trace),
+			((page_size << s->order) - s->objs_per_slab * s->slab_size) *
+			s->slabs);
+
+	ops(s);
+	show_tracking(s);
+	slab_numa(s, 1);
+}

 void slabcache(struct slabinfo *s)
 {
@@ -265,7 +427,18 @@ void slabcache(struct slabinfo *s)
 	char flags[20];
 	char *p = flags;

-	if (skip_zero && !s->slabs)
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (actual_slabs == 1) {
+		report(s);
+		return;
+	}
+
+	if (skip_zero && !show_empty && !s->slabs)
+		return;
+
+	if (show_empty && s->slabs)
 		return;

 	store_size(size_str, slab_size(s));
@@ -303,48 +476,128 @@ void slabcache(struct slabinfo *s)
 		flags);
 }

-void slab_numa(struct slabinfo *s)
+/*
+ * Analyze debug options. Return false if something is amiss.
+ */
+int debug_opt_scan(char *opt)
 {
-	int node;
+	if (!opt || !opt[0] || strcmp(opt, "-") == 0)
+		return 1;
+
+	if (strcasecmp(opt, "a") == 0) {
+		sanity = 1;
+		poison = 1;
+		redzone = 1;
+		tracking = 1;
+		return 1;
+	}

-	if (!highest_node)
-		fatal("No NUMA information available.\n");
+	for ( ; *opt; opt++)
+		switch (*opt) {
+		case 'F' : case 'f':
+			if (sanity)
+				return 0;
+			sanity = 1;
+			break;
+		case 'P' : case 'p':
+			if (poison)
+				return 0;
+			poison = 1;
+			break;

-	if (skip_zero && !s->slabs)
-		return;
+		case 'Z' : case 'z':
+			if (redzone)
+				return 0;
+			redzone = 1;
+			break;

-	if (!line) {
-		printf("\nSlab                 Node ");
-		for(node = 0; node <= highest_node; node++)
-			printf(" %4d", node);
-		printf("\n----------------------");
-		for(node = 0; node <= highest_node; node++)
-			printf("-----");
-		printf("\n");
-	}
-	printf("%-21s ", s->name);
-	for(node = 0; node <= highest_node; node++) {
-		char b[20];
+		case 'U' : case 'u':
+			if (tracking)
+				return 0;
+			tracking = 1;
+			break;

-		store_size(b, s->numa[node]);
-		printf(" %4s", b);
-	}
-	printf("\n");
-	line++;
+		case 'T' : case 't':
+			if (tracing)
+				return 0;
+			tracing = 1;
+			break;
+		default:
+			return 0;
+		}
+	return 1;
 }

-void show_tracking(struct slabinfo *s)
+int slab_empty(struct slabinfo *s)
 {
-	printf("\n%s: Calls to allocate a slab object\n", s->name);
-	printf("---------------------------------------------------\n");
-	if (read_obj("alloc_calls"))
-		printf(buffer);
+	if (s->objects > 0)
+		return 0;

-	printf("%s: Calls to free a slab object\n", s->name);
-	printf("-----------------------------------------------\n");
-	if (read_obj("free_calls"))
-		printf(buffer);
+	/*
+	 * We may still have slabs even if there are no objects. Shrinking will
+	 * remove them.
+	 */
+	if (s->slabs != 0)
+		set_obj(s, "shrink", 1);

+	return 1;
+}
+
+void slab_debug(struct slabinfo *s)
+{
+	if (sanity && !s->sanity_checks) {
+		set_obj(s, "sanity", 1);
+	}
+	if (!sanity && s->sanity_checks) {
+		if (slab_empty(s))
+			set_obj(s, "sanity", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable sanity checks\n", s->name);
+	}
+	if (redzone && !s->red_zone) {
+		if (slab_empty(s))
+			set_obj(s, "red_zone", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable redzoning\n", s->name);
+	}
+	if (!redzone && s->red_zone) {
+		if (slab_empty(s))
+			set_obj(s, "red_zone", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable redzoning\n", s->name);
+	}
+	if (poison && !s->poison) {
+		if (slab_empty(s))
+			set_obj(s, "poison", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable poisoning\n", s->name);
+	}
+	if (!poison && s->poison) {
+		if (slab_empty(s))
+			set_obj(s, "poison", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable poisoning\n", s->name);
+	}
+	if (tracking && !s->store_user) {
+		if (slab_empty(s))
+			set_obj(s, "store_user", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable tracking\n", s->name);
+	}
+	if (!tracking && s->store_user) {
+		if (slab_empty(s))
+			set_obj(s, "store_user", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable tracking\n", s->name);
+	}
+	if (tracing && !s->trace) {
+		if (slabs == 1)
+			set_obj(s, "trace", 1);
+		else
+			fprintf(stderr, "%s can only enable trace for one slab at a time\n", s->name);
+	}
+	if (!tracing && s->trace)
+		set_obj(s, "trace", 1);
 }

 void totals(void)
@@ -673,7 +926,7 @@ void link_slabs(void)

 	for (a = aliasinfo; a < aliasinfo + aliases; a++) {

-		for(s = slabinfo; s < slabinfo + slabs; s++)
+		for (s = slabinfo; s < slabinfo + slabs; s++)
 			if (strcmp(a->ref, s->name) == 0) {
 				a->slab = s;
 				s->refs++;
@@ -704,7 +957,7 @@ void alias(void)
 				continue;
 			}
 		}
-		printf("\n%-20s <- %s", a->slab->name, a->name);
+		printf("\n%-12s <- %s", a->slab->name, a->name);
 		active = a->slab->name;
 	}
 	else
@@ -729,7 +982,12 @@ void rename_slabs(void)

 		a = find_one_alias(s);

-		s->name = a->name;
+		if (a)
+			s->name = a->name;
+		else {
+			s->name = "*";
+			actual_slabs--;
+		}
 	}
 }

@@ -748,11 +1006,14 @@ void read_slab_dir(void)
 	char *t;
 	int count;

+	if (chdir("/sys/slab"))
+		fatal("SYSFS support for SLUB not active\n");
+
 	dir = opendir(".");
 	while ((de = readdir(dir))) {
 		if (de->d_name[0] == '.' ||
-			slab_mismatch(de->d_name))
+			(de->d_name[0] != ':' && slab_mismatch(de->d_name)))
 			continue;
 		switch (de->d_type) {
 		case DT_LNK:
 			alias->name = strdup(de->d_name);
@@ -807,6 +1068,7 @@ void read_slab_dir(void)
 	}
 	closedir(dir);
 	slabs = slab - slabinfo;
+	actual_slabs = slabs;
 	aliases = alias - aliasinfo;
 	if (slabs > MAX_SLABS)
 		fatal("Too many slabs\n");
@@ -825,34 +1087,37 @@ void output_slabs(void)


 		if (show_numa)
-			slab_numa(slab);
-		else
-		if (show_track)
+			slab_numa(slab, 0);
+		else if (show_track)
 			show_tracking(slab);
-		else
-		if (validate)
+		else if (validate)
 			slab_validate(slab);
-		else
-		if (shrink)
+		else if (shrink)
 			slab_shrink(slab);
-		else {
-			if (show_slab)
-				slabcache(slab);
-		}
+		else if (set_debug)
+			slab_debug(slab);
+		else if (show_ops)
+			ops(slab);
+		else if (show_slab)
+			slabcache(slab);
 	}
 }

 struct option opts[] = {
 	{ "aliases", 0, NULL, 'a' },
-	{ "slabs", 0, NULL, 'l' },
-	{ "numa", 0, NULL, 'n' },
-	{ "zero", 0, NULL, 'z' },
-	{ "help", 0, NULL, 'h' },
-	{ "validate", 0, NULL, 'v' },
+	{ "debug", 2, NULL, 'd' },
+	{ "empty", 0, NULL, 'e' },
 	{ "first-alias", 0, NULL, 'f' },
+	{ "help", 0, NULL, 'h' },
+	{ "inverted", 0, NULL, 'i'},
+	{ "numa", 0, NULL, 'n' },
+	{ "ops", 0, NULL, 'o' },
+	{ "report", 0, NULL, 'r' },
 	{ "shrink", 0, NULL, 's' },
+	{ "slabs", 0, NULL, 'l' },
 	{ "track", 0, NULL, 't'},
-	{ "inverted", 0, NULL, 'i'},
+	{ "validate", 0, NULL, 'v' },
+	{ "zero", 0, NULL, 'z' },
 	{ "1ref", 0, NULL, '1'},
 	{ NULL, 0, NULL, 0 }
 };
@@ -864,10 +1129,9 @@ int main(int argc, char *argv[])
 	char *pattern_source;

 	page_size = getpagesize();
-	if (chdir("/sys/slab"))
-		fatal("This kernel does not have SLUB support.\n");

-	while ((c = getopt_long(argc, argv, "afhil1npstvzTS", opts, NULL)) != -1)
+	while ((c = getopt_long(argc, argv, "ad::efhil1noprstvzTS",
+						opts, NULL)) != -1)
 		switch(c) {
 		case '1':
 			show_single_ref = 1;
@@ -875,6 +1139,14 @@ int main(int argc, char *argv[])
 		case 'a':
 			show_alias = 1;
 			break;
+		case 'd':
+			set_debug = 1;
+			if (!debug_opt_scan(optarg))
+				fatal("Invalid debug option '%s'\n", optarg);
+			break;
+		case 'e':
+			show_empty = 1;
+			break;
 		case 'f':
 			show_first_alias = 1;
 			break;
@@ -887,6 +1159,12 @@ int main(int argc, char *argv[])
 		case 'n':
 			show_numa = 1;
 			break;
+		case 'o':
+			show_ops = 1;
+			break;
+		case 'r':
+			show_report = 1;
+			break;
 		case 's':
 			shrink = 1;
 			break;
@@ -914,8 +1192,8 @@ int main(int argc, char *argv[])

 	}

-	if (!show_slab && !show_alias && !show_track
-		&& !validate && !shrink)
+	if (!show_slab && !show_alias && !show_track && !show_report
+		&& !validate && !shrink && !set_debug && !show_ops)
 			show_slab = 1;

 	if (argc > optind)
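Aside: the read_slab_obj()/set_obj() pair added above is ordinary sysfs file I/O under /sys/slab/<cache>/<attribute>, which is also what the new -d debug letters (FZPUT) ultimately drive via set_obj(). A standalone sketch of the same access pattern — the cache and attribute names below are examples and may not exist on a given kernel:

	#include <stdio.h>

	int main(void)
	{
		char x[100];
		char buf[4096];
		size_t l;
		FILE *f;

		/* Each SLUB cache attribute is a small text file under /sys/slab. */
		snprintf(x, sizeof(x), "/sys/slab/%s/%s", "kmalloc-64", "object_size");
		f = fopen(x, "r");
		if (!f) {
			fprintf(stderr, "SYSFS support for SLUB not active?\n");
			return 1;
		}
		l = fread(buf, 1, sizeof(buf) - 1, f);
		buf[l] = 0;
		fclose(f);
		printf("%s: %s", x, buf);
		return 0;
	}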
diff --git a/MAINTAINERS b/MAINTAINERS
index 41a4b477c576..cfd26ddccd7a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1038,6 +1038,8 @@ S: Maintained
 CONEXANT ACCESSRUNNER USB DRIVER
 P:	Simon Arlott
 M:	cxacru@fire.lp0.eu
+L:	accessrunner-general@lists.sourceforge.net
+W:	http://accessrunner.sourceforge.net/
 S:	Maintained

 CORETEMP HARDWARE MONITORING DRIVER
@@ -2646,6 +2648,12 @@ M: corbet@lwn.net
 L:	video4linux-list@redhat.com
 S:	Maintained

+ONENAND FLASH DRIVER
+P:	Kyungmin Park
+M:	kyungmin.park@samsung.com
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+
 ONSTREAM SCSI TAPE DRIVER
 P:	Willem Riede
 M:	osst@riede.org
@@ -2800,7 +2808,7 @@ L: linux-abi-devel@lists.sourceforge.net
 S:	Maintained

 PHRAM MTD DRIVER
-P:	Jörn Engel
+P:	Jörn Engel
 M:	joern@wh.fh-wedel.de
 L:	linux-mtd@lists.infradead.org
 S:	Maintained
@@ -3074,7 +3082,7 @@ T: git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
 S:	Maintained

 SCSI TAPE DRIVER
-P:	Kai Mäkisara
+P:	Kai Mäkisara
 M:	Kai.Makisara@kolumbus.fi
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0d8fac3b0371..d7c0984d4a86 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -354,6 +354,7 @@ config ARCH_SA1100
 config ARCH_S3C2410
 	bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
 	select GENERIC_GPIO
+	select GENERIC_TIME
 	help
 	  Samsung S3C2410X CPU based systems, such as the Simtec Electronics
 	  BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ab9f2d4bd04e..00ea4305ad5d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -47,8 +47,13 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
+arch-$(CONFIG_CPU_32v7)		:=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7a,-march=armv5t -Wa$(comma)-march=armv7a)
 arch-$(CONFIG_CPU_32v6)		:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
+# Only override the compiler option if ARMv6. The ARMv6K extensions are
+# always available in ARMv7
+ifeq ($(CONFIG_CPU_32v6),y)
 arch-$(CONFIG_CPU_32v6K)	:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
+endif
 arch-$(CONFIG_CPU_32v5)		:=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
 arch-$(CONFIG_CPU_32v4T)	:=-D__LINUX_ARM_ARCH__=4 -march=armv4t
 arch-$(CONFIG_CPU_32v4)		:=-D__LINUX_ARM_ARCH__=4 -march=armv4
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 0119c0d5f978..5d78ffb8a9a7 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -33,7 +33,7 @@
  * numbers for r1.
  *
  */
-	__INIT
+	.section ".text.head", "ax"
 	.type	stext, %function
 ENTRY(stext)
 	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE	@ ensure svc mode
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 1d35edacc011..41f98b4ba2ee 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -73,7 +73,7 @@
  * crap here - that's what the boot loader (or in extreme, well justified
  * circumstances, zImage) is for.
  */
-	__INIT
+	.section ".text.head", "ax"
 	.type	stext, %function
 ENTRY(stext)
 	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE	@ ensure svc mode
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index a00cca0000bd..bd4ef53bc6b9 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(init_mm);
  * The things we do for performance..
  */
 union thread_union init_thread_union
-	__attribute__((__section__(".init.task"))) =
+	__attribute__((__section__(".data.init_task"))) =
 		{ INIT_THREAD_INFO(init_task) };

 /*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1b061583408e..79b7e5cf5416 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -116,8 +116,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,

 		offset += sym->st_value - loc;
 		if (offset & 3 ||
-		    offset <= (s32)0xfc000000 ||
-		    offset >= (s32)0x04000000) {
+		    offset <= (s32)0xfe000000 ||
+		    offset >= (s32)0x02000000) {
 			printk(KERN_ERR
 			       "%s: relocation out of range, section "
 			       "%d reloc %d sym '%s'\n", module->name,
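The new bounds are the ±32MB reach of an ARM 24-bit branch: the instruction encodes a signed 24-bit word offset shifted left by 2, so a valid byte displacement is word-aligned and lies strictly between (s32)0xfe000000 (-32MB) and 0x02000000 (+32MB). A hedged, standalone sketch of that check in isolation (illustrative only, not the kernel's code path):

	#include <stdio.h>

	/* An ARM B/BL reaches +/-32MB: signed 24-bit word offset << 2. */
	static int branch_in_range(int offset)
	{
		if (offset & 3)
			return 0;	/* must be word-aligned */
		if (offset <= (int)0xfe000000 || offset >= 0x02000000)
			return 0;	/* outside the +/-32MB window */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", branch_in_range(0x01fffffc));	/* 1: just in range */
		printf("%d\n", branch_in_range(0x02000000));	/* 0: out of range */
		return 0;
	}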
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 070bcb7a6306..1b76d87fa335 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -486,7 +486,7 @@ static void ipi_timer(void)
 }

 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
@@ -551,7 +551,7 @@ static void ipi_cpu_stop(unsigned int cpu)
  *
  *  Bit 0 - Inter-processor function call
  */
-asmlinkage void do_IPI(struct pt_regs *regs)
+asmlinkage void __exception do_IPI(struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6be67296f333..e4156e7868ce 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -23,11 +23,15 @@ SECTIONS
 #else
 	. = PAGE_OFFSET + TEXT_OFFSET;
 #endif
-	.init : {			/* Init code and data		*/
+	.text.head : {
 		_stext = .;
 		_sinittext = .;
+		*(.text.head)
+	}
+
+	.init : {			/* Init code and data		*/
 		*(.init.text)
 		_einittext = .;
 		__proc_info_begin = .;
 		*(.proc.info.init)
 		__proc_info_end = .;
@@ -119,7 +123,7 @@ SECTIONS
 		 * first, the init task union, aligned
 		 * to an 8192 byte boundary.
 		 */
-		*(.init.task)
+		*(.data.init_task)

 #ifdef CONFIG_XIP_KERNEL
 		. = ALIGN(4096);
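These linker-script hunks pair with the head.S and init_task.c changes above: code emitted into the .text.head input section is collected first, and the init task union moves from .init.task to .data.init_task. In C the same section-placement mechanism looks like this (illustrative sketch only, not code from the patch):

	/* Place a function where the linker script's *(.text.head) pattern
	 * will collect it; init_task.c uses the same __section__ attribute
	 * to put the init task union into .data.init_task. */
	__attribute__((__section__(".text.head")))
	void early_entry(void)
	{
	}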
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index e238ad8cfd8f..018d637f87fc 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -107,7 +107,7 @@ config ARCH_AT91SAM9260_SAM9XE
 	depends on ARCH_AT91SAM9260
 	help
 	  Select this if you are using Atmel's AT91SAM9XE System-on-Chip.
-	  They are basicaly AT91SAM9260s with various sizes of embedded Flash.
+	  They are basically AT91SAM9260s with various sizes of embedded Flash.

 comment "AT91SAM9260 / AT91SAM9XE Board Type"

diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 8781aaeb576b..856c681ebbbc 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -22,6 +22,7 @@ comment "OMAP Board Type"
 config MACH_OMAP_INNOVATOR
 	bool "TI Innovator"
 	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
+	select OMAP_MCBSP
 	help
 	  TI OMAP 1510 or 1610 Innovator board support. Say Y here if you
 	  have such a board.
@@ -29,6 +30,7 @@ config MACH_OMAP_INNOVATOR
 config MACH_OMAP_H2
 	bool "TI H2 Support"
 	depends on ARCH_OMAP1 && ARCH_OMAP16XX
+	select OMAP_MCBSP
 	help
 	  TI OMAP 1610/1611B H2 board support. Say Y here if you have such
 	  a board.
@@ -36,6 +38,7 @@ config MACH_OMAP_H2
 config MACH_OMAP_H3
 	bool "TI H3 Support"
 	depends on ARCH_OMAP1 && ARCH_OMAP16XX
+	select GPIOEXPANDER_OMAP
 	help
 	  TI OMAP 1710 H3 board support. Say Y here if you have such
 	  a board.
@@ -43,7 +46,7 @@ config MACH_OMAP_H3
 config MACH_OMAP_OSK
 	bool "TI OSK Support"
 	depends on ARCH_OMAP1 && ARCH_OMAP16XX
-	select TPS65010
+	select OMAP_MCBSP
 	help
 	  TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here
 	  if you have such a board.
@@ -84,7 +87,7 @@ config MACH_OMAP_PALMTE
 	  Support for the Palm Tungsten E PDA. Currently only the LCD panel
 	  is supported. To boot the kernel, you'll need a PalmOS compatible
 	  bootloader; check out http://palmtelinux.sourceforge.net for more
-	  informations.
+	  information.
 	  Say Y here if you have such a PDA, say NO otherwise.

 config MACH_NOKIA770
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 7165f74f78da..a8b9a00cea22 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -37,4 +37,3 @@ led-$(CONFIG_MACH_OMAP_INNOVATOR) += leds-innovator.o
 led-$(CONFIG_MACH_OMAP_PERSEUS2)	+= leds-h2p2-debug.o
 led-$(CONFIG_MACH_OMAP_OSK)		+= leds-osk.o
 obj-$(CONFIG_LEDS)			+= $(led-y)
-
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 62e42c7a628e..f65baa95986e 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -246,7 +246,7 @@ static void __init fsample_init_smc91x(void)
 	mdelay(50);
 }

-void omap_fsample_init_irq(void)
+static void __init omap_fsample_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 9d2346fb68f4..7b260b7c537b 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -455,7 +455,7 @@ static void __init h3_init_smc91x(void)
 	}
 }

-void h3_init_irq(void)
+static void __init h3_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index cb00530ad279..7e63a41e37c6 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -308,7 +308,7 @@ static void __init innovator_init_smc91x(void)
 	}
 }

-void innovator_init_irq(void)
+static void __init innovator_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index fa4be962df67..1d5c8d509722 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -246,7 +246,7 @@ static void __init perseus2_init_smc91x(void)
 	mdelay(50);
 }

-void omap_perseus2_init_irq(void)
+static void __init omap_perseus2_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 6dcd10ab4496..da8a3ac47e13 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -24,35 +24,6 @@
 #include <asm/arch/mux.h>
 #include <asm/arch/gpio.h>

-#if defined(CONFIG_OMAP1610_IR) || defined(CONFIG_OMAP161O_IR_MODULE)
-
-static u64 irda_dmamask = 0xffffffff;
-
-static struct platform_device omap1610ir_device = {
-	.name = "omap1610-ir",
-	.id = -1,
-	.dev = {
-		.dma_mask	= &irda_dmamask,
-	},
-};
-
-static void omap_init_irda(void)
-{
-	/* FIXME define and use a boot tag, members something like:
-	 *  u8	uart;		// uart1, or uart3
-	 * ... but driver only handles uart3 for now
-	 *  s16	fir_sel;	// gpio for SIR vs FIR
-	 * ... may prefer a callback for SIR/MIR/FIR mode select;
-	 * while h2 uses a GPIO, H3 uses a gpio expander
-	 */
-	if (machine_is_omap_h2()
-			|| machine_is_omap_h3())
-		(void) platform_device_register(&omap1610ir_device);
-}
-#else
-static inline void omap_init_irda(void) {}
-#endif
-
 /*-------------------------------------------------------------------------*/

 #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -90,6 +61,45 @@ static void omap_init_rtc(void)
 static inline void omap_init_rtc(void) {}
 #endif

+#if defined(CONFIG_OMAP_DSP) || defined(CONFIG_OMAP_DSP_MODULE)
+
+#if defined(CONFIG_ARCH_OMAP15XX)
+#  define OMAP1_MBOX_SIZE	0x23
+#  define INT_DSP_MAILBOX1	INT_1510_DSP_MAILBOX1
+#elif defined(CONFIG_ARCH_OMAP16XX)
+#  define OMAP1_MBOX_SIZE	0x2f
+#  define INT_DSP_MAILBOX1	INT_1610_DSP_MAILBOX1
+#endif
+
+#define OMAP1_MBOX_BASE		IO_ADDRESS(OMAP16XX_MAILBOX_BASE)
+
+static struct resource mbox_resources[] = {
+	{
+		.start		= OMAP1_MBOX_BASE,
+		.end		= OMAP1_MBOX_BASE + OMAP1_MBOX_SIZE,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= INT_DSP_MAILBOX1,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mbox_device = {
+	.name		= "mailbox",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(mbox_resources),
+	.resource	= mbox_resources,
+};
+
+static inline void omap_init_mbox(void)
+{
+	platform_device_register(&mbox_device);
+}
+#else
+static inline void omap_init_mbox(void) { }
+#endif
+
 #if defined(CONFIG_OMAP_STI)

 #define OMAP1_STI_BASE	IO_ADDRESS(0xfffea000)
@@ -154,7 +164,8 @@ static int __init omap1_init_devices(void)
 	/* please keep these calls, and their implementations above,
 	 * in alphabetical order so they're easier to sort through.
 	 */
-	omap_init_irda();
+
+	omap_init_mbox();
 	omap_init_rtc();
 	omap_init_sti();

diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index fab8b0b27cfb..81c4e738506c 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -17,11 +17,11 @@
 #include <asm/io.h>
 #include <asm/arch/mux.h>
 #include <asm/arch/tc.h>
-#include <asm/arch/omapfb.h>

 extern int omap1_clk_init(void);
 extern void omap_check_revision(void);
 extern void omap_sram_init(void);
+extern void omapfb_reserve_sdram(void);

 /*
  * The machine specific code may provide the extra mapping besides the
@@ -121,7 +121,7 @@ void __init omap1_map_common_io(void)
 #endif

 	omap_sram_init();
-	omapfb_reserve_mem();
+	omapfb_reserve_sdram();
 }

 /*
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
new file mode 100644
index 000000000000..d3abf5609902
--- /dev/null
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -0,0 +1,206 @@
+/*
+ * Mailbox reservation modules for DSP
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/resource.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/arch/mailbox.h>
+#include <asm/arch/irqs.h>
+#include <asm/io.h>
+
+#define MAILBOX_ARM2DSP1		0x00
+#define MAILBOX_ARM2DSP1b		0x04
+#define MAILBOX_DSP2ARM1		0x08
+#define MAILBOX_DSP2ARM1b		0x0c
+#define MAILBOX_DSP2ARM2		0x10
+#define MAILBOX_DSP2ARM2b		0x14
+#define MAILBOX_ARM2DSP1_Flag		0x18
+#define MAILBOX_DSP2ARM1_Flag		0x1c
+#define MAILBOX_DSP2ARM2_Flag		0x20
+
+unsigned long mbox_base;
+
+struct omap_mbox1_fifo {
+	unsigned long cmd;
+	unsigned long data;
+	unsigned long flag;
+};
+
+struct omap_mbox1_priv {
+	struct omap_mbox1_fifo tx_fifo;
+	struct omap_mbox1_fifo rx_fifo;
+};
+
+static inline int mbox_read_reg(unsigned int reg)
+{
+	return __raw_readw(mbox_base + reg);
+}
+
+static inline void mbox_write_reg(unsigned int val, unsigned int reg)
+{
+	__raw_writew(val, mbox_base + reg);
+}
+
+/* msg */
+static inline mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox)
+{
+	struct omap_mbox1_fifo *fifo =
+		&((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
+	mbox_msg_t msg;
+
+	msg = mbox_read_reg(fifo->data);
+	msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16;
+
+	return msg;
+}
+
+static inline void
+omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
+{
+	struct omap_mbox1_fifo *fifo =
+		&((struct omap_mbox1_priv *)mbox->priv)->tx_fifo;
+
+	mbox_write_reg(msg & 0xffff, fifo->data);
+	mbox_write_reg(msg >> 16, fifo->cmd);
+}
+
+static inline int omap1_mbox_fifo_empty(struct omap_mbox *mbox)
+{
+	return 0;
+}
+
+static inline int omap1_mbox_fifo_full(struct omap_mbox *mbox)
+{
+	struct omap_mbox1_fifo *fifo =
+		&((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
+
+	return (mbox_read_reg(fifo->flag));
+}
+
+/* irq */
+static inline void
+omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
+{
+	if (irq == IRQ_RX)
+		enable_irq(mbox->irq);
+}
+
+static inline void
+omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
+{
+	if (irq == IRQ_RX)
+		disable_irq(mbox->irq);
+}
+
+static inline int
+omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
+{
+	if (irq == IRQ_TX)
+		return 0;
+	return 1;
+}
+
+static struct omap_mbox_ops omap1_mbox_ops = {
+	.type		= OMAP_MBOX_TYPE1,
+	.fifo_read	= omap1_mbox_fifo_read,
+	.fifo_write	= omap1_mbox_fifo_write,
+	.fifo_empty	= omap1_mbox_fifo_empty,
+	.fifo_full	= omap1_mbox_fifo_full,
+	.enable_irq	= omap1_mbox_enable_irq,
+	.disable_irq	= omap1_mbox_disable_irq,
+	.is_irq		= omap1_mbox_is_irq,
+};
+
+/* FIXME: the following struct should be created automatically by the user id */
+
+/* DSP */
+static struct omap_mbox1_priv omap1_mbox_dsp_priv = {
+	.tx_fifo = {
+		.cmd	= MAILBOX_ARM2DSP1b,
+		.data	= MAILBOX_ARM2DSP1,
+		.flag	= MAILBOX_ARM2DSP1_Flag,
+	},
+	.rx_fifo = {
+		.cmd	= MAILBOX_DSP2ARM1b,
+		.data	= MAILBOX_DSP2ARM1,
+		.flag	= MAILBOX_DSP2ARM1_Flag,
+	},
+};
+
+struct omap_mbox mbox_dsp_info = {
+	.name	= "dsp",
+	.ops	= &omap1_mbox_ops,
+	.priv	= &omap1_mbox_dsp_priv,
+};
+EXPORT_SYMBOL(mbox_dsp_info);
+
+static int __init omap1_mbox_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret = 0;
+
+	if (pdev->num_resources != 2) {
+		dev_err(&pdev->dev, "invalid number of resources: %d\n",
+			pdev->num_resources);
+		return -ENODEV;
+	}
+
+	/* MBOX base */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!res)) {
+		dev_err(&pdev->dev, "invalid mem resource\n");
+		return -ENODEV;
+	}
+	mbox_base = res->start;
+
+	/* DSP IRQ */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (unlikely(!res)) {
+		dev_err(&pdev->dev, "invalid irq resource\n");
+		return -ENODEV;
+	}
+	mbox_dsp_info.irq = res->start;
+
+	ret = omap_mbox_register(&mbox_dsp_info);
+
+	return ret;
+}
+
+static int omap1_mbox_remove(struct platform_device *pdev)
+{
+	omap_mbox_unregister(&mbox_dsp_info);
+
+	return 0;
+}
+
+static struct platform_driver omap1_mbox_driver = {
+	.probe	= omap1_mbox_probe,
+	.remove	= omap1_mbox_remove,
+	.driver	= {
+		.name	= "mailbox",
+	},
+};
+
+static int __init omap1_mbox_init(void)
+{
+	return platform_driver_register(&omap1_mbox_driver);
+}
+
+static void __exit omap1_mbox_exit(void)
+{
+	platform_driver_unregister(&omap1_mbox_driver);
+}
+
+module_init(omap1_mbox_init);
+module_exit(omap1_mbox_exit);
+
+MODULE_LICENSE("GPL");
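Usage note: the file exports mbox_dsp_info, so once omap1_mbox_probe() has filled in mbox_base and the IRQ, a client could in principle drive the FIFO through the ops table. The sketch below is hypothetical and not part of the patch — a real client would go through whatever wrapper API omap_mbox_register() is paired with, and the message value is made up:

	/* Hypothetical client sketch -- not code from this patch. */
	#include <asm/arch/mailbox.h>

	extern struct omap_mbox mbox_dsp_info;

	static void example_send(void)
	{
		mbox_msg_t msg = 0x00010001;	/* made-up payload */

		/* Poll the FIFO flag via ops->fifo_full(), then write;
		 * omap1_mbox_fifo_write() splits the 32-bit message across
		 * the cmd/data mailbox registers. */
		while (mbox_dsp_info.ops->fifo_full(&mbox_dsp_info))
			;
		mbox_dsp_info.ops->fifo_write(&mbox_dsp_info, msg);
	}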
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index aab97ccf1e63..7393109f5c30 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -9,6 +9,7 @@ config ARCH_OMAP2420
9 bool "OMAP2420 support" 9 bool "OMAP2420 support"
10 depends on ARCH_OMAP24XX 10 depends on ARCH_OMAP24XX
11 select OMAP_DM_TIMER 11 select OMAP_DM_TIMER
12 select ARCH_OMAP_OTG
12 13
13comment "OMAP Board Type" 14comment "OMAP Board Type"
14 depends on ARCH_OMAP2 15 depends on ARCH_OMAP2
@@ -20,6 +21,7 @@ config MACH_OMAP_GENERIC
20config MACH_OMAP_H4 21config MACH_OMAP_H4
21 bool "OMAP 2420 H4 board" 22 bool "OMAP 2420 H4 board"
22 depends on ARCH_OMAP2 && ARCH_OMAP24XX 23 depends on ARCH_OMAP2 && ARCH_OMAP24XX
24 select OMAP_DEBUG_LEDS if LEDS || LEDS_OMAP_DEBUG
23 25
24config MACH_OMAP_APOLLON 26config MACH_OMAP_APOLLON
25 bool "OMAP 2420 Apollon board" 27 bool "OMAP 2420 Apollon board"
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 1e7ed6d22ca9..452193f01531 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -266,12 +266,26 @@ static struct platform_device h4_lcd_device = {
266 .id = -1, 266 .id = -1,
267}; 267};
268 268
269static struct resource h4_led_resources[] = {
270 [0] = {
271 .flags = IORESOURCE_MEM,
272 },
273};
274
275static struct platform_device h4_led_device = {
276 .name = "omap_dbg_led",
277 .id = -1,
278 .num_resources = ARRAY_SIZE(h4_led_resources),
279 .resource = h4_led_resources,
280};
281
269static struct platform_device *h4_devices[] __initdata = { 282static struct platform_device *h4_devices[] __initdata = {
270 &h4_smc91x_device, 283 &h4_smc91x_device,
271 &h4_flash_device, 284 &h4_flash_device,
272 &h4_irda_device, 285 &h4_irda_device,
273 &h4_kp_device, 286 &h4_kp_device,
274 &h4_lcd_device, 287 &h4_lcd_device,
288 &h4_led_device,
275}; 289};
276 290
277static inline void __init h4_init_smc91x(void) 291static inline void __init h4_init_smc91x(void)
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index aa4322451e8b..52ec2f2d6360 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -24,7 +24,7 @@
24#include <asm/arch/mux.h> 24#include <asm/arch/mux.h>
25#include <asm/arch/gpio.h> 25#include <asm/arch/gpio.h>
26 26
27#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE) 27#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
28 28
29#define OMAP2_I2C_BASE2 0x48072000 29#define OMAP2_I2C_BASE2 0x48072000
30#define OMAP2_I2C_INT2 57 30#define OMAP2_I2C_INT2 57
@@ -42,8 +42,8 @@ static struct resource i2c_resources2[] = {
42}; 42};
43 43
44static struct platform_device omap_i2c_device2 = { 44static struct platform_device omap_i2c_device2 = {
45 .name = "i2c_omap", 45 .name = "i2c_omap",
46 .id = 2, 46 .id = 2,
47 .num_resources = ARRAY_SIZE(i2c_resources2), 47 .num_resources = ARRAY_SIZE(i2c_resources2),
48 .resource = i2c_resources2, 48 .resource = i2c_resources2,
49}; 49};
@@ -66,6 +66,40 @@ static void omap_init_i2c(void) {}
66 66
67#endif 67#endif
68 68
69#if defined(CONFIG_OMAP_DSP) || defined(CONFIG_OMAP_DSP_MODULE)
70#define OMAP2_MBOX_BASE IO_ADDRESS(OMAP24XX_MAILBOX_BASE)
71
72static struct resource mbox_resources[] = {
73 {
74 .start = OMAP2_MBOX_BASE,
75 .end = OMAP2_MBOX_BASE + 0x11f,
76 .flags = IORESOURCE_MEM,
77 },
78 {
79 .start = INT_24XX_MAIL_U0_MPU,
80 .flags = IORESOURCE_IRQ,
81 },
82 {
83 .start = INT_24XX_MAIL_U3_MPU,
84 .flags = IORESOURCE_IRQ,
85 },
86};
87
88static struct platform_device mbox_device = {
89 .name = "mailbox",
90 .id = -1,
91 .num_resources = ARRAY_SIZE(mbox_resources),
92 .resource = mbox_resources,
93};
94
95static inline void omap_init_mbox(void)
96{
97 platform_device_register(&mbox_device);
98}
99#else
100static inline void omap_init_mbox(void) { }
101#endif
102
69#if defined(CONFIG_OMAP_STI) 103#if defined(CONFIG_OMAP_STI)
70 104
71#define OMAP2_STI_BASE IO_ADDRESS(0x48068000) 105#define OMAP2_STI_BASE IO_ADDRESS(0x48068000)
@@ -111,29 +145,45 @@ static inline void omap_init_sti(void) {}
111#define OMAP2_MCSPI1_BASE 0x48098000 145#define OMAP2_MCSPI1_BASE 0x48098000
112#define OMAP2_MCSPI2_BASE 0x4809a000 146#define OMAP2_MCSPI2_BASE 0x4809a000
113 147
114/* FIXME: use resources instead */
115
116static struct omap2_mcspi_platform_config omap2_mcspi1_config = { 148static struct omap2_mcspi_platform_config omap2_mcspi1_config = {
117 .base = io_p2v(OMAP2_MCSPI1_BASE),
118 .num_cs = 4, 149 .num_cs = 4,
119}; 150};
120 151
152static struct resource omap2_mcspi1_resources[] = {
153 {
154 .start = OMAP2_MCSPI1_BASE,
155 .end = OMAP2_MCSPI1_BASE + 0xff,
156 .flags = IORESOURCE_MEM,
157 },
158};
159
121struct platform_device omap2_mcspi1 = { 160struct platform_device omap2_mcspi1 = {
122 .name = "omap2_mcspi", 161 .name = "omap2_mcspi",
123 .id = 1, 162 .id = 1,
163 .num_resources = ARRAY_SIZE(omap2_mcspi1_resources),
164 .resource = omap2_mcspi1_resources,
124 .dev = { 165 .dev = {
125 .platform_data = &omap2_mcspi1_config, 166 .platform_data = &omap2_mcspi1_config,
126 }, 167 },
127}; 168};
128 169
129static struct omap2_mcspi_platform_config omap2_mcspi2_config = { 170static struct omap2_mcspi_platform_config omap2_mcspi2_config = {
130 .base = io_p2v(OMAP2_MCSPI2_BASE),
131 .num_cs = 2, 171 .num_cs = 2,
132}; 172};
133 173
174static struct resource omap2_mcspi2_resources[] = {
175 {
176 .start = OMAP2_MCSPI2_BASE,
177 .end = OMAP2_MCSPI2_BASE + 0xff,
178 .flags = IORESOURCE_MEM,
179 },
180};
181
134struct platform_device omap2_mcspi2 = { 182struct platform_device omap2_mcspi2 = {
135 .name = "omap2_mcspi", 183 .name = "omap2_mcspi",
136 .id = 2, 184 .id = 2,
185 .num_resources = ARRAY_SIZE(omap2_mcspi2_resources),
186 .resource = omap2_mcspi2_resources,
137 .dev = { 187 .dev = {
138 .platform_data = &omap2_mcspi2_config, 188 .platform_data = &omap2_mcspi2_config,
139 }, 189 },
@@ -157,10 +207,10 @@ static int __init omap2_init_devices(void)
157 * in alphabetical order so they're easier to sort through. 207 * in alphabetical order so they're easier to sort through.
158 */ 208 */
159 omap_init_i2c(); 209 omap_init_i2c();
210 omap_init_mbox();
160 omap_init_mcspi(); 211 omap_init_mcspi();
161 omap_init_sti(); 212 omap_init_sti();
162 213
163 return 0; 214 return 0;
164} 215}
165arch_initcall(omap2_init_devices); 216arch_initcall(omap2_init_devices);
166
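
The removed `/* FIXME: use resources instead */` is resolved here by moving the McSPI base address out of platform_data and into a MEM resource, so the driver is expected to look the region up and map it itself. A minimal sketch of that lookup on the driver side (standard platform-device calls; error handling trimmed, names hypothetical):

    static int mcspi_probe_sketch(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENODEV;
            base = ioremap(res->start, res->end - res->start + 1);
            if (!base)
                    return -ENOMEM;
            /* ... program the controller through `base` ... */
            return 0;
    }
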
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index d8f57824423f..54c836a98456 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -246,14 +246,22 @@ static int gpmc_cs_mem_enabled(int cs)
246 return l & (1 << 6); 246 return l & (1 << 6);
247} 247}
248 248
249static void gpmc_cs_set_reserved(int cs, int reserved) 249int gpmc_cs_set_reserved(int cs, int reserved)
250{ 250{
251 if (cs >= GPMC_CS_NUM)
252 return -ENODEV;
253
251 gpmc_cs_map &= ~(1 << cs); 254 gpmc_cs_map &= ~(1 << cs);
252 gpmc_cs_map |= (reserved ? 1 : 0) << cs; 255 gpmc_cs_map |= (reserved ? 1 : 0) << cs;
256
257 return 0;
253} 258}
254 259
255static int gpmc_cs_reserved(int cs) 260int gpmc_cs_reserved(int cs)
256{ 261{
262 if (cs >= GPMC_CS_NUM)
263 return -ENODEV;
264
257 return gpmc_cs_map & (1 << cs); 265 return gpmc_cs_map & (1 << cs);
258} 266}
259 267
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index a0728c33e5d9..82dc70f6b779 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -27,6 +27,7 @@ extern void omap_sram_init(void);
27extern int omap2_clk_init(void); 27extern int omap2_clk_init(void);
28extern void omap2_check_revision(void); 28extern void omap2_check_revision(void);
29extern void gpmc_init(void); 29extern void gpmc_init(void);
30extern void omapfb_reserve_sdram(void);
30 31
31/* 32/*
32 * The machine specific code may provide the extra mapping besides the 33 * The machine specific code may provide the extra mapping besides the
@@ -40,9 +41,21 @@ static struct map_desc omap2_io_desc[] __initdata = {
40 .type = MT_DEVICE 41 .type = MT_DEVICE
41 }, 42 },
42 { 43 {
43 .virtual = L4_24XX_VIRT, 44 .virtual = DSP_MEM_24XX_VIRT,
44 .pfn = __phys_to_pfn(L4_24XX_PHYS), 45 .pfn = __phys_to_pfn(DSP_MEM_24XX_PHYS),
45 .length = L4_24XX_SIZE, 46 .length = DSP_MEM_24XX_SIZE,
47 .type = MT_DEVICE
48 },
49 {
50 .virtual = DSP_IPI_24XX_VIRT,
51 .pfn = __phys_to_pfn(DSP_IPI_24XX_PHYS),
52 .length = DSP_IPI_24XX_SIZE,
53 .type = MT_DEVICE
54 },
55 {
56 .virtual = DSP_MMU_24XX_VIRT,
57 .pfn = __phys_to_pfn(DSP_MMU_24XX_PHYS),
58 .length = DSP_MMU_24XX_SIZE,
46 .type = MT_DEVICE 59 .type = MT_DEVICE
47 } 60 }
48}; 61};
@@ -60,7 +73,7 @@ void __init omap2_map_common_io(void)
60 73
61 omap2_check_revision(); 74 omap2_check_revision();
62 omap_sram_init(); 75 omap_sram_init();
63 omapfb_reserve_mem(); 76 omapfb_reserve_sdram();
64} 77}
65 78
66void __init omap2_init_common_hw(void) 79void __init omap2_init_common_hw(void)
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
new file mode 100644
index 000000000000..b03cd06e055b
--- /dev/null
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -0,0 +1,318 @@
1/*
2 * Mailbox reservation modules for OMAP2
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
6 * and Paul Mundt <paul.mundt@nokia.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/platform_device.h>
17#include <asm/arch/mailbox.h>
18#include <asm/arch/irqs.h>
19#include <asm/io.h>
20
21#define MAILBOX_REVISION 0x00
22#define MAILBOX_SYSCONFIG 0x10
23#define MAILBOX_SYSSTATUS 0x14
24#define MAILBOX_MESSAGE_0 0x40
25#define MAILBOX_MESSAGE_1 0x44
26#define MAILBOX_MESSAGE_2 0x48
27#define MAILBOX_MESSAGE_3 0x4c
28#define MAILBOX_MESSAGE_4 0x50
29#define MAILBOX_MESSAGE_5 0x54
30#define MAILBOX_FIFOSTATUS_0 0x80
31#define MAILBOX_FIFOSTATUS_1 0x84
32#define MAILBOX_FIFOSTATUS_2 0x88
33#define MAILBOX_FIFOSTATUS_3 0x8c
34#define MAILBOX_FIFOSTATUS_4 0x90
35#define MAILBOX_FIFOSTATUS_5 0x94
36#define MAILBOX_MSGSTATUS_0 0xc0
37#define MAILBOX_MSGSTATUS_1 0xc4
38#define MAILBOX_MSGSTATUS_2 0xc8
39#define MAILBOX_MSGSTATUS_3 0xcc
40#define MAILBOX_MSGSTATUS_4 0xd0
41#define MAILBOX_MSGSTATUS_5 0xd4
42#define MAILBOX_IRQSTATUS_0 0x100
43#define MAILBOX_IRQENABLE_0 0x104
44#define MAILBOX_IRQSTATUS_1 0x108
45#define MAILBOX_IRQENABLE_1 0x10c
46#define MAILBOX_IRQSTATUS_2 0x110
47#define MAILBOX_IRQENABLE_2 0x114
48#define MAILBOX_IRQSTATUS_3 0x118
49#define MAILBOX_IRQENABLE_3 0x11c
50
51static unsigned long mbox_base;
52
53#define MAILBOX_IRQ_NOTFULL(n) (1 << (2 * (n) + 1))
54#define MAILBOX_IRQ_NEWMSG(n) (1 << (2 * (n)))
55
56struct omap_mbox2_fifo {
57 unsigned long msg;
58 unsigned long fifo_stat;
59 unsigned long msg_stat;
60};
61
62struct omap_mbox2_priv {
63 struct omap_mbox2_fifo tx_fifo;
64 struct omap_mbox2_fifo rx_fifo;
65 unsigned long irqenable;
66 unsigned long irqstatus;
67 u32 newmsg_bit;
68 u32 notfull_bit;
69};
70
71static struct clk *mbox_ick_handle;
72
73static inline unsigned int mbox_read_reg(unsigned int reg)
74{
75 return __raw_readl(mbox_base + reg);
76}
77
78static inline void mbox_write_reg(unsigned int val, unsigned int reg)
79{
80 __raw_writel(val, mbox_base + reg);
81}
82
83/* Mailbox H/W preparations */
84static inline int omap2_mbox_startup(struct omap_mbox *mbox)
85{
86 unsigned int l;
87
88 mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
89 if (IS_ERR(mbox_ick_handle)) {
90 printk(KERN_ERR "Could not get mailboxes_ick\n");
91 return -ENODEV;
92 }
93 clk_enable(mbox_ick_handle);
94
95 /* set smart-idle & autoidle */
96 l = mbox_read_reg(MAILBOX_SYSCONFIG);
97 l |= 0x00000011;
98 mbox_write_reg(l, MAILBOX_SYSCONFIG);
99
100 return 0;
101}
102
103static inline void omap2_mbox_shutdown(struct omap_mbox *mbox)
104{
105 clk_disable(mbox_ick_handle);
106 clk_put(mbox_ick_handle);
107}
108
109/* Mailbox FIFO handle functions */
110static inline mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox)
111{
112 struct omap_mbox2_fifo *fifo =
113 &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
114 return (mbox_msg_t) mbox_read_reg(fifo->msg);
115}
116
117static inline void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
118{
119 struct omap_mbox2_fifo *fifo =
120 &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
121 mbox_write_reg(msg, fifo->msg);
122}
123
124static inline int omap2_mbox_fifo_empty(struct omap_mbox *mbox)
125{
126 struct omap_mbox2_fifo *fifo =
127 &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
128 return (mbox_read_reg(fifo->msg_stat) == 0);
129}
130
131static inline int omap2_mbox_fifo_full(struct omap_mbox *mbox)
132{
133 struct omap_mbox2_fifo *fifo =
134 &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
135 return (mbox_read_reg(fifo->fifo_stat));
136}
137
138/* Mailbox IRQ handle functions */
139static inline void omap2_mbox_enable_irq(struct omap_mbox *mbox,
140 omap_mbox_type_t irq)
141{
142 struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
143 u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
144
145 l = mbox_read_reg(p->irqenable);
146 l |= bit;
147 mbox_write_reg(l, p->irqenable);
148}
149
150static inline void omap2_mbox_disable_irq(struct omap_mbox *mbox,
151 omap_mbox_type_t irq)
152{
153 struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
154 u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
155
156 l = mbox_read_reg(p->irqenable);
157 l &= ~bit;
158 mbox_write_reg(l, p->irqenable);
159}
160
161static inline void omap2_mbox_ack_irq(struct omap_mbox *mbox,
162 omap_mbox_type_t irq)
163{
164 struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
165 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
166
167 mbox_write_reg(bit, p->irqstatus);
168}
169
170static inline int omap2_mbox_is_irq(struct omap_mbox *mbox,
171 omap_mbox_type_t irq)
172{
173 struct omap_mbox2_priv *p = (struct omap_mbox2_priv *)mbox->priv;
174 u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
175 u32 enable = mbox_read_reg(p->irqenable);
176 u32 status = mbox_read_reg(p->irqstatus);
177
178 return (enable & status & bit);
179}
180
181static struct omap_mbox_ops omap2_mbox_ops = {
182 .type = OMAP_MBOX_TYPE2,
183 .startup = omap2_mbox_startup,
184 .shutdown = omap2_mbox_shutdown,
185 .fifo_read = omap2_mbox_fifo_read,
186 .fifo_write = omap2_mbox_fifo_write,
187 .fifo_empty = omap2_mbox_fifo_empty,
188 .fifo_full = omap2_mbox_fifo_full,
189 .enable_irq = omap2_mbox_enable_irq,
190 .disable_irq = omap2_mbox_disable_irq,
191 .ack_irq = omap2_mbox_ack_irq,
192 .is_irq = omap2_mbox_is_irq,
193};
194
195/*
196 * MAILBOX 0: ARM -> DSP,
197 * MAILBOX 1: ARM <- DSP.
198 * MAILBOX 2: ARM -> IVA,
199 * MAILBOX 3: ARM <- IVA.
200 */
201
202/* FIXME: the following structs should be filled automatically from the user ID */
203
204/* DSP */
205static struct omap_mbox2_priv omap2_mbox_dsp_priv = {
206 .tx_fifo = {
207 .msg = MAILBOX_MESSAGE_0,
208 .fifo_stat = MAILBOX_FIFOSTATUS_0,
209 },
210 .rx_fifo = {
211 .msg = MAILBOX_MESSAGE_1,
212 .msg_stat = MAILBOX_MSGSTATUS_1,
213 },
214 .irqenable = MAILBOX_IRQENABLE_0,
215 .irqstatus = MAILBOX_IRQSTATUS_0,
216 .notfull_bit = MAILBOX_IRQ_NOTFULL(0),
217 .newmsg_bit = MAILBOX_IRQ_NEWMSG(1),
218};
219
220struct omap_mbox mbox_dsp_info = {
221 .name = "dsp",
222 .ops = &omap2_mbox_ops,
223 .priv = &omap2_mbox_dsp_priv,
224};
225EXPORT_SYMBOL(mbox_dsp_info);
226
227/* IVA */
228static struct omap_mbox2_priv omap2_mbox_iva_priv = {
229 .tx_fifo = {
230 .msg = MAILBOX_MESSAGE_2,
231 .fifo_stat = MAILBOX_FIFOSTATUS_2,
232 },
233 .rx_fifo = {
234 .msg = MAILBOX_MESSAGE_3,
235 .msg_stat = MAILBOX_MSGSTATUS_3,
236 },
237 .irqenable = MAILBOX_IRQENABLE_3,
238 .irqstatus = MAILBOX_IRQSTATUS_3,
239 .notfull_bit = MAILBOX_IRQ_NOTFULL(2),
240 .newmsg_bit = MAILBOX_IRQ_NEWMSG(3),
241};
242
243static struct omap_mbox mbox_iva_info = {
244 .name = "iva",
245 .ops = &omap2_mbox_ops,
246 .priv = &omap2_mbox_iva_priv,
247};
248
249static int __init omap2_mbox_probe(struct platform_device *pdev)
250{
251 struct resource *res;
252 int ret = 0;
253
254 if (pdev->num_resources != 3) {
255 dev_err(&pdev->dev, "invalid number of resources: %d\n",
256 pdev->num_resources);
257 return -ENODEV;
258 }
259
260 /* MBOX base */
261 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
262 if (unlikely(!res)) {
263 dev_err(&pdev->dev, "invalid mem resource\n");
264 return -ENODEV;
265 }
266 mbox_base = res->start;
267
268 /* DSP IRQ */
269 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
270 if (unlikely(!res)) {
271 dev_err(&pdev->dev, "invalid irq resource\n");
272 return -ENODEV;
273 }
274 mbox_dsp_info.irq = res->start;
275
276 ret = omap_mbox_register(&mbox_dsp_info);
277
278 /* IVA IRQ */
279 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
280 if (unlikely(!res)) {
281 dev_err(&pdev->dev, "invalid irq resource\n");
282 return -ENODEV;
283 }
284 mbox_iva_info.irq = res->start;
285
286 ret = omap_mbox_register(&mbox_iva_info);
287
288 return ret;
289}
290
291static int omap2_mbox_remove(struct platform_device *pdev)
292{
293 omap_mbox_unregister(&mbox_dsp_info);
294 return 0;
295}
296
297static struct platform_driver omap2_mbox_driver = {
298 .probe = omap2_mbox_probe,
299 .remove = omap2_mbox_remove,
300 .driver = {
301 .name = "mailbox",
302 },
303};
304
305static int __init omap2_mbox_init(void)
306{
307 return platform_driver_register(&omap2_mbox_driver);
308}
309
310static void __exit omap2_mbox_exit(void)
311{
312 platform_driver_unregister(&omap2_mbox_driver);
313}
314
315module_init(omap2_mbox_init);
316module_exit(omap2_mbox_exit);
317
318MODULE_LICENSE("GPL");
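
The interrupt bits for the six message FIFOs interleave, two per FIFO: bit 2n flags a new message in FIFO n, bit 2n+1 flags "not full". A small C illustration of what the DSP pair above selects (TX on FIFO 0, RX on FIFO 1), using the same macro definitions as the file:

    #define MAILBOX_IRQ_NOTFULL(n)  (1 << (2 * (n) + 1))
    #define MAILBOX_IRQ_NEWMSG(n)   (1 << (2 * (n)))

    /* DSP user: notfull_bit = MAILBOX_IRQ_NOTFULL(0) = 0x02 (TX has room)
     *           newmsg_bit  = MAILBOX_IRQ_NEWMSG(1)  = 0x04 (RX message pending) */
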
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 637aaba65390..d1eeed2ad47c 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -1,4 +1,4 @@
1/* linux/arch/arm/mach-s3c2410/s3c2410-sleep.S 1/* linux/arch/arm/mach-s3c2410/sleep.S
2 * 2 *
3 * Copyright (c) 2004 Simtec Electronics 3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e684e9b38216..b81391a4e374 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -366,6 +366,19 @@ config CPU_32v6K
366 enabled will not boot on processors which do not support these 366 enabled will not boot on processors which do not support these
367 instructions. 367 instructions.
368 368
369# ARMv7
370config CPU_V7
371 bool "Support ARM V7 processor"
372 depends on ARCH_INTEGRATOR
373 select CPU_32v6K
374 select CPU_32v7
375 select CPU_ABRT_EV7
376 select CPU_CACHE_V7
377 select CPU_CACHE_VIPT
378 select CPU_CP15_MMU
379 select CPU_COPY_V6 if MMU
380 select CPU_TLB_V6 if MMU
381
369# Figure out what processor architecture version we should be using. 382# Figure out what processor architecture version we should be using.
370# This defines the compiler instruction set which depends on the machine type. 383# This defines the compiler instruction set which depends on the machine type.
371config CPU_32v3 384config CPU_32v3
@@ -391,6 +404,9 @@ config CPU_32v5
391config CPU_32v6 404config CPU_32v6
392 bool 405 bool
393 406
407config CPU_32v7
408 bool
409
394# The abort model 410# The abort model
395config CPU_ABRT_NOMMU 411config CPU_ABRT_NOMMU
396 bool 412 bool
@@ -413,6 +429,9 @@ config CPU_ABRT_EV5TJ
413config CPU_ABRT_EV6 429config CPU_ABRT_EV6
414 bool 430 bool
415 431
432config CPU_ABRT_EV7
433 bool
434
416# The cache model 435# The cache model
417config CPU_CACHE_V3 436config CPU_CACHE_V3
418 bool 437 bool
@@ -429,6 +448,9 @@ config CPU_CACHE_V4WB
429config CPU_CACHE_V6 448config CPU_CACHE_V6
430 bool 449 bool
431 450
451config CPU_CACHE_V7
452 bool
453
432config CPU_CACHE_VIVT 454config CPU_CACHE_VIVT
433 bool 455 bool
434 456
@@ -503,7 +525,7 @@ comment "Processor Features"
503 525
504config ARM_THUMB 526config ARM_THUMB
505 bool "Support Thumb user binaries" 527 bool "Support Thumb user binaries"
506 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 528 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 || CPU_V7
507 default y 529 default y
508 help 530 help
509 Say Y if you want to include kernel support for running user space 531 Say Y if you want to include kernel support for running user space
@@ -578,9 +600,15 @@ config CPU_CACHE_ROUND_ROBIN
578 Say Y here to use the predictable round-robin cache replacement 600 Say Y here to use the predictable round-robin cache replacement
579 policy. Unless you specifically require this or are unsure, say N. 601 policy. Unless you specifically require this or are unsure, say N.
580 602
603config CPU_L2CACHE_DISABLE
604 bool "Disable level 2 cache"
605 depends on CPU_V7
606 help
607 Say Y here to disable the level 2 cache. If unsure, say N.
608
581config CPU_BPREDICT_DISABLE 609config CPU_BPREDICT_DISABLE
582 bool "Disable branch prediction" 610 bool "Disable branch prediction"
583 depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 611 depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 || CPU_V7
584 help 612 help
585 Say Y here to disable branch prediction. If unsure, say N. 613 Say Y here to disable branch prediction. If unsure, say N.
586 614
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2f8b95947774..b5bd335ff14a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -24,12 +24,14 @@ obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
24obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o 24obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o
25obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o 25obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
26obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o 26obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
27obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
27 28
28obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o 29obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
29obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o 30obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
30obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o 31obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
31obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o 32obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
32obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o 33obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
34obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
33 35
34obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o 36obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
35obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o 37obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
@@ -66,5 +68,6 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
66obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o 68obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
67obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o 69obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o
68obj-$(CONFIG_CPU_V6) += proc-v6.o 70obj-$(CONFIG_CPU_V6) += proc-v6.o
71obj-$(CONFIG_CPU_V7) += proc-v7.o
69 72
70obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o 73obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
new file mode 100644
index 000000000000..eb90bce38e14
--- /dev/null
+++ b/arch/arm/mm/abort-ev7.S
@@ -0,0 +1,32 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3/*
4 * Function: v7_early_abort
5 *
6 * Params : r2 = address of aborted instruction
7 * : r3 = saved SPSR
8 *
9 * Returns : r0 = address of abort
10 * : r1 = FSR, bit 11 = write
11 * : r2-r8 = corrupted
12 * : r9 = preserved
13 * : sp = pointer to registers
14 *
15 * Purpose : obtain information about current aborted instruction.
16 */
17 .align 5
18ENTRY(v7_early_abort)
19 /*
20 * The effect of data aborts on the exclusive access monitor is
21 * UNPREDICTABLE. Do a CLREX to clear the state
22 */
23 clrex
24
25 mrc p15, 0, r1, c5, c0, 0 @ get FSR
26 mrc p15, 0, r0, c6, c0, 0 @ get FAR
27
28 /*
29 * V6 code adjusts the returned DFSR.
30 * New designs should not need to patch up faults.
31 */
32 mov pc, lr
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
new file mode 100644
index 000000000000..35ffc4d95997
--- /dev/null
+++ b/arch/arm/mm/cache-v7.S
@@ -0,0 +1,253 @@
1/*
2 * linux/arch/arm/mm/cache-v7.S
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2005 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This is the "shell" of the ARMv7 processor support.
12 */
13#include <linux/linkage.h>
14#include <linux/init.h>
15#include <asm/assembler.h>
16
17#include "proc-macros.S"
18
19/*
20 * v7_flush_dcache_all()
21 *
22 * Flush the whole D-cache.
23 *
24 * Corrupted registers: r0-r5, r7, r9-r11
25 *
26 * (no arguments; operates on the entire data cache)
27 */
28ENTRY(v7_flush_dcache_all)
29 mrc p15, 1, r0, c0, c0, 1 @ read clidr
30 ands r3, r0, #0x7000000 @ extract loc from clidr
31 mov r3, r3, lsr #23 @ left align loc bit field
32 beq finished @ if loc is 0, then no need to clean
33 mov r10, #0 @ start clean at cache level 0
34loop1:
35 add r2, r10, r10, lsr #1 @ work out 3x current cache level
36 mov r1, r0, lsr r2 @ extract cache type bits from clidr
37 and r1, r1, #7 @ mask off the bits for the current cache only
38 cmp r1, #2 @ see what cache we have at this level
39 blt skip @ skip if no cache, or just i-cache
40 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
41 isb @ isb to sync the new cssr&csidr
42 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
43 and r2, r1, #7 @ extract the length of the cache lines
44 add r2, r2, #4 @ add 4 (line length offset)
45 ldr r4, =0x3ff
46 ands r4, r4, r1, lsr #3 @ extract maximum way number (ways - 1)
47 clz r5, r4 @ find bit position of way size increment
48 ldr r7, =0x7fff
49 ands r7, r7, r1, lsr #13 @ extract maximum set (index) number
50loop2:
51 mov r9, r4 @ create working copy of max way size
52loop3:
53 orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
54 orr r11, r11, r7, lsl r2 @ factor index number into r11
55 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
56 subs r9, r9, #1 @ decrement the way
57 bge loop3
58 subs r7, r7, #1 @ decrement the index
59 bge loop2
60skip:
61 add r10, r10, #2 @ increment cache number
62 cmp r3, r10
63 bgt loop1
64finished:
65 mov r10, #0 @ switch back to cache level 0
66 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
67 isb
68 mov pc, lr
69
70/*
71 * v7_flush_cache_all()
72 *
73 * Flush the entire cache system.
74 * The data cache flush is now achieved using atomic clean / invalidates
75 * working outwards from L1 cache. This is done using Set/Way based cache
76 * maintenance instructions.
77 * The instruction cache can still be invalidated back to the point of
78 * unification in a single instruction.
79 *
80 */
81ENTRY(v7_flush_kern_cache_all)
82 stmfd sp!, {r4-r5, r7, r9-r11, lr}
83 bl v7_flush_dcache_all
84 mov r0, #0
85 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
86 ldmfd sp!, {r4-r5, r7, r9-r11, lr}
87 mov pc, lr
88
89/*
90 * v7_flush_cache_all()
91 *
92 * Flush all TLB entries in a particular address space
93 *
94 * - mm - mm_struct describing address space
95 */
96ENTRY(v7_flush_user_cache_all)
97 /*FALLTHROUGH*/
98
99/*
100 * v7_flush_cache_range(start, end, flags)
101 *
102 * Flush a range of TLB entries in the specified address space.
103 *
104 * - start - start address (may not be aligned)
105 * - end - end address (exclusive, may not be aligned)
106 * - flags - vm_area_struct flags describing address space
107 *
108 * It is assumed that:
109 * - we have a VIPT cache.
110 */
111ENTRY(v7_flush_user_cache_range)
112 mov pc, lr
113
114/*
115 * v7_coherent_kern_range(start,end)
116 *
117 * Ensure that the I and D caches are coherent within specified
118 * region. This is typically used when code has been written to
119 * a memory region, and will be executed.
120 *
121 * - start - virtual start address of region
122 * - end - virtual end address of region
123 *
124 * It is assumed that:
125 * - the Icache does not read data from the write buffer
126 */
127ENTRY(v7_coherent_kern_range)
128 /* FALLTHROUGH */
129
130/*
131 * v7_coherent_user_range(start,end)
132 *
133 * Ensure that the I and D caches are coherent within specified
134 * region. This is typically used when code has been written to
135 * a memory region, and will be executed.
136 *
137 * - start - virtual start address of region
138 * - end - virtual end address of region
139 *
140 * It is assumed that:
141 * - the Icache does not read data from the write buffer
142 */
143ENTRY(v7_coherent_user_range)
144 dcache_line_size r2, r3
145 sub r3, r2, #1
146 bic r0, r0, r3
1471: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification
148 dsb
149 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
150 add r0, r0, r2
151 cmp r0, r1
152 blo 1b
153 mov r0, #0
154 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
155 dsb
156 isb
157 mov pc, lr
158
159/*
160 * v7_flush_kern_dcache_page(kaddr)
161 *
162 * Ensure that the data held in the page kaddr is written back
163 * to the page in question.
164 *
165 * - kaddr - kernel address (guaranteed to be page aligned)
166 */
167ENTRY(v7_flush_kern_dcache_page)
168 dcache_line_size r2, r3
169 add r1, r0, #PAGE_SZ
1701:
171 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
172 add r0, r0, r2
173 cmp r0, r1
174 blo 1b
175 dsb
176 mov pc, lr
177
178/*
179 * v7_dma_inv_range(start,end)
180 *
181 * Invalidate the data cache within the specified region; we will
182 * be performing a DMA operation in this region and we want to
183 * purge old data in the cache.
184 *
185 * - start - virtual start address of region
186 * - end - virtual end address of region
187 */
188ENTRY(v7_dma_inv_range)
189 dcache_line_size r2, r3
190 sub r3, r2, #1
191 tst r0, r3
192 bic r0, r0, r3
193 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
194
195 tst r1, r3
196 bic r1, r1, r3
197 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1981:
199 mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
200 add r0, r0, r2
201 cmp r0, r1
202 blo 1b
203 dsb
204 mov pc, lr
205
206/*
207 * v7_dma_clean_range(start,end)
208 * - start - virtual start address of region
209 * - end - virtual end address of region
210 */
211ENTRY(v7_dma_clean_range)
212 dcache_line_size r2, r3
213 sub r3, r2, #1
214 bic r0, r0, r3
2151:
216 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
217 add r0, r0, r2
218 cmp r0, r1
219 blo 1b
220 dsb
221 mov pc, lr
222
223/*
224 * v7_dma_flush_range(start,end)
225 * - start - virtual start address of region
226 * - end - virtual end address of region
227 */
228ENTRY(v7_dma_flush_range)
229 dcache_line_size r2, r3
230 sub r3, r2, #1
231 bic r0, r0, r3
2321:
233 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
234 add r0, r0, r2
235 cmp r0, r1
236 blo 1b
237 dsb
238 mov pc, lr
239
240 __INITDATA
241
242 .type v7_cache_fns, #object
243ENTRY(v7_cache_fns)
244 .long v7_flush_kern_cache_all
245 .long v7_flush_user_cache_all
246 .long v7_flush_user_cache_range
247 .long v7_coherent_kern_range
248 .long v7_coherent_user_range
249 .long v7_flush_kern_dcache_page
250 .long v7_dma_inv_range
251 .long v7_dma_clean_range
252 .long v7_dma_flush_range
253 .size v7_cache_fns, . - v7_cache_fns
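
The set/way loop above derives the cache geometry from CLIDR/CSIDR fields: level of coherency from CLIDR bits [26:24], log2 of the line length from CSIDR bits [2:0] (plus 4 to get bytes), the maximum way index from bits [12:3], and the maximum set index from bits [27:13]. The same decode re-expressed in C (csidr is assumed to be the value read after selecting the level via the cache size selection register; a sketch, not kernel code):

    #include <stdint.h>

    struct cache_level_geom {
            unsigned int line_bytes, ways, sets;
    };

    /* decode one level's CSIDR the same way the loop above does */
    static struct cache_level_geom decode_csidr(uint32_t csidr)
    {
            struct cache_level_geom g;

            g.line_bytes = 1u << ((csidr & 0x7) + 4);    /* bits [2:0], +4 => bytes */
            g.ways       = ((csidr >> 3) & 0x3ff) + 1;   /* bits [12:3] hold ways-1 */
            g.sets       = ((csidr >> 13) & 0x7fff) + 1; /* bits [27:13] hold sets-1 */
            return g;
    }

    /* level of coherency, as extracted from CLIDR bits [26:24] */
    static unsigned int clidr_loc(uint32_t clidr)
    {
            return (clidr >> 24) & 0x7;
    }
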
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 9da43a0fdcdf..fc84fcc74380 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,7 +14,8 @@
14#include <asm/mmu_context.h> 14#include <asm/mmu_context.h>
15#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16 16
17unsigned int cpu_last_asid = { 1 << ASID_BITS }; 17static DEFINE_SPINLOCK(cpu_asid_lock);
18unsigned int cpu_last_asid = ASID_FIRST_VERSION;
18 19
19/* 20/*
20 * We fork()ed a process, and we need a new context for the child 21 * We fork()ed a process, and we need a new context for the child
@@ -31,15 +32,16 @@ void __new_context(struct mm_struct *mm)
31{ 32{
32 unsigned int asid; 33 unsigned int asid;
33 34
35 spin_lock(&cpu_asid_lock);
34 asid = ++cpu_last_asid; 36 asid = ++cpu_last_asid;
35 if (asid == 0) 37 if (asid == 0)
36 asid = cpu_last_asid = 1 << ASID_BITS; 38 asid = cpu_last_asid = ASID_FIRST_VERSION;
37 39
38 /* 40 /*
39 * If we've used up all our ASIDs, we need 41 * If we've used up all our ASIDs, we need
40 * to start a new version and flush the TLB. 42 * to start a new version and flush the TLB.
41 */ 43 */
42 if ((asid & ~ASID_MASK) == 0) { 44 if (unlikely((asid & ~ASID_MASK) == 0)) {
43 asid = ++cpu_last_asid; 45 asid = ++cpu_last_asid;
44 /* set the reserved ASID before flushing the TLB */ 46 /* set the reserved ASID before flushing the TLB */
45 asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" 47 asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n"
@@ -47,7 +49,16 @@ void __new_context(struct mm_struct *mm)
47 : "r" (0)); 49 : "r" (0));
48 isb(); 50 isb();
49 flush_tlb_all(); 51 flush_tlb_all();
52 if (icache_is_vivt_asid_tagged()) {
53 asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
54 "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n"
55 :
56 : "r" (0));
57 dsb();
58 }
50 } 59 }
60 spin_unlock(&cpu_asid_lock);
51 61
62 mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
52 mm->context.id = asid; 63 mm->context.id = asid;
53} 64}
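
The context ID handed out above is a version number in the upper bits plus an ASID in the low ASID_BITS, so a rollover of the low bits marks the point where every live ASID must be invalidated. A plain-C sketch of the allocation logic, assuming ASID_BITS is 8 (so ASID_FIRST_VERSION is 0x100) -- constants and locking omitted from the real code:

    #define ASID_BITS          8                 /* assumed width */
    #define ASID_FIRST_VERSION (1u << ASID_BITS)
    #define ASID_LOW_MASK      (ASID_FIRST_VERSION - 1)

    static unsigned int last_asid = ASID_FIRST_VERSION;

    static unsigned int next_context_id(void)
    {
            unsigned int asid = ++last_asid;

            if (asid == 0)                       /* full 32-bit wraparound */
                    asid = last_asid = ASID_FIRST_VERSION;

            if ((asid & ASID_LOW_MASK) == 0) {
                    /* all ASIDs of this version are used up: this is where
                     * the kernel flushes the TLB (and the I-cache, if it is
                     * VIVT ASID-tagged) before starting the new version */
                    asid = ++last_asid;
            }
            return asid;
    }
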
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 9e2c89eb2115..b13150052a76 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -59,3 +59,15 @@
59 .word \ucset 59 .word \ucset
60#endif 60#endif
61 .endm 61 .endm
62
63/*
64 * dcache_line_size - get the cache line size from the CSIDR register
65 * (available on ARMv7+). It assumes that the CSSR register was configured
66 * to access the L1 data cache CSIDR.
67 */
68 .macro dcache_line_size, reg, tmp
69 mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR
70 and \tmp, \tmp, #7 @ cache line size encoding
71 mov \reg, #16 @ size offset
72 mov \reg, \reg, lsl \tmp @ actual cache line size
73 .endm
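
In C terms the macro computes 16 << (CSIDR & 7): the low three bits encode log2(words per line) minus 2, so encoding 0 is a 16-byte line. A one-line sketch:

    static unsigned int dcache_line_bytes(unsigned int csidr)
    {
            return 16u << (csidr & 7);   /* same as: reg = 16 << (CSIDR & 7) */
    }
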
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
new file mode 100644
index 000000000000..dd823dd4a374
--- /dev/null
+++ b/arch/arm/mm/proc-v7.S
@@ -0,0 +1,262 @@
1/*
2 * linux/arch/arm/mm/proc-v7.S
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This is the "shell" of the ARMv7 processor support.
11 */
12#include <linux/linkage.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/elf.h>
16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h>
18
19#include "proc-macros.S"
20
21#define TTB_C (1 << 0)
22#define TTB_S (1 << 1)
23#define TTB_RGN_OC_WT (2 << 3)
24#define TTB_RGN_OC_WB (3 << 3)
25
26ENTRY(cpu_v7_proc_init)
27 mov pc, lr
28
29ENTRY(cpu_v7_proc_fin)
30 mov pc, lr
31
32/*
33 * cpu_v7_reset(loc)
34 *
35 * Perform a soft reset of the system. Put the CPU into the
36 * same state as it would be if it had been reset, and branch
37 * to what would be the reset vector.
38 *
39 * - loc - location to jump to for soft reset
40 *
41 * It is assumed that:
42 */
43 .align 5
44ENTRY(cpu_v7_reset)
45 mov pc, r0
46
47/*
48 * cpu_v7_do_idle()
49 *
50 * Idle the processor (eg, wait for interrupt).
51 *
52 * IRQs are already disabled.
53 */
54ENTRY(cpu_v7_do_idle)
55 .long 0xe320f003 @ ARM V7 WFI instruction
56 mov pc, lr
57
58ENTRY(cpu_v7_dcache_clean_area)
59#ifndef TLB_CAN_READ_FROM_L1_CACHE
60 dcache_line_size r2, r3
611: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
62 add r0, r0, r2
63 subs r1, r1, r2
64 bhi 1b
65 dsb
66#endif
67 mov pc, lr
68
69/*
70 * cpu_v7_switch_mm(pgd_phys, tsk)
71 *
72 * Set the translation table base pointer to be pgd_phys
73 *
74 * - pgd_phys - physical address of new TTB
75 *
76 * It is assumed that:
77 * - we are not using split page tables
78 */
79ENTRY(cpu_v7_switch_mm)
80 mov r2, #0
81 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
82 orr r0, r0, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
83 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
84 isb
851: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
86 isb
87 mcr p15, 0, r1, c13, c0, 1 @ set context ID
88 isb
89 mov pc, lr
90
91/*
92 * cpu_v7_set_pte_ext(ptep, pte)
93 *
94 * Set a level 2 translation table entry.
95 *
96 * - ptep - pointer to level 2 translation table entry
97 * (hardware version is stored at -1024 bytes)
98 * - pte - PTE value to store
99 * - ext - value for extended PTE bits
100 *
101 * Permissions:
102 * YUWD APX AP1 AP0 SVC User
103 * 0xxx 0 0 0 no acc no acc
104 * 100x 1 0 1 r/o no acc
105 * 10x0 1 0 1 r/o no acc
106 * 1011 0 0 1 r/w no acc
107 * 110x 0 1 0 r/w r/o
108 * 11x0 0 1 0 r/w r/o
109 * 1111 0 1 1 r/w r/w
110 */
111ENTRY(cpu_v7_set_pte_ext)
112 str r1, [r0], #-2048 @ linux version
113
114 bic r3, r1, #0x000003f0
115 bic r3, r3, #0x00000003
116 orr r3, r3, r2
117 orr r3, r3, #PTE_EXT_AP0 | 2
118
119 tst r1, #L_PTE_WRITE
120 tstne r1, #L_PTE_DIRTY
121 orreq r3, r3, #PTE_EXT_APX
122
123 tst r1, #L_PTE_USER
124 orrne r3, r3, #PTE_EXT_AP1
125 tstne r3, #PTE_EXT_APX
126 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
127
128 tst r1, #L_PTE_YOUNG
129 biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
130
131 tst r1, #L_PTE_EXEC
132 orreq r3, r3, #PTE_EXT_XN
133
134 tst r1, #L_PTE_PRESENT
135 moveq r3, #0
136
137 str r3, [r0]
138 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
139 mov pc, lr
140
141cpu_v7_name:
142 .ascii "ARMv7 Processor"
143 .align
144
145 .section ".text.init", #alloc, #execinstr
146
147/*
148 * __v7_setup
149 *
150 * Initialise TLB, Caches, and MMU state ready to switch the MMU
151 * on. Return in r0 the new CP15 C1 control register setting.
152 *
153 * We automatically detect if we have a Harvard cache, and use the
154 * Harvard cache control instructions instead of the unified cache
155 * control instructions.
156 *
157 * This should be able to cover all ARMv7 cores.
158 *
159 * It is assumed that:
160 * - cache type register is implemented
161 */
162__v7_setup:
163 adr r12, __v7_setup_stack @ the local stack
164 stmia r12, {r0-r5, r7, r9, r11, lr}
165 bl v7_flush_dcache_all
166 ldmia r12, {r0-r5, r7, r9, r11, lr}
167 mov r10, #0
168#ifdef HARVARD_CACHE
169 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
170#endif
171 dsb
172 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
173 mcr p15, 0, r10, c2, c0, 2 @ TTB control register
174 orr r4, r4, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
175 mcr p15, 0, r4, c2, c0, 0 @ load TTB0
176 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
177 mov r10, #0x1f @ domains 0, 1 = manager
178 mcr p15, 0, r10, c3, c0, 0 @ load domain access register
179#ifndef CONFIG_CPU_L2CACHE_DISABLE
180 @ L2 cache configuration in the L2 aux control register
181 mrc p15, 1, r10, c9, c0, 2
182 bic r10, r10, #(1 << 16) @ L2 outer cache
183 mcr p15, 1, r10, c9, c0, 2
184 @ L2 cache is enabled in the aux control register
185 mrc p15, 0, r10, c1, c0, 1
186 orr r10, r10, #2
187 mcr p15, 0, r10, c1, c0, 1
188#endif
189 mrc p15, 0, r0, c1, c0, 0 @ read control register
190 ldr r10, cr1_clear @ get mask for bits to clear
191 bic r0, r0, r10 @ clear the bits
192 ldr r10, cr1_set @ get mask for bits to set
193 orr r0, r0, r10 @ set them
194 mov pc, lr @ return to head.S:__ret
195
196 /*
197 * V X F I D LR
198 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
199 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
200 * 0 110 0011 1.00 .111 1101 < we want
201 */
202 .type cr1_clear, #object
203 .type cr1_set, #object
204cr1_clear:
205 .word 0x0120c302
206cr1_set:
207 .word 0x00c0387d
208
209__v7_setup_stack:
210 .space 4 * 11 @ 11 registers
211
212 .type v7_processor_functions, #object
213ENTRY(v7_processor_functions)
214 .word v7_early_abort
215 .word cpu_v7_proc_init
216 .word cpu_v7_proc_fin
217 .word cpu_v7_reset
218 .word cpu_v7_do_idle
219 .word cpu_v7_dcache_clean_area
220 .word cpu_v7_switch_mm
221 .word cpu_v7_set_pte_ext
222 .size v7_processor_functions, . - v7_processor_functions
223
224 .type cpu_arch_name, #object
225cpu_arch_name:
226 .asciz "armv7"
227 .size cpu_arch_name, . - cpu_arch_name
228
229 .type cpu_elf_name, #object
230cpu_elf_name:
231 .asciz "v7"
232 .size cpu_elf_name, . - cpu_elf_name
233 .align
234
235 .section ".proc.info.init", #alloc, #execinstr
236
237 /*
238 * Match any ARMv7 processor core.
239 */
240 .type __v7_proc_info, #object
241__v7_proc_info:
242 .long 0x000f0000 @ Required ID value
243 .long 0x000f0000 @ Mask for ID
244 .long PMD_TYPE_SECT | \
245 PMD_SECT_BUFFERABLE | \
246 PMD_SECT_CACHEABLE | \
247 PMD_SECT_AP_WRITE | \
248 PMD_SECT_AP_READ
249 .long PMD_TYPE_SECT | \
250 PMD_SECT_XN | \
251 PMD_SECT_AP_WRITE | \
252 PMD_SECT_AP_READ
253 b __v7_setup
254 .long cpu_arch_name
255 .long cpu_elf_name
256 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
257 .long cpu_v7_name
258 .long v7_processor_functions
259 .long v6wbi_tlb_fns
260 .long v6_user_fns
261 .long v7_cache_fns
262 .size __v7_proc_info, . - __v7_proc_info
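
A rough C rendering of the cpu_v7_set_pte_ext decision tree follows. The bit values are placeholders standing in for the real PTE_EXT_*/L_PTE_* definitions in pgtable-hwdef.h and pgtable.h (their actual values are not reproduced here); the point is the permission logic, which mirrors the table above:

    #include <stdint.h>

    /* placeholder bit values -- the real ones come from the kernel headers */
    #define PTE_EXT_XN      (1u << 0)
    #define PTE_EXT_AP0     (1u << 4)
    #define PTE_EXT_AP1     (1u << 5)
    #define PTE_EXT_APX     (1u << 9)
    #define PTE_EXT_AP_MASK (PTE_EXT_AP0 | PTE_EXT_AP1)
    #define L_PTE_PRESENT   (1u << 16)   /* placeholder */
    #define L_PTE_YOUNG     (1u << 17)   /* placeholder */
    #define L_PTE_USER      (1u << 18)   /* placeholder */
    #define L_PTE_WRITE     (1u << 19)   /* placeholder */
    #define L_PTE_DIRTY     (1u << 20)   /* placeholder */
    #define L_PTE_EXEC      (1u << 21)   /* placeholder */

    static uint32_t v7_hw_pte_sketch(uint32_t pte, uint32_t ext)
    {
            /* keep type/attr bits, drop the Linux-only permission bits */
            uint32_t hw = (pte & ~0x3f3u) | ext | PTE_EXT_AP0 | 2u;

            if (!((pte & L_PTE_WRITE) && (pte & L_PTE_DIRTY)))
                    hw |= PTE_EXT_APX;               /* read-only until dirtied */

            if (pte & L_PTE_USER) {
                    hw |= PTE_EXT_AP1;               /* grant user access */
                    if (hw & PTE_EXT_APX)            /* user r/o, kernel r/w case */
                            hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
            }

            if (!(pte & L_PTE_YOUNG))
                    hw &= ~(PTE_EXT_APX | PTE_EXT_AP_MASK);  /* fault on access */

            if (!(pte & L_PTE_EXEC))
                    hw |= PTE_EXT_XN;                /* execute-never */

            if (!(pte & L_PTE_PRESENT))
                    hw = 0;                          /* not present: zero entry */

            return hw;
    }
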
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 9e8d21eca4ec..cfc69f3842fd 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -20,6 +20,11 @@ endchoice
20 20
21comment "OMAP Feature Selections" 21comment "OMAP Feature Selections"
22 22
23config OMAP_DEBUG_LEDS
24 bool
25 help
26 For debug card LEDs on TI reference boards.
27
23config OMAP_RESET_CLOCKS 28config OMAP_RESET_CLOCKS
24 bool "Reset unused clocks during boot" 29 bool "Reset unused clocks during boot"
25 depends on ARCH_OMAP 30 depends on ARCH_OMAP
@@ -58,6 +63,14 @@ config OMAP_MUX_WARNINGS
58 to change the pin multiplexing setup. When there are no warnings 63 to change the pin multiplexing setup. When there are no warnings
59 printed, it's safe to deselect OMAP_MUX for your product. 64 printed, it's safe to deselect OMAP_MUX for your product.
60 65
66config OMAP_MCBSP
67 bool "McBSP support"
68 depends on ARCH_OMAP
69 default y
70 help
71 Say Y here if you want support for the OMAP Multichannel
72 Buffered Serial Port.
73
61choice 74choice
62 prompt "System timer" 75 prompt "System timer"
63 default OMAP_MPU_TIMER 76 default OMAP_MPU_TIMER
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 2896b4546411..41a3c1cf3bd4 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,8 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o fb.o 6obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o \
7 usb.o fb.o
7obj-m := 8obj-m :=
8obj-n := 9obj-n :=
9obj- := 10obj- :=
@@ -16,4 +17,4 @@ obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o
16 17
17obj-$(CONFIG_CPU_FREQ) += cpu-omap.o 18obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
18obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o 19obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
19 20obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index f1179ad4be1b..0a603242f367 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -33,6 +33,41 @@ static DEFINE_SPINLOCK(clockfw_lock);
33 33
34static struct clk_functions *arch_clock; 34static struct clk_functions *arch_clock;
35 35
36#ifdef CONFIG_PM_DEBUG
37
38static void print_parents(struct clk *clk)
39{
40 struct clk *p;
41 int printed = 0;
42
43 list_for_each_entry(p, &clocks, node) {
44 if (p->parent == clk && p->usecount) {
45 if (!clk->usecount && !printed) {
46 printk("MISMATCH: %s\n", clk->name);
47 printed = 1;
48 }
49 printk("\t%-15s\n", p->name);
50 }
51 }
52}
53
54void clk_print_usecounts(void)
55{
56 unsigned long flags;
57 struct clk *p;
58
59 spin_lock_irqsave(&clockfw_lock, flags);
60 list_for_each_entry(p, &clocks, node) {
61 if (p->usecount)
62 printk("%-15s: %d\n", p->name, p->usecount);
63 print_parents(p);
64
65 }
66 spin_unlock_irqrestore(&clockfw_lock, flags);
67}
68
69#endif
70
36/*------------------------------------------------------------------------- 71/*-------------------------------------------------------------------------
37 * Standard clock functions defined in include/linux/clk.h 72 * Standard clock functions defined in include/linux/clk.h
38 *-------------------------------------------------------------------------*/ 73 *-------------------------------------------------------------------------*/
@@ -249,6 +284,8 @@ void followparent_recalc(struct clk *clk)
249 return; 284 return;
250 285
251 clk->rate = clk->parent->rate; 286 clk->rate = clk->parent->rate;
287 if (unlikely(clk->flags & RATE_PROPAGATES))
288 propagate_rate(clk);
252} 289}
253 290
254/* Propagate rate to children */ 291/* Propagate rate to children */
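
With RATE_PROPAGATES set, followparent_recalc() now pushes a rate change on down to children. A toy plain-C sketch of the recursion (a fixed-fan-out tree stands in for the kernel's global clock list; hypothetical types):

    struct clk_sketch {
            unsigned long rate;
            int propagates;                   /* stands in for RATE_PROPAGATES */
            struct clk_sketch *children[4];   /* toy fixed fan-out */
            int nr_children;
    };

    static void propagate_rate_sketch(struct clk_sketch *clk)
    {
            for (int i = 0; i < clk->nr_children; i++) {
                    struct clk_sketch *c = clk->children[i];
                    c->rate = clk->rate;              /* child follows parent */
                    if (c->propagates)
                            propagate_rate_sketch(c); /* recurse to grandchildren */
            }
    }
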
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index fecd3d625995..dd8708ad0a71 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -93,8 +93,12 @@ static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
93 * in the kernel. */ 93 * in the kernel. */
94 for (i = 0; i < omap_board_config_size; i++) { 94 for (i = 0; i < omap_board_config_size; i++) {
95 if (omap_board_config[i].tag == tag) { 95 if (omap_board_config[i].tag == tag) {
96 kinfo = &omap_board_config[i]; 96 if (skip == 0) {
97 break; 97 kinfo = &omap_board_config[i];
98 break;
99 } else {
100 skip--;
101 }
98 } 102 }
99 } 103 }
100 if (kinfo == NULL) 104 if (kinfo == NULL)
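
This fix makes the skip argument actually count past earlier entries carrying the same tag, so repeated tags can be enumerated. A sketch of the call shape (OMAP_TAG_EXAMPLE, struct example_cfg, and use_entry() are hypothetical; a NULL return when no further match exists is assumed):

    size_t len;
    const void *cfg;
    int i;

    /* walk every board-config entry tagged OMAP_TAG_EXAMPLE */
    for (i = 0; (cfg = get_config(OMAP_TAG_EXAMPLE, sizeof(struct example_cfg),
                                  i, &len)) != NULL; i++)
            use_entry(cfg, len);
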
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
new file mode 100644
index 000000000000..9128a80d228f
--- /dev/null
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -0,0 +1,314 @@
1/*
2 * linux/arch/arm/plat-omap/debug-leds.c
3 *
4 * Copyright 2003 by Texas Instruments Incorporated
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/platform_device.h>
13#include <linux/leds.h>
14
15#include <asm/io.h>
16#include <asm/hardware.h>
17#include <asm/leds.h>
18#include <asm/system.h>
19#include <asm/mach-types.h>
20
21#include <asm/arch/fpga.h>
22#include <asm/arch/gpio.h>
23
24
25/* Many OMAP development platforms reuse the same "debug board"; these
26 * platforms include H2, H3, H4, and Perseus2. There are 16 LEDs on the
27 * debug board (all green), accessed through FPGA registers.
28 *
29 * The "surfer" expansion board and H2 sample board also have two-color
30 * green+red LEDs (in parallel), used here for timer and idle indicators
31 * in preference to the ones on the debug board, for a "Disco LED" effect.
32 *
33 * This driver exports either the original ARM LED API, the new generic
34 * one, or both.
35 */
36
37static spinlock_t lock;
38static struct h2p2_dbg_fpga __iomem *fpga;
39static u16 led_state, hw_led_state;
40
41
42#ifdef CONFIG_LEDS_OMAP_DEBUG
43#define new_led_api() 1
44#else
45#define new_led_api() 0
46#endif
47
48
49/*-------------------------------------------------------------------------*/
50
51/* original ARM debug LED API:
52 * - timer and idle leds (some boards use non-FPGA leds here);
53 * - up to 4 generic leds, easily accessed in-kernel (any context)
54 */
55
56#define GPIO_LED_RED 3
57#define GPIO_LED_GREEN OMAP_MPUIO(4)
58
59#define LED_STATE_ENABLED 0x01
60#define LED_STATE_CLAIMED 0x02
61#define LED_TIMER_ON 0x04
62
63#define GPIO_IDLE GPIO_LED_GREEN
64#define GPIO_TIMER GPIO_LED_RED
65
66static void h2p2_dbg_leds_event(led_event_t evt)
67{
68 unsigned long flags;
69
70 spin_lock_irqsave(&lock, flags);
71
72 if (!(led_state & LED_STATE_ENABLED) && evt != led_start)
73 goto done;
74
75 switch (evt) {
76 case led_start:
77 if (fpga)
78 led_state |= LED_STATE_ENABLED;
79 break;
80
81 case led_stop:
82 case led_halted:
83 /* all leds off during suspend or shutdown */
84
85 if (!(machine_is_omap_perseus2() || machine_is_omap_h4())) {
86 omap_set_gpio_dataout(GPIO_TIMER, 0);
87 omap_set_gpio_dataout(GPIO_IDLE, 0);
88 }
89
90 __raw_writew(~0, &fpga->leds);
91 led_state &= ~LED_STATE_ENABLED;
92 goto done;
93
94 case led_claim:
95 led_state |= LED_STATE_CLAIMED;
96 hw_led_state = 0;
97 break;
98
99 case led_release:
100 led_state &= ~LED_STATE_CLAIMED;
101 break;
102
103#ifdef CONFIG_LEDS_TIMER
104 case led_timer:
105 led_state ^= LED_TIMER_ON;
106
107 if (machine_is_omap_perseus2() || machine_is_omap_h4())
108 hw_led_state ^= H2P2_DBG_FPGA_P2_LED_TIMER;
109 else {
110 omap_set_gpio_dataout(GPIO_TIMER,
111 led_state & LED_TIMER_ON);
112 goto done;
113 }
114
115 break;
116#endif
117
118#ifdef CONFIG_LEDS_CPU
119 /* LED lit iff busy */
120 case led_idle_start:
121 if (machine_is_omap_perseus2() || machine_is_omap_h4())
122 hw_led_state &= ~H2P2_DBG_FPGA_P2_LED_IDLE;
123 else {
124 omap_set_gpio_dataout(GPIO_IDLE, 1);
125 goto done;
126 }
127
128 break;
129
130 case led_idle_end:
131 if (machine_is_omap_perseus2() || machine_is_omap_h4())
132 hw_led_state |= H2P2_DBG_FPGA_P2_LED_IDLE;
133 else {
134 omap_set_gpio_dataout(GPIO_IDLE, 0);
135 goto done;
136 }
137
138 break;
139#endif
140
141 case led_green_on:
142 hw_led_state |= H2P2_DBG_FPGA_LED_GREEN;
143 break;
144 case led_green_off:
145 hw_led_state &= ~H2P2_DBG_FPGA_LED_GREEN;
146 break;
147
148 case led_amber_on:
149 hw_led_state |= H2P2_DBG_FPGA_LED_AMBER;
150 break;
151 case led_amber_off:
152 hw_led_state &= ~H2P2_DBG_FPGA_LED_AMBER;
153 break;
154
155 case led_red_on:
156 hw_led_state |= H2P2_DBG_FPGA_LED_RED;
157 break;
158 case led_red_off:
159 hw_led_state &= ~H2P2_DBG_FPGA_LED_RED;
160 break;
161
162 case led_blue_on:
163 hw_led_state |= H2P2_DBG_FPGA_LED_BLUE;
164 break;
165 case led_blue_off:
166 hw_led_state &= ~H2P2_DBG_FPGA_LED_BLUE;
167 break;
168
169 default:
170 break;
171 }
172
173
174 /*
175 * Actually burn the LEDs
176 */
177 if (led_state & LED_STATE_ENABLED)
178 __raw_writew(~hw_led_state, &fpga->leds);
179
180done:
181 spin_unlock_irqrestore(&lock, flags);
182}
183
184/*-------------------------------------------------------------------------*/
185
186/* "new" LED API
187 * - with sysfs access and generic triggering
188 * - not readily accessible to in-kernel drivers
189 */
190
191struct dbg_led {
192 struct led_classdev cdev;
193 u16 mask;
194};
195
196static struct dbg_led dbg_leds[] = {
197 /* REVISIT at least H2 uses different timer & cpu leds... */
198#ifndef CONFIG_LEDS_TIMER
199 { .mask = 1 << 0, .cdev.name = "d4:green",
200 .cdev.default_trigger = "heartbeat", },
201#endif
202#ifndef CONFIG_LEDS_CPU
203 { .mask = 1 << 1, .cdev.name = "d5:green", }, /* !idle */
204#endif
205 { .mask = 1 << 2, .cdev.name = "d6:green", },
206 { .mask = 1 << 3, .cdev.name = "d7:green", },
207
208 { .mask = 1 << 4, .cdev.name = "d8:green", },
209 { .mask = 1 << 5, .cdev.name = "d9:green", },
210 { .mask = 1 << 6, .cdev.name = "d10:green", },
211 { .mask = 1 << 7, .cdev.name = "d11:green", },
212
213 { .mask = 1 << 8, .cdev.name = "d12:green", },
214 { .mask = 1 << 9, .cdev.name = "d13:green", },
215 { .mask = 1 << 10, .cdev.name = "d14:green", },
216 { .mask = 1 << 11, .cdev.name = "d15:green", },
217
218#ifndef CONFIG_LEDS
219 { .mask = 1 << 12, .cdev.name = "d16:green", },
220 { .mask = 1 << 13, .cdev.name = "d17:green", },
221 { .mask = 1 << 14, .cdev.name = "d18:green", },
222 { .mask = 1 << 15, .cdev.name = "d19:green", },
223#endif
224};
225
226static void
227fpga_led_set(struct led_classdev *cdev, enum led_brightness value)
228{
229 struct dbg_led *led = container_of(cdev, struct dbg_led, cdev);
230 unsigned long flags;
231
232 spin_lock_irqsave(&lock, flags);
233 if (value == LED_OFF)
234 hw_led_state &= ~led->mask;
235 else
236 hw_led_state |= led->mask;
237 __raw_writew(~hw_led_state, &fpga->leds);
238 spin_unlock_irqrestore(&lock, flags);
239}
240
241static void __init newled_init(struct device *dev)
242{
243 unsigned i;
244 struct dbg_led *led;
245 int status;
246
247 for (i = 0, led = dbg_leds; i < ARRAY_SIZE(dbg_leds); i++, led++) {
248 led->cdev.brightness_set = fpga_led_set;
249 status = led_classdev_register(dev, &led->cdev);
250 if (status < 0)
251 break;
252 }
253 return;
254}
255
256
257/*-------------------------------------------------------------------------*/
258
259static int /* __init */ fpga_probe(struct platform_device *pdev)
260{
261 struct resource *iomem;
262
263 spin_lock_init(&lock);
264
265 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
266 if (!iomem)
267 return -ENODEV;
268
269 fpga = ioremap(iomem->start, H2P2_DBG_FPGA_SIZE);
270 __raw_writew(~0, &fpga->leds);
271
272#ifdef CONFIG_LEDS
273 leds_event = h2p2_dbg_leds_event;
274 leds_event(led_start);
275#endif
276
277 if (new_led_api()) {
278 newled_init(&pdev->dev);
279 }
280
281 return 0;
282}
283
284static int fpga_suspend_late(struct platform_device *pdev, pm_message_t mesg)
285{
286 __raw_writew(~0, &fpga->leds);
287 return 0;
288}
289
290static int fpga_resume_early(struct platform_device *pdev)
291{
292 __raw_writew(~hw_led_state, &fpga->leds);
293 return 0;
294}
295
296
297static struct platform_driver led_driver = {
298 .driver.name = "omap_dbg_led",
299 .probe = fpga_probe,
300 .suspend_late = fpga_suspend_late,
301 .resume_early = fpga_resume_early,
302};
303
304static int __init fpga_init(void)
305{
306 if (machine_is_omap_h4()
307 || machine_is_omap_h3()
308 || machine_is_omap_h2()
309 || machine_is_omap_perseus2()
310 )
311 return platform_driver_register(&led_driver);
312 return 0;
313}
314fs_initcall(fpga_init);
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index eeb33fed6f7c..c5dab1d6417e 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -25,7 +25,71 @@
25#include <asm/arch/gpio.h> 25#include <asm/arch/gpio.h>
26#include <asm/arch/menelaus.h> 26#include <asm/arch/menelaus.h>
27 27
28#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE) 28#if defined(CONFIG_OMAP_DSP) || defined(CONFIG_OMAP_DSP_MODULE)
29
30#include "../plat-omap/dsp/dsp_common.h"
31
32static struct dsp_platform_data dsp_pdata = {
33 .kdev_list = LIST_HEAD_INIT(dsp_pdata.kdev_list),
34};
35
36static struct resource omap_dsp_resources[] = {
37 {
38 .name = "dsp_mmu",
39 .start = -1,
40 .flags = IORESOURCE_IRQ,
41 },
42};
43
44static struct platform_device omap_dsp_device = {
45 .name = "dsp",
46 .id = -1,
47 .num_resources = ARRAY_SIZE(omap_dsp_resources),
48 .resource = omap_dsp_resources,
49 .dev = {
50 .platform_data = &dsp_pdata,
51 },
52};
53
54static inline void omap_init_dsp(void)
55{
56 struct resource *res;
57 int irq;
58
59 if (cpu_is_omap15xx())
60 irq = INT_1510_DSP_MMU;
61 else if (cpu_is_omap16xx())
62 irq = INT_1610_DSP_MMU;
63 else if (cpu_is_omap24xx())
64 irq = INT_24XX_DSP_MMU;
65
66 res = platform_get_resource_byname(&omap_dsp_device,
67 IORESOURCE_IRQ, "dsp_mmu");
68 res->start = irq;
69
70 platform_device_register(&omap_dsp_device);
71}
72
73int dsp_kfunc_device_register(struct dsp_kfunc_device *kdev)
74{
75 static DEFINE_MUTEX(dsp_pdata_lock);
76
77 mutex_init(&kdev->lock);
78
79 mutex_lock(&dsp_pdata_lock);
80 list_add_tail(&kdev->entry, &dsp_pdata.kdev_list);
81 mutex_unlock(&dsp_pdata_lock);
82
83 return 0;
84}
85EXPORT_SYMBOL(dsp_kfunc_device_register);
86
87#else
88static inline void omap_init_dsp(void) { }
89#endif /* CONFIG_OMAP_DSP */
90
91/*-------------------------------------------------------------------------*/
92#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
29 93
30#define OMAP1_I2C_BASE 0xfffb3800 94#define OMAP1_I2C_BASE 0xfffb3800
31#define OMAP2_I2C_BASE1 0x48070000 95#define OMAP2_I2C_BASE1 0x48070000
@@ -48,8 +112,8 @@ static struct resource i2c_resources1[] = {
48/* DMA not used; works around erratum writing to non-empty i2c fifo */ 112/* DMA not used; works around erratum writing to non-empty i2c fifo */
49 113
50static struct platform_device omap_i2c_device1 = { 114static struct platform_device omap_i2c_device1 = {
51 .name = "i2c_omap", 115 .name = "i2c_omap",
52 .id = 1, 116 .id = 1,
53 .num_resources = ARRAY_SIZE(i2c_resources1), 117 .num_resources = ARRAY_SIZE(i2c_resources1),
54 .resource = i2c_resources1, 118 .resource = i2c_resources1,
55}; 119};
@@ -376,7 +440,7 @@ static inline void omap_init_wdt(void) {}
376 440
377/*-------------------------------------------------------------------------*/ 441/*-------------------------------------------------------------------------*/
378 442
379#if defined(CONFIG_OMAP_RNG) || defined(CONFIG_OMAP_RNG_MODULE) 443#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
380 444
381#ifdef CONFIG_ARCH_OMAP24XX 445#ifdef CONFIG_ARCH_OMAP24XX
382#define OMAP_RNG_BASE 0x480A0000 446#define OMAP_RNG_BASE 0x480A0000
@@ -436,6 +500,7 @@ static int __init omap_init_devices(void)
436 /* please keep these calls, and their implementations above, 500 /* please keep these calls, and their implementations above,
437 * in alphabetical order so they're easier to sort through. 501 * in alphabetical order so they're easier to sort through.
438 */ 502 */
503 omap_init_dsp();
439 omap_init_i2c(); 504 omap_init_i2c();
440 omap_init_kp(); 505 omap_init_kp();
441 omap_init_mmc(); 506 omap_init_mmc();
@@ -446,4 +511,3 @@ static int __init omap_init_devices(void)
446 return 0; 511 return 0;
447} 512}
448arch_initcall(omap_init_devices); 513arch_initcall(omap_init_devices);
449
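
The new dsp_kfunc_device_register() only chains the caller's descriptor onto
dsp_pdata.kdev_list under a local mutex, so kernel-side DSP clients can hook
in before the "dsp" platform device is probed. A minimal sketch of such a
client (struct dsp_kfunc_device is defined in dsp_common.h, which is not part
of this hunk, so the empty initializer is an assumption):

	#include <linux/init.h>
	#include "../plat-omap/dsp/dsp_common.h"

	static struct dsp_kfunc_device example_kdev;	/* hypothetical client */

	static int __init example_kdev_init(void)
	{
		/* initializes example_kdev.lock and appends example_kdev.entry
		 * to dsp_pdata.kdev_list; always returns 0 */
		return dsp_kfunc_device_register(&example_kdev);
	}
	arch_initcall(example_kdev_init);
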
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index f3f84fbf8b87..2d86b106ff3e 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -925,10 +925,17 @@ static int omap2_dma_handle_ch(int ch)
925{ 925{
926 u32 status = OMAP_DMA_CSR_REG(ch); 926 u32 status = OMAP_DMA_CSR_REG(ch);
927 927
928 if (!status) 928 if (!status) {
929 if (printk_ratelimit())
930 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", ch);
929 return 0; 931 return 0;
930 if (unlikely(dma_chan[ch].dev_id == -1)) 932 }
933 if (unlikely(dma_chan[ch].dev_id == -1)) {
934 if (printk_ratelimit())
935 printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
936 "channel %d\n", status, ch);
931 return 0; 937 return 0;
938 }
932 if (unlikely(status & OMAP_DMA_DROP_IRQ)) 939 if (unlikely(status & OMAP_DMA_DROP_IRQ))
933 printk(KERN_INFO 940 printk(KERN_INFO
934 "DMA synchronization event drop occurred with device " 941 "DMA synchronization event drop occurred with device "
@@ -959,11 +966,15 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
959 int i; 966 int i;
960 967
961 val = omap_readl(OMAP_DMA4_IRQSTATUS_L0); 968 val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
962 969 if (val == 0) {
963 for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) { 970 if (printk_ratelimit())
964 int active = val & (1 << (i - 1)); 971 printk(KERN_WARNING "Spurious DMA IRQ\n");
965 if (active) 972 return IRQ_HANDLED;
966 omap2_dma_handle_ch(i - 1); 973 }
974 for (i = 0; i < OMAP_LOGICAL_DMA_CH_COUNT && val != 0; i++) {
975 if (val & 1)
976 omap2_dma_handle_ch(i);
977 val >>= 1;
967 } 978 }
968 979
969 return IRQ_HANDLED; 980 return IRQ_HANDLED;
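
The rewritten handler walks IRQSTATUS_L0 one bit per logical channel, counting
from 0 and stopping as soon as no set bits remain, instead of the old 1-based
mask test over every channel. The scan pattern in isolation (a standalone
sketch, nothing OMAP-specific assumed):

	/* Bit i of val flags logical channel i; stop once val is empty. */
	static void scan_dma_status(u32 val, int ch_count,
				    void (*handle)(int ch))
	{
		int i;

		for (i = 0; i < ch_count && val != 0; i++) {
			if (val & 1)
				handle(i);
			val >>= 1;
		}
	}
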
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 659619f235ca..36073dfaa4db 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -372,7 +372,7 @@ void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
372 372
373 /* When the functional clock disappears, too quick writes seem to 373 /* When the functional clock disappears, too quick writes seem to
374 * cause an abort. */ 374 * cause an abort. */
375 __delay(15000); 375 __delay(150000);
376} 376}
377 377
378#endif 378#endif
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 56acb8720f78..4493bcff5172 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -1,3 +1,26 @@
1/*
2 * File: arch/arm/plat-omap/fb.c
3 *
4 * Framebuffer device registration for TI OMAP platforms
5 *
6 * Copyright (C) 2006 Nokia Corporation
7 * Author: Imre Deak <imre.deak@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
1#include <linux/module.h> 24#include <linux/module.h>
2#include <linux/kernel.h> 25#include <linux/kernel.h>
3#include <linux/init.h> 26#include <linux/init.h>
@@ -16,6 +39,8 @@
16#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) 39#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
17 40
18static struct omapfb_platform_data omapfb_config; 41static struct omapfb_platform_data omapfb_config;
42static int config_invalid;
43static int configured_regions;
19 44
20static u64 omap_fb_dma_mask = ~(u32)0; 45static u64 omap_fb_dma_mask = ~(u32)0;
21 46
@@ -30,39 +55,270 @@ static struct platform_device omap_fb_device = {
30 .num_resources = 0, 55 .num_resources = 0,
31}; 56};
32 57
33/* called from map_io */ 58static inline int ranges_overlap(unsigned long start1, unsigned long size1,
34void omapfb_reserve_mem(void) 59 unsigned long start2, unsigned long size2)
35{ 60{
36 const struct omap_fbmem_config *fbmem_conf; 61 return (start1 >= start2 && start1 < start2 + size2) ||
62 (start2 >= start1 && start2 < start1 + size1);
63}
37 64
38 omapfb_config.fbmem.fb_sram_start = omap_fb_sram_start; 65static inline int range_included(unsigned long start1, unsigned long size1,
39 omapfb_config.fbmem.fb_sram_size = omap_fb_sram_size; 66 unsigned long start2, unsigned long size2)
67{
68 return start1 >= start2 && start1 + size1 <= start2 + size2;
69}
40 70
41 fbmem_conf = omap_get_config(OMAP_TAG_FBMEM, struct omap_fbmem_config);
42 71
43 if (fbmem_conf != NULL) { 72/* Check if there is an overlapping region. */
44 /* indicate that the bootloader already initialized the 73static int fbmem_region_reserved(unsigned long start, size_t size)
45 * fb device, so we'll skip that part in the fb driver 74{
46 */ 75 struct omapfb_mem_region *rg;
47 omapfb_config.fbmem.fb_sdram_start = fbmem_conf->fb_sdram_start; 76 int i;
48 omapfb_config.fbmem.fb_sdram_size = fbmem_conf->fb_sdram_size; 77
49 if (fbmem_conf->fb_sdram_size) { 78 rg = &omapfb_config.mem_desc.region[0];
50 pr_info("Reserving %u bytes SDRAM for frame buffer\n", 79 for (i = 0; i < OMAPFB_PLANE_NUM; i++, rg++) {
51 fbmem_conf->fb_sdram_size); 80 if (!rg->paddr)
52 reserve_bootmem(fbmem_conf->fb_sdram_start, 81 /* Empty slot. */
53 fbmem_conf->fb_sdram_size); 82 continue;
83 if (ranges_overlap(start, size, rg->paddr, rg->size))
84 return 1;
85 }
86 return 0;
87}
88
89/*
90 * Get the region_idx`th region from board config/ATAG and convert it to
91 * our internal format.
92 */
93static int get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
94{
95 const struct omap_fbmem_config *conf;
96 u32 paddr;
97
98 conf = omap_get_nr_config(OMAP_TAG_FBMEM,
99 struct omap_fbmem_config, region_idx);
100 if (conf == NULL)
101 return -ENOENT;
102
103 paddr = conf->start;
104 /*
105 * Low bits encode the page allocation mode, if high bits
106 * are zero. Otherwise we need a page aligned fixed
107 * address.
108 */
109 memset(rg, 0, sizeof(*rg));
110 rg->type = paddr & ~PAGE_MASK;
111 rg->paddr = paddr & PAGE_MASK;
112 rg->size = PAGE_ALIGN(conf->size);
113 return 0;
114}
115
116static int set_fbmem_region_type(struct omapfb_mem_region *rg, int mem_type,
117 unsigned long mem_start,
118 unsigned long mem_size)
119{
120 /*
121 * Check if the configuration specifies the type explicitly.
122 * type = 0 && paddr = 0, a default don't care case maps to
123 * the SDRAM type.
124 */
125 if (rg->type || (!rg->type && !rg->paddr))
126 return 0;
127 if (ranges_overlap(rg->paddr, rg->size, mem_start, mem_size)) {
128 rg->type = mem_type;
129 return 0;
130 }
131 /* Can't determine it. */
132 return -1;
133}
134
135static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg,
136 unsigned long start_avail, unsigned size_avail)
137{
138 unsigned long paddr = rg->paddr;
139 size_t size = rg->size;
140
141 if (rg->type > OMAPFB_MEMTYPE_MAX) {
142 printk(KERN_ERR
143 "Invalid start address for FB region %d\n", region_idx);
144 return -EINVAL;
145 }
146
147 if (!rg->size) {
148 printk(KERN_ERR "Zero size for FB region %d\n", region_idx);
149 return -EINVAL;
150 }
151
152 if (!paddr)
153 /* Allocate this dynamically, leave paddr 0 for now. */
154 return 0;
155
156 /*
157 * Fixed region for the given RAM range. Check if it's already
158 * reserved by the FB code or someone else.
159 */
160 if (fbmem_region_reserved(paddr, size) ||
161 !range_included(paddr, size, start_avail, size_avail)) {
162 printk(KERN_ERR "Trying to use reserved memory "
163 "for FB region %d\n", region_idx);
164 return -EINVAL;
165 }
166
167 return 0;
168}
169
170/*
171 * Called from map_io. We need to call to this early enough so that we
172 * can reserve the fixed SDRAM regions before VM could get hold of them.
173 */
174void omapfb_reserve_sdram(void)
175{
176 struct bootmem_data *bdata;
177 unsigned long sdram_start, sdram_size;
178 unsigned long reserved;
179 int i;
180
181 if (config_invalid)
182 return;
183
184 bdata = NODE_DATA(0)->bdata;
185 sdram_start = bdata->node_boot_start;
186 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
187 reserved = 0;
188 for (i = 0; ; i++) {
189 struct omapfb_mem_region rg;
190
191 if (get_fbmem_region(i, &rg) < 0)
192 break;
193 if (i == OMAPFB_PLANE_NUM) {
194 printk(KERN_ERR
195 "Extraneous FB mem configuration entries\n");
196 config_invalid = 1;
197 return;
54 } 198 }
199 /* Check if it's our memory type. */
200 if (set_fbmem_region_type(&rg, OMAPFB_MEMTYPE_SDRAM,
201 sdram_start, sdram_size) < 0 ||
202 (rg.type != OMAPFB_MEMTYPE_SDRAM))
203 continue;
204 BUG_ON(omapfb_config.mem_desc.region[i].size);
205 if (check_fbmem_region(i, &rg, sdram_start, sdram_size) < 0) {
206 config_invalid = 1;
207 return;
208 }
209 if (rg.paddr)
210 reserve_bootmem(rg.paddr, rg.size);
211 reserved += rg.size;
212 omapfb_config.mem_desc.region[i] = rg;
213 configured_regions++;
55 } 214 }
215 omapfb_config.mem_desc.region_cnt = i;
216 if (reserved)
217 pr_info("Reserving %lu bytes SDRAM for frame buffer\n",
218 reserved);
219}
220
221/*
222 * Called at sram init time, before anything is pushed to the SRAM stack.
223 * Because of the stack scheme, we will allocate everything from the
224 * start of the lowest address region to the end of SRAM. This will also
225 * include padding for page alignment and possible holes between regions.
226 *
227 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
228 * this point, since the driver built as a module would have problem with
229 * freeing / reallocating the regions.
230 */
231unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
232 unsigned long sram_vstart,
233 unsigned long sram_size,
234 unsigned long pstart_avail,
235 unsigned long size_avail)
236{
237 struct omapfb_mem_region rg;
238 unsigned long pend_avail;
239 unsigned long reserved;
240 int i;
241
242 if (config_invalid)
243 return 0;
244
245 reserved = 0;
246 pend_avail = pstart_avail + size_avail;
247 for (i = 0; ; i++) {
248 if (get_fbmem_region(i, &rg) < 0)
249 break;
250 if (i == OMAPFB_PLANE_NUM) {
251 printk(KERN_ERR
252 "Extraneous FB mem configuration entries\n");
253 config_invalid = 1;
254 return 0;
255 }
256
257 /* Check if it's our memory type. */
258 if (set_fbmem_region_type(&rg, OMAPFB_MEMTYPE_SRAM,
259 sram_pstart, sram_size) < 0 ||
260 (rg.type != OMAPFB_MEMTYPE_SRAM))
261 continue;
262 BUG_ON(omapfb_config.mem_desc.region[i].size);
263
264 if (check_fbmem_region(i, &rg, pstart_avail, size_avail) < 0) {
265 config_invalid = 1;
266 return 0;
267 }
268
269 if (!rg.paddr) {
270 /* Dynamic allocation */
271 if ((size_avail & PAGE_MASK) < rg.size) {
272 printk("Not enough SRAM for FB region %d\n",
273 i);
274 config_invalid = 1;
275 return 0;
276 }
277 size_avail = (size_avail - rg.size) & PAGE_MASK;
278 rg.paddr = pstart_avail + size_avail;
279 }
280 /* Reserve everything above the start of the region. */
281 if (pend_avail - rg.paddr > reserved)
282 reserved = pend_avail - rg.paddr;
283 size_avail = pend_avail - reserved - pstart_avail;
284
285 /*
286 * We have a kernel mapping for this already, so the
287 * driver won't have to make one.
288 */
289 rg.vaddr = (void *)(sram_vstart + rg.paddr - sram_pstart);
290 omapfb_config.mem_desc.region[i] = rg;
291 configured_regions++;
292 }
293 omapfb_config.mem_desc.region_cnt = i;
294 if (reserved)
295 pr_info("Reserving %lu bytes SRAM for frame buffer\n",
296 reserved);
297 return reserved;
298}
299
300void omapfb_set_ctrl_platform_data(void *data)
301{
302 omapfb_config.ctrl_platform_data = data;
56} 303}
57 304
58static inline int omap_init_fb(void) 305static inline int omap_init_fb(void)
59{ 306{
60 const struct omap_lcd_config *conf; 307 const struct omap_lcd_config *conf;
61 308
309 if (config_invalid)
310 return 0;
311 if (configured_regions != omapfb_config.mem_desc.region_cnt) {
312 printk(KERN_ERR "Invalid FB mem configuration entries\n");
313 return 0;
314 }
62 conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config); 315 conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
63 if (conf == NULL) 316 if (conf == NULL) {
317 if (configured_regions)
318 /* FB mem config, but no LCD config? */
319 printk(KERN_ERR "Missing LCD configuration\n");
64 return 0; 320 return 0;
65 321 }
66 omapfb_config.lcd = *conf; 322 omapfb_config.lcd = *conf;
67 323
68 return platform_device_register(&omap_fb_device); 324 return platform_device_register(&omap_fb_device);
@@ -72,7 +328,16 @@ arch_initcall(omap_init_fb);
72 328
73#else 329#else
74 330
75void omapfb_reserve_mem(void) {} 331void omapfb_reserve_sdram(void) {}
332unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
333 unsigned long sram_vstart,
334 unsigned long sram_size,
335 unsigned long start_avail,
336 unsigned long size_avail)
337{
338 return 0;
339}
340
76 341
77#endif 342#endif
78 343
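
get_fbmem_region() splits conf->start into a page-aligned physical address and
a memory type carried in the page-offset bits, so one tag format describes
both fixed and dynamically placed regions. A board could therefore declare its
regions roughly as below (a sketch: how the entries reach omap_get_nr_config(),
via board data or ATAGs, is outside this patch):

	static struct omap_fbmem_config example_fbmem[] __initdata = {
		{
			/* page-aligned start => fixed 2 MB SDRAM region */
			.start	= 0x87800000,
			.size	= SZ_2M,
		}, {
			/* low bits carry the type => dynamic SRAM region */
			.start	= OMAPFB_MEMTYPE_SRAM,
			.size	= SZ_64K,
		},
	};
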
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 9dc6d3617bdb..337455dfe64d 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
18#include <linux/sysdev.h> 17#include <linux/sysdev.h>
19#include <linux/err.h> 18#include <linux/err.h>
@@ -1545,7 +1544,7 @@ void omap2_gpio_resume_after_retention(void)
1545 * This may get called early from board specific init 1544 * This may get called early from board specific init
1546 * for boards that have interrupts routed via FPGA. 1545 * for boards that have interrupts routed via FPGA.
1547 */ 1546 */
1548int omap_gpio_init(void) 1547int __init omap_gpio_init(void)
1549{ 1548{
1550 if (!initialized) 1549 if (!initialized)
1551 return _omap_gpio_init(); 1550 return _omap_gpio_init();
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
new file mode 100644
index 000000000000..de7e6ef48bd0
--- /dev/null
+++ b/arch/arm/plat-omap/mailbox.c
@@ -0,0 +1,509 @@
1/*
2 * OMAP mailbox driver
3 *
4 * Copyright (C) 2006 Nokia Corporation. All rights reserved.
5 *
6 * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
7 * Restructured by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/sched.h>
28#include <linux/interrupt.h>
29#include <linux/device.h>
30#include <linux/blkdev.h>
31#include <linux/err.h>
32#include <linux/delay.h>
33#include <asm/io.h>
34#include <asm/arch/mailbox.h>
35#include "mailbox.h"
36
37static struct omap_mbox *mboxes;
38static DEFINE_RWLOCK(mboxes_lock);
39
40/* Mailbox Sequence Bit function */
41void omap_mbox_init_seq(struct omap_mbox *mbox)
42{
43 mbox_seq_init(mbox);
44}
45EXPORT_SYMBOL(omap_mbox_init_seq);
46
47/*
48 * message sender
49 */
50static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
51{
52 int ret = 0, i = 1000;
53
54 while (mbox_fifo_full(mbox)) {
55 if (mbox->ops->type == OMAP_MBOX_TYPE2)
56 return -1;
57 if (--i == 0)
58 return -1;
59 udelay(1);
60 }
61
62 if (arg && mbox->txq->callback) {
63 ret = mbox->txq->callback(arg);
64 if (ret)
65 goto out;
66 }
67
68 mbox_seq_toggle(mbox, &msg);
69 mbox_fifo_write(mbox, msg);
70 out:
71 return ret;
72}
73
74int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
75{
76 struct request *rq;
77 struct request_queue *q = mbox->txq->queue;
78 int ret = 0;
79
80 rq = blk_get_request(q, WRITE, GFP_ATOMIC);
81 if (unlikely(!rq)) {
82 ret = -ENOMEM;
83 goto fail;
84 }
85
86 rq->data = (void *)msg;
87 blk_insert_request(q, rq, 0, arg);
88
89 schedule_work(&mbox->txq->work);
90 fail:
91 return ret;
92}
93EXPORT_SYMBOL(omap_mbox_msg_send);
94
95static void mbox_tx_work(struct work_struct *work)
96{
97 int ret;
98 struct request *rq;
99 struct omap_mbox_queue *mq = container_of(work,
100 struct omap_mbox_queue, work);
101 struct omap_mbox *mbox = mq->queue->queuedata;
102 struct request_queue *q = mbox->txq->queue;
103
104 while (1) {
105 spin_lock(q->queue_lock);
106 rq = elv_next_request(q);
107 spin_unlock(q->queue_lock);
108
109 if (!rq)
110 break;
111
112 ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
113 if (ret) {
114 enable_mbox_irq(mbox, IRQ_TX);
115 return;
116 }
117
118 spin_lock(q->queue_lock);
119 blkdev_dequeue_request(rq);
120 end_that_request_last(rq, 0);
121 spin_unlock(q->queue_lock);
122 }
123}
124
125/*
126 * Message receiver(workqueue)
127 */
128static void mbox_rx_work(struct work_struct *work)
129{
130 struct omap_mbox_queue *mq =
131 container_of(work, struct omap_mbox_queue, work);
132 struct omap_mbox *mbox = mq->queue->queuedata;
133 struct request_queue *q = mbox->rxq->queue;
134 struct request *rq;
135 mbox_msg_t msg;
136 unsigned long flags;
137
138 if (mbox->rxq->callback == NULL) {
139 sysfs_notify(&mbox->dev.kobj, NULL, "mbox");
140 return;
141 }
142
143 while (1) {
144 spin_lock_irqsave(q->queue_lock, flags);
145 rq = elv_next_request(q);
146 spin_unlock_irqrestore(q->queue_lock, flags);
147 if (!rq)
148 break;
149
150 msg = (mbox_msg_t) rq->data;
151
152 spin_lock_irqsave(q->queue_lock, flags);
153 blkdev_dequeue_request(rq);
154 end_that_request_last(rq, 0);
155 spin_unlock_irqrestore(q->queue_lock, flags);
156
157 mbox->rxq->callback((void *)msg);
158 }
159}
160
161/*
162 * Mailbox interrupt handler
163 */
164static void mbox_txq_fn(request_queue_t * q)
165{
166}
167
168static void mbox_rxq_fn(request_queue_t * q)
169{
170}
171
172static void __mbox_tx_interrupt(struct omap_mbox *mbox)
173{
174 disable_mbox_irq(mbox, IRQ_TX);
175 ack_mbox_irq(mbox, IRQ_TX);
176 schedule_work(&mbox->txq->work);
177}
178
179static void __mbox_rx_interrupt(struct omap_mbox *mbox)
180{
181 struct request *rq;
182 mbox_msg_t msg;
183 request_queue_t *q = mbox->rxq->queue;
184
185 disable_mbox_irq(mbox, IRQ_RX);
186
187 while (!mbox_fifo_empty(mbox)) {
188 rq = blk_get_request(q, WRITE, GFP_ATOMIC);
189 if (unlikely(!rq))
190 goto nomem;
191
192 msg = mbox_fifo_read(mbox);
193 rq->data = (void *)msg;
194
195 if (unlikely(mbox_seq_test(mbox, msg))) {
196 pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
197 if (mbox->err_notify)
198 mbox->err_notify();
199 }
200
201 blk_insert_request(q, rq, 0, NULL);
202 if (mbox->ops->type == OMAP_MBOX_TYPE1)
203 break;
204 }
205
206 /* no more messages in the fifo. clear IRQ source. */
207 ack_mbox_irq(mbox, IRQ_RX);
208 enable_mbox_irq(mbox, IRQ_RX);
209 nomem:
210 schedule_work(&mbox->rxq->work);
211}
212
213static irqreturn_t mbox_interrupt(int irq, void *p)
214{
215 struct omap_mbox *mbox = (struct omap_mbox *)p;
216
217 if (is_mbox_irq(mbox, IRQ_TX))
218 __mbox_tx_interrupt(mbox);
219
220 if (is_mbox_irq(mbox, IRQ_RX))
221 __mbox_rx_interrupt(mbox);
222
223 return IRQ_HANDLED;
224}
225
226/*
227 * sysfs files
228 */
229static ssize_t
230omap_mbox_write(struct device *dev, struct device_attribute *attr,
231 const char * buf, size_t count)
232{
233 int ret;
234 mbox_msg_t *p = (mbox_msg_t *)buf;
235 struct omap_mbox *mbox = dev_get_drvdata(dev);
236
237 for (; count >= sizeof(mbox_msg_t); count -= sizeof(mbox_msg_t)) {
238 ret = omap_mbox_msg_send(mbox, be32_to_cpu(*p), NULL);
239 if (ret)
240 return -EAGAIN;
241 p++;
242 }
243
244 return (size_t)((char *)p - buf);
245}
246
247static ssize_t
248omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
249{
250 unsigned long flags;
251 struct request *rq;
252 mbox_msg_t *p = (mbox_msg_t *) buf;
253 struct omap_mbox *mbox = dev_get_drvdata(dev);
254 struct request_queue *q = mbox->rxq->queue;
255
256 while (1) {
257 spin_lock_irqsave(q->queue_lock, flags);
258 rq = elv_next_request(q);
259 spin_unlock_irqrestore(q->queue_lock, flags);
260
261 if (!rq)
262 break;
263
264 *p = (mbox_msg_t) rq->data;
265
266 spin_lock_irqsave(q->queue_lock, flags);
267 blkdev_dequeue_request(rq);
268 end_that_request_last(rq, 0);
269 spin_unlock_irqrestore(q->queue_lock, flags);
270
271 if (unlikely(mbox_seq_test(mbox, *p))) {
272 pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
273 continue;
274 }
275 p++;
276 }
277
278 pr_debug("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
279
280 return (size_t) ((char *)p - buf);
281}
282
283static DEVICE_ATTR(mbox, S_IRUGO | S_IWUSR, omap_mbox_read, omap_mbox_write);
284
285static ssize_t mbox_show(struct class *class, char *buf)
286{
287 return sprintf(buf, "mbox");
288}
289
290static CLASS_ATTR(mbox, S_IRUGO, mbox_show, NULL);
291
292static struct class omap_mbox_class = {
293 .name = "omap_mbox",
294};
295
296static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
297 request_fn_proc * proc,
298 void (*work) (struct work_struct *))
299{
300 request_queue_t *q;
301 struct omap_mbox_queue *mq;
302
303 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
304 if (!mq)
305 return NULL;
306
307 spin_lock_init(&mq->lock);
308
309 q = blk_init_queue(proc, &mq->lock);
310 if (!q)
311 goto error;
312 q->queuedata = mbox;
313 mq->queue = q;
314
315 INIT_WORK(&mq->work, work);
316
317 return mq;
318error:
319 kfree(mq);
320 return NULL;
321}
322
323static void mbox_queue_free(struct omap_mbox_queue *q)
324{
325 blk_cleanup_queue(q->queue);
326 kfree(q);
327}
328
329static int omap_mbox_init(struct omap_mbox *mbox)
330{
331 int ret;
332 struct omap_mbox_queue *mq;
333
334 if (likely(mbox->ops->startup)) {
335 ret = mbox->ops->startup(mbox);
336 if (unlikely(ret))
337 return ret;
338 }
339
340 mbox->dev.class = &omap_mbox_class;
341 strlcpy(mbox->dev.bus_id, mbox->name, KOBJ_NAME_LEN);
342 dev_set_drvdata(&mbox->dev, mbox);
343
344 ret = device_register(&mbox->dev);
345 if (unlikely(ret))
346 goto fail_device_reg;
347
348 ret = device_create_file(&mbox->dev, &dev_attr_mbox);
349 if (unlikely(ret)) {
350 printk(KERN_ERR
351 "device_create_file failed: %d\n", ret);
352 goto fail_create_mbox;
353 }
354
355 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_DISABLED,
356 mbox->name, mbox);
357 if (unlikely(ret)) {
358 printk(KERN_ERR
359 "failed to register mailbox interrupt:%d\n", ret);
360 goto fail_request_irq;
361 }
362 enable_mbox_irq(mbox, IRQ_RX);
363
364 mq = mbox_queue_alloc(mbox, mbox_txq_fn, mbox_tx_work);
365 if (!mq) {
366 ret = -ENOMEM;
367 goto fail_alloc_txq;
368 }
369 mbox->txq = mq;
370
371 mq = mbox_queue_alloc(mbox, mbox_rxq_fn, mbox_rx_work);
372 if (!mq) {
373 ret = -ENOMEM;
374 goto fail_alloc_rxq;
375 }
376 mbox->rxq = mq;
377
378 return 0;
379
380 fail_alloc_rxq:
381 mbox_queue_free(mbox->txq);
382 fail_alloc_txq:
383 free_irq(mbox->irq, mbox);
384 fail_request_irq:
385 device_remove_file(&mbox->dev, &dev_attr_mbox);
386 fail_create_mbox:
387 device_unregister(&mbox->dev);
388 fail_device_reg:
389 if (unlikely(mbox->ops->shutdown))
390 mbox->ops->shutdown(mbox);
391
392 return ret;
393}
394
395static void omap_mbox_fini(struct omap_mbox *mbox)
396{
397 mbox_queue_free(mbox->txq);
398 mbox_queue_free(mbox->rxq);
399
400 free_irq(mbox->irq, mbox);
401 device_remove_file(&mbox->dev, &dev_attr_mbox);
402 class_unregister(&omap_mbox_class);
403
404 if (unlikely(mbox->ops->shutdown))
405 mbox->ops->shutdown(mbox);
406}
407
408static struct omap_mbox **find_mboxes(const char *name)
409{
410 struct omap_mbox **p;
411
412 for (p = &mboxes; *p; p = &(*p)->next) {
413 if (strcmp((*p)->name, name) == 0)
414 break;
415 }
416
417 return p;
418}
419
420struct omap_mbox *omap_mbox_get(const char *name)
421{
422 struct omap_mbox *mbox;
423 int ret;
424
425 read_lock(&mboxes_lock);
426 mbox = *(find_mboxes(name));
427 if (mbox == NULL) {
428 read_unlock(&mboxes_lock);
429 return ERR_PTR(-ENOENT);
430 }
431
432 read_unlock(&mboxes_lock);
433
434 ret = omap_mbox_init(mbox);
435 if (ret)
436 return ERR_PTR(-ENODEV);
437
438 return mbox;
439}
440EXPORT_SYMBOL(omap_mbox_get);
441
442void omap_mbox_put(struct omap_mbox *mbox)
443{
444 omap_mbox_fini(mbox);
445}
446EXPORT_SYMBOL(omap_mbox_put);
447
448int omap_mbox_register(struct omap_mbox *mbox)
449{
450 int ret = 0;
451 struct omap_mbox **tmp;
452
453 if (!mbox)
454 return -EINVAL;
455 if (mbox->next)
456 return -EBUSY;
457
458 write_lock(&mboxes_lock);
459 tmp = find_mboxes(mbox->name);
460 if (*tmp)
461 ret = -EBUSY;
462 else
463 *tmp = mbox;
464 write_unlock(&mboxes_lock);
465
466 return ret;
467}
468EXPORT_SYMBOL(omap_mbox_register);
469
470int omap_mbox_unregister(struct omap_mbox *mbox)
471{
472 struct omap_mbox **tmp;
473
474 write_lock(&mboxes_lock);
475 tmp = &mboxes;
476 while (*tmp) {
477 if (mbox == *tmp) {
478 *tmp = mbox->next;
479 mbox->next = NULL;
480 write_unlock(&mboxes_lock);
481 return 0;
482 }
483 tmp = &(*tmp)->next;
484 }
485 write_unlock(&mboxes_lock);
486
487 return -EINVAL;
488}
489EXPORT_SYMBOL(omap_mbox_unregister);
490
491static int __init omap_mbox_class_init(void)
492{
493 int ret = class_register(&omap_mbox_class);
494 if (!ret)
495 ret = class_create_file(&omap_mbox_class, &class_attr_mbox);
496
497 return ret;
498}
499
500static void __exit omap_mbox_class_exit(void)
501{
502 class_remove_file(&omap_mbox_class, &class_attr_mbox);
503 class_unregister(&omap_mbox_class);
504}
505
506subsys_initcall(omap_mbox_class_init);
507module_exit(omap_mbox_class_exit);
508
509MODULE_LICENSE("GPL");
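
Clients consume this API via omap_mbox_get() and omap_mbox_msg_send();
received words are delivered through rxq->callback from the RX workqueue, or
exposed via the sysfs "mbox" attribute when no callback is set. Roughly (a
sketch: the mailbox name "dsp" and the int (*)(void *) callback type are
assumptions, the latter inferred from how txq->callback is invoked above):

	#include <linux/err.h>
	#include <asm/arch/mailbox.h>

	static struct omap_mbox *mbox;

	static int example_rx(void *data)
	{
		mbox_msg_t msg = (mbox_msg_t)data;	/* fifo word from rx work */

		pr_info("mbox rx: %08x\n", msg);
		return 0;
	}

	static int example_open(void)
	{
		mbox = omap_mbox_get("dsp");		/* hypothetical name */
		if (IS_ERR(mbox))
			return PTR_ERR(mbox);
		mbox->rxq->callback = example_rx;
		return 0;
	}

	static int example_send(mbox_msg_t msg)
	{
		/* queued on a block request queue; __mbox_msg_send() pushes
		 * it to the fifo from mbox_tx_work(), stamping the seq bit */
		return omap_mbox_msg_send(mbox, msg, NULL);
	}
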
diff --git a/arch/arm/plat-omap/mailbox.h b/arch/arm/plat-omap/mailbox.h
new file mode 100644
index 000000000000..67c6740b8ad5
--- /dev/null
+++ b/arch/arm/plat-omap/mailbox.h
@@ -0,0 +1,100 @@
1/*
2 * Mailbox internal functions
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#ifndef __ARCH_ARM_PLAT_MAILBOX_H
13#define __ARCH_ARM_PLAT_MAILBOX_H
14
15/*
16 * Mailbox sequence bit API
17 */
18#if defined(CONFIG_ARCH_OMAP1)
19# define MBOX_USE_SEQ_BIT
20#elif defined(CONFIG_ARCH_OMAP2)
21# define MBOX_USE_SEQ_BIT
22#endif
23
24#ifdef MBOX_USE_SEQ_BIT
25/* seq_rcv should be initialized with any value other than
26 * 0 and 1 << 31, to allow either value for the first
27 * message. */
28static inline void mbox_seq_init(struct omap_mbox *mbox)
29{
30 /* any value other than 0 and 1 << 31 */
31 mbox->seq_rcv = 0xffffffff;
32}
33
34static inline void mbox_seq_toggle(struct omap_mbox *mbox, mbox_msg_t * msg)
35{
36 /* add seq_snd to msg */
37 *msg = (*msg & 0x7fffffff) | mbox->seq_snd;
38 /* flip seq_snd */
39 mbox->seq_snd ^= 1 << 31;
40}
41
42static inline int mbox_seq_test(struct omap_mbox *mbox, mbox_msg_t msg)
43{
44 mbox_msg_t seq = msg & (1 << 31);
45 if (seq == mbox->seq_rcv)
46 return -1;
47 mbox->seq_rcv = seq;
48 return 0;
49}
50#else
51static inline void mbox_seq_init(struct omap_mbox *mbox)
52{
53}
54static inline void mbox_seq_toggle(struct omap_mbox *mbox, mbox_msg_t * msg)
55{
56}
57static inline int mbox_seq_test(struct omap_mbox *mbox, mbox_msg_t msg)
58{
59 return 0;
60}
61#endif
62
63/* Mailbox FIFO handle functions */
64static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
65{
66 return mbox->ops->fifo_read(mbox);
67}
68static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
69{
70 mbox->ops->fifo_write(mbox, msg);
71}
72static inline int mbox_fifo_empty(struct omap_mbox *mbox)
73{
74 return mbox->ops->fifo_empty(mbox);
75}
76static inline int mbox_fifo_full(struct omap_mbox *mbox)
77{
78 return mbox->ops->fifo_full(mbox);
79}
80
81/* Mailbox IRQ handle functions */
82static inline void enable_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
83{
84 mbox->ops->enable_irq(mbox, irq);
85}
86static inline void disable_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
87{
88 mbox->ops->disable_irq(mbox, irq);
89}
90static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
91{
92 if (mbox->ops->ack_irq)
93 mbox->ops->ack_irq(mbox, irq);
94}
95static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
96{
97 return mbox->ops->is_irq(mbox, irq);
98}
99
100#endif /* __ARCH_ARM_PLAT_MAILBOX_H */
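
The sequence bit protects against reading the same FIFO slot twice: bit 31
alternates on every word sent, and the receiver rejects a word whose bit 31
matches the last one it accepted; seeding seq_rcv with 0xffffffff lets the
first word through either way. The round trip restated outside the mailbox
structures:

	static unsigned int seq_snd = 0;
	static unsigned int seq_rcv = 0xffffffff;	/* neither 0 nor 1 << 31 */

	static unsigned int stamp(unsigned int payload)
	{
		unsigned int msg = (payload & 0x7fffffff) | seq_snd;

		seq_snd ^= 1u << 31;		/* flip for the next word */
		return msg;
	}

	static int is_duplicate(unsigned int msg)
	{
		unsigned int seq = msg & (1u << 31);

		if (seq == seq_rcv)
			return 1;		/* same bit twice: stale read */
		seq_rcv = seq;
		return 0;
	}
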
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 19014b2ff4c6..bc46f33aede3 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -46,14 +46,19 @@
46 46
47#define ROUND_DOWN(value,boundary) ((value) & (~((boundary)-1))) 47#define ROUND_DOWN(value,boundary) ((value) & (~((boundary)-1)))
48 48
49static unsigned long omap_sram_start;
49static unsigned long omap_sram_base; 50static unsigned long omap_sram_base;
50static unsigned long omap_sram_size; 51static unsigned long omap_sram_size;
51static unsigned long omap_sram_ceil; 52static unsigned long omap_sram_ceil;
52 53
53unsigned long omap_fb_sram_start; 54extern unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
54unsigned long omap_fb_sram_size; 55 unsigned long sram_vstart,
56 unsigned long sram_size,
57 unsigned long pstart_avail,
58 unsigned long size_avail);
55 59
56/* Depending on the target RAMFS firewall setup, the public usable amount of 60/*
61 * Depending on the target RAMFS firewall setup, the public usable amount of
57 * SRAM varies. The default accessible size for all device types is 2k. A GP 62 * SRAM varies. The default accessible size for all device types is 2k. A GP
58 * device allows ARM11 but not other initiators for full size. This 63 * device allows ARM11 but not other initiators for full size. This
59 * functionality seems ok until some nice security API happens. 64 * functionality seems ok until some nice security API happens.
@@ -77,32 +82,6 @@ static int is_sram_locked(void)
77 return 1; /* assume locked with no PPA or security driver */ 82 return 1; /* assume locked with no PPA or security driver */
78} 83}
79 84
80void get_fb_sram_conf(unsigned long start_avail, unsigned size_avail,
81 unsigned long *start, unsigned long *size)
82{
83 const struct omap_fbmem_config *fbmem_conf;
84
85 fbmem_conf = omap_get_config(OMAP_TAG_FBMEM, struct omap_fbmem_config);
86 if (fbmem_conf != NULL) {
87 *start = fbmem_conf->fb_sram_start;
88 *size = fbmem_conf->fb_sram_size;
89 } else {
90 *size = 0;
91 *start = 0;
92 }
93
94 if (*size && (
95 *start < start_avail ||
96 *start + *size > start_avail + size_avail)) {
97 printk(KERN_ERR "invalid FB SRAM configuration\n");
98 *start = start_avail;
99 *size = size_avail;
100 }
101
102 if (*size)
103 pr_info("Reserving %lu bytes SRAM for frame buffer\n", *size);
104}
105
106/* 85/*
107 * The amount of SRAM depends on the core type. 86 * The amount of SRAM depends on the core type.
108 * Note that we cannot try to test for SRAM here because writes 87 * Note that we cannot try to test for SRAM here because writes
@@ -111,16 +90,16 @@ void get_fb_sram_conf(unsigned long start_avail, unsigned size_avail,
111 */ 90 */
112void __init omap_detect_sram(void) 91void __init omap_detect_sram(void)
113{ 92{
114 unsigned long sram_start; 93 unsigned long reserved;
115 94
116 if (cpu_is_omap24xx()) { 95 if (cpu_is_omap24xx()) {
117 if (is_sram_locked()) { 96 if (is_sram_locked()) {
118 omap_sram_base = OMAP2_SRAM_PUB_VA; 97 omap_sram_base = OMAP2_SRAM_PUB_VA;
119 sram_start = OMAP2_SRAM_PUB_PA; 98 omap_sram_start = OMAP2_SRAM_PUB_PA;
120 omap_sram_size = 0x800; /* 2K */ 99 omap_sram_size = 0x800; /* 2K */
121 } else { 100 } else {
122 omap_sram_base = OMAP2_SRAM_VA; 101 omap_sram_base = OMAP2_SRAM_VA;
123 sram_start = OMAP2_SRAM_PA; 102 omap_sram_start = OMAP2_SRAM_PA;
124 if (cpu_is_omap242x()) 103 if (cpu_is_omap242x())
125 omap_sram_size = 0xa0000; /* 640K */ 104 omap_sram_size = 0xa0000; /* 640K */
126 else if (cpu_is_omap243x()) 105 else if (cpu_is_omap243x())
@@ -128,7 +107,7 @@ void __init omap_detect_sram(void)
128 } 107 }
129 } else { 108 } else {
130 omap_sram_base = OMAP1_SRAM_VA; 109 omap_sram_base = OMAP1_SRAM_VA;
131 sram_start = OMAP1_SRAM_PA; 110 omap_sram_start = OMAP1_SRAM_PA;
132 111
133 if (cpu_is_omap730()) 112 if (cpu_is_omap730())
134 omap_sram_size = 0x32000; /* 200K */ 113 omap_sram_size = 0x32000; /* 200K */
@@ -144,12 +123,11 @@ void __init omap_detect_sram(void)
144 omap_sram_size = 0x4000; 123 omap_sram_size = 0x4000;
145 } 124 }
146 } 125 }
147 get_fb_sram_conf(sram_start + SRAM_BOOTLOADER_SZ, 126 reserved = omapfb_reserve_sram(omap_sram_start, omap_sram_base,
148 omap_sram_size - SRAM_BOOTLOADER_SZ, 127 omap_sram_size,
149 &omap_fb_sram_start, &omap_fb_sram_size); 128 omap_sram_start + SRAM_BOOTLOADER_SZ,
150 if (omap_fb_sram_size) 129 omap_sram_size - SRAM_BOOTLOADER_SZ);
151 omap_sram_size -= sram_start + omap_sram_size - 130 omap_sram_size -= reserved;
152 omap_fb_sram_start;
153 omap_sram_ceil = omap_sram_base + omap_sram_size; 131 omap_sram_ceil = omap_sram_base + omap_sram_size;
154} 132}
155 133
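
The handover arithmetic is worth spelling out: omapfb_reserve_sram() returns
how many bytes must be chopped off the top of the SRAM pool, and
omap_detect_sram() simply shrinks omap_sram_size by that amount. With
illustrative numbers (640 KB of SRAM, one dynamically placed 64 KB FB region,
SRAM_BOOTLOADER_SZ ignored for brevity):

	/* dynamic-allocation branch in omapfb_reserve_sram():
	 *
	 *   size_avail = (640K - 64K) & PAGE_MASK	= 576K
	 *   rg.paddr   = pstart_avail + size_avail	  (top 64 KB)
	 *   reserved   = pend_avail - rg.paddr		= 64K
	 *
	 * omap_sram_size -= 64K, so omap_sram_ceil stays below the frame
	 * buffer and the SRAM stack allocator can never reach it. */
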
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 7e8096809be2..25489aafb113 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -37,9 +37,27 @@
37#include <asm/arch/usb.h> 37#include <asm/arch/usb.h>
38#include <asm/arch/board.h> 38#include <asm/arch/board.h>
39 39
40#ifdef CONFIG_ARCH_OMAP1
41
42#define INT_USB_IRQ_GEN IH2_BASE + 20
43#define INT_USB_IRQ_NISO IH2_BASE + 30
44#define INT_USB_IRQ_ISO IH2_BASE + 29
45#define INT_USB_IRQ_HGEN INT_USB_HHC_1
46#define INT_USB_IRQ_OTG IH2_BASE + 8
47
48#else
49
50#define INT_USB_IRQ_GEN INT_24XX_USB_IRQ_GEN
51#define INT_USB_IRQ_NISO INT_24XX_USB_IRQ_NISO
52#define INT_USB_IRQ_ISO INT_24XX_USB_IRQ_ISO
53#define INT_USB_IRQ_HGEN INT_24XX_USB_IRQ_HGEN
54#define INT_USB_IRQ_OTG INT_24XX_USB_IRQ_OTG
55
56#endif
57
58
40/* These routines should handle the standard chip-specific modes 59/* These routines should handle the standard chip-specific modes
41 * for usb0/1/2 ports, covering basic mux and transceiver setup. 60 * for usb0/1/2 ports, covering basic mux and transceiver setup.
42 * Call omap_usb_init() once, from INIT_MACHINE().
43 * 61 *
44 * Some board-*.c files will need to set up additional mux options, 62 * Some board-*.c files will need to set up additional mux options,
45 * like for suspend handling, vbus sensing, GPIOs, and the D+ pullup. 63 * like for suspend handling, vbus sensing, GPIOs, and the D+ pullup.
@@ -96,19 +114,26 @@ static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
96{ 114{
97 u32 syscon1 = 0; 115 u32 syscon1 = 0;
98 116
117 if (cpu_is_omap24xx())
118 CONTROL_DEVCONF_REG &= ~USBT0WRMODEI(USB_BIDIR_TLL);
119
99 if (nwires == 0) { 120 if (nwires == 0) {
100 if (!cpu_is_omap15xx()) { 121 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
101 /* pulldown D+/D- */ 122 /* pulldown D+/D- */
102 USB_TRANSCEIVER_CTRL_REG &= ~(3 << 1); 123 USB_TRANSCEIVER_CTRL_REG &= ~(3 << 1);
103 } 124 }
104 return 0; 125 return 0;
105 } 126 }
106 127
107 if (is_device) 128 if (is_device) {
108 omap_cfg_reg(W4_USB_PUEN); 129 if (cpu_is_omap24xx())
130 omap_cfg_reg(J20_24XX_USB0_PUEN);
131 else
132 omap_cfg_reg(W4_USB_PUEN);
133 }
109 134
110 /* internal transceiver */ 135 /* internal transceiver (unavailable on 17xx, 24xx) */
111 if (nwires == 2) { 136 if (!cpu_class_is_omap2() && nwires == 2) {
112 // omap_cfg_reg(P9_USB_DP); 137 // omap_cfg_reg(P9_USB_DP);
113 // omap_cfg_reg(R8_USB_DM); 138 // omap_cfg_reg(R8_USB_DM);
114 139
@@ -136,29 +161,50 @@ static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
136 return 0; 161 return 0;
137 } 162 }
138 163
139 omap_cfg_reg(V6_USB0_TXD); 164 if (cpu_is_omap24xx()) {
140 omap_cfg_reg(W9_USB0_TXEN); 165 omap_cfg_reg(K18_24XX_USB0_DAT);
141 omap_cfg_reg(W5_USB0_SE0); 166 omap_cfg_reg(K19_24XX_USB0_TXEN);
167 omap_cfg_reg(J14_24XX_USB0_SE0);
168 if (nwires != 3)
169 omap_cfg_reg(J18_24XX_USB0_RCV);
170 } else {
171 omap_cfg_reg(V6_USB0_TXD);
172 omap_cfg_reg(W9_USB0_TXEN);
173 omap_cfg_reg(W5_USB0_SE0);
174 if (nwires != 3)
175 omap_cfg_reg(Y5_USB0_RCV);
176 }
142 177
143 /* NOTE: SPEED and SUSP aren't configured here */ 178 /* NOTE: SPEED and SUSP aren't configured here. OTG hosts
179 * may be able to use I2C requests to set those bits along
180 * with VBUS switching and overcurrent detection.
181 */
144 182
145 if (nwires != 3) 183 if (cpu_class_is_omap1() && nwires != 6)
146 omap_cfg_reg(Y5_USB0_RCV);
147 if (nwires != 6)
148 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB2_UNI_R; 184 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB2_UNI_R;
149 185
150 switch (nwires) { 186 switch (nwires) {
151 case 3: 187 case 3:
152 syscon1 = 2; 188 syscon1 = 2;
189 if (cpu_is_omap24xx())
190 CONTROL_DEVCONF_REG |= USBT0WRMODEI(USB_BIDIR);
153 break; 191 break;
154 case 4: 192 case 4:
155 syscon1 = 1; 193 syscon1 = 1;
194 if (cpu_is_omap24xx())
195 CONTROL_DEVCONF_REG |= USBT0WRMODEI(USB_BIDIR);
156 break; 196 break;
157 case 6: 197 case 6:
158 syscon1 = 3; 198 syscon1 = 3;
159 omap_cfg_reg(AA9_USB0_VP); 199 if (cpu_is_omap24xx()) {
160 omap_cfg_reg(R9_USB0_VM); 200 omap_cfg_reg(J19_24XX_USB0_VP);
161 USB_TRANSCEIVER_CTRL_REG |= CONF_USB2_UNI_R; 201 omap_cfg_reg(K20_24XX_USB0_VM);
202 CONTROL_DEVCONF_REG |= USBT0WRMODEI(USB_UNIDIR);
203 } else {
204 omap_cfg_reg(AA9_USB0_VP);
205 omap_cfg_reg(R9_USB0_VM);
206 USB_TRANSCEIVER_CTRL_REG |= CONF_USB2_UNI_R;
207 }
162 break; 208 break;
163 default: 209 default:
164 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n", 210 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
@@ -171,14 +217,22 @@ static u32 __init omap_usb1_init(unsigned nwires)
171{ 217{
172 u32 syscon1 = 0; 218 u32 syscon1 = 0;
173 219
174 if (nwires != 6 && !cpu_is_omap15xx()) 220 if (cpu_class_is_omap1() && !cpu_is_omap15xx() && nwires != 6)
175 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB1_UNI_R; 221 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB1_UNI_R;
222 if (cpu_is_omap24xx())
223 CONTROL_DEVCONF_REG &= ~USBT1WRMODEI(USB_BIDIR_TLL);
224
176 if (nwires == 0) 225 if (nwires == 0)
177 return 0; 226 return 0;
178 227
179 /* external transceiver */ 228 /* external transceiver */
180 omap_cfg_reg(USB1_TXD); 229 if (cpu_class_is_omap1()) {
181 omap_cfg_reg(USB1_TXEN); 230 omap_cfg_reg(USB1_TXD);
231 omap_cfg_reg(USB1_TXEN);
232 if (nwires != 3)
233 omap_cfg_reg(USB1_RCV);
234 }
235
182 if (cpu_is_omap15xx()) { 236 if (cpu_is_omap15xx()) {
183 omap_cfg_reg(USB1_SEO); 237 omap_cfg_reg(USB1_SEO);
184 omap_cfg_reg(USB1_SPEED); 238 omap_cfg_reg(USB1_SPEED);
@@ -190,20 +244,38 @@ static u32 __init omap_usb1_init(unsigned nwires)
190 } else if (cpu_is_omap1710()) { 244 } else if (cpu_is_omap1710()) {
191 omap_cfg_reg(R13_1710_USB1_SE0); 245 omap_cfg_reg(R13_1710_USB1_SE0);
192 // SUSP 246 // SUSP
247 } else if (cpu_is_omap24xx()) {
248 /* NOTE: board-specific code must set up pin muxing for usb1,
249 * since each signal could come out on either of two balls.
250 */
193 } else { 251 } else {
194 pr_debug("usb unrecognized\n"); 252 pr_debug("usb%d cpu unrecognized\n", 1);
253 return 0;
195 } 254 }
196 if (nwires != 3)
197 omap_cfg_reg(USB1_RCV);
198 255
199 switch (nwires) { 256 switch (nwires) {
257 case 2:
258 if (!cpu_is_omap24xx())
259 goto bad;
260 /* NOTE: board-specific code must override this setting if
261 * this TLL link is not using DP/DM
262 */
263 syscon1 = 1;
264 CONTROL_DEVCONF_REG |= USBT1WRMODEI(USB_BIDIR_TLL);
265 break;
200 case 3: 266 case 3:
201 syscon1 = 2; 267 syscon1 = 2;
268 if (cpu_is_omap24xx())
269 CONTROL_DEVCONF_REG |= USBT1WRMODEI(USB_BIDIR);
202 break; 270 break;
203 case 4: 271 case 4:
204 syscon1 = 1; 272 syscon1 = 1;
273 if (cpu_is_omap24xx())
274 CONTROL_DEVCONF_REG |= USBT1WRMODEI(USB_BIDIR);
205 break; 275 break;
206 case 6: 276 case 6:
277 if (cpu_is_omap24xx())
278 goto bad;
207 syscon1 = 3; 279 syscon1 = 3;
208 omap_cfg_reg(USB1_VP); 280 omap_cfg_reg(USB1_VP);
209 omap_cfg_reg(USB1_VM); 281 omap_cfg_reg(USB1_VM);
@@ -211,6 +283,7 @@ static u32 __init omap_usb1_init(unsigned nwires)
211 USB_TRANSCEIVER_CTRL_REG |= CONF_USB1_UNI_R; 283 USB_TRANSCEIVER_CTRL_REG |= CONF_USB1_UNI_R;
212 break; 284 break;
213 default: 285 default:
286bad:
214 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n", 287 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
215 1, nwires); 288 1, nwires);
216 } 289 }
@@ -221,10 +294,17 @@ static u32 __init omap_usb2_init(unsigned nwires, unsigned alt_pingroup)
221{ 294{
222 u32 syscon1 = 0; 295 u32 syscon1 = 0;
223 296
224 /* NOTE erratum: must leave USB2_UNI_R set if usb0 in use */ 297 if (cpu_is_omap24xx()) {
298 CONTROL_DEVCONF_REG &= ~(USBT2WRMODEI(USB_BIDIR_TLL)
299 | USBT2TLL5PI);
300 alt_pingroup = 0;
301 }
302
303 /* NOTE omap1 erratum: must leave USB2_UNI_R set if usb0 in use */
225 if (alt_pingroup || nwires == 0) 304 if (alt_pingroup || nwires == 0)
226 return 0; 305 return 0;
227 if (nwires != 6 && !cpu_is_omap15xx()) 306
307 if (cpu_class_is_omap1() && !cpu_is_omap15xx() && nwires != 6)
228 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB2_UNI_R; 308 USB_TRANSCEIVER_CTRL_REG &= ~CONF_USB2_UNI_R;
229 309
230 /* external transceiver */ 310 /* external transceiver */
@@ -242,19 +322,54 @@ static u32 __init omap_usb2_init(unsigned nwires, unsigned alt_pingroup)
242 if (nwires != 3) 322 if (nwires != 3)
243 omap_cfg_reg(Y5_USB2_RCV); 323 omap_cfg_reg(Y5_USB2_RCV);
244 // FIXME omap_cfg_reg(USB2_SPEED); 324 // FIXME omap_cfg_reg(USB2_SPEED);
325 } else if (cpu_is_omap24xx()) {
326 omap_cfg_reg(Y11_24XX_USB2_DAT);
327 omap_cfg_reg(AA10_24XX_USB2_SE0);
328 if (nwires > 2)
329 omap_cfg_reg(AA12_24XX_USB2_TXEN);
330 if (nwires > 3)
331 omap_cfg_reg(AA6_24XX_USB2_RCV);
245 } else { 332 } else {
246 pr_debug("usb unrecognized\n"); 333 pr_debug("usb%d cpu unrecognized\n", 1);
334 return 0;
247 } 335 }
248 // omap_cfg_reg(USB2_SUSP); 336 // if (cpu_class_is_omap1()) omap_cfg_reg(USB2_SUSP);
249 337
250 switch (nwires) { 338 switch (nwires) {
339 case 2:
340 if (!cpu_is_omap24xx())
341 goto bad;
342 /* NOTE: board-specific code must override this setting if
343 * this TLL link is not using DP/DM
344 */
345 syscon1 = 1;
346 CONTROL_DEVCONF_REG |= USBT2WRMODEI(USB_BIDIR_TLL);
347 break;
251 case 3: 348 case 3:
252 syscon1 = 2; 349 syscon1 = 2;
350 if (cpu_is_omap24xx())
351 CONTROL_DEVCONF_REG |= USBT2WRMODEI(USB_BIDIR);
253 break; 352 break;
254 case 4: 353 case 4:
255 syscon1 = 1; 354 syscon1 = 1;
355 if (cpu_is_omap24xx())
356 CONTROL_DEVCONF_REG |= USBT2WRMODEI(USB_BIDIR);
357 break;
358 case 5:
359 if (!cpu_is_omap24xx())
360 goto bad;
361 omap_cfg_reg(AA4_24XX_USB2_TLLSE0);
362 /* NOTE: board-specific code must override this setting if
363 * this TLL link is not using DP/DM. Something must also
364 * set up OTG_SYSCON2.HMC_TLL{ATTACH,SPEED}
365 */
366 syscon1 = 3;
367 CONTROL_DEVCONF_REG |= USBT2WRMODEI(USB_UNIDIR_TLL)
368 | USBT2TLL5PI;
256 break; 369 break;
257 case 6: 370 case 6:
371 if (cpu_is_omap24xx())
372 goto bad;
258 syscon1 = 3; 373 syscon1 = 3;
259 if (cpu_is_omap15xx()) { 374 if (cpu_is_omap15xx()) {
260 omap_cfg_reg(USB2_VP); 375 omap_cfg_reg(USB2_VP);
@@ -266,6 +381,7 @@ static u32 __init omap_usb2_init(unsigned nwires, unsigned alt_pingroup)
266 } 381 }
267 break; 382 break;
268 default: 383 default:
384bad:
269 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n", 385 printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
270 2, nwires); 386 2, nwires);
271 } 387 }
@@ -294,13 +410,13 @@ static struct resource udc_resources[] = {
294 .end = UDC_BASE + 0xff, 410 .end = UDC_BASE + 0xff,
295 .flags = IORESOURCE_MEM, 411 .flags = IORESOURCE_MEM,
296 }, { /* general IRQ */ 412 }, { /* general IRQ */
297 .start = IH2_BASE + 20, 413 .start = INT_USB_IRQ_GEN,
298 .flags = IORESOURCE_IRQ, 414 .flags = IORESOURCE_IRQ,
299 }, { /* PIO IRQ */ 415 }, { /* PIO IRQ */
300 .start = IH2_BASE + 30, 416 .start = INT_USB_IRQ_NISO,
301 .flags = IORESOURCE_IRQ, 417 .flags = IORESOURCE_IRQ,
302 }, { /* SOF IRQ */ 418 }, { /* SOF IRQ */
303 .start = IH2_BASE + 29, 419 .start = INT_USB_IRQ_ISO,
304 .flags = IORESOURCE_IRQ, 420 .flags = IORESOURCE_IRQ,
305 }, 421 },
306}; 422};
@@ -329,11 +445,11 @@ static u64 ohci_dmamask = ~(u32)0;
329static struct resource ohci_resources[] = { 445static struct resource ohci_resources[] = {
330 { 446 {
331 .start = OMAP_OHCI_BASE, 447 .start = OMAP_OHCI_BASE,
332 .end = OMAP_OHCI_BASE + 4096 - 1, 448 .end = OMAP_OHCI_BASE + 0xff,
333 .flags = IORESOURCE_MEM, 449 .flags = IORESOURCE_MEM,
334 }, 450 },
335 { 451 {
336 .start = INT_USB_HHC_1, 452 .start = INT_USB_IRQ_HGEN,
337 .flags = IORESOURCE_IRQ, 453 .flags = IORESOURCE_IRQ,
338 }, 454 },
339}; 455};
@@ -361,7 +477,7 @@ static struct resource otg_resources[] = {
361 .end = OTG_BASE + 0xff, 477 .end = OTG_BASE + 0xff,
362 .flags = IORESOURCE_MEM, 478 .flags = IORESOURCE_MEM,
363 }, { 479 }, {
364 .start = IH2_BASE + 8, 480 .start = INT_USB_IRQ_OTG,
365 .flags = IORESOURCE_IRQ, 481 .flags = IORESOURCE_IRQ,
366 }, 482 },
367}; 483};
@@ -385,7 +501,7 @@ static struct platform_device otg_device = {
385 501
386 502
387// FIXME correct answer depends on hmc_mode, 503// FIXME correct answer depends on hmc_mode,
388// as does any nonzero value for config->otg port number 504// as does (on omap1) any nonzero value for config->otg port number
389#ifdef CONFIG_USB_GADGET_OMAP 505#ifdef CONFIG_USB_GADGET_OMAP
390#define is_usb0_device(config) 1 506#define is_usb0_device(config) 1
391#else 507#else
@@ -426,12 +542,13 @@ omap_otg_init(struct omap_usb_config *config)
426 if (config->otg) 542 if (config->otg)
427 syscon |= OTG_EN; 543 syscon |= OTG_EN;
428#endif 544#endif
429 pr_debug("USB_TRANSCEIVER_CTRL_REG = %03x\n", USB_TRANSCEIVER_CTRL_REG); 545 if (cpu_class_is_omap1())
546 pr_debug("USB_TRANSCEIVER_CTRL_REG = %03x\n", USB_TRANSCEIVER_CTRL_REG);
430 pr_debug("OTG_SYSCON_2_REG = %08x\n", syscon); 547 pr_debug("OTG_SYSCON_2_REG = %08x\n", syscon);
431 OTG_SYSCON_2_REG = syscon; 548 OTG_SYSCON_2_REG = syscon;
432 549
433 printk("USB: hmc %d", config->hmc_mode); 550 printk("USB: hmc %d", config->hmc_mode);
434 if (alt_pingroup) 551 if (!alt_pingroup)
435 printk(", usb2 alt %d wires", config->pins[2]); 552 printk(", usb2 alt %d wires", config->pins[2]);
436 else if (config->pins[0]) 553 else if (config->pins[0])
437 printk(", usb0 %d wires%s", config->pins[0], 554 printk(", usb0 %d wires%s", config->pins[0],
@@ -444,10 +561,12 @@ omap_otg_init(struct omap_usb_config *config)
444 printk(", Mini-AB on usb%d", config->otg - 1); 561 printk(", Mini-AB on usb%d", config->otg - 1);
445 printk("\n"); 562 printk("\n");
446 563
447 /* leave USB clocks/controllers off until needed */ 564 if (cpu_class_is_omap1()) {
448 ULPD_SOFT_REQ_REG &= ~SOFT_USB_CLK_REQ; 565 /* leave USB clocks/controllers off until needed */
449 ULPD_CLOCK_CTRL_REG &= ~USB_MCLK_EN; 566 ULPD_SOFT_REQ_REG &= ~SOFT_USB_CLK_REQ;
450 ULPD_CLOCK_CTRL_REG |= DIS_USB_PVCI_CLK; 567 ULPD_CLOCK_CTRL_REG &= ~USB_MCLK_EN;
568 ULPD_CLOCK_CTRL_REG |= DIS_USB_PVCI_CLK;
569 }
451 syscon = OTG_SYSCON_1_REG; 570 syscon = OTG_SYSCON_1_REG;
452 syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN; 571 syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN;
453 572
@@ -585,7 +704,7 @@ omap_usb_init(void)
585 } 704 }
586 platform_data = *config; 705 platform_data = *config;
587 706
588 if (cpu_is_omap730() || cpu_is_omap16xx()) 707 if (cpu_is_omap730() || cpu_is_omap16xx() || cpu_is_omap24xx())
589 omap_otg_init(&platform_data); 708 omap_otg_init(&platform_data);
590 else if (cpu_is_omap15xx()) 709 else if (cpu_is_omap15xx())
591 omap_1510_usb_init(&platform_data); 710 omap_1510_usb_init(&platform_data);
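
Board files feed all of this through one omap_usb_config; the nwires cases
above map the pins[] counts to SYSCON1 signaling modes, with 2-wire TLL new
and 24xx-only while 6-wire is now rejected on 24xx. A sketch of a 24xx-style
configuration (the hmc_mode value and the mechanism that delivers the struct
to omap_usb_init() are board specific and assumed here):

	static struct omap_usb_config example_usb_config __initdata = {
		.otg		= 1,		/* Mini-AB on usb0 */
		.hmc_mode	= 0x14,		/* illustrative value */
		.pins		= { 3, 2, 0 },	/* usb0 3-wire; usb1 2-wire TLL */
	};
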
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S
index 435349dc3243..7b7ae790b00d 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/plat-s3c24xx/sleep.S
@@ -1,4 +1,4 @@
1/* linux/arch/arm/mach-s3c2410/sleep.S 1/* linux/arch/arm/plat-s3c24xx/sleep.S
2 * 2 *
3 * Copyright (c) 2004 Simtec Electronics 3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 6115fc1f0cfa..dc6bc01f232c 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -16,7 +16,7 @@ AFLAGS += -mrelax -mno-pic
16CFLAGS_MODULE += -mno-relax 16CFLAGS_MODULE += -mno-relax
17LDFLAGS_vmlinux += --relax 17LDFLAGS_vmlinux += --relax
18 18
19cpuflags-$(CONFIG_CPU_AP7000) += -mcpu=ap7000 19cpuflags-$(CONFIG_CPU_AT32AP7000) += -mcpu=ap7000
20 20
21CFLAGS += $(cpuflags-y) 21CFLAGS += $(cpuflags-y)
22AFLAGS += $(cpuflags-y) 22AFLAGS += $(cpuflags-y)
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 4e4181ed1c6d..13f988402613 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -330,13 +330,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
330{ 330{
331 struct pt_regs *childregs; 331 struct pt_regs *childregs;
332 332
333 childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1; 333 childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
334 *childregs = *regs; 334 *childregs = *regs;
335 335
336 if (user_mode(regs)) 336 if (user_mode(regs))
337 childregs->sp = usp; 337 childregs->sp = usp;
338 else 338 else
339 childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE; 339 childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
340 340
341 childregs->r12 = 0; /* Set return value for child */ 341 childregs->r12 = 0; /* Set return value for child */
342 342
@@ -403,7 +403,7 @@ unsigned long get_wchan(struct task_struct *p)
403 if (!p || p == current || p->state == TASK_RUNNING) 403 if (!p || p == current || p->state == TASK_RUNNING)
404 return 0; 404 return 0;
405 405
406 stack_page = (unsigned long)p->thread_info; 406 stack_page = (unsigned long)task_stack_page(p);
407 BUG_ON(!stack_page); 407 BUG_ON(!stack_page);
408 408
409 /* 409 /*
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 8ac74dddbbde..3c36c2d16148 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -24,7 +24,7 @@
24 24
25static struct pt_regs *get_user_regs(struct task_struct *tsk) 25static struct pt_regs *get_user_regs(struct task_struct *tsk)
26{ 26{
27 return (struct pt_regs *)((unsigned long) tsk->thread_info + 27 return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
28 THREAD_SIZE - sizeof(struct pt_regs)); 28 THREAD_SIZE - sizeof(struct pt_regs));
29} 29}
30 30
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 7c279586fbba..07f6a6fa340d 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -291,4 +291,5 @@ sys_call_table:
291 .long sys_shmget /* 275 */ 291 .long sys_shmget /* 275 */
292 .long sys_shmdt 292 .long sys_shmdt
293 .long sys_shmctl 293 .long sys_shmctl
294 .long sys_utimensat
294 .long sys_ni_syscall /* r8 is saturated at nr_syscalls */ 295 .long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index 4de9edf96ed2..86d107511dd4 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -123,7 +123,7 @@ asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
123 123
124/* This way of handling undefined instructions is stolen from ARM */ 124/* This way of handling undefined instructions is stolen from ARM */
125static LIST_HEAD(undef_hook); 125static LIST_HEAD(undef_hook);
126static spinlock_t undef_lock = SPIN_LOCK_UNLOCKED; 126static DEFINE_SPINLOCK(undef_lock);
127 127
128void register_undef_hook(struct undef_hook *hook) 128void register_undef_hook(struct undef_hook *hook)
129{ 129{
diff --git a/arch/avr32/kernel/vmlinux.lds.c b/arch/avr32/kernel/vmlinux.lds.c
index 7ad20cfb48a8..e7f72c995a32 100644
--- a/arch/avr32/kernel/vmlinux.lds.c
+++ b/arch/avr32/kernel/vmlinux.lds.c
@@ -35,7 +35,7 @@ SECTIONS
35 _einittext = .; 35 _einittext = .;
36 . = ALIGN(4); 36 . = ALIGN(4);
37 __tagtable_begin = .; 37 __tagtable_begin = .;
38 *(.taglist) 38 *(.taglist.init)
39 __tagtable_end = .; 39 __tagtable_end = .;
40 *(.init.data) 40 *(.init.data)
41 . = ALIGN(16); 41 . = ALIGN(16);
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 00c435452d7e..0f8c89c9f832 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -18,7 +18,7 @@
18 18
19#include "clock.h" 19#include "clock.h"
20 20
21static spinlock_t clk_lock = SPIN_LOCK_UNLOCKED; 21static DEFINE_SPINLOCK(clk_lock);
22 22
23struct clk *clk_get(struct device *dev, const char *id) 23struct clk *clk_get(struct device *dev, const char *id)
24{ 24{
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index b68d669f823d..099212d4567c 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -112,16 +112,21 @@ void dma_free_coherent(struct device *dev, size_t size,
112} 112}
113EXPORT_SYMBOL(dma_free_coherent); 113EXPORT_SYMBOL(dma_free_coherent);
114 114
115#if 0
116void *dma_alloc_writecombine(struct device *dev, size_t size, 115void *dma_alloc_writecombine(struct device *dev, size_t size,
117 dma_addr_t *handle, gfp_t gfp) 116 dma_addr_t *handle, gfp_t gfp)
118{ 117{
119 struct page *page; 118 struct page *page;
119 dma_addr_t phys;
120 120
121 page = __dma_alloc(dev, size, handle, gfp); 121 page = __dma_alloc(dev, size, handle, gfp);
122 if (!page)
123 return NULL;
124
125 phys = page_to_phys(page);
126 *handle = phys;
122 127
123 /* Now, map the page into P3 with write-combining turned on */ 128 /* Now, map the page into P3 with write-combining turned on */
124 return __ioremap(page_to_phys(page), size, _PAGE_BUFFER); 129 return __ioremap(phys, size, _PAGE_BUFFER);
125} 130}
126EXPORT_SYMBOL(dma_alloc_writecombine); 131EXPORT_SYMBOL(dma_alloc_writecombine);
127 132
@@ -132,8 +137,7 @@ void dma_free_writecombine(struct device *dev, size_t size,
132 137
133 iounmap(cpu_addr); 138 iounmap(cpu_addr);
134 139
135 page = bus_to_page(handle); 140 page = phys_to_page(handle);
136 __dma_free(dev, size, page, handle); 141 __dma_free(dev, size, page, handle);
137} 142}
138EXPORT_SYMBOL(dma_free_writecombine); 143EXPORT_SYMBOL(dma_free_writecombine);
139#endif
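
With the #if 0 guard dropped, dma_alloc_writecombine() becomes a real
allocator on AVR32: it takes pages from __dma_alloc(), stores the physical
address in *handle, and remaps the pages into P3 with write-combining enabled.
Typical driver usage (a sketch; the device pointer would come from the
caller's platform device):

	#include <linux/dma-mapping.h>

	static void *fb_vaddr;
	static dma_addr_t fb_dma;

	static int example_alloc(struct device *dev, size_t size)
	{
		/* write-combined kernel mapping; fb_dma receives the
		 * physical address for programming the controller */
		fb_vaddr = dma_alloc_writecombine(dev, size, &fb_dma,
						  GFP_KERNEL);
		return fb_vaddr ? 0 : -ENOMEM;
	}

	static void example_free(struct device *dev, size_t size)
	{
		dma_free_writecombine(dev, size, fb_vaddr, fb_dma);
	}
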
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 41d9a9f89700..e455f4504509 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -46,7 +46,7 @@ int main(void)
46 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); 46 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
47 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); 47 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
48 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); 48 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
49 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info)); 49 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
50 DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 50 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
51 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); 51 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
52 DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending)); 52 DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index d7c8e514cb92..e718bb4a1ef0 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -73,7 +73,7 @@
73static inline struct pt_regs *get_user_regs(struct task_struct *task) 73static inline struct pt_regs *get_user_regs(struct task_struct *task)
74{ 74{
75 return (struct pt_regs *) 75 return (struct pt_regs *)
76 ((unsigned long)task->thread_info + 76 ((unsigned long)task_stack_page(task) +
77 (THREAD_SIZE - sizeof(struct pt_regs))); 77 (THREAD_SIZE - sizeof(struct pt_regs)));
78} 78}
79 79
@@ -99,7 +99,7 @@ static inline long get_reg(struct task_struct *task, int regno)
99 unsigned char *reg_ptr; 99 unsigned char *reg_ptr;
100 100
101 struct pt_regs *regs = 101 struct pt_regs *regs =
102 (struct pt_regs *)((unsigned long)task->thread_info + 102 (struct pt_regs *)((unsigned long)task_stack_page(task) +
103 (THREAD_SIZE - sizeof(struct pt_regs))); 103 (THREAD_SIZE - sizeof(struct pt_regs)));
104 reg_ptr = (char *)regs; 104 reg_ptr = (char *)regs;
105 105
@@ -125,7 +125,7 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
125 char * reg_ptr; 125 char * reg_ptr;
126 126
127 struct pt_regs *regs = 127 struct pt_regs *regs =
128 (struct pt_regs *)((unsigned long)task->thread_info + 128 (struct pt_regs *)((unsigned long)task_stack_page(task) +
129 (THREAD_SIZE - sizeof(struct pt_regs))); 129 (THREAD_SIZE - sizeof(struct pt_regs)));
130 reg_ptr = (char *)regs; 130 reg_ptr = (char *)regs;
131 131
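The three hunks above open-code the same computation while switching from the removed task->thread_info pointer to the task_stack_page() accessor (the asm-offsets hunks make the matching offsetof() change to the renamed ->stack field). The expression could be captured once; task_regs below is an invented helper name, not part of the patch:

    static inline struct pt_regs *task_regs(struct task_struct *task)
    {
            return (struct pt_regs *)((unsigned long)task_stack_page(task) +
                                      (THREAD_SIZE - sizeof(struct pt_regs)));
    }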
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index f64624fc4504..1d859c16931e 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -603,7 +603,7 @@ config ETRAX_CARDBUS
603 select HOTPLUG 603 select HOTPLUG
604 select PCCARD_NONSTATIC 604 select PCCARD_NONSTATIC
605 help 605 help
606 Enabled the ETRAX Carbus driver. 606 Enabled the ETRAX Cardbus driver.
607 607
608config PCI 608config PCI
609 bool 609 bool
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index eed694312a79..114738a45582 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -45,15 +45,15 @@ config TIME_LOW_RES
45 bool 45 bool
46 default y 46 default y
47 47
48config ARCH_HAS_ILOG2_U32 48config QUICKLIST
49 bool 49 bool
50 default y 50 default y
51 51
52config ARCH_HAS_ILOG2_U64 52config ARCH_HAS_ILOG2_U32
53 bool 53 bool
54 default y 54 default y
55 55
56config ARCH_USES_SLAB_PAGE_STRUCT 56config ARCH_HAS_ILOG2_U64
57 bool 57 bool
58 default y 58 default y
59 59
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 515a5cea5469..9583a338e9d6 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -25,12 +25,14 @@
25#include <linux/elf.h> 25#include <linux/elf.h>
26#include <linux/reboot.h> 26#include <linux/reboot.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/pagemap.h>
28 29
29#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/setup.h> 33#include <asm/setup.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/tlb.h>
34#include <asm/gdb-stub.h> 36#include <asm/gdb-stub.h>
35#include <asm/mb-regs.h> 37#include <asm/mb-regs.h>
36 38
@@ -88,6 +90,8 @@ void cpu_idle(void)
88 while (!need_resched()) { 90 while (!need_resched()) {
89 irq_stat[cpu].idle_timestamp = jiffies; 91 irq_stat[cpu].idle_timestamp = jiffies;
90 92
93 check_pgt_cache();
94
91 if (!frv_dma_inprogress && idle) 95 if (!frv_dma_inprogress && idle)
92 idle(); 96 idle();
93 } 97 }
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 598a26ab8ad8..7787c3cc52c6 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -13,12 +13,12 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/highmem.h> 15#include <linux/highmem.h>
16#include <linux/quicklist.h>
16#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
17#include <asm/page.h> 18#include <asm/page.h>
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
19 20
20pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); 21pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
21struct kmem_cache *pgd_cache;
22 22
23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
24{ 24{
@@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
100 set_page_private(next, (unsigned long) pprev); 100 set_page_private(next, (unsigned long) pprev);
101} 101}
102 102
103void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) 103void pgd_ctor(void *pgd)
104{ 104{
105 unsigned long flags; 105 unsigned long flags;
106 106
@@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
120} 120}
121 121
122/* never called when PTRS_PER_PMD > 1 */ 122/* never called when PTRS_PER_PMD > 1 */
123void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused) 123void pgd_dtor(void *pgd)
124{ 124{
125 unsigned long flags; /* can be called from interrupt context */ 125 unsigned long flags; /* can be called from interrupt context */
126 126
@@ -133,7 +133,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
133{ 133{
134 pgd_t *pgd; 134 pgd_t *pgd;
135 135
136 pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); 136 pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
137 if (!pgd) 137 if (!pgd)
138 return pgd; 138 return pgd;
139 139
@@ -143,15 +143,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
143void pgd_free(pgd_t *pgd) 143void pgd_free(pgd_t *pgd)
144{ 144{
145 /* in the non-PAE case, clear_page_tables() clears user pgd entries */ 145 /* in the non-PAE case, clear_page_tables() clears user pgd entries */
146 kmem_cache_free(pgd_cache, pgd); 146 quicklist_free(0, pgd_dtor, pgd);
147} 147}
148 148
149void __init pgtable_cache_init(void) 149void __init pgtable_cache_init(void)
150{ 150{
151 pgd_cache = kmem_cache_create("pgd",
152 PTRS_PER_PGD * sizeof(pgd_t),
153 PTRS_PER_PGD * sizeof(pgd_t),
154 SLAB_PANIC,
155 pgd_ctor,
156 pgd_dtor);
157} 151}
152
153void check_pgt_cache(void)
154{
155 quicklist_trim(0, pgd_dtor, 25, 16);
156}
157
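The frv conversion swaps the dedicated pgd kmem_cache for the generic quicklist allocator (enabled by the new CONFIG_QUICKLIST in the frv Kconfig hunk above): freed pgds are parked on a per-cpu list with their constructed state intact, and the idle loop (see the process.c hunk) periodically calls check_pgt_cache() to trim it. The lifecycle in brief, using quicklist index 0 as the code above does:

    pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor); /* ctor runs only on fresh pages */

    quicklist_free(0, pgd_dtor, pgd);       /* parked per-cpu, not returned to the buddy */

    /* from the idle loop, via check_pgt_cache(): */
    quicklist_trim(0, pgd_dtor, 25, 16);    /* keep ~25 cached pages, free at most 16 per call */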
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index e0e9bcb015a9..554efe604a08 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -21,12 +21,12 @@ config GDB_MAGICPRINT
21 bool "Message Output for GDB MagicPrint service" 21 bool "Message Output for GDB MagicPrint service"
22 depends on (H8300H_SIM || H8S_SIM) 22 depends on (H8300H_SIM || H8S_SIM)
23 help 23 help
24 kernel messages output useing MagicPrint service from GDB 24 kernel messages output using MagicPrint service from GDB
25 25
26config SYSCALL_PRINT 26config SYSCALL_PRINT
27 bool "SystemCall trace print" 27 bool "SystemCall trace print"
28 help 28 help
29 outout history of systemcall 29 output history of systemcall
30 30
31config GDB_DEBUG 31config GDB_DEBUG
32 bool "Use gdb stub" 32 bool "Use gdb stub"
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index b78b82ad28a3..fc30b4fd0914 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -30,7 +30,7 @@ int main(void)
30 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); 30 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
31 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); 31 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
32 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); 32 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
33 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info)); 33 DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
34 DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 34 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
35 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); 35 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
36 36
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 64ad10f984a1..30944ee2e61a 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -858,9 +858,9 @@ config RELOCATABLE
858 bool "Build a relocatable kernel(EXPERIMENTAL)" 858 bool "Build a relocatable kernel(EXPERIMENTAL)"
859 depends on EXPERIMENTAL 859 depends on EXPERIMENTAL
860 help 860 help
861 This build a kernel image that retains relocation information 861 This builds a kernel image that retains relocation information
862 so it can be loaded someplace besides the default 1MB. 862 so it can be loaded someplace besides the default 1MB.
863 The relocations tend to the kernel binary about 10% larger, 863 The relocations tend to make the kernel binary about 10% larger,
864 but are discarded at runtime. 864 but are discarded at runtime.
865 865
866 One use is for the kexec on panic case where the recovery kernel 866 One use is for the kexec on panic case where the recovery kernel
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index dce6124cb842..d7f6fb0b30f2 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -108,7 +108,7 @@ config MCORE2
108 bool "Core 2/newer Xeon" 108 bool "Core 2/newer Xeon"
109 help 109 help
110 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx) 110 Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
111 CPUs. You can distingush newer from older Xeons by the CPU family 111 CPUs. You can distinguish newer from older Xeons by the CPU family
112 in /proc/cpuinfo. Newer ones have 6. 112 in /proc/cpuinfo. Newer ones have 6.
113 113
114config MPENTIUM4 114config MPENTIUM4
@@ -172,7 +172,7 @@ config MWINCHIP3D
172 help 172 help
173 Select this for an IDT Winchip-2A or 3. Linux and GCC 173 Select this for an IDT Winchip-2A or 3. Linux and GCC
174 treat this chip as a 586TSC with some extended instructions 174 treat this chip as a 586TSC with some extended instructions
175 and alignment reqirements. Also enable out of order memory 175 and alignment requirements. Also enable out of order memory
176 stores for this CPU, which can increase performance of some 176 stores for this CPU, which can increase performance of some
177 operations. 177 operations.
178 178
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 80b4c5d421b1..e5be819492ef 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -733,9 +733,11 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
733 sys_dev = get_cpu_sysdev(cpu); 733 sys_dev = get_cpu_sysdev(cpu);
734 switch (action) { 734 switch (action) {
735 case CPU_ONLINE: 735 case CPU_ONLINE:
736 case CPU_ONLINE_FROZEN:
736 cache_add_dev(sys_dev); 737 cache_add_dev(sys_dev);
737 break; 738 break;
738 case CPU_DEAD: 739 case CPU_DEAD:
740 case CPU_DEAD_FROZEN:
739 cache_remove_dev(sys_dev); 741 cache_remove_dev(sys_dev);
740 break; 742 break;
741 } 743 }
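This two-line addition repeats across most of the hotplug notifiers in this series (therm_throt, cpuid, msr, err_inject, palinfo, salinfo, topology). Suspend/resume now drives CPU hotplug with the CPU_TASKS_FROZEN bit set in the notifier action, so callbacks that should behave identically in both paths must also match the *_FROZEN variants. The generic shape, with hypothetical foo_* helpers standing in for each driver's add/remove:

    static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
    {
            struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)hcpu);

            switch (action) {
            case CPU_ONLINE:
            case CPU_ONLINE_FROZEN:         /* CPU coming up during system resume */
                    foo_add_dev(sys_dev);
                    break;
            case CPU_DEAD:
            case CPU_DEAD_FROZEN:           /* CPU going down while tasks are frozen */
                    foo_remove_dev(sys_dev);
                    break;
            }
            return NOTIFY_OK;
    }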
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 065005c3f168..7ba7c3abd3a4 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/i386/kerne/cpu/mcheck/therm_throt.c 2 * linux/arch/i386/kernel/cpu/mcheck/therm_throt.c
3 * 3 *
4 * Thermal throttle event support code (such as syslog messaging and rate 4 * Thermal throttle event support code (such as syslog messaging and rate
5 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). 5 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
@@ -137,10 +137,12 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
137 mutex_lock(&therm_cpu_lock); 137 mutex_lock(&therm_cpu_lock);
138 switch (action) { 138 switch (action) {
139 case CPU_ONLINE: 139 case CPU_ONLINE:
140 case CPU_ONLINE_FROZEN:
140 err = thermal_throttle_add_dev(sys_dev); 141 err = thermal_throttle_add_dev(sys_dev);
141 WARN_ON(err); 142 WARN_ON(err);
142 break; 143 break;
143 case CPU_DEAD: 144 case CPU_DEAD:
145 case CPU_DEAD_FROZEN:
144 thermal_throttle_remove_dev(sys_dev); 146 thermal_throttle_remove_dev(sys_dev);
145 break; 147 break;
146 } 148 }
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index 6471a5a13202..200fb3f9ebfb 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -77,8 +77,10 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
77 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); 77 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
78 78
79 /* If we can run i686 user-space code, call us an i686 */ 79 /* If we can run i686 user-space code, call us an i686 */
80#define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV) 80#define USER686 ((1 << X86_FEATURE_TSC)|\
81 if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 ) 81 (1 << X86_FEATURE_CX8)|\
82 (1 << X86_FEATURE_CMOV))
83 if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
82 c->x86 = 6; 84 c->x86 = 6;
83 85
84#ifdef CONFIG_SYSCTL 86#ifdef CONFIG_SYSCTL
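The X86_FEATURE_* constants are bit numbers, not masks: the old USER686 computed 4 | 8 | 15 == 0x0f, so the capability test could never match the intended features. Shifting each bit gives the real mask (TSC is CPUID.1:EDX bit 4, CX8 bit 8, CMOV bit 15):

    #define OLD_USER686 (4 | 8 | 15)                           /* == 0x0f: wrong */
    #define NEW_USER686 ((1 << 4) | (1 << 8) | (1 << 15))      /* == 0x8110: TSC|CX8|CMOV */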
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index eeae0d992337..5c2faa10e9fa 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -169,9 +169,11 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
169 169
170 switch (action) { 170 switch (action) {
171 case CPU_ONLINE: 171 case CPU_ONLINE:
172 case CPU_ONLINE_FROZEN:
172 cpuid_device_create(cpu); 173 cpuid_device_create(cpu);
173 break; 174 break;
174 case CPU_DEAD: 175 case CPU_DEAD:
176 case CPU_DEAD_FROZEN:
175 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 177 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
176 break; 178 break;
177 } 179 }
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index cbe7ec8dbb9f..83f825f2e2d7 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -567,7 +567,7 @@ static int cpu_request_microcode(int cpu)
567 return error; 567 return error;
568} 568}
569 569
570static int apply_microcode_on_cpu(int cpu) 570static int apply_microcode_check_cpu(int cpu)
571{ 571{
572 struct cpuinfo_x86 *c = cpu_data + cpu; 572 struct cpuinfo_x86 *c = cpu_data + cpu;
573 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 573 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -575,8 +575,9 @@ static int apply_microcode_on_cpu(int cpu)
575 unsigned int val[2]; 575 unsigned int val[2];
576 int err = 0; 576 int err = 0;
577 577
578 /* Check if the microcode is available */
578 if (!uci->mc) 579 if (!uci->mc)
579 return -EINVAL; 580 return 0;
580 581
581 old = current->cpus_allowed; 582 old = current->cpus_allowed;
582 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 583 set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -614,7 +615,7 @@ static int apply_microcode_on_cpu(int cpu)
614 return err; 615 return err;
615} 616}
616 617
617static void microcode_init_cpu(int cpu) 618static void microcode_init_cpu(int cpu, int resume)
618{ 619{
619 cpumask_t old; 620 cpumask_t old;
620 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 621 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -624,8 +625,7 @@ static void microcode_init_cpu(int cpu)
624 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 625 set_cpus_allowed(current, cpumask_of_cpu(cpu));
625 mutex_lock(&microcode_mutex); 626 mutex_lock(&microcode_mutex);
626 collect_cpu_info(cpu); 627 collect_cpu_info(cpu);
627 if (uci->valid && system_state == SYSTEM_RUNNING && 628 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
628 !suspend_cpu_hotplug)
629 cpu_request_microcode(cpu); 629 cpu_request_microcode(cpu);
630 mutex_unlock(&microcode_mutex); 630 mutex_unlock(&microcode_mutex);
631 set_cpus_allowed(current, old); 631 set_cpus_allowed(current, old);
@@ -702,7 +702,7 @@ static struct attribute_group mc_attr_group = {
702 .name = "microcode", 702 .name = "microcode",
703}; 703};
704 704
705static int mc_sysdev_add(struct sys_device *sys_dev) 705static int __mc_sysdev_add(struct sys_device *sys_dev, int resume)
706{ 706{
707 int err, cpu = sys_dev->id; 707 int err, cpu = sys_dev->id;
708 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 708 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -711,39 +711,31 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
711 return 0; 711 return 0;
712 712
713 pr_debug("Microcode:CPU %d added\n", cpu); 713 pr_debug("Microcode:CPU %d added\n", cpu);
714 /* If suspend_cpu_hotplug is set, the system is resuming and we should 714 memset(uci, 0, sizeof(*uci));
715 * use the data from before the suspend.
716 */
717 if (suspend_cpu_hotplug) {
718 err = apply_microcode_on_cpu(cpu);
719 if (err)
720 microcode_fini_cpu(cpu);
721 }
722 if (!uci->valid)
723 memset(uci, 0, sizeof(*uci));
724 715
725 err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); 716 err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
726 if (err) 717 if (err)
727 return err; 718 return err;
728 719
729 if (!uci->valid) 720 microcode_init_cpu(cpu, resume);
730 microcode_init_cpu(cpu);
731 721
732 return 0; 722 return 0;
733} 723}
734 724
725static int mc_sysdev_add(struct sys_device *sys_dev)
726{
727 return __mc_sysdev_add(sys_dev, 0);
728}
729
735static int mc_sysdev_remove(struct sys_device *sys_dev) 730static int mc_sysdev_remove(struct sys_device *sys_dev)
736{ 731{
737 int cpu = sys_dev->id; 732 int cpu = sys_dev->id;
738 733
739 if (!cpu_online(cpu)) 734 if (!cpu_online(cpu))
740 return 0; 735 return 0;
736
741 pr_debug("Microcode:CPU %d removed\n", cpu); 737 pr_debug("Microcode:CPU %d removed\n", cpu);
742 /* If suspend_cpu_hotplug is set, the system is suspending and we should 738 microcode_fini_cpu(cpu);
743 * keep the microcode in memory for the resume.
744 */
745 if (!suspend_cpu_hotplug)
746 microcode_fini_cpu(cpu);
747 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); 739 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
748 return 0; 740 return 0;
749} 741}
@@ -774,13 +766,34 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
774 766
775 sys_dev = get_cpu_sysdev(cpu); 767 sys_dev = get_cpu_sysdev(cpu);
776 switch (action) { 768 switch (action) {
769 case CPU_UP_CANCELED_FROZEN:
770 /* The CPU refused to come up during a system resume */
771 microcode_fini_cpu(cpu);
772 break;
777 case CPU_ONLINE: 773 case CPU_ONLINE:
778 case CPU_DOWN_FAILED: 774 case CPU_DOWN_FAILED:
779 mc_sysdev_add(sys_dev); 775 mc_sysdev_add(sys_dev);
780 break; 776 break;
777 case CPU_ONLINE_FROZEN:
778 /* System-wide resume is in progress, try to apply microcode */
779 if (apply_microcode_check_cpu(cpu)) {
780 /* The application of microcode failed */
781 microcode_fini_cpu(cpu);
782 __mc_sysdev_add(sys_dev, 1);
783 break;
784 }
785 case CPU_DOWN_FAILED_FROZEN:
786 if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
787 printk(KERN_ERR "Microcode: Failed to create the sysfs "
788 "group for CPU%d\n", cpu);
789 break;
781 case CPU_DOWN_PREPARE: 790 case CPU_DOWN_PREPARE:
782 mc_sysdev_remove(sys_dev); 791 mc_sysdev_remove(sys_dev);
783 break; 792 break;
793 case CPU_DOWN_PREPARE_FROZEN:
794 /* Suspend is in progress, only remove the interface */
795 sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
796 break;
784 } 797 }
785 return NOTIFY_OK; 798 return NOTIFY_OK;
786} 799}
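Note the deliberate fall-through in the notifier above: when apply_microcode_check_cpu() succeeds for CPU_ONLINE_FROZEN there is no break, so control continues into CPU_DOWN_FAILED_FROZEN and only the sysfs group is re-created. Annotated (and condensed, dropping the error printk) for clarity:

    case CPU_ONLINE_FROZEN:
            if (apply_microcode_check_cpu(cpu)) {
                    /* resume-time apply failed: drop state, start over */
                    microcode_fini_cpu(cpu);
                    __mc_sysdev_add(sys_dev, 1);
                    break;
            }
            /* success: fall through, just restore the sysfs group */
    case CPU_DOWN_FAILED_FROZEN:
            sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
            break;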
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 8cd0a91ce107..0c1069b8d638 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -153,9 +153,11 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
153 153
154 switch (action) { 154 switch (action) {
155 case CPU_ONLINE: 155 case CPU_ONLINE:
156 case CPU_ONLINE_FROZEN:
156 msr_device_create(cpu); 157 msr_device_create(cpu);
157 break; 158 break;
158 case CPU_DEAD: 159 case CPU_DEAD:
160 case CPU_DEAD_FROZEN:
159 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); 161 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
160 break; 162 break;
161 } 163 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 4bec0cbf407a..c05e7e861b29 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -305,7 +305,7 @@ void show_registers(struct pt_regs *regs)
305 regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss); 305 regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
306 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", 306 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
307 TASK_COMM_LEN, current->comm, current->pid, 307 TASK_COMM_LEN, current->comm, current->pid,
308 current_thread_info(), current, current->thread_info); 308 current_thread_info(), current, task_thread_info(current));
309 /* 309 /*
310 * When in-kernel, we also print out the stack and code at the 310 * When in-kernel, we also print out the stack and code at the
311 * time of the fault.. 311 * time of the fault..
diff --git a/arch/i386/mach-generic/probe.c b/arch/i386/mach-generic/probe.c
index a7b3999bb37a..74f3da634423 100644
--- a/arch/i386/mach-generic/probe.c
+++ b/arch/i386/mach-generic/probe.c
@@ -119,9 +119,7 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
119 return 0; 119 return 0;
120} 120}
121 121
122#ifdef CONFIG_SMP
123int hard_smp_processor_id(void) 122int hard_smp_processor_id(void)
124{ 123{
125 return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); 124 return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
126} 125}
127#endif
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c
index 8fe7e4593d5f..9b77b39b71a6 100644
--- a/arch/i386/mach-voyager/voyager_basic.c
+++ b/arch/i386/mach-voyager/voyager_basic.c
@@ -292,8 +292,8 @@ machine_emergency_restart(void)
292void 292void
293mca_nmi_hook(void) 293mca_nmi_hook(void)
294{ 294{
295 __u8 dumpval __attribute__((unused)) = inb(0xf823); 295 __u8 dumpval __maybe_unused = inb(0xf823);
296 __u8 swnmi __attribute__((unused)) = inb(0xf813); 296 __u8 swnmi __maybe_unused = inb(0xf813);
297 297
298 /* FIXME: assume dump switch pressed */ 298 /* FIXME: assume dump switch pressed */
299 /* check to see if the dump switch was pressed */ 299 /* check to see if the dump switch was pressed */
diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c
index 1cf11af96de2..3de9f9ba2da6 100644
--- a/arch/i386/pci/init.c
+++ b/arch/i386/pci/init.c
@@ -6,7 +6,7 @@
6 in the right sequence from here. */ 6 in the right sequence from here. */
7static __init int pci_access_init(void) 7static __init int pci_access_init(void)
8{ 8{
9 int type __attribute__((unused)) = 0; 9 int type __maybe_unused = 0;
10 10
11#ifdef CONFIG_PCI_DIRECT 11#ifdef CONFIG_PCI_DIRECT
12 type = pci_direct_probe(); 12 type = pci_direct_probe();
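Both this hunk and the voyager one above swap the open-coded attribute for __maybe_unused from <linux/compiler.h>, which keeps the annotation in one portable place. On GCC the macro simply expands to the attribute; a minimal sketch of the expansion and its use:

    #define __maybe_unused __attribute__((unused))

    void demo(void)
    {
            int type __maybe_unused = 0;    /* no warning even when never read */
    }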
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e23af4b6ae8c..6e41471449c0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -468,7 +468,7 @@ config KEXEC
468 help 468 help
469 kexec is a system call that implements the ability to shutdown your 469 kexec is a system call that implements the ability to shutdown your
470 current kernel, and to start another kernel. It is like a reboot 470 current kernel, and to start another kernel. It is like a reboot
471 but it is indepedent of the system firmware. And like a reboot 471 but it is independent of the system firmware. And like a reboot
472 you can start any kernel with it, not just Linux. 472 you can start any kernel with it, not just Linux.
473 473
474 The name comes from the similarity to the exec system call. 474 The name comes from the similarity to the exec system call.
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 687e5fdc9683..99b665e2b1d5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -52,43 +52,6 @@ ENTRY(ia32_clone)
52 br.ret.sptk.many rp 52 br.ret.sptk.many rp
53END(ia32_clone) 53END(ia32_clone)
54 54
55ENTRY(sys32_rt_sigsuspend)
56 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
57 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs
58 mov loc0=rp
59 mov out0=in0 // mask
60 mov out1=in1 // sigsetsize
61 mov out2=sp // out2 = &sigscratch
62 .fframe 16
63 adds sp=-16,sp // allocate dummy "sigscratch"
64 ;;
65 .body
66 br.call.sptk.many rp=ia32_rt_sigsuspend
671: .restore sp
68 adds sp=16,sp
69 mov rp=loc0
70 mov ar.pfs=loc1
71 br.ret.sptk.many rp
72END(sys32_rt_sigsuspend)
73
74ENTRY(sys32_sigsuspend)
75 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
76 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs
77 mov loc0=rp
78 mov out0=in2 // mask (first two args are ignored)
79 ;;
80 mov out1=sp // out1 = &sigscratch
81 .fframe 16
82 adds sp=-16,sp // allocate dummy "sigscratch"
83 .body
84 br.call.sptk.many rp=ia32_sigsuspend
851: .restore sp
86 adds sp=16,sp
87 mov rp=loc0
88 mov ar.pfs=loc1
89 br.ret.sptk.many rp
90END(sys32_sigsuspend)
91
92GLOBAL_ENTRY(ia32_ret_from_clone) 55GLOBAL_ENTRY(ia32_ret_from_clone)
93 PT_REGS_UNWIND_INFO(0) 56 PT_REGS_UNWIND_INFO(0)
94{ /* 57{ /*
@@ -389,7 +352,7 @@ ia32_syscall_table:
389 data8 sys_rt_sigpending 352 data8 sys_rt_sigpending
390 data8 compat_sys_rt_sigtimedwait 353 data8 compat_sys_rt_sigtimedwait
391 data8 sys32_rt_sigqueueinfo 354 data8 sys32_rt_sigqueueinfo
392 data8 sys32_rt_sigsuspend 355 data8 compat_sys_rt_sigsuspend
393 data8 sys32_pread /* 180 */ 356 data8 sys32_pread /* 180 */
394 data8 sys32_pwrite 357 data8 sys32_pwrite
395 data8 sys_chown /* 16-bit version */ 358 data8 sys_chown /* 16-bit version */
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 10510e585204..85e82f32e480 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -451,59 +451,20 @@ sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int r
451 sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); 451 sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
452} 452}
453 453
454long 454asmlinkage long
455__ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr) 455sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
456{ 456{
457 extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall); 457 mask &= _BLOCKABLE;
458 sigset_t oldset, set;
459
460 scr->scratch_unat = 0; /* avoid leaking kernel bits to user level */
461 memset(&set, 0, sizeof(set));
462
463 memcpy(&set.sig, &sset->sig, sigsetsize);
464
465 sigdelsetmask(&set, ~_BLOCKABLE);
466
467 spin_lock_irq(&current->sighand->siglock); 458 spin_lock_irq(&current->sighand->siglock);
468 { 459 current->saved_sigmask = current->blocked;
469 oldset = current->blocked; 460 siginitset(&current->blocked, mask);
470 current->blocked = set; 461 recalc_sigpending();
471 recalc_sigpending();
472 }
473 spin_unlock_irq(&current->sighand->siglock); 462 spin_unlock_irq(&current->sighand->siglock);
474 463
475 /* 464 current->state = TASK_INTERRUPTIBLE;
476 * The return below usually returns to the signal handler. We need to pre-set the 465 schedule();
477 * correct error code here to ensure that the right values get saved in sigcontext 466 set_thread_flag(TIF_RESTORE_SIGMASK);
478 * by ia64_do_signal. 467 return -ERESTARTNOHAND;
479 */
480 scr->pt.r8 = -EINTR;
481 while (1) {
482 current->state = TASK_INTERRUPTIBLE;
483 schedule();
484 if (ia64_do_signal(&oldset, scr, 1))
485 return -EINTR;
486 }
487}
488
489asmlinkage long
490ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr)
491{
492 compat_sigset_t set;
493
494 if (sigsetsize > sizeof(compat_sigset_t))
495 return -EINVAL;
496
497 if (copy_from_user(&set.sig, &uset->sig, sigsetsize))
498 return -EFAULT;
499
500 return __ia32_rt_sigsuspend(&set, sigsetsize, scr);
501}
502
503asmlinkage long
504ia32_sigsuspend (unsigned int mask, struct sigscratch *scr)
505{
506 return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr);
507} 468}
508 469
509asmlinkage long 470asmlinkage long
@@ -810,7 +771,11 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
810 } 771 }
811 /* Legacy stack switching not supported */ 772 /* Legacy stack switching not supported */
812 773
813 return (void __user *)((esp - frame_size) & -8ul); 774 esp -= frame_size;
775 /* Align the stack pointer according to the i386 ABI,
776 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
777 esp = ((esp + 4) & -16ul) - 4;
778 return (void __user *) esp;
814} 779}
815 780
816static int 781static int
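The get_sigframe() change implements the i386 ABI rule that, on function entry, (esp + 4) must be 16-byte aligned (the +4 accounts for the return address the call instruction pushed). A quick worked example with an arbitrary starting value:

    unsigned long esp = 0xbfffefb8UL;       /* esp after subtracting frame_size */

    esp = ((esp + 4) & -16UL) - 4;          /* -> 0xbfffefac */
    /* (esp + 4) == 0xbfffefb0, which is 16-byte aligned */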
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 55fd2d5471e1..b50bf208678e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1199,32 +1199,6 @@ ENTRY(notify_resume_user)
1199 br.ret.sptk.many rp 1199 br.ret.sptk.many rp
1200END(notify_resume_user) 1200END(notify_resume_user)
1201 1201
1202GLOBAL_ENTRY(sys_rt_sigsuspend)
1203 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
1204 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
1205 mov r9=ar.unat
1206 mov loc0=rp // save return address
1207 mov out0=in0 // mask
1208 mov out1=in1 // sigsetsize
1209 adds out2=8,sp // out2=&sigscratch->ar_pfs
1210 ;;
1211 .fframe 16
1212 .spillsp ar.unat, 16
1213 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
1214 st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch
1215 .body
1216 br.call.sptk.many rp=ia64_rt_sigsuspend
1217.ret17: .restore sp
1218 adds sp=16,sp // pop scratch stack space
1219 ;;
1220 ld8 r9=[sp] // load new unat from sw->caller_unat
1221 mov rp=loc0
1222 ;;
1223 mov ar.unat=r9
1224 mov ar.pfs=loc1
1225 br.ret.sptk.many rp
1226END(sys_rt_sigsuspend)
1227
1228ENTRY(sys_rt_sigreturn) 1202ENTRY(sys_rt_sigreturn)
1229 PT_REGS_UNWIND_INFO(0) 1203 PT_REGS_UNWIND_INFO(0)
1230 /* 1204 /*
@@ -1598,8 +1572,8 @@ sys_call_table:
1598 data8 sys_readlinkat 1572 data8 sys_readlinkat
1599 data8 sys_fchmodat 1573 data8 sys_fchmodat
1600 data8 sys_faccessat 1574 data8 sys_faccessat
1601 data8 sys_ni_syscall // reserved for pselect 1575 data8 sys_pselect6
1602 data8 sys_ni_syscall // 1295 reserved for ppoll 1576 data8 sys_ppoll
1603 data8 sys_unshare 1577 data8 sys_unshare
1604 data8 sys_splice 1578 data8 sys_splice
1605 data8 sys_set_robust_list 1579 data8 sys_set_robust_list
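Wiring sys_pselect6 and sys_ppoll into the previously reserved slots is what the sigmask rework elsewhere in this series enables: both syscalls require the kernel to swap the signal mask atomically around the wait, which is exactly what TIF_RESTORE_SIGMASK provides. From userspace it looks like this (a sketch; the fd and timeout values are invented):

    sigset_t mask;
    struct timespec timeout = { .tv_sec = 1 };
    fd_set rfds;
    int n;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);                       /* watch stdin, for example */
    sigemptyset(&mask);                     /* unblock everything while waiting */
    n = pselect(1, &rfds, NULL, NULL, &timeout, &mask);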
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index d3e9f33e8bdd..6a49600cf337 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -236,9 +236,11 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
236 sys_dev = get_cpu_sysdev(cpu); 236 sys_dev = get_cpu_sysdev(cpu);
237 switch (action) { 237 switch (action) {
238 case CPU_ONLINE: 238 case CPU_ONLINE:
239 case CPU_ONLINE_FROZEN:
239 err_inject_add_dev(sys_dev); 240 err_inject_add_dev(sys_dev);
240 break; 241 break;
241 case CPU_DEAD: 242 case CPU_DEAD:
243 case CPU_DEAD_FROZEN:
242 err_inject_remove_dev(sys_dev); 244 err_inject_remove_dev(sys_dev);
243 break; 245 break;
244 } 246 }
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 93d9ab14ba24..37f46527d233 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -1012,7 +1012,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
1012/* 1012/*
1013 * ACPI calls this when it finds an entry for a legacy ISA IRQ override. 1013 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
1014 */ 1014 */
1015void __init 1015void __devinit
1016iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, 1016iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
1017 unsigned long polarity, 1017 unsigned long polarity,
1018 unsigned long trigger) 1018 unsigned long trigger)
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1c5044a80958..bc47049f060f 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -38,6 +38,7 @@
38#include <asm/machvec.h> 38#include <asm/machvec.h>
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/system.h> 40#include <asm/system.h>
41#include <asm/tlbflush.h>
41 42
42#ifdef CONFIG_PERFMON 43#ifdef CONFIG_PERFMON
43# include <asm/perfmon.h> 44# include <asm/perfmon.h>
@@ -126,8 +127,10 @@ void destroy_irq(unsigned int irq)
126 127
127#ifdef CONFIG_SMP 128#ifdef CONFIG_SMP
128# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) 129# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
130# define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH)
129#else 131#else
130# define IS_RESCHEDULE(vec) (0) 132# define IS_RESCHEDULE(vec) (0)
133# define IS_LOCAL_TLB_FLUSH(vec) (0)
131#endif 134#endif
132/* 135/*
133 * That's where the IVT branches when we get an external 136 * That's where the IVT branches when we get an external
@@ -179,8 +182,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
179 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); 182 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
180 ia64_srlz_d(); 183 ia64_srlz_d();
181 while (vector != IA64_SPURIOUS_INT_VECTOR) { 184 while (vector != IA64_SPURIOUS_INT_VECTOR) {
182 if (unlikely(IS_RESCHEDULE(vector))) 185 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
183 kstat_this_cpu.irqs[vector]++; 186 smp_local_flush_tlb();
187 kstat_this_cpu.irqs[vector]++;
188 } else if (unlikely(IS_RESCHEDULE(vector)))
189 kstat_this_cpu.irqs[vector]++;
184 else { 190 else {
185 ia64_setreg(_IA64_REG_CR_TPR, vector); 191 ia64_setreg(_IA64_REG_CR_TPR, vector);
186 ia64_srlz_d(); 192 ia64_srlz_d();
@@ -226,8 +232,11 @@ void ia64_process_pending_intr(void)
226 * Perform normal interrupt style processing 232 * Perform normal interrupt style processing
227 */ 233 */
228 while (vector != IA64_SPURIOUS_INT_VECTOR) { 234 while (vector != IA64_SPURIOUS_INT_VECTOR) {
229 if (unlikely(IS_RESCHEDULE(vector))) 235 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
230 kstat_this_cpu.irqs[vector]++; 236 smp_local_flush_tlb();
237 kstat_this_cpu.irqs[vector]++;
238 } else if (unlikely(IS_RESCHEDULE(vector)))
239 kstat_this_cpu.irqs[vector]++;
231 else { 240 else {
232 struct pt_regs *old_regs = set_irq_regs(NULL); 241 struct pt_regs *old_regs = set_irq_regs(NULL);
233 242
@@ -259,12 +268,12 @@ void ia64_process_pending_intr(void)
259 268
260 269
261#ifdef CONFIG_SMP 270#ifdef CONFIG_SMP
262extern irqreturn_t handle_IPI (int irq, void *dev_id);
263 271
264static irqreturn_t dummy_handler (int irq, void *dev_id) 272static irqreturn_t dummy_handler (int irq, void *dev_id)
265{ 273{
266 BUG(); 274 BUG();
267} 275}
276extern irqreturn_t handle_IPI (int irq, void *dev_id);
268 277
269static struct irqaction ipi_irqaction = { 278static struct irqaction ipi_irqaction = {
270 .handler = handle_IPI, 279 .handler = handle_IPI,
@@ -277,6 +286,13 @@ static struct irqaction resched_irqaction = {
277 .flags = IRQF_DISABLED, 286 .flags = IRQF_DISABLED,
278 .name = "resched" 287 .name = "resched"
279}; 288};
289
290static struct irqaction tlb_irqaction = {
291 .handler = dummy_handler,
292 .flags = IRQF_DISABLED,
293 .name = "tlb_flush"
294};
295
280#endif 296#endif
281 297
282void 298void
@@ -302,6 +318,7 @@ init_IRQ (void)
302#ifdef CONFIG_SMP 318#ifdef CONFIG_SMP
303 register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); 319 register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
304 register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); 320 register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
321 register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
305#endif 322#endif
306#ifdef CONFIG_PERFMON 323#ifdef CONFIG_PERFMON
307 pfm_init_percpu(); 324 pfm_init_percpu();
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1d7cc7e2ce32..f8ae709de0b5 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1689,7 +1689,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
1689 ti->preempt_count = 1; 1689 ti->preempt_count = 1;
1690 ti->task = p; 1690 ti->task = p;
1691 ti->cpu = cpu; 1691 ti->cpu = cpu;
1692 p->thread_info = ti; 1692 p->stack = ti;
1693 p->state = TASK_UNINTERRUPTIBLE; 1693 p->state = TASK_UNINTERRUPTIBLE;
1694 cpu_set(cpu, p->cpus_allowed); 1694 cpu_set(cpu, p->cpus_allowed);
1695 INIT_LIST_HEAD(&p->tasks); 1695 INIT_LIST_HEAD(&p->tasks);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index a71df9ae0397..85829e27785c 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -975,9 +975,11 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
975 975
976 switch (action) { 976 switch (action) {
977 case CPU_ONLINE: 977 case CPU_ONLINE:
978 case CPU_ONLINE_FROZEN:
978 create_palinfo_proc_entries(hotcpu); 979 create_palinfo_proc_entries(hotcpu);
979 break; 980 break;
980 case CPU_DEAD: 981 case CPU_DEAD:
982 case CPU_DEAD_FROZEN:
981 remove_palinfo_proc_entries(hotcpu); 983 remove_palinfo_proc_entries(hotcpu);
982 break; 984 break;
983 } 985 }
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 8bb571a8a738..d1c3ed9943e5 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -155,7 +155,7 @@ show_regs (struct pt_regs *regs)
155} 155}
156 156
157void 157void
158do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall) 158do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
159{ 159{
160 if (fsys_mode(current, &scr->pt)) { 160 if (fsys_mode(current, &scr->pt)) {
161 /* defer signal-handling etc. until we return to privilege-level 0. */ 161 /* defer signal-handling etc. until we return to privilege-level 0. */
@@ -170,8 +170,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
170#endif 170#endif
171 171
172 /* deal with pending signal delivery */ 172 /* deal with pending signal delivery */
173 if (test_thread_flag(TIF_SIGPENDING)) 173 if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
174 ia64_do_signal(oldset, scr, in_syscall); 174 ia64_do_signal(scr, in_syscall);
175} 175}
176 176
177static int pal_halt = 1; 177static int pal_halt = 1;
@@ -236,6 +236,7 @@ void cpu_idle_wait(void)
236{ 236{
237 unsigned int cpu, this_cpu = get_cpu(); 237 unsigned int cpu, this_cpu = get_cpu();
238 cpumask_t map; 238 cpumask_t map;
239 cpumask_t tmp = current->cpus_allowed;
239 240
240 set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); 241 set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
241 put_cpu(); 242 put_cpu();
@@ -257,6 +258,7 @@ void cpu_idle_wait(void)
257 } 258 }
258 cpus_and(map, map, cpu_online_map); 259 cpus_and(map, map, cpu_online_map);
259 } while (!cpus_empty(map)); 260 } while (!cpus_empty(map));
261 set_cpus_allowed(current, tmp);
260} 262}
261EXPORT_SYMBOL_GPL(cpu_idle_wait); 263EXPORT_SYMBOL_GPL(cpu_idle_wait);
262 264
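cpu_idle_wait() pins itself to each CPU in turn; before this fix it left the caller pinned to whichever CPU it visited last. The save/restore pattern is worth calling out for any code that temporarily narrows its affinity:

    cpumask_t saved = current->cpus_allowed;        /* remember caller's mask */

    set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
    /* ... per-cpu work ... */
    set_cpus_allowed(current, saved);               /* restore on the way out */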
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index ae473e3f2a0d..903babd22d62 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -94,7 +94,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
944: 944:
95 srlz.i 95 srlz.i
96 ;; 96 ;;
97 //purge TR entry for kernel text and data 97 // purge TR entry for kernel text and data
98 movl r16=KERNEL_START 98 movl r16=KERNEL_START
99 mov r18=KERNEL_TR_PAGE_SHIFT<<2 99 mov r18=KERNEL_TR_PAGE_SHIFT<<2
100 ;; 100 ;;
@@ -104,15 +104,6 @@ GLOBAL_ENTRY(relocate_new_kernel)
104 srlz.i 104 srlz.i
105 ;; 105 ;;
106 106
107 // purge TR entry for percpu data
108 movl r16=PERCPU_ADDR
109 mov r18=PERCPU_PAGE_SHIFT<<2
110 ;;
111 ptr.d r16,r18
112 ;;
113 srlz.d
114 ;;
115
116 // purge TR entry for pal code 107 // purge TR entry for pal code
117 mov r16=in3 108 mov r16=in3
118 mov r18=IA64_GRANULE_SHIFT<<2 109 mov r18=IA64_GRANULE_SHIFT<<2
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index a51f1d0bfb70..89f6b138a62c 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -582,6 +582,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
582 struct salinfo_data *data; 582 struct salinfo_data *data;
583 switch (action) { 583 switch (action) {
584 case CPU_ONLINE: 584 case CPU_ONLINE:
585 case CPU_ONLINE_FROZEN:
585 spin_lock_irqsave(&data_saved_lock, flags); 586 spin_lock_irqsave(&data_saved_lock, flags);
586 for (i = 0, data = salinfo_data; 587 for (i = 0, data = salinfo_data;
587 i < ARRAY_SIZE(salinfo_data); 588 i < ARRAY_SIZE(salinfo_data);
@@ -592,6 +593,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
592 spin_unlock_irqrestore(&data_saved_lock, flags); 593 spin_unlock_irqrestore(&data_saved_lock, flags);
593 break; 594 break;
594 case CPU_DEAD: 595 case CPU_DEAD:
596 case CPU_DEAD_FROZEN:
595 spin_lock_irqsave(&data_saved_lock, flags); 597 spin_lock_irqsave(&data_saved_lock, flags);
596 for (i = 0, data = salinfo_data; 598 for (i = 0, data = salinfo_data;
597 i < ARRAY_SIZE(salinfo_data); 599 i < ARRAY_SIZE(salinfo_data);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 6e19da122ae3..9df1efe7487d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -786,7 +786,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
786 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); 786 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
787} 787}
788 788
789void 789void __init
790setup_per_cpu_areas (void) 790setup_per_cpu_areas (void)
791{ 791{
792 /* start_kernel() requires this... */ 792 /* start_kernel() requires this... */
diff --git a/arch/ia64/kernel/sigframe.h b/arch/ia64/kernel/sigframe.h
index 37b986cb86e0..9fd9a1933b3d 100644
--- a/arch/ia64/kernel/sigframe.h
+++ b/arch/ia64/kernel/sigframe.h
@@ -22,4 +22,4 @@ struct sigframe {
22 struct sigcontext sc; 22 struct sigcontext sc;
23}; 23};
24 24
25extern long ia64_do_signal (sigset_t *, struct sigscratch *, long); 25extern void ia64_do_signal (struct sigscratch *, long);
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 0dcd56da6001..aeec8184e862 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -40,47 +40,6 @@
40# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) 40# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
41#endif 41#endif
42 42
43long
44ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
45{
46 sigset_t oldset, set;
47
48 /* XXX: Don't preclude handling different sized sigset_t's. */
49 if (sigsetsize != sizeof(sigset_t))
50 return -EINVAL;
51
52 if (!access_ok(VERIFY_READ, uset, sigsetsize))
53 return -EFAULT;
54
55 if (GET_SIGSET(&set, uset))
56 return -EFAULT;
57
58 sigdelsetmask(&set, ~_BLOCKABLE);
59
60 spin_lock_irq(&current->sighand->siglock);
61 {
62 oldset = current->blocked;
63 current->blocked = set;
64 recalc_sigpending();
65 }
66 spin_unlock_irq(&current->sighand->siglock);
67
68 /*
69 * The return below usually returns to the signal handler. We need to
70 * pre-set the correct error code here to ensure that the right values
71 * get saved in sigcontext by ia64_do_signal.
72 */
73 scr->pt.r8 = EINTR;
74 scr->pt.r10 = -1;
75
76 while (1) {
77 current->state = TASK_INTERRUPTIBLE;
78 schedule();
79 if (ia64_do_signal(&oldset, scr, 1))
80 return -EINTR;
81 }
82}
83
84asmlinkage long 43asmlinkage long
85sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, 44sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
86 long arg3, long arg4, long arg5, long arg6, long arg7, 45 long arg3, long arg4, long arg5, long arg6, long arg7,
@@ -477,10 +436,11 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
477 * Note that `init' is a special process: it doesn't get signals it doesn't want to 436 * Note that `init' is a special process: it doesn't get signals it doesn't want to
478 * handle. Thus you cannot kill init even with a SIGKILL even by mistake. 437 * handle. Thus you cannot kill init even with a SIGKILL even by mistake.
479 */ 438 */
480long 439void
481ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) 440ia64_do_signal (struct sigscratch *scr, long in_syscall)
482{ 441{
483 struct k_sigaction ka; 442 struct k_sigaction ka;
443 sigset_t *oldset;
484 siginfo_t info; 444 siginfo_t info;
485 long restart = in_syscall; 445 long restart = in_syscall;
486 long errno = scr->pt.r8; 446 long errno = scr->pt.r8;
@@ -492,9 +452,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
492 * doing anything if so. 452 * doing anything if so.
493 */ 453 */
494 if (!user_mode(&scr->pt)) 454 if (!user_mode(&scr->pt))
495 return 0; 455 return;
496 456
497 if (!oldset) 457 if (test_thread_flag(TIF_RESTORE_SIGMASK))
458 oldset = &current->saved_sigmask;
459 else
498 oldset = &current->blocked; 460 oldset = &current->blocked;
499 461
500 /* 462 /*
@@ -557,8 +519,15 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
557 * Whee! Actually deliver the signal. If the delivery failed, we need to 519 * Whee! Actually deliver the signal. If the delivery failed, we need to
558 * continue to iterate in this loop so we can deliver the SIGSEGV... 520 * continue to iterate in this loop so we can deliver the SIGSEGV...
559 */ 521 */
560 if (handle_signal(signr, &ka, &info, oldset, scr)) 522 if (handle_signal(signr, &ka, &info, oldset, scr)) {
561 return 1; 523 /* a signal was successfully delivered; the saved
524 * sigmask will have been stored in the signal frame,
525 * and will be restored by sigreturn, so we can simply
526 * clear the TIF_RESTORE_SIGMASK flag */
527 if (test_thread_flag(TIF_RESTORE_SIGMASK))
528 clear_thread_flag(TIF_RESTORE_SIGMASK);
529 return;
530 }
562 } 531 }
563 532
564 /* Did we come from a system call? */ 533 /* Did we come from a system call? */
@@ -584,5 +553,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
584 } 553 }
585 } 554 }
586 } 555 }
587 return 0; 556
557 /* if there's no signal to deliver, we just put the saved sigmask
558 * back */
559 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
560 clear_thread_flag(TIF_RESTORE_SIGMASK);
561 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
562 }
588} 563}
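The rewrites in ia32_signal.c and here replace the open-coded sigsuspend loops with the TIF_RESTORE_SIGMASK protocol: the syscall stashes the old mask in current->saved_sigmask, blocks, and flags the task; the do-signal path above then either lets the signal frame carry the saved mask (clearing the flag) or restores it by hand when no signal was delivered. A condensed sketch of the syscall half (example_sigsuspend is an invented name):

    static long example_sigsuspend(old_sigset_t mask)
    {
            spin_lock_irq(&current->sighand->siglock);
            current->saved_sigmask = current->blocked;      /* remembered, not lost */
            siginitset(&current->blocked, mask & _BLOCKABLE);
            recalc_sigpending();
            spin_unlock_irq(&current->sighand->siglock);

            current->state = TASK_INTERRUPTIBLE;
            schedule();
            set_thread_flag(TIF_RESTORE_SIGMASK);   /* do-signal undoes the mask swap */
            return -ERESTARTNOHAND;
    }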
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 55ddd809b02d..221de3804560 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -50,6 +50,18 @@
50#include <asm/mca.h> 50#include <asm/mca.h>
51 51
52/* 52/*
53 * Note: alignment of 4 entries/cacheline was empirically determined
54 * to be a good tradeoff between hot cachelines & spreading the array
55 * across too many cacheline.
56 */
57static struct local_tlb_flush_counts {
58 unsigned int count;
59} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
60
61static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
62
63
64/*
53 * Structure and data for smp_call_function(). This is designed to minimise static memory 65 * Structure and data for smp_call_function(). This is designed to minimise static memory
54 * requirements. It also looks cleaner. 66 * requirements. It also looks cleaner.
55 */ 67 */
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
248 platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); 260 platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
249} 261}
250 262
263/*
264 * Called with preeemption disabled.
265 */
266static void
267smp_send_local_flush_tlb (int cpu)
268{
269 platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
270}
271
272void
273smp_local_flush_tlb(void)
274{
275 /*
276 * Use atomic ops. Otherwise, the load/increment/store sequence from
277 * a "++" operation can have the line stolen between the load & store.
278 * The overhead of the atomic op in negligible in this case & offers
279 * significant benefit for the brief periods where lots of cpus
280 * are simultaneously flushing TLBs.
281 */
282 ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
283 local_flush_tlb_all();
284}
285
286#define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */
287
288void
289smp_flush_tlb_cpumask(cpumask_t xcpumask)
290{
291 unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
292 cpumask_t cpumask = xcpumask;
293 int mycpu, cpu, flush_mycpu = 0;
294
295 preempt_disable();
296 mycpu = smp_processor_id();
297
298 for_each_cpu_mask(cpu, cpumask)
299 counts[cpu] = local_tlb_flush_counts[cpu].count;
300
301 mb();
302 for_each_cpu_mask(cpu, cpumask) {
303 if (cpu == mycpu)
304 flush_mycpu = 1;
305 else
306 smp_send_local_flush_tlb(cpu);
307 }
308
309 if (flush_mycpu)
310 smp_local_flush_tlb();
311
312 for_each_cpu_mask(cpu, cpumask)
313 while(counts[cpu] == local_tlb_flush_counts[cpu].count)
314 udelay(FLUSH_DELAY);
315
316 preempt_enable();
317}
318
251void 319void
252smp_flush_tlb_all (void) 320smp_flush_tlb_all (void)
253{ 321{
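smp_flush_tlb_cpumask() is a rendezvous built on the per-cpu counters: the initiator snapshots every target's local_tlb_flush_counts entry, sends IA64_IPI_LOCAL_TLB_FLUSH (wired up in irq_ia64.c above), and spins until each counter moves; ia64_fetchadd keeps the increment atomic so concurrent flushers cannot lose a tick. Typical use, as the sn2 code further down adopts it:

    /* flush the TLB on every CPU this mm has run on */
    smp_flush_tlb_cpumask(mm->cpu_vm_mask);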
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 687500ddb4b8..94ae3c87d828 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -412,9 +412,11 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
412 sys_dev = get_cpu_sysdev(cpu); 412 sys_dev = get_cpu_sysdev(cpu);
413 switch (action) { 413 switch (action) {
414 case CPU_ONLINE: 414 case CPU_ONLINE:
415 case CPU_ONLINE_FROZEN:
415 cache_add_dev(sys_dev); 416 cache_add_dev(sys_dev);
416 break; 417 break;
417 case CPU_DEAD: 418 case CPU_DEAD:
419 case CPU_DEAD_FROZEN:
418 cache_remove_dev(sys_dev); 420 cache_remove_dev(sys_dev);
419 break; 421 break;
420 } 422 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 5bfb8be02b70..b8e0d70bf989 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -43,9 +43,9 @@ die (const char *str, struct pt_regs *regs, long err)
43 u32 lock_owner; 43 u32 lock_owner;
44 int lock_owner_depth; 44 int lock_owner_depth;
45 } die = { 45 } die = {
46 .lock = SPIN_LOCK_UNLOCKED, 46 .lock = __SPIN_LOCK_UNLOCKED(die.lock),
47 .lock_owner = -1, 47 .lock_owner = -1,
48 .lock_owner_depth = 0 48 .lock_owner_depth = 0
49 }; 49 };
50 static int die_counter; 50 static int die_counter;
51 int cpu = get_cpu(); 51 int cpu = get_cpu();
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 93d5a3b41f69..fe1426266b9b 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -60,6 +60,7 @@
60# define UNW_DEBUG_ON(n) unw_debug_level >= n 60# define UNW_DEBUG_ON(n) unw_debug_level >= n
61 /* Do not code a printk level, not all debug lines end in newline */ 61 /* Do not code a printk level, not all debug lines end in newline */
62# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) 62# define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
63# undef inline
63# define inline 64# define inline
64#else /* !UNW_DEBUG */ 65#else /* !UNW_DEBUG */
65# define UNW_DEBUG_ON(n) 0 66# define UNW_DEBUG_ON(n) 0
@@ -145,7 +146,7 @@ static struct {
145# endif 146# endif
146} unw = { 147} unw = {
147 .tables = &unw.kernel_table, 148 .tables = &unw.kernel_table,
148 .lock = SPIN_LOCK_UNLOCKED, 149 .lock = __SPIN_LOCK_UNLOCKED(unw.lock),
149 .save_order = { 150 .save_order = {
150 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, 151 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
151 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR 152 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
@@ -1943,9 +1944,9 @@ EXPORT_SYMBOL(unw_unwind);
1943int 1944int
1944unw_unwind_to_user (struct unw_frame_info *info) 1945unw_unwind_to_user (struct unw_frame_info *info)
1945{ 1946{
1946 unsigned long ip, sp, pr = 0; 1947 unsigned long ip, sp, pr = info->pr;
1947 1948
1948 while (unw_unwind(info) >= 0) { 1949 do {
1949 unw_get_sp(info, &sp); 1950 unw_get_sp(info, &sp);
1950 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) 1951 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1951 < IA64_PT_REGS_SIZE) { 1952 < IA64_PT_REGS_SIZE) {
@@ -1963,7 +1964,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
1963 __FUNCTION__, ip); 1964 __FUNCTION__, ip);
1964 return -1; 1965 return -1;
1965 } 1966 }
1966 } 1967 } while (unw_unwind(info) >= 0);
1967 unw_get_ip(info, &ip); 1968 unw_get_ip(info, &ip);
1968 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", 1969 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1969 __FUNCTION__, ip); 1970 __FUNCTION__, ip);
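Converting the while into a do/while makes unw_unwind_to_user() examine the frame it already has before stepping, so a task whose initial frame is already the user-level pt_regs is no longer skipped past; seeding pr from info->pr likewise makes the first predicate test meaningful. The loop transform in the abstract (step and check are stand-ins, not real functions):

    while (step(info) >= 0)         /* old: first frame never examined */
            check(info);

    do {                            /* new: examine, then step */
            check(info);
    } while (step(info) >= 0);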
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ffad7624436c..fa4e6d4810f3 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -32,9 +32,9 @@ static struct {
32} purge; 32} purge;
33 33
34struct ia64_ctx ia64_ctx = { 34struct ia64_ctx ia64_ctx = {
35 .lock = SPIN_LOCK_UNLOCKED, 35 .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
36 .next = 1, 36 .next = 1,
37 .max_ctx = ~0U 37 .max_ctx = ~0U
38}; 38};
39 39
40DEFINE_PER_CPU(u8, ia64_need_tlb_flush); 40DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 8d2a1bfbfe7c..7f6d2360a262 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -59,6 +59,22 @@ void sn_intr_free(nasid_t local_nasid, int local_widget,
 			(u64) sn_irq_info->irq_cookie, 0, 0);
 }
 
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+		     struct sn_irq_info *sn_irq_info,
+		     nasid_t req_nasid, int req_slice)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
+			(u64) local_widget, __pa(sn_irq_info),
+			(u64) req_nasid, (u64) req_slice, 0);
+
+	return ret_stuff.status;
+}
+
 static unsigned int sn_startup_irq(unsigned int irq)
 {
 	return 0;
@@ -127,15 +143,8 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 	struct sn_irq_info *new_irq_info;
 	struct sn_pcibus_provider *pci_provider;
 
-	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-	if (new_irq_info == NULL)
-		return NULL;
-
-	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-	bridge = (u64) new_irq_info->irq_bridge;
+	bridge = (u64) sn_irq_info->irq_bridge;
 	if (!bridge) {
-		kfree(new_irq_info);
 		return NULL; /* irq is not a device interrupt */
 	}
 
@@ -145,8 +154,25 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		local_widget = TIO_SWIN_WIDGETNUM(bridge);
 	else
 		local_widget = SWIN_WIDGETNUM(bridge);
-
 	vector = sn_irq_info->irq_irq;
+
+	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
+	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
+	if (!status) {
+		new_irq_info = sn_irq_info;
+		goto finish_up;
+	}
+
+	/*
+	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
+	 * Revert to old method.
+	 */
+	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+	if (new_irq_info == NULL)
+		return NULL;
+
+	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
 	/* Free the old PROM new_irq_info structure */
 	sn_intr_free(local_nasid, local_widget, new_irq_info);
 	unregister_intr_pda(new_irq_info);
@@ -162,11 +188,18 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		return NULL;
 	}
 
+	register_intr_pda(new_irq_info);
+	spin_lock(&sn_irq_info_lock);
+	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+	spin_unlock(&sn_irq_info_lock);
+	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+
+
+finish_up:
 	/* Update kernels new_irq_info with new target info */
 	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
 				     new_irq_info->irq_slice);
 	new_irq_info->irq_cpuid = cpuid;
-	register_intr_pda(new_irq_info);
 
 	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
 
@@ -178,11 +211,6 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 	    pci_provider && pci_provider->target_interrupt)
 		(pci_provider->target_interrupt)(new_irq_info);
 
-	spin_lock(&sn_irq_info_lock);
-	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-	spin_unlock(&sn_irq_info_lock);
-	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-
 #ifdef CONFIG_SMP
 	cpuphys = cpu_physical_id(cpuid);
 	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
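The retarget path above now tries a single PROM call first and only falls
back to the old free-and-reallocate sequence when SAL_INTR_REDIRECT is
unsupported or fails. A minimal sketch of the pattern, with hypothetical
helper names standing in for the real calls:

	/* Sketch only: prefer the in-place fast path, fall back to
	 * reallocation. fast_redirect()/slow_retarget() are illustrative
	 * stand-ins, not functions from this patch. */
	struct sn_irq_info *retarget(struct sn_irq_info *info)
	{
		if (fast_redirect(info) == 0)	/* PROM moved the interrupt */
			return info;		/* descriptor reused in place */
		return slow_retarget(info);	/* allocate and swap a new one */
	}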
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 601747b1e22a..5d318b579fb1 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -46,6 +46,9 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
 extern unsigned long
 sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
 	volatile unsigned long *, unsigned long,
@@ -76,6 +79,8 @@ struct ptc_stats {
 	unsigned long shub_itc_clocks;
 	unsigned long shub_itc_clocks_max;
 	unsigned long shub_ptc_flushes_not_my_mm;
+	unsigned long shub_ipi_flushes;
+	unsigned long shub_ipi_flushes_itc_clocks;
 };
 
 #define sn2_ptctest 0
@@ -121,6 +126,18 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
 	flush_tlb_mm(mm);
 }
 
+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+	unsigned long itc;
+
+	itc = ia64_get_itc();
+	smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+	itc = ia64_get_itc() - itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes++;
+}
+
 /**
  * sn2_global_tlb_purge - globally purge translation cache of virtual address range
  * @mm: mm_struct containing virtual address range
@@ -154,7 +171,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
 	short nasids[MAX_NUMNODES], nix;
 	nodemask_t nodes_flushed;
-	int active, max_active, deadlock;
+	int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+	if (flush_opt > 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		return;
+	}
 
 	nodes_clear(nodes_flushed);
 	i = 0;
@@ -189,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		return;
 	}
 
+	if (flush_opt == 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		preempt_enable();
+		return;
+	}
+
 	itc = ia64_get_itc();
 	nix = 0;
 	for_each_node_mask(cnode, nodes_flushed)
@@ -256,6 +284,8 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		}
 		if (active >= max_active || i == (nix - 1)) {
 			if ((deadlock = wait_piowc())) {
+				if (flush_opt == 1)
+					goto done;
 				sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
 				if (reset_max_active_on_deadlock())
 					max_active = 1;
@@ -267,6 +297,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		start += (1UL << nbits);
 	} while (start < end);
 
+done:
 	itc2 = ia64_get_itc() - itc2;
 	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
 	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
@@ -279,6 +310,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 
 	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
 
+	if (flush_opt == 1 && deadlock) {
+		__get_cpu_var(ptcstats).deadlocks++;
+		sn2_ipi_flush_all_tlb(mm);
+	}
+
 	preempt_enable();
 }
 
@@ -425,24 +461,42 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 
 	if (!cpu) {
 		seq_printf(file,
-			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
-		seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_flushes ipi_nsec\n");
+		seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
 	}
 
 	if (cpu < NR_CPUS && cpu_online(cpu)) {
 		stat = &per_cpu(ptcstats, cpu);
-		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 			   stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 			   stat->deadlocks,
 			   1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   stat->shub_ptc_flushes_not_my_mm,
-			   stat->deadlocks2);
+			   stat->deadlocks2,
+			   stat->shub_ipi_flushes,
+			   1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
 
+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+	int cpu;
+	char optstr[64];
+
+	if (copy_from_user(optstr, user, count))
+		return -EFAULT;
+	optstr[count - 1] = '\0';
+	sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+	for_each_online_cpu(cpu)
+		memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+	return count;
+}
+
 static struct seq_operations sn2_ptc_seq_ops = {
 	.start = sn2_ptc_seq_start,
 	.next = sn2_ptc_seq_next,
@@ -458,6 +512,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
 static const struct file_operations proc_sn2_ptc_operations = {
 	.open = sn2_ptc_proc_open,
 	.read = seq_read,
+	.write = sn2_ptc_proc_write,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
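The new write handler makes the flush policy tunable at run time: writing a
number to the proc file backed by these file_operations picks the algorithm
and resets the per-cpu counters so the next read starts clean. Note that the
handler trusts count; a hardened variant would clamp it to the buffer first,
e.g. (a sketch under that assumption, not the patched code):

	static ssize_t bounded_opt_write(struct file *file,
					 const char __user *user,
					 size_t count, loff_t *data)
	{
		char optstr[64];
		size_t n = min(count, sizeof(optstr) - 1);	/* clamp length */

		if (n == 0 || copy_from_user(optstr, user, n))
			return -EFAULT;
		optstr[n] = '\0';			/* always terminated */
		sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
		return count;
	}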
diff --git a/arch/m68knommu/Kconfig.debug b/arch/m68knommu/Kconfig.debug
index 763c9aa0b4fd..9ff47bd09aee 100644
--- a/arch/m68knommu/Kconfig.debug
+++ b/arch/m68knommu/Kconfig.debug
@@ -5,7 +5,7 @@ source "lib/Kconfig.debug"
 config FULLDEBUG
 	bool "Full Symbolic/Source Debugging support"
 	help
-	  Enable debuging symbols on kernel build.
+	  Enable debugging symbols on kernel build.
 
 config HIGHPROFILE
 	bool "Use fast second timer for profiling"
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index b988c7bdc6e4..7cd183d346ef 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -31,7 +31,7 @@ int main(void)
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
 	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
 
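These asm-offsets updates (and the matching mips, parisc and powerpc hunks
below) track a task_struct change: the member formerly named thread_info is
now called stack, and C code is expected to go through an accessor rather
than the raw field, as the smtc.c hunk further down does. A sketch of such
an accessor, assuming the thread_info still lives at the base of the task's
stack allocation:

	static inline struct thread_info *task_thread_info(struct task_struct *t)
	{
		return (struct thread_info *)t->stack;	/* same bytes, new name */
	}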
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b7cb048bc771..16ecea3c0813 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -956,7 +956,7 @@ choice
 	  byte order. These modes require different kernels and a different
 	  Linux distribution. In general there is one preferred byteorder for a
 	  particular system but some systems are just as commonly used in the
-	  one or the other endianess.
+	  one or the other endianness.
 
 config CPU_BIG_ENDIAN
 	bool "Big endian"
@@ -1750,7 +1750,7 @@ config ARCH_DISCONTIGMEM_ENABLE
 	bool
 	default y if SGI_IP27
 	help
-	  Say Y to upport efficient handling of discontiguous physical memory,
+	  Say Y to support efficient handling of discontiguous physical memory,
 	  for architectures which are either NUMA (Non-Uniform Memory Access)
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
@@ -1938,7 +1938,7 @@ config KEXEC
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
-	  but it is indepedent of the system firmware. And like a reboot
+	  but it is independent of the system firmware. And like a reboot
 	  you can start any kernel with it, not just Linux.
 
 	  The name comes from the similiarity to the exec system call.
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index f2f742df32c7..4892db88a86a 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -92,7 +92,7 @@ cflags-y += -ffreestanding
 # when fed the toolchain default!
 #
 # Certain gcc versions upto gcc 4.1.1 (probably 4.2-subversion as of
-# 2006-10-10 don't properly change the the predefined symbols if -EB / -EL
+# 2006-10-10 don't properly change the predefined symbols if -EB / -EL
 # are used, so we kludge that here.  A bug has been filed at
 # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
 #
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 761a779d5c4f..3b27309d54b1 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -82,7 +82,7 @@ void output_task_defines(void)
 {
 	text("/* MIPS task_struct offsets. */");
 	offset("#define TASK_STATE ", struct task_struct, state);
-	offset("#define TASK_THREAD_INFO ", struct task_struct, thread_info);
+	offset("#define TASK_THREAD_INFO ", struct task_struct, stack);
 	offset("#define TASK_FLAGS ", struct task_struct, flags);
 	offset("#define TASK_MM ", struct task_struct, mm);
 	offset("#define TASK_PID ", struct task_struct, pid);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 5dcfab6b288e..b361edb83dc6 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -560,7 +560,7 @@ void smtc_boot_secondary(int cpu, struct task_struct *idle)
 	write_tc_gpr_sp(__KSTK_TOS(idle));
 
 	/* global pointer */
-	write_tc_gpr_gp((unsigned long)idle->thread_info);
+	write_tc_gpr_gp((unsigned long)task_thread_info(idle));
 
 	smtc_status |= SMTC_MTC_ACTIVE;
 	write_tc_c0_tchalt(0);
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index 7a7444874e80..0ad39e53f7b1 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 
 /*
- * Set the the BCM1250, etc. PCI host bridge's TRDY timeout
+ * Set the BCM1250, etc. PCI host bridge's TRDY timeout
  * to the finite max.
  */
 static void __init quirk_sb1250_pci(struct pci_dev *dev)
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
 			quirk_sb1250_ht);
 
 /*
- * Set the the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
+ * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
  */
 static void __init quirk_sp1011(struct pci_dev *dev)
 {
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 54fdb959149c..d3b7917a87cb 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -54,7 +54,7 @@
 
 int main(void)
 {
-	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
 	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
 	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 808d2ef80e2f..ccc5410af996 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,19 +120,6 @@ config GENERIC_BUG
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-#
-# Powerpc uses the slab allocator to manage its ptes and the
-# page structs of ptes are used for splitting the page table
-# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
-#
-# In that special configuration the page structs of slabs are modified.
-# This setting disables the selection of SLUB as a slab allocator.
-#
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-	depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
-
 config DEFAULT_UIMAGE
 	bool
 	help
@@ -352,6 +339,11 @@ config PPC_STD_MMU_32
 	def_bool y
 	depends on PPC_STD_MMU && PPC32
 
+config PPC_MM_SLICES
+	bool
+	default y if HUGETLB_PAGE
+	default n
+
 config VIRT_CPU_ACCOUNTING
 	bool "Deterministic task and CPU time accounting"
 	depends on PPC64
@@ -541,9 +533,15 @@ config NODES_SPAN_OTHER_NODES
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
+config PPC_HAS_HASH_64K
+	bool
+	depends on PPC64
+	default n
+
 config PPC_64K_PAGES
 	bool "64k page size"
 	depends on PPC64
+	select PPC_HAS_HASH_64K
 	help
 	  This option changes the kernel logical page size to 64k. On machines
 	  without processor support for 64k pages, the kernel will simulate
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index f70e795c262e..346cd3befe1e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -32,7 +32,7 @@ config HCALL_STATS
 	depends on PPC_PSERIES && DEBUG_FS
 	help
 	  Adds code to keep track of the number of hypervisor calls made and
-	  the amount of time spent in hypervisor callsr. Wall time spent in
+	  the amount of time spent in hypervisor calls. Wall time spent in
 	  each call is always calculated, and if available CPU cycles spent
 	  are also calculated. A directory named hcall_inst is added at the
 	  root of the debugfs filesystem. Within the hcall_inst directory
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8f48560b7ee2..2cb1d9487796 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -58,7 +58,7 @@ int main(void)
 #ifdef CONFIG_PPC64
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
 #else
-	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
 #endif /* CONFIG_PPC64 */
 
@@ -122,12 +122,18 @@ int main(void)
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
 	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
-#ifdef CONFIG_HUGETLB_PAGE
-	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
-	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
-#endif /* CONFIG_HUGETLB_PAGE */
+#ifdef CONFIG_PPC_MM_SLICES
+	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
+					    context.low_slices_psize));
+	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
+					    context.high_slices_psize));
+	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
+	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
+#else
+	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+
+#endif /* CONFIG_PPC_MM_SLICES */
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
 	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 584d1e3c013d..af11285ffbd1 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -10,7 +10,8 @@
 #include <asm/pgtable.h>
 #include <asm/iseries/lpar_map.h>
 
-const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
+/* The # is to stop gcc trying to make .text nonexecutable */
+const struct LparMap __attribute__((__section__(".text #"))) xLparMap = {
 	.xNumberEsids = HvEsidsToMap,
 	.xNumberRanges = HvRangesToMap,
 	.xSegmentTableOffs = STAB0_PAGE,
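The trailing '#' works because gcc pastes the section name verbatim into its
assembly output and the assembler treats '#' as a comment leader, so the
flags gcc appends after the name (which would mark this .text section
non-executable) are discarded. A sketch of the same trick on a hypothetical
object, under the assumption of a typical gcc/gas pair:

	/* gcc emits roughly: .section ".text #","aw",@progbits
	 * and gas ignores everything after the '#'. */
	static const int example_in_text __attribute__((__section__(".text #"))) = 1;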
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index cae39d9dfe48..68991c2d4a1b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -342,10 +342,12 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
 
 	switch (action) {
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		register_cpu_online(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		unregister_cpu_online(cpu);
 		break;
 #endif
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 38a81967ca07..4f839c6a9768 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -18,4 +18,5 @@ obj-$(CONFIG_40x) += 4xx_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e64ce3eec36e..4762ff7c14df 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -615,6 +615,9 @@ htab_pte_insert_failure:
615 li r3,-1 615 li r3,-1
616 b htab_bail 616 b htab_bail
617 617
618#endif /* CONFIG_PPC_64K_PAGES */
619
620#ifdef CONFIG_PPC_HAS_HASH_64K
618 621
619/***************************************************************************** 622/*****************************************************************************
620 * * 623 * *
@@ -870,7 +873,7 @@ ht64_pte_insert_failure:
870 b ht64_bail 873 b ht64_bail
871 874
872 875
873#endif /* CONFIG_PPC_64K_PAGES */ 876#endif /* CONFIG_PPC_HAS_HASH_64K */
874 877
875 878
876/***************************************************************************** 879/*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 9b226fa7006f..028ba4ed03d2 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -51,6 +51,7 @@
 #include <asm/cputable.h>
 #include <asm/abs_addr.h>
 #include <asm/sections.h>
+#include <asm/spu.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -419,7 +420,7 @@ static void __init htab_finish_init(void)
 	extern unsigned int *htab_call_hpte_remove;
 	extern unsigned int *htab_call_hpte_updatepp;
 
-#ifdef CONFIG_PPC_64K_PAGES
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	extern unsigned int *ht64_call_hpte_insert1;
 	extern unsigned int *ht64_call_hpte_insert2;
 	extern unsigned int *ht64_call_hpte_remove;
@@ -596,22 +597,23 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
  * Demote a segment to using 4k pages.
  * For now this makes the whole process use 4k pages.
  */
-void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
-{
 #ifdef CONFIG_PPC_64K_PAGES
+static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+{
 	if (mm->context.user_psize == MMU_PAGE_4K)
 		return;
+#ifdef CONFIG_PPC_MM_SLICES
+	slice_set_user_psize(mm, MMU_PAGE_4K);
+#else /* CONFIG_PPC_MM_SLICES */
 	mm->context.user_psize = MMU_PAGE_4K;
 	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
-	get_paca()->context = mm->context;
-	slb_flush_and_rebolt();
+#endif /* CONFIG_PPC_MM_SLICES */
+
 #ifdef CONFIG_SPE_BASE
 	spu_flush_all_slbs(mm);
 #endif
-#endif
 }
-
-EXPORT_SYMBOL_GPL(demote_segment_4k);
+#endif /* CONFIG_PPC_64K_PAGES */
 
 /* Result code is:
  *  0 - handled
@@ -646,7 +648,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
+#ifdef CONFIG_PPC_MM_SLICES
+		psize = get_slice_psize(mm, ea);
+#else
 		psize = mm->context.user_psize;
+#endif
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
@@ -674,11 +680,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
 		local = 1;
 
+#ifdef CONFIG_HUGETLB_PAGE
 	/* Handle hugepage regions */
-	if (unlikely(in_hugepage_area(mm->context, ea))) {
+	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
 		DBG_LOW(" -> huge page !\n");
 		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifndef CONFIG_PPC_64K_PAGES
+	/* If we use 4K pages and our psize is not 4K, then we are hitting
+	 * a special driver mapping, we need to align the address before
+	 * we fetch the PTE
+	 */
+	if (psize != MMU_PAGE_4K)
+		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get PTE and page size from page tables */
 	ptep = find_linux_pte(pgdir, ea);
@@ -702,54 +719,56 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 
 	/* Do actual hashing */
-#ifndef CONFIG_PPC_64K_PAGES
-	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
-#else
+#ifdef CONFIG_PPC_64K_PAGES
 	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
 	if (pte_val(*ptep) & _PAGE_4K_PFN) {
 		demote_segment_4k(mm, ea);
 		psize = MMU_PAGE_4K;
 	}
 
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
-			if (user_region) {
-				demote_segment_4k(mm, ea);
-				psize = MMU_PAGE_4K;
-			} else if (ea < VMALLOC_END) {
-				/*
-				 * some driver did a non-cacheable mapping
-				 * in vmalloc space, so switch vmalloc
-				 * to 4k pages
-				 */
-				printk(KERN_ALERT "Reducing vmalloc segment "
-				       "to 4kB pages because of "
-				       "non-cacheable mapping\n");
-				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-			}
+	/* If this PTE is non-cacheable and we have restrictions on
+	 * using non cacheable large pages, then we switch to 4k
+	 */
+	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
+	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+		if (user_region) {
+			demote_segment_4k(mm, ea);
+			psize = MMU_PAGE_4K;
+		} else if (ea < VMALLOC_END) {
+			/*
+			 * some driver did a non-cacheable mapping
+			 * in vmalloc space, so switch vmalloc
+			 * to 4k pages
+			 */
+			printk(KERN_ALERT "Reducing vmalloc segment "
+			       "to 4kB pages because of "
+			       "non-cacheable mapping\n");
+			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
 #ifdef CONFIG_SPE_BASE
 			spu_flush_all_slbs(mm);
 #endif
 		}
-		if (user_region) {
-			if (psize != get_paca()->context.user_psize) {
-				get_paca()->context = mm->context;
-				slb_flush_and_rebolt();
-			}
-		} else if (get_paca()->vmalloc_sllp !=
-			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-			get_paca()->vmalloc_sllp =
-				mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	}
+	if (user_region) {
+		if (psize != get_paca()->context.user_psize) {
+			get_paca()->context.user_psize =
+				mm->context.user_psize;
 			slb_flush_and_rebolt();
 		}
+	} else if (get_paca()->vmalloc_sllp !=
+		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+		get_paca()->vmalloc_sllp =
+			mmu_psize_defs[mmu_vmalloc_psize].sllp;
+		slb_flush_and_rebolt();
 	}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
+#endif /* CONFIG_PPC_HAS_HASH_64K */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
-#endif /* CONFIG_PPC_64K_PAGES */
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -772,42 +791,55 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	unsigned long flags;
 	int local = 0;
 
-	/* We don't want huge pages prefaulted for now
-	 */
-	if (unlikely(in_hugepage_area(mm->context, ea)))
+	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
+
+#ifdef CONFIG_PPC_MM_SLICES
+	/* We only prefault standard pages for now */
+	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
 		return;
+#endif
 
 	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
 		" trap=%lx\n", mm, mm->pgd, ea, access, trap);
 
-	/* Get PTE, VSID, access mask */
+	/* Get Linux PTE if available */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
 		return;
 	ptep = find_linux_pte(pgdir, ea);
 	if (!ptep)
 		return;
+
+#ifdef CONFIG_PPC_64K_PAGES
+	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
+	 * a 64K kernel), then we don't preload, hash_page() will take
+	 * care of it once we actually try to access the page.
+	 * That way we don't have to duplicate all of the logic for segment
+	 * page size demotion here
+	 */
+	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
+		return;
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	/* Get VSID */
 	vsid = get_vsid(mm->context.id, ea);
 
-	/* Hash it in */
+	/* Hash doesn't like irqs */
 	local_irq_save(flags);
+
+	/* Is that local to this CPU ? */
 	mask = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(mm->cpu_vm_mask, mask))
 		local = 1;
-#ifndef CONFIG_PPC_64K_PAGES
-	__hash_page_4K(ea, access, vsid, ptep, trap, local);
-#else
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (mm->context.user_psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE))
-			demote_segment_4k(mm, ea);
-	}
+
+	/* Hash it in */
+#ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #endif /* CONFIG_PPC_64K_PAGES */
+	__hash_page_4K(ea, access, vsid, ptep, trap, local);
+
 	local_irq_restore(flags);
 }
 
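With CONFIG_PPC_MM_SLICES the process page size is no longer a single
context field: the address space is carved into slices, each recording its
own page-size index, and get_slice_psize() consults that record before
hashing. A sketch of such a lookup, assuming (as the slice code does for the
low 4GB) 256MB slices with a packed 4-bit psize per slice; the names here
are illustrative, not the kernel's:

	static unsigned int sketch_low_slice_psize(u64 low_slices_psize,
						   unsigned long addr)
	{
		unsigned int index = addr >> 28;	/* 256MB per slice */
		return (low_slices_psize >> (index * 4)) & 0xf;
	}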
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fb959264c104..92a1b16fb7e3 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -91,7 +91,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pgd_t *pg;
 	pud_t *pu;
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
 	addr &= HPAGE_MASK;
 
@@ -119,7 +119,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pud_t *pu;
 	hugepd_t *hpdp = NULL;
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
 	addr &= HPAGE_MASK;
 
@@ -302,7 +302,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	start = addr;
 	pgd = pgd_offset((*tlb)->mm, addr);
 	do {
-		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
+		BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
@@ -331,203 +331,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return __pte(old);
 }
 
-struct slb_flush_info {
-	struct mm_struct *mm;
-	u16 newareas;
-};
-
-static void flush_low_segments(void *parm)
-{
-	struct slb_flush_info *fi = parm;
-	unsigned long i;
-
-	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
-
-	if (current->active_mm != fi->mm)
-		return;
-
-	/* Only need to do anything if this CPU is working in the same
-	 * mm as the one which has changed */
-
-	/* update the paca copy of the context struct */
-	get_paca()->context = current->active_mm->context;
-
-	asm volatile("isync" : : : "memory");
-	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (fi->newareas & (1U << i)))
-			continue;
-		asm volatile("slbie %0"
-			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
-	}
-	asm volatile("isync" : : : "memory");
-}
-
-static void flush_high_segments(void *parm)
-{
-	struct slb_flush_info *fi = parm;
-	unsigned long i, j;
-
-
-	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
-
-	if (current->active_mm != fi->mm)
-		return;
-
-	/* Only need to do anything if this CPU is working in the same
-	 * mm as the one which has changed */
-
-	/* update the paca copy of the context struct */
-	get_paca()->context = current->active_mm->context;
-
-	asm volatile("isync" : : : "memory");
-	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (fi->newareas & (1U << i)))
-			continue;
-		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
-			asm volatile("slbie %0"
-				     :: "r" (((i << HTLB_AREA_SHIFT)
-					      + (j << SID_SHIFT)) | SLBIE_C));
-	}
-	asm volatile("isync" : : : "memory");
-}
-
-static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-	unsigned long start = area << SID_SHIFT;
-	unsigned long end = (area+1) << SID_SHIFT;
-	struct vm_area_struct *vma;
-
-	BUG_ON(area >= NUM_LOW_AREAS);
-
-	/* Check no VMAs are in the region */
-	vma = find_vma(mm, start);
-	if (vma && (vma->vm_start < end))
-		return -EBUSY;
-
-	return 0;
-}
-
-static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-	unsigned long start = area << HTLB_AREA_SHIFT;
-	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
-	struct vm_area_struct *vma;
-
-	BUG_ON(area >= NUM_HIGH_AREAS);
-
-	/* Hack, so that each addresses is controlled by exactly one
-	 * of the high or low area bitmaps, the first high area starts
-	 * at 4GB, not 0 */
-	if (start == 0)
-		start = 0x100000000UL;
-
-	/* Check no VMAs are in the region */
-	vma = find_vma(mm, start);
-	if (vma && (vma->vm_start < end))
-		return -EBUSY;
-
-	return 0;
-}
-
-static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-	unsigned long i;
-	struct slb_flush_info fi;
-
-	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
-	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
-
-	newareas &= ~(mm->context.low_htlb_areas);
-	if (! newareas)
-		return 0; /* The segments we want are already open */
-
-	for (i = 0; i < NUM_LOW_AREAS; i++)
-		if ((1 << i) & newareas)
-			if (prepare_low_area_for_htlb(mm, i) != 0)
-				return -EBUSY;
-
-	mm->context.low_htlb_areas |= newareas;
-
-	/* the context change must make it to memory before the flush,
-	 * so that further SLB misses do the right thing. */
-	mb();
-
-	fi.mm = mm;
-	fi.newareas = newareas;
-	on_each_cpu(flush_low_segments, &fi, 0, 1);
-
-	return 0;
-}
-
-static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-	struct slb_flush_info fi;
-	unsigned long i;
-
-	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
-	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
-		     != NUM_HIGH_AREAS);
-
-	newareas &= ~(mm->context.high_htlb_areas);
-	if (! newareas)
-		return 0; /* The areas we want are already open */
-
-	for (i = 0; i < NUM_HIGH_AREAS; i++)
-		if ((1 << i) & newareas)
-			if (prepare_high_area_for_htlb(mm, i) != 0)
-				return -EBUSY;
-
-	mm->context.high_htlb_areas |= newareas;
-
-	/* the context change must make it to memory before the flush,
-	 * so that further SLB misses do the right thing. */
-	mb();
-
-	fi.mm = mm;
-	fi.newareas = newareas;
-	on_each_cpu(flush_high_segments, &fi, 0, 1);
-
-	return 0;
-}
-
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
-{
-	int err = 0;
-
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (addr < 0x100000000UL)
-		err = open_low_hpage_areas(current->mm,
-					  LOW_ESID_MASK(addr, len));
-	if ((addr + len) > 0x100000000UL)
-		err = open_high_hpage_areas(current->mm,
-					  HTLB_AREA_MASK(addr, len));
-#ifdef CONFIG_SPE_BASE
-	spu_flush_all_slbs(current->mm);
-#endif
-	if (err) {
-		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
-		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
-		       addr, len,
-		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
-		return err;
-	}
-
-	return 0;
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
 	pte_t *ptep;
 	struct page *page;
 
-	if (! in_hugepage_area(mm->context, address))
+	if (get_slice_psize(mm, address) != mmu_huge_psize)
 		return ERR_PTR(-EINVAL);
 
 	ptep = huge_pte_offset(mm, address);
@@ -551,359 +361,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-/* Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions. */
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-				     unsigned long len, unsigned long pgoff,
-				     unsigned long flags)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	/* handle fixed mapping: prevent overlap with huge pages */
-	if (flags & MAP_FIXED) {
-		if (is_hugepage_only_range(mm, addr, len))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (addr) {
-		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
-		if (((TASK_SIZE - len) >= addr)
-		    && (!vma || (addr+len) <= vma->vm_start)
-		    && !is_hugepage_only_range(mm, addr,len))
-			return addr;
-	}
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	vma = find_vma(mm, addr);
-	while (TASK_SIZE - len >= addr) {
-		BUG_ON(vma && (addr >= vma->vm_end));
-
-		if (touches_hugepage_low_range(mm, addr, len)) {
-			addr = ALIGN(addr+1, 1<<SID_SHIFT);
-			vma = find_vma(mm, addr);
-			continue;
-		}
-		if (touches_hugepage_high_range(mm, addr, len)) {
-			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-			vma = find_vma(mm, addr);
-			continue;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		vma = vma->vm_next;
-	}
-
-	/* Make sure we didn't miss any holes */
-	if (start_addr != TASK_UNMAPPED_BASE) {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-		goto full_search;
-	}
-	return -ENOMEM;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- *
- * Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions.
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-			       const unsigned long len, const unsigned long pgoff,
-			       const unsigned long flags)
-{
-	struct vm_area_struct *vma, *prev_vma;
-	struct mm_struct *mm = current->mm;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* requested length too big for entire address space */
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	/* handle fixed mapping: prevent overlap with huge pages */
-	if (flags & MAP_FIXED) {
-		if (is_hugepage_only_range(mm, addr, len))
-			return -EINVAL;
-		return addr;
-	}
-
-	/* dont allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	/* requesting a specific address */
-	if (addr) {
-		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start)
-		    && !is_hugepage_only_range(mm, addr,len))
-			return addr;
-	}
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or cant fit in requested address hole */
-	addr = (mm->free_area_cache - len) & PAGE_MASK;
-	do {
-hugepage_recheck:
-		if (touches_hugepage_low_range(mm, addr, len)) {
-			addr = (addr & ((~0) << SID_SHIFT)) - len;
-			goto hugepage_recheck;
-		} else if (touches_hugepage_high_range(mm, addr, len)) {
-			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
-			goto hugepage_recheck;
-		}
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-			return addr;
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr+len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
-	/*
-	 * A failed mmap() very likely causes application failure,
-	 * so fall back to the bottom-up function here. This scenario
-	 * can happen with large stack limits and large mmap()
-	 * allocations.
-	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
-}
-
-static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
-{
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	if (TASK_SIZE - len >= addr &&
-	    (!vma || ((addr + len) <= vma->vm_start)))
-		return 0;
-
-	return -ENOMEM;
-}
-
-static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
-{
-	unsigned long addr = 0;
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	while (addr + len <= 0x100000000UL) {
-		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-		if (! __within_hugepage_low_range(addr, len, segmask)) {
-			addr = ALIGN(addr+1, 1<<SID_SHIFT);
-			vma = find_vma(current->mm, addr);
-			continue;
-		}
-
-		if (!vma || (addr + len) <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-		/* Depending on segmask this might not be a confirmed
-		 * hugepage region, so the ALIGN could have skipped
-		 * some VMAs */
-		vma = find_vma(current->mm, addr);
-	}
-
-	return -ENOMEM;
-}
-
-static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
-{
-	unsigned long addr = 0x100000000UL;
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	while (addr + len <= TASK_SIZE_USER64) {
-		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-		if (! __within_hugepage_high_range(addr, len, areamask)) {
-			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-			vma = find_vma(current->mm, addr);
-			continue;
-		}
-
-		if (!vma || (addr + len) <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-		/* Depending on segmask this might not be a confirmed
-		 * hugepage region, so the ALIGN could have skipped
-		 * some VMAs */
-		vma = find_vma(current->mm, addr);
-	}
-
-	return -ENOMEM;
-}
 
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-	int lastshift;
-	u16 areamask, curareas;
-
-	if (HPAGE_SHIFT == 0)
-		return -EINVAL;
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
-		return -EINVAL;
-
-	/* Paranoia, caller should have dealt with this */
-	BUG_ON((addr + len) < addr);
-
-	/* Handle MAP_FIXED */
-	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (test_thread_flag(TIF_32BIT)) {
-		curareas = current->mm->context.low_htlb_areas;
-
-		/* First see if we can use the hint address */
-		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
-			areamask = LOW_ESID_MASK(addr, len);
-			if (open_low_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-
-		/* Next see if we can map in the existing low areas */
-		addr = htlb_get_low_area(len, curareas);
-		if (addr != -ENOMEM)
-			return addr;
-
-		/* Finally go looking for areas to open */
-		lastshift = 0;
-		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
-		     ! lastshift; areamask >>=1) {
-			if (areamask & 1)
-				lastshift = 1;
-
-			addr = htlb_get_low_area(len, curareas | areamask);
-			if ((addr != -ENOMEM)
-			    && open_low_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-	} else {
-		curareas = current->mm->context.high_htlb_areas;
-
-		/* First see if we can use the hint address */
-		/* We discourage 64-bit processes from doing hugepage
-		 * mappings below 4GB (must use MAP_FIXED) */
-		if ((addr >= 0x100000000UL)
-		    && (htlb_check_hinted_area(addr, len) == 0)) {
-			areamask = HTLB_AREA_MASK(addr, len);
-			if (open_high_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-
-		/* Next see if we can map in the existing high areas */
-		addr = htlb_get_high_area(len, curareas);
-		if (addr != -ENOMEM)
-			return addr;
-
-		/* Finally go looking for areas to open */
-		lastshift = 0;
-		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
-		     ! lastshift; areamask >>=1) {
-			if (areamask & 1)
-				lastshift = 1;
-
-			addr = htlb_get_high_area(len, curareas | areamask);
-			if ((addr != -ENOMEM)
-			    && open_high_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-	}
-	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
-	       " enough areas\n");
-	return -ENOMEM;
+	return slice_get_unmapped_area(addr, len, flags,
+				       mmu_huge_psize, 1, 0);
 }
 
 /*
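All of the hand-rolled searching deleted above collapses into one call
because the slice allocator already knows which ranges carry which page
size; hugetlb simply asks it for a hugepage-capable range. A hedged usage
sketch (the meaning of the trailing 1, 0 arguments is inferred from this
call site and may not generalize):

	/* Sketch: ask the slice allocator for 32MB of hugepage VA space */
	unsigned long addr = slice_get_unmapped_area(0, 32UL << 20, 0,
						     mmu_huge_psize, 1, 0);
	if (IS_ERR_VALUE(addr))
		return addr;	/* -ENOMEM etc. propagated from the slice code */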
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index fe1fe852181a..7312a265545f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 	memset(addr, 0, kmem_cache_size(cache));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static const unsigned int pgtable_cache_size[3] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pmd_cache", "pgd_cache",
-};
-#else
 static const unsigned int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+	PGD_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pgd_pte_cache", "pud_pmd_cache",
-};
+#ifdef CONFIG_PPC_64K_PAGES
+	"pgd_cache", "pmd_cache",
+#else
+	"pgd_cache", "pud_pmd_cache",
 #endif /* CONFIG_PPC_64K_PAGES */
+};
 
 #ifdef CONFIG_HUGETLB_PAGE
 /* Hugepages need one extra cache, initialized in hugetlbpage.c. We
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1a6e08f3298f..246eeea40ece 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -31,6 +31,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
+#include <linux/suspend.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -276,6 +277,28 @@ void __init do_init_bootmem(void)
 	init_bootmem_done = 1;
 }
 
+/* mark pages that don't exist as nosave */
+static int __init mark_nonram_nosave(void)
+{
+	unsigned long lmb_next_region_start_pfn,
+		      lmb_region_max_pfn;
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt - 1; i++) {
+		lmb_region_max_pfn =
+			(lmb.memory.region[i].base >> PAGE_SHIFT) +
+			(lmb.memory.region[i].size >> PAGE_SHIFT);
+		lmb_next_region_start_pfn =
+			lmb.memory.region[i+1].base >> PAGE_SHIFT;
+
+		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
+			register_nosave_region(lmb_region_max_pfn,
+					       lmb_next_region_start_pfn);
+	}
+
+	return 0;
+}
+
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
@@ -307,6 +330,8 @@ void __init paging_init(void)
 	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 #endif
 	free_area_init_nodes(max_zone_pfns);
+
+	mark_nonram_nosave();
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
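mark_nonram_nosave() walks adjacent LMB regions and registers the pfn gap
between them, so hibernation never tries to save pages with no backing RAM.
A worked example with a hypothetical layout, RAM at [0, 512MB) and
[1GB, 1.5GB) with 4K pages:

	/* Region 0: base 0x00000000, size 0x20000000 -> end pfn 0x20000.
	 * Region 1: base 0x40000000              -> start pfn 0x40000. */
	unsigned long end_pfn  = (0x00000000UL + 0x20000000UL) >> 12; /* 0x20000 */
	unsigned long next_pfn = 0x40000000UL >> 12;                  /* 0x40000 */
	if (end_pfn < next_pfn)				/* 0x20000 < 0x40000 */
		register_nosave_region(end_pfn, next_pfn);	/* the 768MB hole */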
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 90a06ac02d5e..7a78cdc0515a 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -28,6 +28,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int index;
 	int err;
+	int new_context = (mm->context.id == 0);
 
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
@@ -50,9 +51,18 @@ again:
 	}
 
 	mm->context.id = index;
+#ifdef CONFIG_PPC_MM_SLICES
+	/* The old code would re-promote on fork, we don't do that
+	 * when using slices as it could cause problem promoting slices
+	 * that have been forced down to 4K
+	 */
+	if (new_context)
+		slice_set_user_psize(mm, mmu_virtual_psize);
+#else
 	mm->context.user_psize = mmu_virtual_psize;
 	mm->context.sllp = SLB_VSID_USER |
 		mmu_psize_defs[mmu_virtual_psize].sllp;
+#endif
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b3a592b25ab3..de45aa82d97b 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -252,12 +252,15 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
252 252
253 switch (action) { 253 switch (action) {
254 case CPU_UP_PREPARE: 254 case CPU_UP_PREPARE:
255 case CPU_UP_PREPARE_FROZEN:
255 numa_setup_cpu(lcpu); 256 numa_setup_cpu(lcpu);
256 ret = NOTIFY_OK; 257 ret = NOTIFY_OK;
257 break; 258 break;
258#ifdef CONFIG_HOTPLUG_CPU 259#ifdef CONFIG_HOTPLUG_CPU
259 case CPU_DEAD: 260 case CPU_DEAD:
261 case CPU_DEAD_FROZEN:
260 case CPU_UP_CANCELED: 262 case CPU_UP_CANCELED:
263 case CPU_UP_CANCELED_FROZEN:
261 unmap_cpu_from_node(lcpu); 264 unmap_cpu_from_node(lcpu);
262 break; 265 break;
263 ret = NOTIFY_OK; 266 ret = NOTIFY_OK;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 05066674a7a0..ec1421a20aaa 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -185,7 +185,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
185 185
186 if (Hash == 0) 186 if (Hash == 0)
187 return; 187 return;
188 pmd = pmd_offset(pgd_offset(mm, ea), ea); 188 pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
189 if (!pmd_none(*pmd)) 189 if (!pmd_none(*pmd))
190 add_hash_page(mm->context.id, ea, pmd_val(*pmd)); 190 add_hash_page(mm->context.id, ea, pmd_val(*pmd));
191} 191}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 224e960650a0..304375a73574 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -198,12 +198,6 @@ void slb_initialize(void)
198 static int slb_encoding_inited; 198 static int slb_encoding_inited;
199 extern unsigned int *slb_miss_kernel_load_linear; 199 extern unsigned int *slb_miss_kernel_load_linear;
200 extern unsigned int *slb_miss_kernel_load_io; 200 extern unsigned int *slb_miss_kernel_load_io;
201#ifdef CONFIG_HUGETLB_PAGE
202 extern unsigned int *slb_miss_user_load_huge;
203 unsigned long huge_llp;
204
205 huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
206#endif
207 201
208 /* Prepare our SLB miss handler based on our page size */ 202 /* Prepare our SLB miss handler based on our page size */
209 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; 203 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -220,11 +214,6 @@ void slb_initialize(void)
220 214
221 DBG("SLB: linear LLP = %04x\n", linear_llp); 215 DBG("SLB: linear LLP = %04x\n", linear_llp);
222 DBG("SLB: io LLP = %04x\n", io_llp); 216 DBG("SLB: io LLP = %04x\n", io_llp);
223#ifdef CONFIG_HUGETLB_PAGE
224 patch_slb_encoding(slb_miss_user_load_huge,
225 SLB_VSID_USER | huge_llp);
226 DBG("SLB: huge LLP = %04x\n", huge_llp);
227#endif
228 } 217 }
229 218
230 get_paca()->stab_rr = SLB_NUM_BOLTED; 219 get_paca()->stab_rr = SLB_NUM_BOLTED;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index b10e4707d7c1..cd1a93d4948c 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -82,31 +82,45 @@ _GLOBAL(slb_miss_kernel_load_io)
82 srdi. r9,r10,USER_ESID_BITS 82 srdi. r9,r10,USER_ESID_BITS
83 bne- 8f /* invalid ea bits set */ 83 bne- 8f /* invalid ea bits set */
84 84
85 /* Figure out if the segment contains huge pages */ 85
86#ifdef CONFIG_HUGETLB_PAGE 86 /* when using slices, we extract the psize off the slice bitmaps
87BEGIN_FTR_SECTION 87 * and then we need to get the sllp encoding off the mmu_psize_defs
88 b 1f 88 * array.
89END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE) 89 *
90 * XXX This is a bit inefficient especially for the normal case,
91 * so we should try to implement a fast path for the standard page
92 * size using the old sllp value so we avoid the array. We cannot
93 * really do dynamic patching unfortunately as processes might flip
94 * between 4k and 64k standard page size
95 */
96#ifdef CONFIG_PPC_MM_SLICES
90 cmpldi r10,16 97 cmpldi r10,16
91 98
92 lhz r9,PACALOWHTLBAREAS(r13) 99 /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
93 mr r11,r10 100 ld r9,PACALOWSLICESPSIZE(r13)
101 sldi r11,r10,2
94 blt 5f 102 blt 5f
103 ld r9,PACAHIGHSLICEPSIZE(r13)
104 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
105 andi. r11,r11,0x3c
95 106
96 lhz r9,PACAHIGHHTLBAREAS(r13) 1075: /* Extract the psize and multiply to get an array offset */
97 srdi r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT) 108 srd r9,r9,r11
98 109 andi. r9,r9,0xf
995: srd r9,r9,r11 110 mulli r9,r9,MMUPSIZEDEFSIZE
100 andi. r9,r9,1
101 beq 1f
102_GLOBAL(slb_miss_user_load_huge)
103 li r11,0
104 b 2f
1051:
106#endif /* CONFIG_HUGETLB_PAGE */
107 111
112 /* Now get to the array and obtain the sllp
113 */
114 ld r11,PACATOC(r13)
115 ld r11,mmu_psize_defs@got(r11)
116 add r11,r11,r9
117 ld r11,MMUPSIZESLLP(r11)
118 ori r11,r11,SLB_VSID_USER
119#else
120 /* paca context sllp already contains the SLB_VSID_USER bits */
108 lhz r11,PACACONTEXTSLLP(r13) 121 lhz r11,PACACONTEXTSLLP(r13)
1092: 122#endif /* CONFIG_PPC_MM_SLICES */
123
110 ld r9,PACACONTEXTID(r13) 124 ld r9,PACACONTEXTID(r13)
111 rldimi r10,r9,USER_ESID_BITS,0 125 rldimi r10,r9,USER_ESID_BITS,0
112 b slb_finish_load 126 b slb_finish_load
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
new file mode 100644
index 000000000000..f833dba2a028
--- /dev/null
+++ b/arch/powerpc/mm/slice.c
@@ -0,0 +1,633 @@
1/*
2 * address space "slices" (meta-segments) support
3 *
4 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
5 *
6 * Based on hugetlb implementation
7 *
8 * Copyright (C) 2003 David Gibson, IBM Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#undef DEBUG
26
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/err.h>
31#include <linux/spinlock.h>
32#include <linux/module.h>
33#include <asm/mman.h>
34#include <asm/mmu.h>
35#include <asm/spu.h>
36
37static spinlock_t slice_convert_lock = SPIN_LOCK_UNLOCKED;
38
39
40#ifdef DEBUG
41int _slice_debug = 1;
42
43static void slice_print_mask(const char *label, struct slice_mask mask)
44{
45 char *p, buf[16 + 3 + 16 + 1];
46 int i;
47
48 if (!_slice_debug)
49 return;
50 p = buf;
51 for (i = 0; i < SLICE_NUM_LOW; i++)
52 *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
53 *(p++) = ' ';
54 *(p++) = '-';
55 *(p++) = ' ';
56 for (i = 0; i < SLICE_NUM_HIGH; i++)
57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
58 *(p++) = 0;
59
60 printk(KERN_DEBUG "%s:%s\n", label, buf);
61}
62
63#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)
64
65#else
66
67static void slice_print_mask(const char *label, struct slice_mask mask) {}
68#define slice_dbg(fmt...)
69
70#endif
71
72static struct slice_mask slice_range_to_mask(unsigned long start,
73 unsigned long len)
74{
75 unsigned long end = start + len - 1;
76 struct slice_mask ret = { 0, 0 };
77
78 if (start < SLICE_LOW_TOP) {
79 unsigned long mend = min(end, SLICE_LOW_TOP);
80 unsigned long mstart = min(start, SLICE_LOW_TOP);
81
82 ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
83 - (1u << GET_LOW_SLICE_INDEX(mstart));
84 }
85
86 if ((start + len) > SLICE_LOW_TOP)
87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
88 - (1u << GET_HIGH_SLICE_INDEX(start));
89
90 return ret;
91}
92
93static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
94 unsigned long len)
95{
96 struct vm_area_struct *vma;
97
98 if ((mm->task_size - len) < addr)
99 return 0;
100 vma = find_vma(mm, addr);
101 return (!vma || (addr + len) <= vma->vm_start);
102}
103
104static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
105{
106 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
107 1ul << SLICE_LOW_SHIFT);
108}
109
110static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
111{
112 unsigned long start = slice << SLICE_HIGH_SHIFT;
113 unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
114
115 /* Hack, so that each addresses is controlled by exactly one
116 * of the high or low area bitmaps, the first high area starts
117 * at 4GB, not 0 */
118 if (start == 0)
119 start = SLICE_LOW_TOP;
120
121 return !slice_area_is_free(mm, start, end - start);
122}
123
124static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
125{
126 struct slice_mask ret = { 0, 0 };
127 unsigned long i;
128
129 for (i = 0; i < SLICE_NUM_LOW; i++)
130 if (!slice_low_has_vma(mm, i))
131 ret.low_slices |= 1u << i;
132
133 if (mm->task_size <= SLICE_LOW_TOP)
134 return ret;
135
136 for (i = 0; i < SLICE_NUM_HIGH; i++)
137 if (!slice_high_has_vma(mm, i))
138 ret.high_slices |= 1u << i;
139
140 return ret;
141}
142
143static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
144{
145 struct slice_mask ret = { 0, 0 };
146 unsigned long i;
147 u64 psizes;
148
149 psizes = mm->context.low_slices_psize;
150 for (i = 0; i < SLICE_NUM_LOW; i++)
151 if (((psizes >> (i * 4)) & 0xf) == psize)
152 ret.low_slices |= 1u << i;
153
154 psizes = mm->context.high_slices_psize;
155 for (i = 0; i < SLICE_NUM_HIGH; i++)
156 if (((psizes >> (i * 4)) & 0xf) == psize)
157 ret.high_slices |= 1u << i;
158
159 return ret;
160}
161
162static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
163{
164 return (mask.low_slices & available.low_slices) == mask.low_slices &&
165 (mask.high_slices & available.high_slices) == mask.high_slices;
166}
167
168static void slice_flush_segments(void *parm)
169{
170 struct mm_struct *mm = parm;
171 unsigned long flags;
172
173 if (mm != current->active_mm)
174 return;
175
176 /* update the paca copy of the context struct */
177 get_paca()->context = current->active_mm->context;
178
179 local_irq_save(flags);
180 slb_flush_and_rebolt();
181 local_irq_restore(flags);
182}
183
184static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
185{
186 /* Write the new slice psize bits */
187 u64 lpsizes, hpsizes;
188 unsigned long i, flags;
189
190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
191 slice_print_mask(" mask", mask);
192
193 /* We need to use a spinlock here to protect against
194 * concurrent 64k -> 4k demotion ...
195 */
196 spin_lock_irqsave(&slice_convert_lock, flags);
197
198 lpsizes = mm->context.low_slices_psize;
199 for (i = 0; i < SLICE_NUM_LOW; i++)
200 if (mask.low_slices & (1u << i))
201 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
202 (((unsigned long)psize) << (i * 4));
203
204 hpsizes = mm->context.high_slices_psize;
205 for (i = 0; i < SLICE_NUM_HIGH; i++)
206 if (mask.high_slices & (1u << i))
207 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
208 (((unsigned long)psize) << (i * 4));
209
210 mm->context.low_slices_psize = lpsizes;
211 mm->context.high_slices_psize = hpsizes;
212
213 slice_dbg(" lsps=%lx, hsps=%lx\n",
214 mm->context.low_slices_psize,
215 mm->context.high_slices_psize);
216
217 spin_unlock_irqrestore(&slice_convert_lock, flags);
218 mb();
219
220 /* XXX this is sub-optimal but will do for now */
221 on_each_cpu(slice_flush_segments, mm, 0, 1);
222#ifdef CONFIG_SPU_BASE
223 spu_flush_all_slbs(mm);
224#endif
225}
226
227static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
228 unsigned long len,
229 struct slice_mask available,
230 int psize, int use_cache)
231{
232 struct vm_area_struct *vma;
233 unsigned long start_addr, addr;
234 struct slice_mask mask;
235 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
236
237 if (use_cache) {
238 if (len <= mm->cached_hole_size) {
239 start_addr = addr = TASK_UNMAPPED_BASE;
240 mm->cached_hole_size = 0;
241 } else
242 start_addr = addr = mm->free_area_cache;
243 } else
244 start_addr = addr = TASK_UNMAPPED_BASE;
245
246full_search:
247 for (;;) {
248 addr = _ALIGN_UP(addr, 1ul << pshift);
249 if ((TASK_SIZE - len) < addr)
250 break;
251 vma = find_vma(mm, addr);
252 BUG_ON(vma && (addr >= vma->vm_end));
253
254 mask = slice_range_to_mask(addr, len);
255 if (!slice_check_fit(mask, available)) {
256 if (addr < SLICE_LOW_TOP)
257 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
258 else
259 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
260 continue;
261 }
262 if (!vma || addr + len <= vma->vm_start) {
263 /*
264 * Remember the place where we stopped the search:
265 */
266 if (use_cache)
267 mm->free_area_cache = addr + len;
268 return addr;
269 }
270 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
271 mm->cached_hole_size = vma->vm_start - addr;
272 addr = vma->vm_end;
273 }
274
275 /* Make sure we didn't miss any holes */
276 if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
277 start_addr = addr = TASK_UNMAPPED_BASE;
278 mm->cached_hole_size = 0;
279 goto full_search;
280 }
281 return -ENOMEM;
282}
283
284static unsigned long slice_find_area_topdown(struct mm_struct *mm,
285 unsigned long len,
286 struct slice_mask available,
287 int psize, int use_cache)
288{
289 struct vm_area_struct *vma;
290 unsigned long addr;
291 struct slice_mask mask;
292 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
293
294 /* check if free_area_cache is useful for us */
295 if (use_cache) {
296 if (len <= mm->cached_hole_size) {
297 mm->cached_hole_size = 0;
298 mm->free_area_cache = mm->mmap_base;
299 }
300
301 /* either no address requested or can't fit in requested
302 * address hole
303 */
304 addr = mm->free_area_cache;
305
306 /* make sure it can fit in the remaining address space */
307 if (addr > len) {
308 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
309 mask = slice_range_to_mask(addr, len);
310 if (slice_check_fit(mask, available) &&
311 slice_area_is_free(mm, addr, len))
312 /* remember the address as a hint for
313 * next time
314 */
315 return (mm->free_area_cache = addr);
316 }
317 }
318
319 addr = mm->mmap_base;
320 while (addr > len) {
321 /* Go down by chunk size */
322 addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
323
324 /* Check for hit with different page size */
325 mask = slice_range_to_mask(addr, len);
326 if (!slice_check_fit(mask, available)) {
327 if (addr < SLICE_LOW_TOP)
328 addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
329 else if (addr < (1ul << SLICE_HIGH_SHIFT))
330 addr = SLICE_LOW_TOP;
331 else
332 addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
333 continue;
334 }
335
336 /*
337 * Lookup failure means no vma is above this address,
338 * else if new region fits below vma->vm_start,
339 * return with success:
340 */
341 vma = find_vma(mm, addr);
342 if (!vma || (addr + len) <= vma->vm_start) {
343 /* remember the address as a hint for next time */
344 if (use_cache)
345 mm->free_area_cache = addr;
346 return addr;
347 }
348
349 /* remember the largest hole we saw so far */
350 if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
351 mm->cached_hole_size = vma->vm_start - addr;
352
353 /* try just below the current vma->vm_start */
354 addr = vma->vm_start;
355 }
356
357 /*
358 * A failed mmap() very likely causes application failure,
359 * so fall back to the bottom-up function here. This scenario
360 * can happen with large stack limits and large mmap()
361 * allocations.
362 */
363 addr = slice_find_area_bottomup(mm, len, available, psize, 0);
364
365 /*
366 * Restore the topdown base:
367 */
368 if (use_cache) {
369 mm->free_area_cache = mm->mmap_base;
370 mm->cached_hole_size = ~0UL;
371 }
372
373 return addr;
374}
375
376
377static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
378 struct slice_mask mask, int psize,
379 int topdown, int use_cache)
380{
381 if (topdown)
382 return slice_find_area_topdown(mm, len, mask, psize, use_cache);
383 else
384 return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
385}
386
387unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
388 unsigned long flags, unsigned int psize,
389 int topdown, int use_cache)
390{
391 struct slice_mask mask;
392 struct slice_mask good_mask;
393 struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
394 int pmask_set = 0;
395 int fixed = (flags & MAP_FIXED);
396 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
397 struct mm_struct *mm = current->mm;
398
399 /* Sanity checks */
400 BUG_ON(mm->task_size == 0);
401
402 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
403 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
404 addr, len, flags, topdown, use_cache);
405
406 if (len > mm->task_size)
407 return -ENOMEM;
408 if (fixed && (addr & ((1ul << pshift) - 1)))
409 return -EINVAL;
410 if (fixed && addr > (mm->task_size - len))
411 return -EINVAL;
412
413 /* If hint, make sure it matches our alignment restrictions */
414 if (!fixed && addr) {
415 addr = _ALIGN_UP(addr, 1ul << pshift);
416 slice_dbg(" aligned addr=%lx\n", addr);
417 }
418
419 /* First makeup a "good" mask of slices that have the right size
420 * already
421 */
422 good_mask = slice_mask_for_size(mm, psize);
423 slice_print_mask(" good_mask", good_mask);
424
425 /* First check hint if it's valid or if we have MAP_FIXED */
426 if ((addr != 0 || fixed) && (mm->task_size - len) >= addr) {
427
428 /* Don't bother with hint if it overlaps a VMA */
429 if (!fixed && !slice_area_is_free(mm, addr, len))
430 goto search;
431
432 /* Build a mask for the requested range */
433 mask = slice_range_to_mask(addr, len);
434 slice_print_mask(" mask", mask);
435
436 /* Check if we fit in the good mask. If we do, we just return,
437 * nothing else to do
438 */
439 if (slice_check_fit(mask, good_mask)) {
440 slice_dbg(" fits good !\n");
441 return addr;
442 }
443
444 /* We don't fit in the good mask, check what other slices are
445 * empty and thus can be converted
446 */
447 potential_mask = slice_mask_for_free(mm);
448 potential_mask.low_slices |= good_mask.low_slices;
449 potential_mask.high_slices |= good_mask.high_slices;
450 pmask_set = 1;
451 slice_print_mask(" potential", potential_mask);
452 if (slice_check_fit(mask, potential_mask)) {
453 slice_dbg(" fits potential !\n");
454 goto convert;
455 }
456 }
457
458 /* If we have MAP_FIXED and failed the above step, then error out */
459 if (fixed)
460 return -EBUSY;
461
462 search:
463 slice_dbg(" search...\n");
464
465 /* Now let's see if we can find something in the existing slices
466 * for that size
467 */
468 addr = slice_find_area(mm, len, good_mask, psize, topdown, use_cache);
469 if (addr != -ENOMEM) {
470 /* Found within the good mask, we don't have to setup,
471 * we thus return directly
472 */
473 slice_dbg(" found area at 0x%lx\n", addr);
474 return addr;
475 }
476
477 /* Won't fit, check what can be converted */
478 if (!pmask_set) {
479 potential_mask = slice_mask_for_free(mm);
480 potential_mask.low_slices |= good_mask.low_slices;
481 potential_mask.high_slices |= good_mask.high_slices;
482 pmask_set = 1;
483 slice_print_mask(" potential", potential_mask);
484 }
485
486 /* Now let's see if we can find something in the existing slices
487 * for that size
488 */
489 addr = slice_find_area(mm, len, potential_mask, psize, topdown,
490 use_cache);
491 if (addr == -ENOMEM)
492 return -ENOMEM;
493
494 mask = slice_range_to_mask(addr, len);
495 slice_dbg(" found potential area at 0x%lx\n", addr);
496 slice_print_mask(" mask", mask);
497
498 convert:
499 slice_convert(mm, mask, psize);
500 return addr;
501
502}
503EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
504
505unsigned long arch_get_unmapped_area(struct file *filp,
506 unsigned long addr,
507 unsigned long len,
508 unsigned long pgoff,
509 unsigned long flags)
510{
511 return slice_get_unmapped_area(addr, len, flags,
512 current->mm->context.user_psize,
513 0, 1);
514}
515
516unsigned long arch_get_unmapped_area_topdown(struct file *filp,
517 const unsigned long addr0,
518 const unsigned long len,
519 const unsigned long pgoff,
520 const unsigned long flags)
521{
522 return slice_get_unmapped_area(addr0, len, flags,
523 current->mm->context.user_psize,
524 1, 1);
525}
526
527unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
528{
529 u64 psizes;
530 int index;
531
532 if (addr < SLICE_LOW_TOP) {
533 psizes = mm->context.low_slices_psize;
534 index = GET_LOW_SLICE_INDEX(addr);
535 } else {
536 psizes = mm->context.high_slices_psize;
537 index = GET_HIGH_SLICE_INDEX(addr);
538 }
539
540 return (psizes >> (index * 4)) & 0xf;
541}
542EXPORT_SYMBOL_GPL(get_slice_psize);
543
544/*
545 * This is called by hash_page when it needs to do a lazy conversion of
546 * an address space from real 64K pages to combo 4K pages (typically
547 * when hitting a non cacheable mapping on a processor or hypervisor
548 * that won't allow them for 64K pages).
549 *
550 * This is also called in init_new_context() to change back the user
551 * psize from whatever the parent context had it set to
552 *
553 * This function will only change the content of the {low,high)_slice_psize
554 * masks, it will not flush SLBs as this shall be handled lazily by the
555 * caller.
556 */
557void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
558{
559 unsigned long flags, lpsizes, hpsizes;
560 unsigned int old_psize;
561 int i;
562
563 slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);
564
565 spin_lock_irqsave(&slice_convert_lock, flags);
566
567 old_psize = mm->context.user_psize;
568 slice_dbg(" old_psize=%d\n", old_psize);
569 if (old_psize == psize)
570 goto bail;
571
572 mm->context.user_psize = psize;
573 wmb();
574
575 lpsizes = mm->context.low_slices_psize;
576 for (i = 0; i < SLICE_NUM_LOW; i++)
577 if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
578 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
579 (((unsigned long)psize) << (i * 4));
580
581 hpsizes = mm->context.high_slices_psize;
582 for (i = 0; i < SLICE_NUM_HIGH; i++)
583 if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
584 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
585 (((unsigned long)psize) << (i * 4));
586
587 mm->context.low_slices_psize = lpsizes;
588 mm->context.high_slices_psize = hpsizes;
589
590 slice_dbg(" lsps=%lx, hsps=%lx\n",
591 mm->context.low_slices_psize,
592 mm->context.high_slices_psize);
593
594 bail:
595 spin_unlock_irqrestore(&slice_convert_lock, flags);
596}
597
598/*
599 * is_hugepage_only_range() is used by generic code to verify wether
600 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
601 *
602 * until the generic code provides a more generic hook and/or starts
603 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
604 * here knows how to deal with), we hijack it to keep standard mappings
605 * away from us.
606 *
607 * because of that generic code limitation, MAP_FIXED mapping cannot
608 * "convert" back a slice with no VMAs to the standard page size, only
609 * get_unmapped_area() can. It would be possible to fix it here but I
610 * prefer working on fixing the generic code instead.
611 *
612 * WARNING: This will not work if hugetlbfs isn't enabled since the
613 * generic code will redefine that function as 0 in that. This is ok
614 * for now as we only use slices with hugetlbfs enabled. This should
615 * be fixed as the generic code gets fixed.
616 */
617int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
618 unsigned long len)
619{
620 struct slice_mask mask, available;
621
622 mask = slice_range_to_mask(addr, len);
623 available = slice_mask_for_size(mm, mm->context.user_psize);
624
625#if 0 /* too verbose */
626 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
627 mm, addr, len);
628 slice_print_mask(" mask", mask);
629 slice_print_mask(" available", available);
630#endif
631 return !slice_check_fit(mask, available);
632}
633
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 925ff70be8ba..6a69417cbc0e 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -111,7 +111,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
111 if (start >= end) 111 if (start >= end)
112 return; 112 return;
113 end = (end - 1) | ~PAGE_MASK; 113 end = (end - 1) | ~PAGE_MASK;
114 pmd = pmd_offset(pgd_offset(mm, start), start); 114 pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
115 for (;;) { 115 for (;;) {
116 pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; 116 pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
117 if (pmd_end > end) 117 if (pmd_end > end)
@@ -169,7 +169,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
169 return; 169 return;
170 } 170 }
171 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; 171 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
172 pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr); 172 pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
173 if (!pmd_none(*pmd)) 173 if (!pmd_none(*pmd))
174 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); 174 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
175 FINISH_FLUSH; 175 FINISH_FLUSH;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index fd8d08c325eb..2bfc4d7e1aa2 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -143,16 +143,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
143 */ 143 */
144 addr &= PAGE_MASK; 144 addr &= PAGE_MASK;
145 145
146 /* Get page size (maybe move back to caller) */ 146 /* Get page size (maybe move back to caller).
147 *
148 * NOTE: when using special 64K mappings in 4K environment like
149 * for SPEs, we obtain the page size from the slice, which thus
150 * must still exist (and thus the VMA not reused) at the time
151 * of this call
152 */
147 if (huge) { 153 if (huge) {
148#ifdef CONFIG_HUGETLB_PAGE 154#ifdef CONFIG_HUGETLB_PAGE
149 psize = mmu_huge_psize; 155 psize = mmu_huge_psize;
150#else 156#else
151 BUG(); 157 BUG();
152 psize = pte_pagesize_index(pte); /* shutup gcc */ 158 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
153#endif 159#endif
154 } else 160 } else
155 psize = pte_pagesize_index(pte); 161 psize = pte_pagesize_index(mm, addr, pte);
156 162
157 /* Build full vaddr */ 163 /* Build full vaddr */
158 if (!is_kernel_addr(addr)) { 164 if (!is_kernel_addr(addr)) {
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 626b29f38304..c29293befba9 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -747,7 +747,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
747 * counter value etc.) are not copied to the actual registers 747 * counter value etc.) are not copied to the actual registers
748 * until the performance monitor is enabled. In order to get 748 * until the performance monitor is enabled. In order to get
749 * this to work as desired, the permormance monitor needs to 749 * this to work as desired, the permormance monitor needs to
750 * be disabled while writting to the latches. This is a 750 * be disabled while writing to the latches. This is a
751 * HW design issue. 751 * HW design issue.
752 */ 752 */
753 cbe_enable_pm(cpu); 753 cbe_enable_pm(cpu);
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 7ef0c6854799..ba55b0ff0f74 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -15,8 +15,8 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17 17
18#include <asm/pgtable.h>
19#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/pci-bridge.h> 20#include <asm/pci-bridge.h>
21#include <asm-powerpc/mpic.h> 21#include <asm-powerpc/mpic.h>
22#include <asm/mpc86xx.h> 22#include <asm/mpc86xx.h>
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index a35315af5c53..cf0e7bc8c2e7 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -1,4 +1,4 @@
1/*arch/ppc/platforms/mpc86xads-setup.c 1/*arch/powerpc/platforms/8xx/mpc86xads_setup.c
2 * 2 *
3 * Platform setup for the Freescale mpc86xads board 3 * Platform setup for the Freescale mpc86xads board
4 * 4 *
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index a57b57785acd..c36e475d93dc 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -1,4 +1,4 @@
1/*arch/ppc/platforms/mpc885ads-setup.c 1/*arch/powerpc/platforms/8xx/mpc885ads_setup.c
2 * 2 *
3 * Platform setup for the Freescale mpc885ads board 3 * Platform setup for the Freescale mpc885ads board
4 * 4 *
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 82551770917c..9b2b386ccf48 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -35,6 +35,21 @@ config SPU_FS
35 Units on machines implementing the Broadband Processor 35 Units on machines implementing the Broadband Processor
36 Architecture. 36 Architecture.
37 37
38config SPU_FS_64K_LS
39 bool "Use 64K pages to map SPE local store"
40 # we depend on PPC_MM_SLICES for now rather than selecting
41 # it because we depend on hugetlbfs hooks being present. We
42 # will fix that when the generic code has been improved to
43 # not require hijacking hugetlbfs hooks.
44 depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
45 default y
46 select PPC_HAS_HASH_64K
47 help
48 This option causes SPE local stores to be mapped in process
49 address spaces using 64K pages while the rest of the kernel
50 uses 4K pages. This can improve performances of applications
51 using multiple SPEs by lowering the TLB pressure on them.
52
38config SPU_BASE 53config SPU_BASE
39 bool 54 bool
40 default n 55 default n
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index d68d920eb2c4..7fb92f23f380 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -74,7 +74,7 @@ static void spider_io_flush(const volatile void __iomem *addr)
74 /* Fast path if we have a non-0 token, it indicates which bus we 74 /* Fast path if we have a non-0 token, it indicates which bus we
75 * are on. 75 * are on.
76 * 76 *
77 * If the token is 0, that means either the the ioremap was done 77 * If the token is 0, that means either that the ioremap was done
78 * before we initialized this layer, or it's a PIO operation. We 78 * before we initialized this layer, or it's a PIO operation. We
79 * fallback to a low path in this case. Hopefully, internal devices 79 * fallback to a low path in this case. Hopefully, internal devices
80 * which are ioremap'ed early should use in_XX/out_XX functions 80 * which are ioremap'ed early should use in_XX/out_XX functions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index fec51525252e..a7f5a7653c62 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -144,12 +144,11 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
144 144
145 switch(REGION_ID(ea)) { 145 switch(REGION_ID(ea)) {
146 case USER_REGION_ID: 146 case USER_REGION_ID:
147#ifdef CONFIG_HUGETLB_PAGE 147#ifdef CONFIG_PPC_MM_SLICES
148 if (in_hugepage_area(mm->context, ea)) 148 psize = get_slice_psize(mm, ea);
149 psize = mmu_huge_psize; 149#else
150 else 150 psize = mm->context.user_psize;
151#endif 151#endif
152 psize = mm->context.user_psize;
153 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | 152 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
154 SLB_VSID_USER; 153 SLB_VSID_USER;
155 break; 154 break;
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 2cd89c11af5a..328afcf89503 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,4 +1,4 @@
1obj-y += switch.o fault.o 1obj-y += switch.o fault.o lscsa_alloc.o
2 2
3obj-$(CONFIG_SPU_FS) += spufs.o 3obj-$(CONFIG_SPU_FS) += spufs.o
4spufs-y += inode.o file.o context.o syscalls.o coredump.o 4spufs-y += inode.o file.o context.o syscalls.o coredump.o
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index a87d9ca3dba2..8654749e317b 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -36,10 +36,8 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
36 /* Binding to physical processor deferred 36 /* Binding to physical processor deferred
37 * until spu_activate(). 37 * until spu_activate().
38 */ 38 */
39 spu_init_csa(&ctx->csa); 39 if (spu_init_csa(&ctx->csa))
40 if (!ctx->csa.lscsa) {
41 goto out_free; 40 goto out_free;
42 }
43 spin_lock_init(&ctx->mmio_lock); 41 spin_lock_init(&ctx->mmio_lock);
44 spin_lock_init(&ctx->mapping_lock); 42 spin_lock_init(&ctx->mapping_lock);
45 kref_init(&ctx->kref); 43 kref_init(&ctx->kref);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d010b2464a98..45614c73c784 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -118,14 +118,32 @@ spufs_mem_write(struct file *file, const char __user *buffer,
118static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, 118static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
119 unsigned long address) 119 unsigned long address)
120{ 120{
121 struct spu_context *ctx = vma->vm_file->private_data; 121 struct spu_context *ctx = vma->vm_file->private_data;
122 unsigned long pfn, offset = address - vma->vm_start; 122 unsigned long pfn, offset, addr0 = address;
123 123#ifdef CONFIG_SPU_FS_64K_LS
124 offset += vma->vm_pgoff << PAGE_SHIFT; 124 struct spu_state *csa = &ctx->csa;
125 int psize;
126
127 /* Check what page size we are using */
128 psize = get_slice_psize(vma->vm_mm, address);
129
130 /* Some sanity checking */
131 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
132
133 /* Wow, 64K, cool, we need to align the address though */
134 if (csa->use_big_pages) {
135 BUG_ON(vma->vm_start & 0xffff);
136 address &= ~0xfffful;
137 }
138#endif /* CONFIG_SPU_FS_64K_LS */
125 139
140 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
126 if (offset >= LS_SIZE) 141 if (offset >= LS_SIZE)
127 return NOPFN_SIGBUS; 142 return NOPFN_SIGBUS;
128 143
144 pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
145 addr0, address, offset);
146
129 spu_acquire(ctx); 147 spu_acquire(ctx);
130 148
131 if (ctx->state == SPU_STATE_SAVED) { 149 if (ctx->state == SPU_STATE_SAVED) {
@@ -149,9 +167,24 @@ static struct vm_operations_struct spufs_mem_mmap_vmops = {
149 .nopfn = spufs_mem_mmap_nopfn, 167 .nopfn = spufs_mem_mmap_nopfn,
150}; 168};
151 169
152static int 170static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
153spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) 171{
154{ 172#ifdef CONFIG_SPU_FS_64K_LS
173 struct spu_context *ctx = file->private_data;
174 struct spu_state *csa = &ctx->csa;
175
176 /* Sanity check VMA alignment */
177 if (csa->use_big_pages) {
178 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
179 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
180 vma->vm_pgoff);
181 if (vma->vm_start & 0xffff)
182 return -EINVAL;
183 if (vma->vm_pgoff & 0xf)
184 return -EINVAL;
185 }
186#endif /* CONFIG_SPU_FS_64K_LS */
187
155 if (!(vma->vm_flags & VM_SHARED)) 188 if (!(vma->vm_flags & VM_SHARED))
156 return -EINVAL; 189 return -EINVAL;
157 190
@@ -163,13 +196,34 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
163 return 0; 196 return 0;
164} 197}
165 198
199#ifdef CONFIG_SPU_FS_64K_LS
200unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
201 unsigned long len, unsigned long pgoff,
202 unsigned long flags)
203{
204 struct spu_context *ctx = file->private_data;
205 struct spu_state *csa = &ctx->csa;
206
207 /* If not using big pages, fallback to normal MM g_u_a */
208 if (!csa->use_big_pages)
209 return current->mm->get_unmapped_area(file, addr, len,
210 pgoff, flags);
211
212 /* Else, try to obtain a 64K pages slice */
213 return slice_get_unmapped_area(addr, len, flags,
214 MMU_PAGE_64K, 1, 0);
215}
216#endif /* CONFIG_SPU_FS_64K_LS */
217
166static const struct file_operations spufs_mem_fops = { 218static const struct file_operations spufs_mem_fops = {
167 .open = spufs_mem_open, 219 .open = spufs_mem_open,
168 .release = spufs_mem_release, 220 .read = spufs_mem_read,
169 .read = spufs_mem_read, 221 .write = spufs_mem_write,
170 .write = spufs_mem_write, 222 .llseek = generic_file_llseek,
171 .llseek = generic_file_llseek, 223 .mmap = spufs_mem_mmap,
172 .mmap = spufs_mem_mmap, 224#ifdef CONFIG_SPU_FS_64K_LS
225 .get_unmapped_area = spufs_get_unmapped_area,
226#endif
173}; 227};
174 228
175static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, 229static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
new file mode 100644
index 000000000000..f4b3c052dabf
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -0,0 +1,181 @@
1/*
2 * SPU local store allocation routines
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#undef DEBUG
22
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/vmalloc.h>
26
27#include <asm/spu.h>
28#include <asm/spu_csa.h>
29#include <asm/mmu.h>
30
31static int spu_alloc_lscsa_std(struct spu_state *csa)
32{
33 struct spu_lscsa *lscsa;
34 unsigned char *p;
35
36 lscsa = vmalloc(sizeof(struct spu_lscsa));
37 if (!lscsa)
38 return -ENOMEM;
39 memset(lscsa, 0, sizeof(struct spu_lscsa));
40 csa->lscsa = lscsa;
41
42 /* Set LS pages reserved to allow for user-space mapping. */
43 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
44 SetPageReserved(vmalloc_to_page(p));
45
46 return 0;
47}
48
49static void spu_free_lscsa_std(struct spu_state *csa)
50{
51 /* Clear reserved bit before vfree. */
52 unsigned char *p;
53
54 if (csa->lscsa == NULL)
55 return;
56
57 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
58 ClearPageReserved(vmalloc_to_page(p));
59
60 vfree(csa->lscsa);
61}
62
63#ifdef CONFIG_SPU_FS_64K_LS
64
65#define SPU_64K_PAGE_SHIFT 16
66#define SPU_64K_PAGE_ORDER (SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
67#define SPU_64K_PAGE_COUNT (1ul << SPU_64K_PAGE_ORDER)
68
69int spu_alloc_lscsa(struct spu_state *csa)
70{
71 struct page **pgarray;
72 unsigned char *p;
73 int i, j, n_4k;
74
75 /* Check availability of 64K pages */
76 if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
77 goto fail;
78
79 csa->use_big_pages = 1;
80
81 pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
82 csa);
83
84 /* First try to allocate our 64K pages. We need 5 of them
85 * with the current implementation. In the future, we should try
86 * to separate the lscsa with the actual local store image, thus
87 * allowing us to require only 4 64K pages per context
88 */
89 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
90 /* XXX This is likely to fail, we should use a special pool
91 * similiar to what hugetlbfs does.
92 */
93 csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
94 SPU_64K_PAGE_ORDER);
95 if (csa->lscsa_pages[i] == NULL)
96 goto fail;
97 }
98
99 pr_debug(" success ! creating vmap...\n");
100
101 /* Now we need to create a vmalloc mapping of these for the kernel
102 * and SPU context switch code to use. Currently, we stick to a
103 * normal kernel vmalloc mapping, which in our case will be 4K
104 */
105 n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
106 pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
107 if (pgarray == NULL)
108 goto fail;
109 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
110 for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
111 /* We assume all the struct page's are contiguous
112 * which should be hopefully the case for an order 4
113 * allocation..
114 */
115 pgarray[i * SPU_64K_PAGE_COUNT + j] =
116 csa->lscsa_pages[i] + j;
117 csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
118 kfree(pgarray);
119 if (csa->lscsa == NULL)
120 goto fail;
121
122 memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
123
124 /* Set LS pages reserved to allow for user-space mapping.
125 *
126 * XXX isn't that a bit obsolete ? I think we should just
127 * make sure the page count is high enough. Anyway, won't harm
128 * for now
129 */
130 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
131 SetPageReserved(vmalloc_to_page(p));
132
133 pr_debug(" all good !\n");
134
135 return 0;
136fail:
137 pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
138 spu_free_lscsa(csa);
139 return spu_alloc_lscsa_std(csa);
140}
141
142void spu_free_lscsa(struct spu_state *csa)
143{
144 unsigned char *p;
145 int i;
146
147 if (!csa->use_big_pages) {
148 spu_free_lscsa_std(csa);
149 return;
150 }
151 csa->use_big_pages = 0;
152
153 if (csa->lscsa == NULL)
154 goto free_pages;
155
156 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
157 ClearPageReserved(vmalloc_to_page(p));
158
159 vunmap(csa->lscsa);
160 csa->lscsa = NULL;
161
162 free_pages:
163
164 for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
165 if (csa->lscsa_pages[i])
166 __free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
167}
168
169#else /* CONFIG_SPU_FS_64K_LS */
170
171int spu_alloc_lscsa(struct spu_state *csa)
172{
173 return spu_alloc_lscsa_std(csa);
174}
175
176void spu_free_lscsa(struct spu_state *csa)
177{
178 spu_free_lscsa_std(csa);
179}
180
181#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 29dc59cefc38..71a0b41adb8c 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2188,40 +2188,30 @@ static void init_priv2(struct spu_state *csa)
2188 * as it is by far the largest of the context save regions, 2188 * as it is by far the largest of the context save regions,
2189 * and may need to be pinned or otherwise specially aligned. 2189 * and may need to be pinned or otherwise specially aligned.
2190 */ 2190 */
2191void spu_init_csa(struct spu_state *csa) 2191int spu_init_csa(struct spu_state *csa)
2192{ 2192{
2193 struct spu_lscsa *lscsa; 2193 int rc;
2194 unsigned char *p;
2195 2194
2196 if (!csa) 2195 if (!csa)
2197 return; 2196 return -EINVAL;
2198 memset(csa, 0, sizeof(struct spu_state)); 2197 memset(csa, 0, sizeof(struct spu_state));
2199 2198
2200 lscsa = vmalloc(sizeof(struct spu_lscsa)); 2199 rc = spu_alloc_lscsa(csa);
2201 if (!lscsa) 2200 if (rc)
2202 return; 2201 return rc;
2203 2202
2204 memset(lscsa, 0, sizeof(struct spu_lscsa));
2205 csa->lscsa = lscsa;
2206 spin_lock_init(&csa->register_lock); 2203 spin_lock_init(&csa->register_lock);
2207 2204
2208 /* Set LS pages reserved to allow for user-space mapping. */
2209 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2210 SetPageReserved(vmalloc_to_page(p));
2211
2212 init_prob(csa); 2205 init_prob(csa);
2213 init_priv1(csa); 2206 init_priv1(csa);
2214 init_priv2(csa); 2207 init_priv2(csa);
2208
2209 return 0;
2215} 2210}
2216EXPORT_SYMBOL_GPL(spu_init_csa); 2211EXPORT_SYMBOL_GPL(spu_init_csa);
2217 2212
2218void spu_fini_csa(struct spu_state *csa) 2213void spu_fini_csa(struct spu_state *csa)
2219{ 2214{
2220 /* Clear reserved bit before vfree. */ 2215 spu_free_lscsa(csa);
2221 unsigned char *p;
2222 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2223 ClearPageReserved(vmalloc_to_page(p));
2224
2225 vfree(csa->lscsa);
2226} 2216}
2227EXPORT_SYMBOL_GPL(spu_fini_csa); 2217EXPORT_SYMBOL_GPL(spu_fini_csa);
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 46c3a8e7c3a8..761d9e971fc4 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -7,7 +7,9 @@ menu "iSeries device drivers"
7 depends on PPC_ISERIES 7 depends on PPC_ISERIES
8 8
9config VIOCONS 9config VIOCONS
10 tristate "iSeries Virtual Console Support (Obsolete)" 10 bool "iSeries Virtual Console Support (Obsolete)"
11 depends on !HVC_ISERIES
12 default n
11 help 13 help
12 This is the old virtual console driver for legacy iSeries. 14 This is the old virtual console driver for legacy iSeries.
13 You should use the iSeries Hypervisor Virtual Console 15 You should use the iSeries Hypervisor Virtual Console
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 63e23062e982..093438b93bd9 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -100,6 +100,9 @@ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
100static DEFINE_SPINLOCK(slot_errbuf_lock); 100static DEFINE_SPINLOCK(slot_errbuf_lock);
101static int eeh_error_buf_size; 101static int eeh_error_buf_size;
102 102
103#define EEH_PCI_REGS_LOG_LEN 4096
104static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
105
103/* System monitoring statistics */ 106/* System monitoring statistics */
104static unsigned long no_device; 107static unsigned long no_device;
105static unsigned long no_dn; 108static unsigned long no_dn;
@@ -115,7 +118,8 @@ static unsigned long slot_resets;
115/* --------------------------------------------------------------- */ 118/* --------------------------------------------------------------- */
116/* Below lies the EEH event infrastructure */ 119/* Below lies the EEH event infrastructure */
117 120
118void eeh_slot_error_detail (struct pci_dn *pdn, int severity) 121static void rtas_slot_error_detail(struct pci_dn *pdn, int severity,
122 char *driver_log, size_t loglen)
119{ 123{
120 int config_addr; 124 int config_addr;
121 unsigned long flags; 125 unsigned long flags;
@@ -133,7 +137,8 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
133 rc = rtas_call(ibm_slot_error_detail, 137 rc = rtas_call(ibm_slot_error_detail,
134 8, 1, NULL, config_addr, 138 8, 1, NULL, config_addr,
135 BUID_HI(pdn->phb->buid), 139 BUID_HI(pdn->phb->buid),
136 BUID_LO(pdn->phb->buid), NULL, 0, 140 BUID_LO(pdn->phb->buid),
141 virt_to_phys(driver_log), loglen,
137 virt_to_phys(slot_errbuf), 142 virt_to_phys(slot_errbuf),
138 eeh_error_buf_size, 143 eeh_error_buf_size,
139 severity); 144 severity);
@@ -144,6 +149,84 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
144} 149}
145 150
146/** 151/**
152 * gather_pci_data - copy assorted PCI config space registers to buff
153 * @pdn: device to report data for
154 * @buf: point to buffer in which to log
155 * @len: amount of room in buffer
156 *
157 * This routine captures assorted PCI configuration space data,
158 * and puts them into a buffer for RTAS error logging.
159 */
160static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
161{
162 u32 cfg;
163 int cap, i;
164 int n = 0;
165
166 n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name);
167 printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name);
168
169 rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
170 n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
171 printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
172
173 rtas_read_config(pdn, PCI_COMMAND, 4, &cfg);
174 n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
175 printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
176
177 /* Dump out the PCI-X command and status regs */
178 cap = pci_find_capability(pdn->pcidev, PCI_CAP_ID_PCIX);
179 if (cap) {
180 rtas_read_config(pdn, cap, 4, &cfg);
181 n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
182 printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
183
184 rtas_read_config(pdn, cap+4, 4, &cfg);
185 n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
186 printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
187 }
188
189 /* If PCI-E capable, dump PCI-E cap 10, and the AER */
190 cap = pci_find_capability(pdn->pcidev, PCI_CAP_ID_EXP);
191 if (cap) {
192 n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
193 printk(KERN_WARNING
194 "EEH: PCI-E capabilities and status follow:\n");
195
196 for (i=0; i<=8; i++) {
197 rtas_read_config(pdn, cap+4*i, 4, &cfg);
198 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
199 printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
200 }
201
202 cap = pci_find_ext_capability(pdn->pcidev,PCI_EXT_CAP_ID_ERR);
203 if (cap) {
204 n += scnprintf(buf+n, len-n, "pci-e AER:\n");
205 printk(KERN_WARNING
206 "EEH: PCI-E AER capability register set follows:\n");
207
208 for (i=0; i<14; i++) {
209 rtas_read_config(pdn, cap+4*i, 4, &cfg);
210 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
211 printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
212 }
213 }
214 }
215 return n;
216}
217
218void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
219{
220 size_t loglen = 0;
221 memset(pci_regs_buf, 0, EEH_PCI_REGS_LOG_LEN);
222
223 rtas_pci_enable(pdn, EEH_THAW_MMIO);
224 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
225
226 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
227}
228
229/**
147 * read_slot_reset_state - Read the reset state of a device node's slot 230 * read_slot_reset_state - Read the reset state of a device node's slot
148 * @dn: device node to read 231 * @dn: device node to read
149 * @rets: array to return results in 232 * @rets: array to return results in
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 3170e003f76a..f07d849cfc84 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -361,11 +361,12 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
361 goto hard_fail; 361 goto hard_fail;
362 } 362 }
363 363
364 eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
365 printk(KERN_WARNING 364 printk(KERN_WARNING
366 "EEH: This PCI device has failed %d times since last reboot: " 365 "EEH: This PCI device has failed %d times in the last hour:\n",
367 "location=%s driver=%s pci addr=%s\n", 366 frozen_pdn->eeh_freeze_count);
368 frozen_pdn->eeh_freeze_count, location, drv_str, pci_str); 367 printk(KERN_WARNING
368 "EEH: location=%s driver=%s pci addr=%s\n",
369 location, drv_str, pci_str);
369 370
370 /* Walk the various device drivers attached to this slot through 371 /* Walk the various device drivers attached to this slot through
371 * a reset sequence, giving each an opportunity to do what it needs 372 * a reset sequence, giving each an opportunity to do what it needs
@@ -375,6 +376,11 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
375 */ 376 */
376 pci_walk_bus(frozen_bus, eeh_report_error, &result); 377 pci_walk_bus(frozen_bus, eeh_report_error, &result);
377 378
379 /* Since rtas may enable MMIO when posting the error log,
380 * don't post the error log until after all dev drivers
381 * have been informed. */
382 eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
383
378 /* If all device drivers were EEH-unaware, then shut 384 /* If all device drivers were EEH-unaware, then shut
379 * down all of the device drivers, and hope they 385 * down all of the device drivers, and hope they
380 * go down willingly, without panicing the system. 386 * go down willingly, without panicing the system.
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 8a123c71449f..cad175724359 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -907,7 +907,7 @@ static int __init fs_enet_of_init(void)
907 struct fs_platform_info fs_enet_data; 907 struct fs_platform_info fs_enet_data;
908 const unsigned int *id; 908 const unsigned int *id;
909 const unsigned int *phy_addr; 909 const unsigned int *phy_addr;
910 void *mac_addr; 910 const void *mac_addr;
911 const phandle *ph; 911 const phandle *ph;
912 const char *model; 912 const char *model;
913 913
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index c5850a272650..e8e94321b59e 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -35,7 +35,7 @@ int
35main(void) 35main(void)
36{ 36{
37 DEFINE(THREAD, offsetof(struct task_struct, thread)); 37 DEFINE(THREAD, offsetof(struct task_struct, thread));
38 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); 38 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
39 DEFINE(MM, offsetof(struct task_struct, mm)); 39 DEFINE(MM, offsetof(struct task_struct, mm));
40 DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); 40 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
41 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 41 DEFINE(KSP, offsetof(struct thread_struct, ksp));
diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c
index 7ce5364fdb3b..bf72204125c5 100644
--- a/arch/ppc/platforms/mpc866ads_setup.c
+++ b/arch/ppc/platforms/mpc866ads_setup.c
@@ -1,4 +1,4 @@
1/*arch/ppc/platforms/mpc866ads-setup.c 1/*arch/ppc/platforms/mpc866ads_setup.c
2 * 2 *
3 * Platform setup for the Freescale mpc866ads board 3 * Platform setup for the Freescale mpc866ads board
4 * 4 *
diff --git a/arch/ppc/syslib/ipic.c b/arch/ppc/syslib/ipic.c
index 10659c24b1be..9192777d0f78 100644
--- a/arch/ppc/syslib/ipic.c
+++ b/arch/ppc/syslib/ipic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm-ppc/ipic.c 2 * arch/ppc/syslib/ipic.c
3 * 3 *
4 * IPIC routines implementations. 4 * IPIC routines implementations.
5 * 5 *
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ee89b33145d5..81a2b92ab0c2 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -567,9 +567,11 @@ appldata_cpu_notify(struct notifier_block *self,
567{ 567{
568 switch (action) { 568 switch (action) {
569 case CPU_ONLINE: 569 case CPU_ONLINE:
570 case CPU_ONLINE_FROZEN:
570 appldata_online_cpu((long) hcpu); 571 appldata_online_cpu((long) hcpu);
571 break; 572 break;
572 case CPU_DEAD: 573 case CPU_DEAD:
574 case CPU_DEAD_FROZEN:
573 appldata_offline_cpu((long) hcpu); 575 appldata_offline_cpu((long) hcpu);
574 break; 576 break;
575 default: 577 default:
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 99ff9f08e4d7..d1defbbfcd81 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -54,7 +54,7 @@ config S390_PRNG
54 default "m" 54 default "m"
55 help 55 help
56 Select this option if you want to use the s390 pseudo random number 56 Select this option if you want to use the s390 pseudo random number
57 generator. The PRNG is part of the cryptograhic processor functions 57 generator. The PRNG is part of the cryptographic processor functions
58 and uses triple-DES to generate secure random numbers like the 58 and uses triple-DES to generate secure random numbers like the
59 ANSI X9.17 standard. The PRNG is usable via the char device 59 ANSI X9.17 standard. The PRNG is usable via the char device
60 /dev/prandom. 60 /dev/prandom.
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ec514fe5ccd0..1375f8a4469e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -15,7 +15,7 @@
15 15
16int main(void) 16int main(void)
17{ 17{
18 DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),); 18 DEFINE(__THREAD_info, offsetof(struct task_struct, stack),);
19 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),); 19 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
20 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),); 20 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
21 DEFINE(__THREAD_mm_segment, 21 DEFINE(__THREAD_mm_segment,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b7977027a28f..09f028a3266b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -789,10 +789,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
789 789
790 switch (action) { 790 switch (action) {
791 case CPU_ONLINE: 791 case CPU_ONLINE:
792 case CPU_ONLINE_FROZEN:
792 if (sysdev_create_file(s, &attr_capability)) 793 if (sysdev_create_file(s, &attr_capability))
793 return NOTIFY_BAD; 794 return NOTIFY_BAD;
794 break; 795 break;
795 case CPU_DEAD: 796 case CPU_DEAD:
797 case CPU_DEAD_FROZEN:
796 sysdev_remove_file(s, &attr_capability); 798 sysdev_remove_file(s, &attr_capability);
797 break; 799 break;
798 } 800 }
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d74eb120a9c6..038179ecf6a9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -52,6 +52,9 @@ config GENERIC_IOMAP
52config GENERIC_TIME 52config GENERIC_TIME
53 def_bool n 53 def_bool n
54 54
55config GENERIC_CLOCKEVENTS
56 def_bool n
57
55config SYS_SUPPORTS_APM_EMULATION 58config SYS_SUPPORTS_APM_EMULATION
56 bool 59 bool
57 60
@@ -436,11 +439,11 @@ endmenu
436 439
437menu "Timer and clock configuration" 440menu "Timer and clock configuration"
438 441
439if !GENERIC_TIME
440
441config SH_TMU 442config SH_TMU
442 bool "TMU timer support" 443 bool "TMU timer support"
443 depends on CPU_SH3 || CPU_SH4 444 depends on CPU_SH3 || CPU_SH4
445 select GENERIC_TIME
446 select GENERIC_CLOCKEVENTS
444 default y 447 default y
445 help 448 help
446 This enables the use of the TMU as the system timer. 449 This enables the use of the TMU as the system timer.
@@ -459,8 +462,6 @@ config SH_MTU2
459 help 462 help
460 This enables the use of the MTU2 as the system timer. 463 This enables the use of the MTU2 as the system timer.
461 464
462endif
463
464config SH_TIMER_IRQ 465config SH_TIMER_IRQ
465 int 466 int
466 default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 467 default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
@@ -468,24 +469,6 @@ config SH_TIMER_IRQ
468 default "140" if CPU_SUBTYPE_SH7206 469 default "140" if CPU_SUBTYPE_SH7206
469 default "16" 470 default "16"
470 471
471config NO_IDLE_HZ
472 bool "Dynamic tick timer"
473 help
474 Select this option if you want to disable continuous timer ticks
475 and have them programmed to occur as required. This option saves
476 power as the system can remain in idle state for longer.
477
478 By default dynamic tick is disabled during the boot, and can be
479 manually enabled with:
480
481 echo 1 > /sys/devices/system/timer/timer0/dyn_tick
482
483 Alternatively, if you want dynamic tick automatically enabled
484 during boot, pass "dyntick=enable" via the kernel command string.
485
486 Please note that dynamic tick may affect the accuracy of
487 timekeeping on some platforms depending on the implementation.
488
489config SH_PCLK_FREQ 472config SH_PCLK_FREQ
490 int "Peripheral clock frequency (in Hz)" 473 int "Peripheral clock frequency (in Hz)"
491 default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343 474 default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
@@ -509,6 +492,8 @@ config SH_CLK_MD
509 help 492 help
510 MD2 - MD0 pin setting. 493 MD2 - MD0 pin setting.
511 494
495source "kernel/time/Kconfig"
496
512endmenu 497endmenu
513 498
514menu "CPU Frequency scaling" 499menu "CPU Frequency scaling"
diff --git a/arch/sh/boards/landisk/setup.c b/arch/sh/boards/landisk/setup.c
index a83a5d9587bb..4058b4f50d44 100644
--- a/arch/sh/boards/landisk/setup.c
+++ b/arch/sh/boards/landisk/setup.c
@@ -93,6 +93,7 @@ static void __init landisk_setup(char **cmdline_p)
93 */ 93 */
94struct sh_machine_vector mv_landisk __initmv = { 94struct sh_machine_vector mv_landisk __initmv = {
95 .mv_name = "LANDISK", 95 .mv_name = "LANDISK",
96 .mv_nr_irqs = 72,
96 .mv_setup = landisk_setup, 97 .mv_setup = landisk_setup,
97 .mv_init_irq = init_landisk_IRQ, 98 .mv_init_irq = init_landisk_IRQ,
98}; 99};
diff --git a/arch/sh/boards/se/7751/setup.c b/arch/sh/boards/se/7751/setup.c
index 770defed9c4a..52c7bfa57c2c 100644
--- a/arch/sh/boards/se/7751/setup.c
+++ b/arch/sh/boards/se/7751/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/sh/kernel/setup_7751se.c 2 * linux/arch/sh/boards/se/7751/setup.c
3 * 3 *
4 * Copyright (C) 2000 Kazumoto Kojima 4 * Copyright (C) 2000 Kazumoto Kojima
5 * 5 *
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index 6cb92676c5fc..e13f06bebd92 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -2,8 +2,9 @@
2# Makefile for the Linux SuperH-specific device drivers. 2# Makefile for the Linux SuperH-specific device drivers.
3# 3#
4 4
5obj-y += dma/
6
5obj-$(CONFIG_PCI) += pci/ 7obj-$(CONFIG_PCI) += pci/
6obj-$(CONFIG_SH_DMA) += dma/
7obj-$(CONFIG_SUPERHYWAY) += superhyway/ 8obj-$(CONFIG_SUPERHYWAY) += superhyway/
8obj-$(CONFIG_PUSH_SWITCH) += push-switch.o 9obj-$(CONFIG_PUSH_SWITCH) += push-switch.o
9obj-$(CONFIG_HEARTBEAT) += heartbeat.o 10obj-$(CONFIG_HEARTBEAT) += heartbeat.o
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index defc13c37d48..99935f9daf4b 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -1,12 +1,12 @@
1menu "DMA support" 1menu "DMA support"
2 2
3config SH_DMA 3config SH_DMA_API
4 bool "DMA controller (DMAC) support" 4 bool
5 help
6 Selecting this option will provide same API as PC's Direct Memory
7 Access Controller(8237A) for SuperH DMAC.
8 5
9 If unsure, say N. 6config SH_DMA
7 bool "SuperH on-chip DMA controller (DMAC) support"
8 select SH_DMA_API
9 default n
10 10
11config NR_ONCHIP_DMA_CHANNELS 11config NR_ONCHIP_DMA_CHANNELS
12 depends on SH_DMA 12 depends on SH_DMA
@@ -53,4 +53,12 @@ config DMA_PAGE_OPS_CHANNEL
53 in case channel 3 is unavailable. On the SH4, channels 1,2, and 3 53 in case channel 3 is unavailable. On the SH4, channels 1,2, and 3
54 are dual-address capable. 54 are dual-address capable.
55 55
56config SH_DMABRG
57 bool "SH7760 DMABRG support"
58 depends on CPU_SUBTYPE_SH7760
59 help
60 The DMABRG does data transfers from main memory to Audio/USB units
61 of the SH7760.
62 Say Y if you want to use Audio/USB DMA on your SH7760 board.
63
56endmenu 64endmenu
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index db1295d32268..1ac812d24488 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,8 @@
2# Makefile for the SuperH DMA specific kernel interface routines under Linux. 2# Makefile for the SuperH DMA specific kernel interface routines under Linux.
3# 3#
4 4
5obj-y += dma-api.o 5obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o
6obj-$(CONFIG_ISA_DMA_API) += dma-isa.o 6obj-$(CONFIG_ISA_DMA_API) += dma-isa.o
7obj-$(CONFIG_SYSFS) += dma-sysfs.o
8obj-$(CONFIG_SH_DMA) += dma-sh.o 7obj-$(CONFIG_SH_DMA) += dma-sh.o
9obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o 8obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o
9obj-$(CONFIG_SH_DMABRG) += dmabrg.o
diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c
new file mode 100644
index 000000000000..9d0a29370f21
--- /dev/null
+++ b/arch/sh/drivers/dma/dmabrg.c
@@ -0,0 +1,196 @@
1/*
2 * SH7760 DMABRG IRQ handling
3 *
4 * (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
5 * licensed under the GPLv2.
6 *
7 */
8
9#include <linux/interrupt.h>
10#include <linux/kernel.h>
11#include <asm/dma.h>
12#include <asm/dmabrg.h>
13#include <asm/io.h>
14
15/*
16 * The DMABRG is a special DMA unit within the SH7760. It does transfers
17 * from USB-SRAM/Audio units to main memory (and also the LCDC; but that
18 * part is sensibly placed in the LCDC registers and requires no irqs)
 19 * It has 3 IRQ lines which trigger 10 events, and works independently
 20 * of the traditional SH DMAC (although it blocks usage of DMAC 0)
21 *
22 * BRGIRQID | component | dir | meaning | source
23 * -----------------------------------------------------
24 * 0 | USB-DMA | ... | xfer done | DMABRGI1
25 * 1 | USB-UAE | ... | USB addr err.| DMABRGI0
26 * 2 | HAC0/SSI0 | play| all done | DMABRGI1
27 * 3 | HAC0/SSI0 | play| half done | DMABRGI2
28 * 4 | HAC0/SSI0 | rec | all done | DMABRGI1
29 * 5 | HAC0/SSI0 | rec | half done | DMABRGI2
30 * 6 | HAC1/SSI1 | play| all done | DMABRGI1
31 * 7 | HAC1/SSI1 | play| half done | DMABRGI2
32 * 8 | HAC1/SSI1 | rec | all done | DMABRGI1
33 * 9 | HAC1/SSI1 | rec | half done | DMABRGI2
34 *
35 * all can be enabled/disabled in the DMABRGCR register,
 36 * as well as checked if they occurred.
37 *
38 * DMABRGI0 services USB DMA Address errors, but it still must be
 39 * enabled/acked in the DMABRGCR register. The USB-DMA complete indicator
40 * is grouped together with the audio buffer end indicators, too bad...
41 *
42 * DMABRGCR: Bits 31-24: audio-dma ENABLE flags,
43 * Bits 23-16: audio-dma STATUS flags,
44 * Bits 9-8: USB error/xfer ENABLE,
45 * Bits 1-0: USB error/xfer STATUS.
46 * Ack an IRQ by writing 0 to the STATUS flag.
47 * Mask IRQ by writing 0 to ENABLE flag.
48 *
49 * Usage is almost like with any other IRQ:
50 * dmabrg_request_irq(BRGIRQID, handler, data)
51 * dmabrg_free_irq(BRGIRQID)
52 *
53 * handler prototype: void brgirqhandler(void *data)
54 */
55
56#define DMARSRA 0xfe090000
57#define DMAOR 0xffa00040
58#define DMACHCR0 0xffa0000c
59#define DMABRGCR 0xfe3c0000
60
61#define DMAOR_BRG 0x0000c000
62#define DMAOR_DMEN 0x00000001
63
64#define DMABRGI0 68
65#define DMABRGI1 69
66#define DMABRGI2 70
67
68struct dmabrg_handler {
69 void (*handler)(void *);
70 void *data;
71} *dmabrg_handlers;
72
73static inline void dmabrg_call_handler(int i)
74{
75 dmabrg_handlers[i].handler(dmabrg_handlers[i].data);
76}
77
78/*
79 * main DMABRG irq handler. It acks irqs and then
80 * handles every set and unmasked bit sequentially.
81 * No locking and no validity checks; it should be
82 * as fast as possible (audio!)
83 */
84static irqreturn_t dmabrg_irq(int irq, void *data)
85{
86 unsigned long dcr;
87 unsigned int i;
88
89 dcr = ctrl_inl(DMABRGCR);
90 ctrl_outl(dcr & ~0x00ff0003, DMABRGCR); /* ack all */
91 dcr &= dcr >> 8; /* ignore masked */
92
93 /* USB stuff, get it out of the way first */
94 if (dcr & 1)
95 dmabrg_call_handler(DMABRGIRQ_USBDMA);
96 if (dcr & 2)
97 dmabrg_call_handler(DMABRGIRQ_USBDMAERR);
98
99 /* Audio */
100 dcr >>= 16;
101 while (dcr) {
102 i = __ffs(dcr);
103 dcr &= dcr - 1;
104 dmabrg_call_handler(i + DMABRGIRQ_A0TXF);
105 }
106 return IRQ_HANDLED;
107}
108
109static void dmabrg_disable_irq(unsigned int dmairq)
110{
111 unsigned long dcr;
112 dcr = ctrl_inl(DMABRGCR);
113 dcr &= ~(1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
114 ctrl_outl(dcr, DMABRGCR);
115}
116
117static void dmabrg_enable_irq(unsigned int dmairq)
118{
119 unsigned long dcr;
120 dcr = ctrl_inl(DMABRGCR);
121 dcr |= (1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
122 ctrl_outl(dcr, DMABRGCR);
123}
124
125int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*),
126 void *data)
127{
128 if ((dmairq > 9) || !handler)
129 return -ENOENT;
130 if (dmabrg_handlers[dmairq].handler)
131 return -EBUSY;
132
133 dmabrg_handlers[dmairq].handler = handler;
134 dmabrg_handlers[dmairq].data = data;
135
136 dmabrg_enable_irq(dmairq);
137 return 0;
138}
139EXPORT_SYMBOL_GPL(dmabrg_request_irq);
140
141void dmabrg_free_irq(unsigned int dmairq)
142{
143 if (likely(dmairq < 10)) {
144 dmabrg_disable_irq(dmairq);
145 dmabrg_handlers[dmairq].handler = NULL;
146 dmabrg_handlers[dmairq].data = NULL;
147 }
148}
149EXPORT_SYMBOL_GPL(dmabrg_free_irq);
150
151static int __init dmabrg_init(void)
152{
153 unsigned long or;
154 int ret;
155
156 dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler),
157 GFP_KERNEL);
158 if (!dmabrg_handlers)
159 return -ENOMEM;
160
161#ifdef CONFIG_SH_DMA
162 /* request DMAC channel 0 before anyone else can get it */
163 ret = request_dma(0, "DMAC 0 (DMABRG)");
164 if (ret < 0)
165 printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n");
166#endif
167
168 ctrl_outl(0, DMABRGCR);
169 ctrl_outl(0, DMACHCR0);
170 ctrl_outl(0x94000000, DMARSRA); /* enable DMABRG in DMAC 0 */
171
172 /* enable DMABRG mode, enable the DMAC */
173 or = ctrl_inl(DMAOR);
174 ctrl_outl(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);
175
176 ret = request_irq(DMABRGI0, dmabrg_irq, IRQF_DISABLED,
177 "DMABRG USB address error", NULL);
178 if (ret)
179 goto out0;
180
181 ret = request_irq(DMABRGI1, dmabrg_irq, IRQF_DISABLED,
182 "DMABRG Transfer End", NULL);
183 if (ret)
184 goto out1;
185
186 ret = request_irq(DMABRGI2, dmabrg_irq, IRQF_DISABLED,
187 "DMABRG Transfer Half", NULL);
188 if (ret == 0)
189 return ret;
190
191 free_irq(DMABRGI1, 0);
192out1: free_irq(DMABRGI0, 0);
193out0: kfree(dmabrg_handlers);
194 return ret;
195}
196subsys_initcall(dmabrg_init);
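
The header comment of this new file documents the whole DMABRG API. A minimal
client sketch, using BRGIRQID 2 (HAC0/SSI0 playback "all done" per the table
above); the surrounding driver code is hypothetical, only the dmabrg_* calls
come from this patch:

	#include <linux/completion.h>
	#include <asm/dmabrg.h>

	static DECLARE_COMPLETION(play_done);

	/* Runs from the DMABRG interrupt handler: keep it short. */
	static void play_done_handler(void *data)
	{
		complete((struct completion *)data);
	}

	static int hac0_start(void)
	{
		/* -EBUSY if the id is already claimed, -ENOENT if invalid */
		return dmabrg_request_irq(2, play_done_handler, &play_done);
	}

	static void hac0_stop(void)
	{
		dmabrg_free_irq(2);
	}
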
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 350972ae9410..965fa2572b23 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -2,9 +2,8 @@
2# Makefile for the Linux/SuperH SH-2A backends. 2# Makefile for the Linux/SuperH SH-2A backends.
3# 3#
4 4
5obj-y := common.o probe.o 5obj-y := common.o probe.o opcode_helper.o
6 6
7common-y += $(addprefix ../sh2/, ex.o) 7common-y += $(addprefix ../sh2/, ex.o entry.o)
8common-y += $(addprefix ../sh2/, entry.o)
9 8
10obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o 9obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
new file mode 100644
index 000000000000..9704b7926d8b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
@@ -0,0 +1,55 @@
1/*
2 * arch/sh/kernel/cpu/sh2a/opcode_helper.c
3 *
4 * Helper for the SH-2A 32-bit opcodes.
5 *
6 * Copyright (C) 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/kernel.h>
13#include <asm/system.h>
14
15/*
16 * Instructions on SH are generally fixed at 16-bits, however, SH-2A
17 * introduces some 32-bit instructions. Since there are no real
18 * constraints on their use (and they can be mixed and matched), we need
19 * to check the instruction encoding to work out if it's a true 32-bit
20 * instruction or not.
21 *
22 * Presently, 32-bit opcodes have only slight variations in what the
23 * actual encoding looks like in the first-half of the instruction, which
24 * makes it fairly straightforward to differentiate from the 16-bit ones.
25 *
26 * First 16-bits of encoding Used by
27 *
28 * 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
29 * fmov.s, movu.b, movu.w
30 *
31 * 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
32 * bandnot.b, bldnot.b, bor.b, bornot.b,
33 * bxor.b
34 *
35 * 0000nnnniiii0000 movi20
36 * 0000nnnniiii0001 movi20s
37 */
38unsigned int instruction_size(unsigned int insn)
39{
40 /* Look for the common cases */
41 switch ((insn & 0xf00f)) {
42 case 0x0000: /* movi20 */
43 case 0x0001: /* movi20s */
44 case 0x3001: /* 32-bit mov/fmov/movu variants */
45 return 4;
46 }
47
48 /* And the special cases.. */
49 switch ((insn & 0xf08f)) {
50 case 0x3009: /* 32-bit b*.b bit operations */
51 return 4;
52 }
53
54 return 2;
55}
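
As a spot check of the encoding table above, instruction_size() classifies
these first words as follows (the sample opcodes are worked out from the
switch cases, not taken from the patch):

	instruction_size(0x0100);	/* 0000 0001 0000 0000: movi20      -> 4 */
	instruction_size(0x3019);	/* 0011 0000 0001 1001: b*.b bit op -> 4 */
	instruction_size(0xe100);	/* mov #0, r1: ordinary 16-bit op   -> 2 */
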
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 426f6db01fc6..f455c3509789 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -18,6 +18,7 @@ int __init detect_cpu_and_cache_system(void)
18{ 18{
19 /* Just SH7206 for now .. */ 19 /* Just SH7206 for now .. */
20 current_cpu_data.type = CPU_SH7206; 20 current_cpu_data.type = CPU_SH7206;
21 current_cpu_data.flags |= CPU_HAS_OP32;
21 22
22 current_cpu_data.dcache.ways = 4; 23 current_cpu_data.dcache.ways = 4;
23 current_cpu_data.dcache.way_incr = (1 << 11); 24 current_cpu_data.dcache.way_incr = (1 << 11);
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index f3e827f29a46..832c0b4a1e6c 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/entry.S 2 * arch/sh/kernel/cpu/sh3/entry.S
3 * 3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka 4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
5 * Copyright (C) 2003 - 2006 Paul Mundt 5 * Copyright (C) 2003 - 2006 Paul Mundt
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index ba3082d640b5..2b2a9e02fb75 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh3/ex.S 2 * arch/sh/kernel/cpu/sh3/ex.S
3 * 3 *
4 * The SH-3 exception vector table. 4 * The SH-3 and SH-4 exception vector table.
5 5
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka 6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 - 2006 Paul Mundt 7 * Copyright (C) 2003 - 2006 Paul Mundt
@@ -9,7 +9,6 @@
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details. 11 * for more details.
12 *
13 */ 12 */
14#include <linux/linkage.h> 13#include <linux/linkage.h>
15 14
@@ -36,8 +35,12 @@ ENTRY(exception_handling_table)
36 .long exception_error ! address error load 35 .long exception_error ! address error load
37 .long exception_error ! address error store /* 100 */ 36 .long exception_error ! address error store /* 100 */
38#endif 37#endif
39 .long exception_error ! fpu_exception /* 120 */ 38#if defined(CONFIG_SH_FPU)
40 .long exception_error /* 140 */ 39 .long do_fpu_error /* 120 */
40#else
41 .long exception_error /* 120 */
42#endif
43 .long exception_error /* 140 */
41 .long system_call ! Unconditional Trap /* 160 */ 44 .long system_call ! Unconditional Trap /* 160 */
42 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ 45 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
43 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ 46 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
@@ -55,4 +58,4 @@ ENTRY(user_break_point_trap)
55 * away offsets can be manually inserted in to their appropriate 58 * away offsets can be manually inserted in to their appropriate
56 * location via set_exception_table_{evt,vec}(). 59 * location via set_exception_table_{evt,vec}().
57 */ 60 */
58 .balign 4096,0,4096 61 .balign 4096,0,4096
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 19ca68c71884..8add10bd8268 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the Linux/SuperH SH-4 backends. 2# Makefile for the Linux/SuperH SH-4 backends.
3# 3#
4 4
5obj-y := ex.o probe.o common.o 5obj-y := probe.o common.o
6common-y += $(addprefix ../sh3/, entry.o) 6common-y += $(addprefix ../sh3/, entry.o ex.o)
7 7
8obj-$(CONFIG_SH_FPU) += fpu.o 8obj-$(CONFIG_SH_FPU) += fpu.o
9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o 9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
10 10
11# CPU subtype setup 11# CPU subtype setup
diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S
deleted file mode 100644
index ac8ab57413cc..000000000000
--- a/arch/sh/kernel/cpu/sh4/ex.S
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * arch/sh/kernel/cpu/sh4/ex.S
3 *
4 * The SH-4 exception vector table.
5
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 - 2006 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15
16 .align 2
17 .data
18
19ENTRY(exception_handling_table)
20 .long exception_error /* 000 */
21 .long exception_error
22#if defined(CONFIG_MMU)
23 .long tlb_miss_load /* 040 */
24 .long tlb_miss_store
25 .long initial_page_write
26 .long tlb_protection_violation_load
27 .long tlb_protection_violation_store
28 .long address_error_load
29 .long address_error_store /* 100 */
30#else
31 .long exception_error ! tlb miss load /* 040 */
32 .long exception_error ! tlb miss store
33 .long exception_error ! initial page write
34 .long exception_error ! tlb prot violation load
35 .long exception_error ! tlb prot violation store
36 .long exception_error ! address error load
37 .long exception_error ! address error store /* 100 */
38#endif
39#if defined(CONFIG_SH_FPU)
40 .long do_fpu_error /* 120 */
41#else
42 .long exception_error /* 120 */
43#endif
44 .long exception_error /* 140 */
45 .long system_call ! Unconditional Trap /* 160 */
46 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
47 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
48ENTRY(nmi_slot)
49#if defined (CONFIG_KGDB_NMI)
50 .long debug_enter /* 1C0 */ ! Allow trap to debugger
51#else
52 .long exception_none /* 1C0 */ ! Not implemented yet
53#endif
54ENTRY(user_break_point_trap)
55 .long break_point_trap /* 1E0 */
56
57 /*
58 * Pad the remainder of the table out, exceptions residing in far
59 * away offsets can be manually inserted in to their appropriate
60 * location via set_exception_table_{evt,vec}().
61 */
62 .balign 4096,0,4096
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index 7624677f6628..d61dd599169f 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/signal.h> 17#include <linux/signal.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/system.h>
19#include <asm/io.h> 20#include <asm/io.h>
20 21
21/* The PR (precision) bit in the FP Status Register must be clear when 22/* The PR (precision) bit in the FP Status Register must be clear when
@@ -265,7 +266,7 @@ ieee_fpe_handler (struct pt_regs *regs)
265 nextpc = regs->pr; 266 nextpc = regs->pr;
266 finsn = *(unsigned short *) (regs->pc + 2); 267 finsn = *(unsigned short *) (regs->pc + 2);
267 } else { 268 } else {
268 nextpc = regs->pc + 2; 269 nextpc = regs->pc + instruction_size(insn);
269 finsn = insn; 270 finsn = insn;
270 } 271 }
271 272
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh73180.c b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c
index 2fa5cb2ae68d..6d5ba373a75e 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh73180.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh4/clock-sh73180.c 2 * arch/sh/kernel/cpu/sh4a/clock-sh73180.c
3 * 3 *
4 * SH73180 support for the clock framework 4 * SH73180 support for the clock framework
5 * 5 *
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 1707a213f0cf..7adc4f16e95a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh4/clock-sh7343.c 2 * arch/sh/kernel/cpu/sh4a/clock-sh7343.c
3 * 3 *
4 * SH7343/SH7722 support for the clock framework 4 * SH7343/SH7722 support for the clock framework
5 * 5 *
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index c8694bac6477..8e236062c721 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh4/clock-sh7770.c 2 * arch/sh/kernel/cpu/sh4a/clock-sh7770.c
3 * 3 *
4 * SH7770 support for the clock framework 4 * SH7770 support for the clock framework
5 * 5 *
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index 9e6a216750c8..01f3da619d3d 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/cpu/sh4/clock-sh7780.c 2 * arch/sh/kernel/cpu/sh4a/clock-sh7780.c
3 * 3 *
4 * SH7780 support for the clock framework 4 * SH7780 support for the clock framework
5 * 5 *
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 329b3f3051de..6b4f5748d0be 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -15,9 +15,12 @@
15#include <linux/pm.h> 15#include <linux/pm.h>
16#include <linux/kallsyms.h> 16#include <linux/kallsyms.h>
17#include <linux/kexec.h> 17#include <linux/kexec.h>
18#include <asm/kdebug.h> 18#include <linux/kdebug.h>
19#include <linux/tick.h>
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
22#include <asm/pgalloc.h>
23#include <asm/system.h>
21#include <asm/ubc.h> 24#include <asm/ubc.h>
22 25
23static int hlt_counter; 26static int hlt_counter;
@@ -58,12 +61,15 @@ void cpu_idle(void)
58 if (!idle) 61 if (!idle)
59 idle = default_idle; 62 idle = default_idle;
60 63
64 tick_nohz_stop_sched_tick();
61 while (!need_resched()) 65 while (!need_resched())
62 idle(); 66 idle();
67 tick_nohz_restart_sched_tick();
63 68
64 preempt_enable_no_resched(); 69 preempt_enable_no_resched();
65 schedule(); 70 schedule();
66 preempt_disable(); 71 preempt_disable();
72 check_pgt_cache();
67 } 73 }
68} 74}
69 75
@@ -495,9 +501,9 @@ asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
495 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 501 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
496 502
497 /* Rewind */ 503 /* Rewind */
498 regs->pc -= 2; 504 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
499 505
500 if (notify_die(DIE_TRAP, regs, regs->tra & 0xff, 506 if (notify_die(DIE_TRAP, "debug trap", regs, 0, regs->tra & 0xff,
501 SIGTRAP) == NOTIFY_STOP) 507 SIGTRAP) == NOTIFY_STOP)
502 return; 508 return;
503 509
@@ -514,9 +520,9 @@ asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
514 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 520 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
515 521
516 /* Rewind */ 522 /* Rewind */
517 regs->pc -= 2; 523 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
518 524
519 if (notify_die(DIE_TRAP, regs, TRAPA_BUG_OPCODE & 0xff, 525 if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
520 SIGTRAP) == NOTIFY_STOP) 526 SIGTRAP) == NOTIFY_STOP)
521 return; 527 return;
522 528
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 477d2a854fc4..c27729135935 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -431,7 +431,7 @@ const char *get_cpu_subtype(struct sh_cpuinfo *c)
431/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */ 431/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
432static const char *cpu_flags[] = { 432static const char *cpu_flags[] = {
433 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr", 433 "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
434 "ptea", "llsc", "l2", NULL 434 "ptea", "llsc", "l2", "op32", NULL
435}; 435};
436 436
437static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c) 437static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index fa91641c1f62..c1cfcb9f047c 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -58,8 +58,6 @@ EXPORT_SYMBOL(__udelay);
58EXPORT_SYMBOL(__ndelay); 58EXPORT_SYMBOL(__ndelay);
59EXPORT_SYMBOL(__const_udelay); 59EXPORT_SYMBOL(__const_udelay);
60 60
61EXPORT_SYMBOL(__div64_32);
62
63#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name) 61#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
64 62
65/* These symbols are generated by the compiler itself */ 63/* These symbols are generated by the compiler itself */
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index eb0191c374b6..b32c35a7c0a3 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -23,7 +23,7 @@
23#include <linux/personality.h> 23#include <linux/personality.h>
24#include <linux/binfmts.h> 24#include <linux/binfmts.h>
25#include <linux/freezer.h> 25#include <linux/freezer.h>
26 26#include <asm/system.h>
27#include <asm/ucontext.h> 27#include <asm/ucontext.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/pgtable.h> 29#include <asm/pgtable.h>
@@ -500,7 +500,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
500 } 500 }
501 /* fallthrough */ 501 /* fallthrough */
502 case -ERESTARTNOINTR: 502 case -ERESTARTNOINTR:
503 regs->pc -= 2; 503 regs->pc -= instruction_size(
504 ctrl_inw(regs->pc - 4));
505 break;
504 } 506 }
505 } else { 507 } else {
506 /* gUSA handling */ 508 /* gUSA handling */
@@ -516,7 +518,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
516 regs->regs[15] = regs->regs[1]; 518 regs->regs[15] = regs->regs[1];
517 if (regs->pc < regs->regs[0]) 519 if (regs->pc < regs->regs[0])
518 /* Go to rewind point #1 */ 520 /* Go to rewind point #1 */
519 regs->pc = regs->regs[0] + offset - 2; 521 regs->pc = regs->regs[0] + offset -
522 instruction_size(ctrl_inw(regs->pc-4));
520 } 523 }
521#ifdef CONFIG_PREEMPT 524#ifdef CONFIG_PREEMPT
522 local_irq_restore(flags); 525 local_irq_restore(flags);
@@ -600,9 +603,9 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
600 regs->regs[0] == -ERESTARTSYS || 603 regs->regs[0] == -ERESTARTSYS ||
601 regs->regs[0] == -ERESTARTNOINTR) { 604 regs->regs[0] == -ERESTARTNOINTR) {
602 regs->regs[0] = save_r0; 605 regs->regs[0] = save_r0;
603 regs->pc -= 2; 606 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
604 } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { 607 } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
605 regs->pc -= 2; 608 regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
606 regs->regs[3] = __NR_restart_syscall; 609 regs->regs[3] = __NR_restart_syscall;
607 } 610 }
608 } 611 }
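
The process.c hunks above, these signal.c hunks, and the traps.c hunks below
all replace a fixed "regs->pc -= 2" with a size-aware rewind, since the SH-2A
opcode being restarted may be 32 bits wide. Distilled to the shared idiom:

	/* Back up over the opcode that raised the trap:
	 * 2 bytes for a 16-bit opcode, 4 for an SH-2A 32-bit one. */
	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
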
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 4bdd2f83535d..d41e561be20e 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -17,7 +17,7 @@
17/* 17/*
18 * Save stack-backtrace addresses into a stack_trace buffer. 18 * Save stack-backtrace addresses into a stack_trace buffer.
19 */ 19 */
20void save_stack_trace(struct stack_trace *trace, struct task_struct *task) 20void save_stack_trace(struct stack_trace *trace)
21{ 21{
22 unsigned long *sp = (unsigned long *)current_stack_pointer; 22 unsigned long *sp = (unsigned long *)current_stack_pointer;
23 23
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
index 38fc8cd3ea3a..4357d1a6358f 100644
--- a/arch/sh/kernel/syscalls.S
+++ b/arch/sh/kernel/syscalls.S
@@ -354,3 +354,4 @@ ENTRY(sys_call_table)
354 .long sys_move_pages 354 .long sys_move_pages
355 .long sys_getcpu 355 .long sys_getcpu
356 .long sys_epoll_pwait 356 .long sys_epoll_pwait
357 .long sys_utimensat /* 320 */
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index d47e775962e9..a3a67d151e52 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka 4 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> 5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
6 * Copyright (C) 2002 - 2006 Paul Mundt 6 * Copyright (C) 2002 - 2007 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> 7 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
8 * 8 *
9 * Some code taken from i386 version. 9 * Some code taken from i386 version.
@@ -15,6 +15,7 @@
15#include <linux/profile.h> 15#include <linux/profile.h>
16#include <linux/timex.h> 16#include <linux/timex.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/clockchips.h>
18#include <asm/clock.h> 19#include <asm/clock.h>
19#include <asm/rtc.h> 20#include <asm/rtc.h>
20#include <asm/timer.h> 21#include <asm/timer.h>
@@ -38,6 +39,14 @@ static int null_rtc_set_time(const time_t secs)
38 return 0; 39 return 0;
39} 40}
40 41
42/*
43 * Null high precision timer functions for systems lacking one.
44 */
45static cycle_t null_hpt_read(void)
46{
47 return 0;
48}
49
41void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; 50void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
42int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; 51int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
43 52
@@ -101,6 +110,7 @@ int do_settimeofday(struct timespec *tv)
101EXPORT_SYMBOL(do_settimeofday); 110EXPORT_SYMBOL(do_settimeofday);
102#endif /* !CONFIG_GENERIC_TIME */ 111#endif /* !CONFIG_GENERIC_TIME */
103 112
113#ifndef CONFIG_GENERIC_CLOCKEVENTS
104/* last time the RTC clock got updated */ 114/* last time the RTC clock got updated */
105static long last_rtc_update; 115static long last_rtc_update;
106 116
@@ -138,6 +148,7 @@ void handle_timer_tick(void)
138 last_rtc_update = xtime.tv_sec - 600; 148 last_rtc_update = xtime.tv_sec - 600;
139 } 149 }
140} 150}
151#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
141 152
142#ifdef CONFIG_PM 153#ifdef CONFIG_PM
143int timer_suspend(struct sys_device *dev, pm_message_t state) 154int timer_suspend(struct sys_device *dev, pm_message_t state)
@@ -168,136 +179,58 @@ static struct sysdev_class timer_sysclass = {
168 .resume = timer_resume, 179 .resume = timer_resume,
169}; 180};
170 181
171#ifdef CONFIG_NO_IDLE_HZ 182static int __init timer_init_sysfs(void)
172static int timer_dyn_tick_enable(void)
173{ 183{
174 struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; 184 int ret = sysdev_class_register(&timer_sysclass);
175 unsigned long flags; 185 if (ret != 0)
176 int ret = -ENODEV; 186 return ret;
177
178 if (dyn_tick) {
179 spin_lock_irqsave(&dyn_tick->lock, flags);
180 ret = 0;
181 if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
182 ret = dyn_tick->enable();
183
184 if (ret == 0)
185 dyn_tick->state |= DYN_TICK_ENABLED;
186 }
187 spin_unlock_irqrestore(&dyn_tick->lock, flags);
188 }
189 187
190 return ret; 188 sys_timer->dev.cls = &timer_sysclass;
189 return sysdev_register(&sys_timer->dev);
191} 190}
191device_initcall(timer_init_sysfs);
192 192
193static int timer_dyn_tick_disable(void) 193void (*board_time_init)(void);
194{
195 struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
196 unsigned long flags;
197 int ret = -ENODEV;
198
199 if (dyn_tick) {
200 spin_lock_irqsave(&dyn_tick->lock, flags);
201 ret = 0;
202 if (dyn_tick->state & DYN_TICK_ENABLED) {
203 ret = dyn_tick->disable();
204
205 if (ret == 0)
206 dyn_tick->state &= ~DYN_TICK_ENABLED;
207 }
208 spin_unlock_irqrestore(&dyn_tick->lock, flags);
209 }
210
211 return ret;
212}
213 194
214/* 195/*
215 * Reprogram the system timer for at least the calculated time interval. 196 * Shamelessly based on the MIPS and Sparc64 work.
216 * This function should be called from the idle thread with IRQs disabled,
217 * immediately before sleeping.
218 */ 197 */
219void timer_dyn_reprogram(void) 198static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
220{ 199unsigned long sh_hpt_frequency = 0;
221 struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; 200
222 unsigned long next, seq, flags; 201#define NSEC_PER_CYC_SHIFT 10
223 202
224 if (!dyn_tick) 203struct clocksource clocksource_sh = {
225 return; 204 .name = "SuperH",
226 205 .rating = 200,
227 spin_lock_irqsave(&dyn_tick->lock, flags); 206 .mask = CLOCKSOURCE_MASK(32),
228 if (dyn_tick->state & DYN_TICK_ENABLED) { 207 .read = null_hpt_read,
229 next = next_timer_interrupt(); 208 .shift = 16,
230 do { 209 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
231 seq = read_seqbegin(&xtime_lock); 210};
232 dyn_tick->reprogram(next - jiffies);
233 } while (read_seqretry(&xtime_lock, seq));
234 }
235 spin_unlock_irqrestore(&dyn_tick->lock, flags);
236}
237 211
238static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf) 212static void __init init_sh_clocksource(void)
239{ 213{
240 return sprintf(buf, "%i\n", 214 if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
241 (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1); 215 return;
242}
243 216
244static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf, 217 clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
245 size_t count) 218 clocksource_sh.shift);
246{
247 unsigned int enable = simple_strtoul(buf, NULL, 2);
248 219
249 if (enable) 220 timer_ticks_per_nsec_quotient =
250 timer_dyn_tick_enable(); 221 clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
251 else
252 timer_dyn_tick_disable();
253 222
254 return count; 223 clocksource_register(&clocksource_sh);
255} 224}
256static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
257 225
258/* 226#ifdef CONFIG_GENERIC_TIME
259 * dyntick=enable|disable 227unsigned long long sched_clock(void)
260 */
261static char dyntick_str[4] __initdata = "";
262
263static int __init dyntick_setup(char *str)
264{ 228{
265 if (str) 229 unsigned long long ticks = clocksource_sh.read();
266 strlcpy(dyntick_str, str, sizeof(dyntick_str)); 230 return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
267 return 1;
268} 231}
269
270__setup("dyntick=", dyntick_setup);
271#endif
272
273static int __init timer_init_sysfs(void)
274{
275 int ret = sysdev_class_register(&timer_sysclass);
276 if (ret != 0)
277 return ret;
278
279 sys_timer->dev.cls = &timer_sysclass;
280 ret = sysdev_register(&sys_timer->dev);
281
282#ifdef CONFIG_NO_IDLE_HZ
283 if (ret == 0 && sys_timer->dyn_tick) {
284 ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
285
286 /*
287 * Turn on dynamic tick after calibrate delay
288 * for correct bogomips
289 */
290 if (ret == 0 && dyntick_str[0] == 'e')
291 ret = timer_dyn_tick_enable();
292 }
293#endif 232#endif
294 233
295 return ret;
296}
297device_initcall(timer_init_sysfs);
298
299void (*board_time_init)(void);
300
301void __init time_init(void) 234void __init time_init(void)
302{ 235{
303 if (board_time_init) 236 if (board_time_init)
@@ -316,10 +249,15 @@ void __init time_init(void)
316 sys_timer = get_sys_timer(); 249 sys_timer = get_sys_timer();
317 printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); 250 printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
318 251
319#ifdef CONFIG_NO_IDLE_HZ 252 if (sys_timer->ops->read)
320 if (sys_timer->dyn_tick) 253 clocksource_sh.read = sys_timer->ops->read;
321 spin_lock_init(&sys_timer->dyn_tick->lock); 254
322#endif 255 init_sh_clocksource();
256
257 if (sh_hpt_frequency)
258 printk("Using %lu.%03lu MHz high precision timer.\n",
259 ((sh_hpt_frequency + 500) / 1000) / 1000,
260 ((sh_hpt_frequency + 500) / 1000) % 1000);
323 261
324#if defined(CONFIG_SH_KGDB) 262#if defined(CONFIG_SH_KGDB)
325 /* 263 /*
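
The clocksource conversion above is the standard fixed-point scheme:
clocksource_hz2mult(hz, shift) computes roughly (NSEC_PER_SEC << shift) / hz,
and sched_clock() multiplies raw ticks by the 10-bit quotient. Worked through
for a hypothetical 10 MHz high precision timer (the frequency is assumed for
the arithmetic only):

	/* timer_ticks_per_nsec_quotient = clocksource_hz2mult(10000000, 10)
	 *                               = (1000000000ULL << 10) / 10000000
	 *                               = 102400
	 * sched_clock() after 12345 ticks = (12345 * 102400) >> 10
	 *                                 = 12345 * 100 ns = 1234500 ns
	 */
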
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index d9e3151c891e..2d997e2a5b6c 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support 2 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
3 * 3 *
4 * Copyright (C) 2005 Paul Mundt 4 * Copyright (C) 2005 - 2007 Paul Mundt
5 * 5 *
6 * TMU handling code hacked out of arch/sh/kernel/time.c 6 * TMU handling code hacked out of arch/sh/kernel/time.c
7 * 7 *
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/seqlock.h> 20#include <linux/seqlock.h>
21#include <linux/clockchips.h>
21#include <asm/timer.h> 22#include <asm/timer.h>
22#include <asm/rtc.h> 23#include <asm/rtc.h>
23#include <asm/io.h> 24#include <asm/io.h>
@@ -25,56 +26,75 @@
25#include <asm/clock.h> 26#include <asm/clock.h>
26 27
27#define TMU_TOCR_INIT 0x00 28#define TMU_TOCR_INIT 0x00
28#define TMU0_TCR_INIT 0x0020 29#define TMU_TCR_INIT 0x0020
29#define TMU_TSTR_INIT 1
30 30
31#define TMU0_TCR_CALIB 0x0000 31static int tmu_timer_start(void)
32{
33 ctrl_outb(ctrl_inb(TMU_TSTR) | 0x3, TMU_TSTR);
34 return 0;
35}
32 36
33static unsigned long tmu_timer_get_offset(void) 37static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
34{ 38{
35 int count; 39 ctrl_outl(interval, TMU0_TCNT);
36 static int count_p = 0x7fffffff; /* for the first call after boot */
37 static unsigned long jiffies_p = 0;
38 40
39 /* 41 /*
40 * cache volatile jiffies temporarily; we have IRQs turned off. 42 * TCNT reloads from TCOR on underflow, clear it if we don't
43 * intend to auto-reload
41 */ 44 */
42 unsigned long jiffies_t; 45 if (reload)
46 ctrl_outl(interval, TMU0_TCOR);
47 else
48 ctrl_outl(0, TMU0_TCOR);
43 49
44 /* timer count may underflow right here */ 50 tmu_timer_start();
45 count = ctrl_inl(TMU0_TCNT); /* read the latched count */ 51}
46 52
47 jiffies_t = jiffies; 53static int tmu_timer_stop(void)
54{
55 ctrl_outb(ctrl_inb(TMU_TSTR) & ~0x3, TMU_TSTR);
56 return 0;
57}
48 58
49 /* 59static cycle_t tmu_timer_read(void)
50 * avoiding timer inconsistencies (they are rare, but they happen)... 60{
51 * there is one kind of problem that must be avoided here: 61 return ~ctrl_inl(TMU1_TCNT);
52 * 1. the timer counter underflows 62}
53 */ 63
64static int tmu_set_next_event(unsigned long cycles,
65 struct clock_event_device *evt)
66{
67 tmu0_timer_set_interval(cycles, 1);
68 return 0;
69}
54 70
55 if (jiffies_t == jiffies_p) { 71static void tmu_set_mode(enum clock_event_mode mode,
56 if (count > count_p) { 72 struct clock_event_device *evt)
57 /* the nutcase */ 73{
58 if (ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */ 74 switch (mode) {
59 count -= LATCH; 75 case CLOCK_EVT_MODE_PERIODIC:
60 } else { 76 ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
61 printk("%s (): hardware timer problem?\n", 77 break;
62 __FUNCTION__); 78 case CLOCK_EVT_MODE_ONESHOT:
63 } 79 ctrl_outl(0, TMU0_TCOR);
64 } 80 break;
65 } else 81 case CLOCK_EVT_MODE_UNUSED:
66 jiffies_p = jiffies_t; 82 case CLOCK_EVT_MODE_SHUTDOWN:
67 83 break;
68 count_p = count; 84 }
69
70 count = ((LATCH-1) - count) * TICK_SIZE;
71 count = (count + LATCH/2) / LATCH;
72
73 return count;
74} 85}
75 86
87static struct clock_event_device tmu0_clockevent = {
88 .name = "tmu0",
89 .shift = 32,
90 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
91 .set_mode = tmu_set_mode,
92 .set_next_event = tmu_set_next_event,
93};
94
76static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) 95static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
77{ 96{
97 struct clock_event_device *evt = &tmu0_clockevent;
78 unsigned long timer_status; 98 unsigned long timer_status;
79 99
80 /* Clear UNF bit */ 100 /* Clear UNF bit */
@@ -82,72 +102,76 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
82 timer_status &= ~0x100; 102 timer_status &= ~0x100;
83 ctrl_outw(timer_status, TMU0_TCR); 103 ctrl_outw(timer_status, TMU0_TCR);
84 104
85 /* 105 evt->event_handler(evt);
86 * Here we are in the timer irq handler. We just have irqs locally
87 * disabled but we don't know if the timer_bh is running on the other
88 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
89 * the irq version of write_lock because as just said we have irq
90 * locally disabled. -arca
91 */
92 write_seqlock(&xtime_lock);
93 handle_timer_tick();
94 write_sequnlock(&xtime_lock);
95 106
96 return IRQ_HANDLED; 107 return IRQ_HANDLED;
97} 108}
98 109
99static struct irqaction tmu_irq = { 110static struct irqaction tmu0_irq = {
100 .name = "timer", 111 .name = "periodic timer",
101 .handler = tmu_timer_interrupt, 112 .handler = tmu_timer_interrupt,
102 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 113 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
103 .mask = CPU_MASK_NONE, 114 .mask = CPU_MASK_NONE,
104}; 115};
105 116
106static void tmu_clk_init(struct clk *clk) 117static void tmu0_clk_init(struct clk *clk)
107{ 118{
108 u8 divisor = TMU0_TCR_INIT & 0x7; 119 u8 divisor = TMU_TCR_INIT & 0x7;
109 ctrl_outw(TMU0_TCR_INIT, TMU0_TCR); 120 ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
110 clk->rate = clk->parent->rate / (4 << (divisor << 1)); 121 clk->rate = clk->parent->rate / (4 << (divisor << 1));
111} 122}
112 123
113static void tmu_clk_recalc(struct clk *clk) 124static void tmu0_clk_recalc(struct clk *clk)
114{ 125{
115 u8 divisor = ctrl_inw(TMU0_TCR) & 0x7; 126 u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
116 clk->rate = clk->parent->rate / (4 << (divisor << 1)); 127 clk->rate = clk->parent->rate / (4 << (divisor << 1));
117} 128}
118 129
119static struct clk_ops tmu_clk_ops = { 130static struct clk_ops tmu0_clk_ops = {
120 .init = tmu_clk_init, 131 .init = tmu0_clk_init,
121 .recalc = tmu_clk_recalc, 132 .recalc = tmu0_clk_recalc,
122}; 133};
123 134
124static struct clk tmu0_clk = { 135static struct clk tmu0_clk = {
125 .name = "tmu0_clk", 136 .name = "tmu0_clk",
126 .ops = &tmu_clk_ops, 137 .ops = &tmu0_clk_ops,
127}; 138};
128 139
129static int tmu_timer_start(void) 140static void tmu1_clk_init(struct clk *clk)
130{ 141{
131 ctrl_outb(TMU_TSTR_INIT, TMU_TSTR); 142 u8 divisor = TMU_TCR_INIT & 0x7;
132 return 0; 143 ctrl_outw(divisor, TMU1_TCR);
144 clk->rate = clk->parent->rate / (4 << (divisor << 1));
133} 145}
134 146
135static int tmu_timer_stop(void) 147static void tmu1_clk_recalc(struct clk *clk)
136{ 148{
137 ctrl_outb(0, TMU_TSTR); 149 u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
138 return 0; 150 clk->rate = clk->parent->rate / (4 << (divisor << 1));
139} 151}
140 152
153static struct clk_ops tmu1_clk_ops = {
154 .init = tmu1_clk_init,
155 .recalc = tmu1_clk_recalc,
156};
157
158static struct clk tmu1_clk = {
159 .name = "tmu1_clk",
160 .ops = &tmu1_clk_ops,
161};
162
141static int tmu_timer_init(void) 163static int tmu_timer_init(void)
142{ 164{
143 unsigned long interval; 165 unsigned long interval;
166 unsigned long frequency;
144 167
145 setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq); 168 setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);
146 169
147 tmu0_clk.parent = clk_get(NULL, "module_clk"); 170 tmu0_clk.parent = clk_get(NULL, "module_clk");
171 tmu1_clk.parent = clk_get(NULL, "module_clk");
148 172
149 /* Start TMU0 */
150 tmu_timer_stop(); 173 tmu_timer_stop();
174
151#if !defined(CONFIG_CPU_SUBTYPE_SH7300) && \ 175#if !defined(CONFIG_CPU_SUBTYPE_SH7300) && \
152 !defined(CONFIG_CPU_SUBTYPE_SH7760) && \ 176 !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
153 !defined(CONFIG_CPU_SUBTYPE_SH7785) 177 !defined(CONFIG_CPU_SUBTYPE_SH7785)
@@ -155,15 +179,29 @@ static int tmu_timer_init(void)
155#endif 179#endif
156 180
157 clk_register(&tmu0_clk); 181 clk_register(&tmu0_clk);
182 clk_register(&tmu1_clk);
158 clk_enable(&tmu0_clk); 183 clk_enable(&tmu0_clk);
184 clk_enable(&tmu1_clk);
159 185
160 interval = (clk_get_rate(&tmu0_clk) + HZ / 2) / HZ; 186 frequency = clk_get_rate(&tmu0_clk);
161 printk(KERN_INFO "Interval = %ld\n", interval); 187 interval = (frequency + HZ / 2) / HZ;
162 188
163 ctrl_outl(interval, TMU0_TCOR); 189 sh_hpt_frequency = clk_get_rate(&tmu1_clk);
164 ctrl_outl(interval, TMU0_TCNT); 190 ctrl_outl(~0, TMU1_TCNT);
191 ctrl_outl(~0, TMU1_TCOR);
165 192
166 tmu_timer_start(); 193 tmu0_timer_set_interval(interval, 1);
194
195 tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
196 tmu0_clockevent.shift);
197 tmu0_clockevent.max_delta_ns =
198 clockevent_delta2ns(-1, &tmu0_clockevent);
199 tmu0_clockevent.min_delta_ns =
200 clockevent_delta2ns(1, &tmu0_clockevent);
201
202 tmu0_clockevent.cpumask = cpumask_of_cpu(0);
203
204 clockevents_register_device(&tmu0_clockevent);
167 205
168 return 0; 206 return 0;
169} 207}
@@ -172,9 +210,7 @@ struct sys_timer_ops tmu_timer_ops = {
172 .init = tmu_timer_init, 210 .init = tmu_timer_init,
173 .start = tmu_timer_start, 211 .start = tmu_timer_start,
174 .stop = tmu_timer_stop, 212 .stop = tmu_timer_stop,
175#ifndef CONFIG_GENERIC_TIME 213 .read = tmu_timer_read,
176 .get_offset = tmu_timer_get_offset,
177#endif
178}; 214};
179 215
180struct sys_timer tmu_timer = { 216struct sys_timer tmu_timer = {
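
The clockevent half in tmu_timer_init() uses the same mult/shift idea in the
other direction: div_sc(frequency, NSEC_PER_SEC, 32) is
(frequency << 32) / 10^9, and clockevent_delta2ns() inverts it to bound the
programmable range. For a hypothetical 8 MHz TMU0 input clock:

	/* mult = div_sc(8000000, NSEC_PER_SEC, 32)
	 *      = (8000000ULL << 32) / 1000000000 = 34359738
	 * min_delta_ns = clockevent_delta2ns(1, &tmu0_clockevent)
	 *              ~= (1ULL << 32) / 34359738 = 125 ns   (one cycle)
	 * max_delta_ns = clockevent_delta2ns(0xffffffff, &tmu0_clockevent)
	 *              ~= 4294967295 * 125 ns    ~= 536.9 s  (full 32-bit TCNT)
	 */
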
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 7b40f0ff3dfc..3a197649cd83 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -20,10 +20,10 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/debug_locks.h> 22#include <linux/debug_locks.h>
23#include <linux/kdebug.h>
23#include <linux/limits.h> 24#include <linux/limits.h>
24#include <asm/system.h> 25#include <asm/system.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include <asm/kdebug.h>
27 27
28#ifdef CONFIG_SH_KGDB 28#ifdef CONFIG_SH_KGDB
29#include <asm/kgdb.h> 29#include <asm/kgdb.h>
@@ -76,20 +76,6 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
76 } 76 }
77} 77}
78 78
79ATOMIC_NOTIFIER_HEAD(shdie_chain);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 return atomic_notifier_chain_register(&shdie_chain, nb);
84}
85EXPORT_SYMBOL(register_die_notifier);
86
87int unregister_die_notifier(struct notifier_block *nb)
88{
89 return atomic_notifier_chain_unregister(&shdie_chain, nb);
90}
91EXPORT_SYMBOL(unregister_die_notifier);
92
93static DEFINE_SPINLOCK(die_lock); 79static DEFINE_SPINLOCK(die_lock);
94 80
95void die(const char * str, struct pt_regs * regs, long err) 81void die(const char * str, struct pt_regs * regs, long err)
@@ -505,7 +491,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
505 simple: 491 simple:
506 ret = handle_unaligned_ins(instruction,regs); 492 ret = handle_unaligned_ins(instruction,regs);
507 if (ret==0) 493 if (ret==0)
508 regs->pc += 2; 494 regs->pc += instruction_size(instruction);
509 return ret; 495 return ret;
510} 496}
511#endif /* CONFIG_CPU_SH2A */ 497#endif /* CONFIG_CPU_SH2A */
@@ -682,7 +668,7 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
682 668
683 err = do_fpu_inst(inst, regs); 669 err = do_fpu_inst(inst, regs);
684 if (!err) { 670 if (!err) {
685 regs->pc += 2; 671 regs->pc += instruction_size(inst);
686 return; 672 return;
687 } 673 }
688 /* not a FPU inst. */ 674 /* not a FPU inst. */
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 7b0f66f03319..e146bafcd14f 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/kernel/vsyscall.c 2 * arch/sh/kernel/vsyscall/vsyscall.c
3 * 3 *
4 * Copyright (C) 2006 Paul Mundt 4 * Copyright (C) 2006 Paul Mundt
5 * 5 *
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index 351714694d6d..f3ddd2133e6f 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -24,9 +24,10 @@ inline void __const_udelay(unsigned long xloops)
24 __asm__("dmulu.l %0, %2\n\t" 24 __asm__("dmulu.l %0, %2\n\t"
25 "sts mach, %0" 25 "sts mach, %0"
26 : "=r" (xloops) 26 : "=r" (xloops)
27 : "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy) 27 : "0" (xloops),
28 "r" (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy)
28 : "macl", "mach"); 29 : "macl", "mach");
29 __delay(xloops * HZ); 30 __delay(xloops);
30} 31}
31 32
32void __udelay(unsigned long usecs) 33void __udelay(unsigned long usecs)
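
The delay fix folds the HZ factor into the widening multiply: dmulu.l yields
the high 32 bits of a 32x32 product, so the loop count is computed in one
step instead of being truncated first and scaled by HZ afterwards. With
xloops pre-scaled by roughly 2^32 / 10^6 per microsecond (the usual __udelay
convention; the sample values below are illustrative):

	/* loops = ((u64)xloops * (HZ * loops_per_jiffy)) >> 32
	 *       ~= usecs * loops_per_jiffy * HZ / 1000000
	 * e.g. usecs = 100, HZ = 100, loops_per_jiffy = 49152:
	 *      loops ~= 100 * 49152 * 100 / 1000000 = 491
	 */
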
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 12f3d394dc28..253346d7b316 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -218,6 +218,9 @@ endmenu
218 218
219menu "Memory management options" 219menu "Memory management options"
220 220
221config QUICKLIST
222 def_bool y
223
221config MMU 224config MMU
222 bool "Support for memory management hardware" 225 bool "Support for memory management hardware"
223 depends on !CPU_SH2 226 depends on !CPU_SH2
@@ -300,6 +303,10 @@ config NODES_SHIFT
300config ARCH_FLATMEM_ENABLE 303config ARCH_FLATMEM_ENABLE
301 def_bool y 304 def_bool y
302 305
306config MAX_ACTIVE_REGIONS
307 int
308 default "1"
309
303config ARCH_POPULATES_NODE_MAP 310config ARCH_POPULATES_NODE_MAP
304 def_bool y 311 def_bool y
305 312
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 0ecc117cade4..9207da67ff8a 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -15,7 +15,7 @@
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/hardirq.h> 16#include <linux/hardirq.h>
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <asm/kdebug.h> 18#include <linux/kdebug.h>
19#include <asm/system.h> 19#include <asm/system.h>
20#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4d030988b368..8fe223a890ed 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -67,6 +67,8 @@ void show_mem(void)
67 printk("%d slab pages\n", slab); 67 printk("%d slab pages\n", slab);
68 printk("%d pages shared\n", shared); 68 printk("%d pages shared\n", shared);
69 printk("%d pages swap cached\n", cached); 69 printk("%d pages swap cached\n", cached);
70 printk(KERN_INFO "Total of %ld pages in page table cache\n",
71 quicklist_total_size());
70} 72}
71 73
72#ifdef CONFIG_MMU 74#ifdef CONFIG_MMU
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 29d7cfd1c970..6773ed76e414 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -28,7 +28,7 @@ int foo(void)
28 DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid)); 28 DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid));
29 DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid)); 29 DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid));
30 DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid)); 30 DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid));
31 /* DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); */ 31 /* DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); */
32 DEFINE(ASIZ_task_uid, sizeof(current->uid)); 32 DEFINE(ASIZ_task_uid, sizeof(current->uid));
33 DEFINE(ASIZ_task_gid, sizeof(current->gid)); 33 DEFINE(ASIZ_task_gid, sizeof(current->gid));
34 DEFINE(ASIZ_task_euid, sizeof(current->euid)); 34 DEFINE(ASIZ_task_euid, sizeof(current->euid));
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index dc652f210290..d0fde36395b4 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/kdebug.h> 20#include <linux/kdebug.h>
21 21
22#include <asm/smp.h>
22#include <asm/delay.h> 23#include <asm/delay.h>
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/ptrace.h> 25#include <asm/ptrace.h>
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 354cc6b70530..b9c0f307a8fa 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -320,21 +320,7 @@ source "crypto/Kconfig"
320 320
321source "lib/Kconfig" 321source "lib/Kconfig"
322 322
323menu "SCSI support" 323source "drivers/scsi/Kconfig"
324depends on BROKEN
325
326config SCSI
327 tristate "SCSI support"
328
329# This gives us free_dma, which scsi.c wants.
330config GENERIC_ISA_DMA
331 bool
332 depends on SCSI
333 default y
334
335source "arch/um/Kconfig.scsi"
336
337endmenu
338 324
339source "drivers/md/Kconfig" 325source "drivers/md/Kconfig"
340 326
diff --git a/arch/um/Kconfig.scsi b/arch/um/Kconfig.scsi
deleted file mode 100644
index c291c942b1a8..000000000000
--- a/arch/um/Kconfig.scsi
+++ /dev/null
@@ -1,58 +0,0 @@
1comment "SCSI support type (disk, tape, CD-ROM)"
2 depends on SCSI
3
4config BLK_DEV_SD
5 tristate "SCSI disk support"
6 depends on SCSI
7
8config SD_EXTRA_DEVS
9 int "Maximum number of SCSI disks that can be loaded as modules"
10 depends on BLK_DEV_SD
11 default "40"
12
13config CHR_DEV_ST
14 tristate "SCSI tape support"
15 depends on SCSI
16
17config BLK_DEV_SR
18 tristate "SCSI CD-ROM support"
19 depends on SCSI
20
21config BLK_DEV_SR_VENDOR
22 bool "Enable vendor-specific extensions (for SCSI CDROM)"
23 depends on BLK_DEV_SR
24
25config SR_EXTRA_DEVS
26 int "Maximum number of CDROM devices that can be loaded as modules"
27 depends on BLK_DEV_SR
28 default "2"
29
30config CHR_DEV_SG
31 tristate "SCSI generic support"
32 depends on SCSI
33
34comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
35 depends on SCSI
36
37#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
38config SCSI_DEBUG_QUEUES
39 bool "Enable extra checks in new queueing code"
40 depends on SCSI
41
42#fi
43config SCSI_MULTI_LUN
44 bool "Probe all LUNs on each SCSI device"
45 depends on SCSI
46
47config SCSI_CONSTANTS
48 bool "Verbose SCSI error reporting (kernel size +=12K)"
49 depends on SCSI
50
51config SCSI_LOGGING
52 bool "SCSI logging facility"
53 depends on SCSI
54
55config SCSI_DEBUG
56 tristate "SCSI debugging host simulator (EXPERIMENTAL)"
57 depends on SCSI
58
diff --git a/arch/um/include/sysdep-i386/archsetjmp.h b/arch/um/include/sysdep-i386/archsetjmp.h
index 11bafab669e9..0f312085ce1d 100644
--- a/arch/um/include/sysdep-i386/archsetjmp.h
+++ b/arch/um/include/sysdep-i386/archsetjmp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/i386/include/klibc/archsetjmp.h 2 * arch/um/include/sysdep-i386/archsetjmp.h
3 */ 3 */
4 4
5#ifndef _KLIBC_ARCHSETJMP_H 5#ifndef _KLIBC_ARCHSETJMP_H
diff --git a/arch/um/include/sysdep-x86_64/archsetjmp.h b/arch/um/include/sysdep-x86_64/archsetjmp.h
index 9a5e1a6ec800..2af8f12ca161 100644
--- a/arch/um/include/sysdep-x86_64/archsetjmp.h
+++ b/arch/um/include/sysdep-x86_64/archsetjmp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/x86_64/include/klibc/archsetjmp.h 2 * arch/um/include/sysdep-x86_64/archsetjmp.h
3 */ 3 */
4 4
5#ifndef _KLIBC_ARCHSETJMP_H 5#ifndef _KLIBC_ARCHSETJMP_H
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index ef36facd8fe9..a96ae1a0610e 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -178,20 +178,23 @@ int start_uml_skas(void)
178 178
179int external_pid_skas(struct task_struct *task) 179int external_pid_skas(struct task_struct *task)
180{ 180{
181#warning Need to look up userspace_pid by cpu 181 /* FIXME: Need to look up userspace_pid by cpu */
182 return(userspace_pid[0]); 182 return(userspace_pid[0]);
183} 183}
184 184
185int thread_pid_skas(struct task_struct *task) 185int thread_pid_skas(struct task_struct *task)
186{ 186{
187#warning Need to look up userspace_pid by cpu 187 /* FIXME: Need to look up userspace_pid by cpu */
188 return(userspace_pid[0]); 188 return(userspace_pid[0]);
189} 189}
190 190
191void kill_off_processes_skas(void) 191void kill_off_processes_skas(void)
192{ 192{
193 if(proc_mm) 193 if(proc_mm)
194#warning need to loop over userspace_pids in kill_off_processes_skas 194 /*
195 * FIXME: need to loop over userspace_pids in
196 * kill_off_processes_skas
197 */
195 os_kill_ptraced_process(userspace_pid[0], 1); 198 os_kill_ptraced_process(userspace_pid[0], 1);
196 else { 199 else {
197 struct task_struct *p; 200 struct task_struct *p;
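The hunks above trade #warning directives for FIXME comments. The practical difference: #warning is a preprocessor directive, so every build of these files printed the reminder as a compiler diagnostic, while a FIXME comment records the same debt silently and greppably. A minimal stand-alone illustration (hypothetical file, not part of this commit):

/* warn-demo.c -- hypothetical example, not from this commit */
#include <stdio.h>

#warning this directive makes the compiler print a diagnostic on every build

/* FIXME: this comment carries the same message but stays silent */
int main(void)
{
	printf("compiles either way; only #warning reaches the build log\n");
	return 0;
}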
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 92a7b59120d6..2d9d2ca39299 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -239,6 +239,7 @@ out:
239 return ok; 239 return ok;
240} 240}
241 241
242#ifdef UML_CONFIG_MODE_TT
242void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int)) 243void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
243{ 244{
244 int flags = 0, pages; 245 int flags = 0, pages;
@@ -260,6 +261,7 @@ void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
260 "errno = %d\n", errno); 261 "errno = %d\n", errno);
261 } 262 }
262} 263}
264#endif
263 265
264void init_new_thread_signals(void) 266void init_new_thread_signals(void)
265{ 267{
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index 8e490fff3d47..5c8946320799 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -68,7 +68,7 @@ static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
68 int err, pid = mm_idp->u.pid; 68 int err, pid = mm_idp->u.pid;
69 69
70 if(proc_mm) 70 if(proc_mm)
71#warning Need to look up userspace_pid by cpu 71 /* FIXME: Need to look up userspace_pid by cpu */
72 pid = userspace_pid[0]; 72 pid = userspace_pid[0];
73 73
74 multi_count++; 74 multi_count++;
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 5c088a55396c..6a0e466d01e3 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -586,7 +586,7 @@ void switch_mm_skas(struct mm_id *mm_idp)
586{ 586{
587 int err; 587 int err;
588 588
589#warning need cpu pid in switch_mm_skas 589 /* FIXME: need cpu pid in switch_mm_skas */
590 if(proc_mm){ 590 if(proc_mm){
591 err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, 591 err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
592 mm_idp->u.mm_fd); 592 mm_idp->u.mm_fd);
diff --git a/arch/v850/kernel/asm-offsets.c b/arch/v850/kernel/asm-offsets.c
index 24f291369070..cee5c3142d41 100644
--- a/arch/v850/kernel/asm-offsets.c
+++ b/arch/v850/kernel/asm-offsets.c
@@ -29,7 +29,7 @@ int main (void)
29 DEFINE (TASK_PTRACE, offsetof (struct task_struct, ptrace)); 29 DEFINE (TASK_PTRACE, offsetof (struct task_struct, ptrace));
30 DEFINE (TASK_BLOCKED, offsetof (struct task_struct, blocked)); 30 DEFINE (TASK_BLOCKED, offsetof (struct task_struct, blocked));
31 DEFINE (TASK_THREAD, offsetof (struct task_struct, thread)); 31 DEFINE (TASK_THREAD, offsetof (struct task_struct, thread));
32 DEFINE (TASK_THREAD_INFO, offsetof (struct task_struct, thread_info)); 32 DEFINE (TASK_THREAD_INFO, offsetof (struct task_struct, stack));
33 DEFINE (TASK_MM, offsetof (struct task_struct, mm)); 33 DEFINE (TASK_MM, offsetof (struct task_struct, mm));
34 DEFINE (TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); 34 DEFINE (TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
35 DEFINE (TASK_PID, offsetof (struct task_struct, pid)); 35 DEFINE (TASK_PID, offsetof (struct task_struct, pid));
diff --git a/arch/v850/kernel/entry.S b/arch/v850/kernel/entry.S
index 8bc521ca081f..e4327a8d6bcd 100644
--- a/arch/v850/kernel/entry.S
+++ b/arch/v850/kernel/entry.S
@@ -523,7 +523,7 @@ END(ret_from_trap)
523 523
524 524
525/* This is the initial entry point for a new child thread, with an appropriate 525/* This is the initial entry point for a new child thread, with an appropriate
526 stack in place that makes it look the the child is in the middle of an 526 stack in place that makes it look that the child is in the middle of an
527 syscall. This function is actually `returned to' from switch_thread 527 syscall. This function is actually `returned to' from switch_thread
528 (copy_thread makes ret_from_fork the return address in each new thread's 528 (copy_thread makes ret_from_fork the return address in each new thread's
529 saved context). */ 529 saved context). */
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 4d582589fa89..d8bfe315471c 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1413,7 +1413,7 @@ static void ack_apic_level(unsigned int irq)
1413 1413
1414 /* 1414 /*
1415 * We must acknowledge the irq before we move it or the acknowledge will 1415 * We must acknowledge the irq before we move it or the acknowledge will
1416 * not propogate properly. 1416 * not propagate properly.
1417 */ 1417 */
1418 ack_APIC_irq(); 1418 ack_APIC_irq();
1419 1419
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 3bc30d2c13d3..3eaceac32481 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -32,7 +32,7 @@ atomic_t irq_err_count;
32 */ 32 */
33static inline void stack_overflow_check(struct pt_regs *regs) 33static inline void stack_overflow_check(struct pt_regs *regs)
34{ 34{
35 u64 curbase = (u64) current->thread_info; 35 u64 curbase = (u64)task_stack_page(current);
36 static unsigned long warned = -60*HZ; 36 static unsigned long warned = -60*HZ;
37 37
38 if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE && 38 if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
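This hunk tracks the rename of task_struct's thread_info pointer to ->stack: the stack base is now obtained through the task_stack_page() accessor rather than by reading current->thread_info directly. A minimal user-space sketch of the idea, with toy types and an illustrative THREAD_SIZE (not the kernel's definitions):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192			/* illustrative, not x86_64's */

struct thread_info { unsigned long flags; };	/* toy stand-in */
struct task { void *stack; };			/* ->thread_info became ->stack */

/* model of task_stack_page(): reach the stack base through the renamed
 * member instead of dereferencing ->thread_info */
static void *task_stack_page(const struct task *t)
{
	return t->stack;
}

/* shape of the updated stack_overflow_check(): warn when the stack
 * pointer sits inside this task's stack but within 128 bytes of the
 * thread_info that lives at its base */
static int near_overflow(const struct task *t, uintptr_t sp)
{
	uintptr_t base = (uintptr_t)task_stack_page(t);

	return sp >= base && sp <= base + THREAD_SIZE &&
	       sp <  base + sizeof(struct thread_info) + 128;
}

int main(void)
{
	static unsigned char stack[THREAD_SIZE];
	struct task t = { stack };

	printf("sp at base+64:   %s\n",
	       near_overflow(&t, (uintptr_t)stack + 64) ? "overflow" : "ok");
	printf("sp at base+4096: %s\n",
	       near_overflow(&t, (uintptr_t)stack + 4096) ? "overflow" : "ok");
	return 0;
}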
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 442169640e45..a14375dd5425 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -720,9 +720,11 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
720 720
721 switch (action) { 721 switch (action) {
722 case CPU_ONLINE: 722 case CPU_ONLINE:
723 case CPU_ONLINE_FROZEN:
723 mce_create_device(cpu); 724 mce_create_device(cpu);
724 break; 725 break;
725 case CPU_DEAD: 726 case CPU_DEAD:
727 case CPU_DEAD_FROZEN:
726 mce_remove_device(cpu); 728 mce_remove_device(cpu);
727 break; 729 break;
728 } 730 }
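This hunk, and the matching ones later in mce_amd.c, vsyscall.c, topology.c, the cpufreq core and stats code, coretemp, ehca and the block softirq notifier, teach CPU hotplug callbacks about the new *_FROZEN event variants raised while tasks are frozen for suspend/resume. A user-space sketch of the resulting callback shape, with illustrative constants and stub device helpers (the kernel's real values encode FROZEN as a flag bit in linux/notifier.h):

#include <stdio.h>

enum cpu_action {
	CPU_ONLINE, CPU_ONLINE_FROZEN,
	CPU_DEAD,   CPU_DEAD_FROZEN,
};

static int create_device(unsigned int cpu)
{
	printf("create per-CPU device for cpu %u\n", cpu);
	return 0;
}

static void remove_device(unsigned int cpu)
{
	printf("remove per-CPU device for cpu %u\n", cpu);
}

/* the _FROZEN variants take the same branches as their ordinary
 * counterparts, so per-CPU state is rebuilt across suspend/resume */
static int cpu_callback(enum cpu_action action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		return create_device(cpu);
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		remove_device(cpu);
		break;
	}
	return 0;
}

int main(void)
{
	cpu_callback(CPU_ONLINE, 1);
	cpu_callback(CPU_ONLINE_FROZEN, 1);	/* resume path, same work */
	cpu_callback(CPU_DEAD_FROZEN, 1);
	return 0;
}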
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d0bd5d66e103..03356e64f9c8 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -654,9 +654,11 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
654 654
655 switch (action) { 655 switch (action) {
656 case CPU_ONLINE: 656 case CPU_ONLINE:
657 case CPU_ONLINE_FROZEN:
657 threshold_create_device(cpu); 658 threshold_create_device(cpu);
658 break; 659 break;
659 case CPU_DEAD: 660 case CPU_DEAD:
661 case CPU_DEAD_FROZEN:
660 threshold_remove_device(cpu); 662 threshold_remove_device(cpu);
661 break; 663 break;
662 default: 664 default:
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index dc32cef96195..51d4c6fa88c8 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -327,7 +327,7 @@ static int __cpuinit
327cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 327cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
328{ 328{
329 long cpu = (long)arg; 329 long cpu = (long)arg;
330 if (action == CPU_ONLINE) 330 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
331 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); 331 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
332 return NOTIFY_DONE; 332 return NOTIFY_DONE;
333} 333}
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index b256cfbef344..698079b3a336 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -70,7 +70,7 @@ int main(void)
70 DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); 70 DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
71 DEFINE(TASK_PID, offsetof (struct task_struct, pid)); 71 DEFINE(TASK_PID, offsetof (struct task_struct, pid));
72 DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); 72 DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
73 DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info)); 73 DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
74 DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); 74 DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
75 BLANK(); 75 BLANK();
76 76
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index ca76f071666e..f5319d78c876 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/xtensa/pci-dma.c 2 * arch/xtensa/kernel/pci-dma.c
3 * 3 *
4 * DMA coherent memory allocation. 4 * DMA coherent memory allocation.
5 * 5 *
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 640aa839d63f..109e91b91ffa 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1306,7 +1306,7 @@ static void as_exit_queue(elevator_t *e)
1306 struct as_data *ad = e->elevator_data; 1306 struct as_data *ad = e->elevator_data;
1307 1307
1308 del_timer_sync(&ad->antic_timer); 1308 del_timer_sync(&ad->antic_timer);
1309 kblockd_flush(); 1309 kblockd_flush_work(&ad->antic_work);
1310 1310
1311 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); 1311 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
1312 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); 1312 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
diff --git a/block/genhd.c b/block/genhd.c
index b5664440896c..93a2cf654597 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -213,6 +213,59 @@ struct gendisk *get_gendisk(dev_t dev, int *part)
213 return kobj ? to_disk(kobj) : NULL; 213 return kobj ? to_disk(kobj) : NULL;
214} 214}
215 215
216/*
217 * print a full list of all partitions - intended for places where the root
218 * filesystem can't be mounted and thus to give the victim some idea of what

219 * went wrong
220 */
221void __init printk_all_partitions(void)
222{
223 int n;
224 struct gendisk *sgp;
225
226 mutex_lock(&block_subsys_lock);
227 /* For each block device... */
228 list_for_each_entry(sgp, &block_subsys.list, kobj.entry) {
229 char buf[BDEVNAME_SIZE];
230 /*
231 * Don't show empty devices or things that have been suppressed
232 */
233 if (get_capacity(sgp) == 0 ||
234 (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
235 continue;
236
237 /*
238 * Note, unlike /proc/partitions, I am showing the numbers in
239 * hex - the same format as the root= option takes.
240 */
241 printk("%02x%02x %10llu %s",
242 sgp->major, sgp->first_minor,
243 (unsigned long long)get_capacity(sgp) >> 1,
244 disk_name(sgp, 0, buf));
245 if (sgp->driverfs_dev != NULL &&
246 sgp->driverfs_dev->driver != NULL)
247 printk(" driver: %s\n",
248 sgp->driverfs_dev->driver->name);
249 else
250 printk(" (driver?)\n");
251
252 /* now show the partitions */
253 for (n = 0; n < sgp->minors - 1; ++n) {
254 if (sgp->part[n] == NULL)
255 continue;
256 if (sgp->part[n]->nr_sects == 0)
257 continue;
258 printk(" %02x%02x %10llu %s\n",
259 sgp->major, n + 1 + sgp->first_minor,
260 (unsigned long long)sgp->part[n]->nr_sects >> 1,
261 disk_name(sgp, n + 1, buf));
262 } /* partition subloop */
263 } /* Block device loop */
264
265 mutex_unlock(&block_subsys_lock);
266 return;
267}
268
216#ifdef CONFIG_PROC_FS 269#ifdef CONFIG_PROC_FS
217/* iterator */ 270/* iterator */
218static void *part_start(struct seq_file *part, loff_t *pos) 271static void *part_start(struct seq_file *part, loff_t *pos)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d99d402953a3..17e188973428 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1704,7 +1704,7 @@ EXPORT_SYMBOL(blk_stop_queue);
1704 * on a queue, such as calling the unplug function after a timeout. 1704 * on a queue, such as calling the unplug function after a timeout.
1705 * A block device may call blk_sync_queue to ensure that any 1705 * A block device may call blk_sync_queue to ensure that any
1706 * such activity is cancelled, thus allowing it to release resources 1706 * such activity is cancelled, thus allowing it to release resources
1707 * the the callbacks might use. The caller must already have made sure 1707 * that the callbacks might use. The caller must already have made sure
1708 * that its ->make_request_fn will not re-add plugging prior to calling 1708 * that its ->make_request_fn will not re-add plugging prior to calling
1709 * this function. 1709 * this function.
1710 * 1710 *
@@ -1712,7 +1712,6 @@ EXPORT_SYMBOL(blk_stop_queue);
1712void blk_sync_queue(struct request_queue *q) 1712void blk_sync_queue(struct request_queue *q)
1713{ 1713{
1714 del_timer_sync(&q->unplug_timer); 1714 del_timer_sync(&q->unplug_timer);
1715 kblockd_flush();
1716} 1715}
1717EXPORT_SYMBOL(blk_sync_queue); 1716EXPORT_SYMBOL(blk_sync_queue);
1718 1717
@@ -3508,7 +3507,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
3508 * If a CPU goes away, splice its entries to the current CPU 3507 * If a CPU goes away, splice its entries to the current CPU
3509 * and trigger a run of the softirq 3508 * and trigger a run of the softirq
3510 */ 3509 */
3511 if (action == CPU_DEAD) { 3510 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
3512 int cpu = (unsigned long) hcpu; 3511 int cpu = (unsigned long) hcpu;
3513 3512
3514 local_irq_disable(); 3513 local_irq_disable();
@@ -3632,11 +3631,11 @@ int kblockd_schedule_work(struct work_struct *work)
3632 3631
3633EXPORT_SYMBOL(kblockd_schedule_work); 3632EXPORT_SYMBOL(kblockd_schedule_work);
3634 3633
3635void kblockd_flush(void) 3634void kblockd_flush_work(struct work_struct *work)
3636{ 3635{
3637 flush_workqueue(kblockd_workqueue); 3636 cancel_work_sync(work);
3638} 3637}
3639EXPORT_SYMBOL(kblockd_flush); 3638EXPORT_SYMBOL(kblockd_flush_work);
3640 3639
3641int __init blk_dev_init(void) 3640int __init blk_dev_init(void)
3642{ 3641{
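The kblockd change above swaps flush_workqueue(), which waits for every item on the shared kblockd queue, for cancel_work_sync() on one specific work item, so a queue being torn down no longer stalls behind unrelated devices' work. A compile-and-run sketch of the two idioms, using stub bodies in place of the real kernel calls:

#include <stdio.h>

struct work_struct { const char *name; };	/* toy stand-in */

static void flush_workqueue_stub(void)
{
	puts("wait for EVERY item queued on kblockd");
}

static void cancel_work_sync(struct work_struct *work)
{
	printf("cancel %s and wait only for it\n", work->name);
}

/* old idiom: blk_sync_queue()/as_exit_queue() drained the whole
 * shared workqueue */
static void old_kblockd_flush(void)
{
	flush_workqueue_stub();
}

/* new idiom: target exactly one work item */
static void kblockd_flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}

int main(void)
{
	struct work_struct unplug = { "this queue's unplug work" };

	old_kblockd_flush();
	kblockd_flush_work(&unplug);
	return 0;
}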
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 620e14cabdc6..4ca0ab3448d9 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -271,7 +271,7 @@ config CRYPTO_SERPENT
271 271
272 Keys are allowed to be from 0 to 256 bits in length, in steps 272 Keys are allowed to be from 0 to 256 bits in length, in steps
273 of 8 bits. Also includes the 'Tnepres' algorithm, a reversed 273 of 8 bits. Also includes the 'Tnepres' algorithm, a reversed
274 variant of Serpent for compatibility with old kerneli code. 274 variant of Serpent for compatibility with old kerneli.org code.
275 275
276 See also: 276 See also:
277 <http://www.cl.cam.ac.uk/~rja14/serpent.html> 277 <http://www.cl.cam.ac.uk/~rja14/serpent.html>
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index f8c63410bcbf..52b23471dd69 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = {
29 [PM_SUSPEND_ON] = ACPI_STATE_S0, 29 [PM_SUSPEND_ON] = ACPI_STATE_S0,
30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, 30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
31 [PM_SUSPEND_MEM] = ACPI_STATE_S3, 31 [PM_SUSPEND_MEM] = ACPI_STATE_S3,
32 [PM_SUSPEND_DISK] = ACPI_STATE_S4,
33 [PM_SUSPEND_MAX] = ACPI_STATE_S5 32 [PM_SUSPEND_MAX] = ACPI_STATE_S5
34}; 33};
35 34
@@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
94 do_suspend_lowlevel(); 93 do_suspend_lowlevel();
95 break; 94 break;
96 95
97 case PM_SUSPEND_DISK:
98 if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
99 status = acpi_enter_sleep_state(acpi_state);
100 break;
101 case PM_SUSPEND_MAX:
102 acpi_power_off();
103 break;
104
105 default: 96 default:
106 return -EINVAL; 97 return -EINVAL;
107 } 98 }
@@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state)
157 suspend_state_t states[] = { 148 suspend_state_t states[] = {
158 [1] = PM_SUSPEND_STANDBY, 149 [1] = PM_SUSPEND_STANDBY,
159 [3] = PM_SUSPEND_MEM, 150 [3] = PM_SUSPEND_MEM,
160 [4] = PM_SUSPEND_DISK,
161 [5] = PM_SUSPEND_MAX 151 [5] = PM_SUSPEND_MAX
162 }; 152 };
163 153
164 if (acpi_state < 6 && states[acpi_state]) 154 if (acpi_state < 6 && states[acpi_state])
165 return pm_suspend(states[acpi_state]); 155 return pm_suspend(states[acpi_state]);
156 if (acpi_state == 4)
157 return hibernate();
166 return -EINVAL; 158 return -EINVAL;
167} 159}
168 160
@@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = {
189 .finish = acpi_pm_finish, 181 .finish = acpi_pm_finish,
190}; 182};
191 183
184#ifdef CONFIG_SOFTWARE_SUSPEND
185static int acpi_hibernation_prepare(void)
186{
187 return acpi_sleep_prepare(ACPI_STATE_S4);
188}
189
190static int acpi_hibernation_enter(void)
191{
192 acpi_status status = AE_OK;
193 unsigned long flags = 0;
194
195 ACPI_FLUSH_CPU_CACHE();
196
197 local_irq_save(flags);
198 acpi_enable_wakeup_device(ACPI_STATE_S4);
199 /* This shouldn't return. If it returns, we have a problem */
200 status = acpi_enter_sleep_state(ACPI_STATE_S4);
201 local_irq_restore(flags);
202
203 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
204}
205
206static void acpi_hibernation_finish(void)
207{
208 acpi_leave_sleep_state(ACPI_STATE_S4);
209 acpi_disable_wakeup_device(ACPI_STATE_S4);
210
211 /* reset firmware waking vector */
212 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
213
214 if (init_8259A_after_S1) {
215 printk("Broken toshiba laptop -> kicking interrupts\n");
216 init_8259A(0);
217 }
218}
219
220static struct hibernation_ops acpi_hibernation_ops = {
221 .prepare = acpi_hibernation_prepare,
222 .enter = acpi_hibernation_enter,
223 .finish = acpi_hibernation_finish,
224};
225#endif /* CONFIG_SOFTWARE_SUSPEND */
226
192/* 227/*
193 * Toshiba fails to preserve interrupts over S1, reinitialization 228 * Toshiba fails to preserve interrupts over S1, reinitialization
194 * of 8259 is needed after S1 resume. 229 * of 8259 is needed after S1 resume.
@@ -227,14 +262,18 @@ int __init acpi_sleep_init(void)
227 sleep_states[i] = 1; 262 sleep_states[i] = 1;
228 printk(" S%d", i); 263 printk(" S%d", i);
229 } 264 }
230 if (i == ACPI_STATE_S4) {
231 if (sleep_states[i])
232 acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
233 }
234 } 265 }
235 printk(")\n"); 266 printk(")\n");
236 267
237 pm_set_ops(&acpi_pm_ops); 268 pm_set_ops(&acpi_pm_ops);
269
270#ifdef CONFIG_SOFTWARE_SUSPEND
271 if (sleep_states[ACPI_STATE_S4])
272 hibernation_set_ops(&acpi_hibernation_ops);
273#else
274 sleep_states[ACPI_STATE_S4] = 0;
275#endif
276
238 return 0; 277 return 0;
239} 278}
240 279
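With PM_SUSPEND_DISK removed, suspend-to-disk no longer travels through pm_suspend(); the ACPI code instead registers a hibernation_ops table and the core drives it from hibernate(). A simplified user-space model of that ops-table flow (the real structure and sequencing in kernel/power are richer):

#include <stdio.h>

struct hibernation_ops {
	int  (*prepare)(void);
	int  (*enter)(void);
	void (*finish)(void);
};

static const struct hibernation_ops *hib_ops;

static void hibernation_set_ops(const struct hibernation_ops *ops)
{
	hib_ops = ops;
}

static int hibernate(void)
{
	int err;

	if (!hib_ops)
		return -1;		/* platform cannot do S4 */
	err = hib_ops->prepare();
	if (err)
		return err;
	err = hib_ops->enter();	/* on real hardware this should not return */
	hib_ops->finish();		/* resume path: undo prepare() */
	return err;
}

/* toy platform callbacks standing in for the ACPI ones above */
static int toy_prepare(void) { puts("prepare S4"); return 0; }
static int toy_enter(void)   { puts("enter S4 (pretend)"); return 0; }
static void toy_finish(void) { puts("leave S4, reset wakeup vector"); }

static const struct hibernation_ops toy_ops = {
	.prepare = toy_prepare,
	.enter   = toy_enter,
	.finish  = toy_finish,
};

int main(void)
{
	hibernation_set_ops(&toy_ops);
	return hibernate();
}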
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 5a76e5be61d5..76b45f0b8341 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file,
60 state = simple_strtoul(str, NULL, 0); 60 state = simple_strtoul(str, NULL, 0);
61#ifdef CONFIG_SOFTWARE_SUSPEND 61#ifdef CONFIG_SOFTWARE_SUSPEND
62 if (state == 4) { 62 if (state == 4) {
63 error = pm_suspend(PM_SUSPEND_DISK); 63 error = hibernate();
64 goto Done; 64 goto Done;
65 } 65 }
66#endif 66#endif
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 45dbdc14915f..c7219663f2b9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -435,7 +435,7 @@ config PATA_OPTIDMA
435 help 435 help
436 This option enables DMA/PIO support for the later OPTi 436 This option enables DMA/PIO support for the later OPTi
437 controllers found on some old motherboards and in some 437 controllers found on some old motherboards and in some
438 latops 438 laptops.
439 439
440 If unsure, say N. 440 If unsure, say N.
441 441
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7950885d18e..fef87dd70d17 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
1316 spin_unlock_irqrestore(ap->lock, flags); 1316 spin_unlock_irqrestore(ap->lock, flags);
1317 1317
1318 DPRINTK("flush #1\n"); 1318 DPRINTK("flush #1\n");
1319 flush_workqueue(ata_wq); 1319 cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1320 1320
1321 /* 1321 /*
1322 * At this point, if a task is running, it's guaranteed to see 1322 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
1327 if (ata_msg_ctl(ap)) 1327 if (ata_msg_ctl(ap))
1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", 1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1329 __FUNCTION__); 1329 __FUNCTION__);
1330 flush_workqueue(ata_wq); 1330 cancel_work_sync(&ap->port_task.work);
1331 } 1331 }
1332 1332
1333 spin_lock_irqsave(ap->lock, flags); 1333 spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
6475 /* Flush hotplug task. The sequence is similar to 6475 /* Flush hotplug task. The sequence is similar to
6476 * ata_port_flush_task(). 6476 * ata_port_flush_task().
6477 */ 6477 */
6478 flush_workqueue(ata_aux_wq); 6478 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6479 cancel_delayed_work(&ap->hotplug_task); 6479 cancel_delayed_work(&ap->hotplug_task);
6480 flush_workqueue(ata_aux_wq); 6480 cancel_work_sync(&ap->hotplug_task.work);
6481 6481
6482 skip_eh: 6482 skip_eh:
6483 /* remove the associated SCSI host */ 6483 /* remove the associated SCSI host */
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e177c9533b6c..e1c0730a3b99 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -101,19 +101,6 @@ static void add_dr(struct device *dev, struct devres_node *node)
101 list_add_tail(&node->entry, &dev->devres_head); 101 list_add_tail(&node->entry, &dev->devres_head);
102} 102}
103 103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
110 * allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES 104#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, 105void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name) 106 const char *name)
@@ -128,6 +115,19 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
128} 115}
129EXPORT_SYMBOL_GPL(__devres_alloc); 116EXPORT_SYMBOL_GPL(__devres_alloc);
130#else 117#else
118/**
119 * devres_alloc - Allocate device resource data
120 * @release: Release function devres will be associated with
121 * @size: Allocation size
122 * @gfp: Allocation flags
123 *
124 * Allocate devres of @size bytes. The allocated area is zeroed, then
125 * associated with @release. The returned pointer can be passed to
126 * other devres_*() functions.
127 *
128 * RETURNS:
129 * Pointer to allocated devres on success, NULL on failure.
130 */
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) 131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{ 132{
133 struct devres *dr; 133 struct devres *dr;
@@ -416,7 +416,7 @@ static int release_nodes(struct device *dev, struct list_head *first,
416} 416}
417 417
418/** 418/**
419 * devres_release_all - Release all resources 419 * devres_release_all - Release all managed resources
420 * @dev: Device to release resources for 420 * @dev: Device to release resources for
421 * 421 *
422 * Release all resources associated with @dev. This function is 422 * Release all resources associated with @dev. This function is
@@ -600,7 +600,7 @@ static int devm_kzalloc_match(struct device *dev, void *res, void *data)
600} 600}
601 601
602/** 602/**
603 * devm_kzalloc - Managed kzalloc 603 * devm_kzalloc - Resource-managed kzalloc
604 * @dev: Device to allocate memory for 604 * @dev: Device to allocate memory for
605 * @size: Allocation size 605 * @size: Allocation size
606 * @gfp: Allocation gfp flags 606 * @gfp: Allocation gfp flags
@@ -628,7 +628,7 @@ void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
628EXPORT_SYMBOL_GPL(devm_kzalloc); 628EXPORT_SYMBOL_GPL(devm_kzalloc);
629 629
630/** 630/**
631 * devm_kfree - Managed kfree 631 * devm_kfree - Resource-managed kfree
632 * @dev: Device this memory belongs to 632 * @dev: Device this memory belongs to
633 * @p: Memory to free 633 * @p: Memory to free
634 * 634 *
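Beyond moving the devres_alloc() kernel-doc next to the non-debug definition, the hunks above spell out the devres idea: allocations are tied to a device and torn down in one sweep, most recent first. A user-space model of that lifecycle, which for brevity collapses devres_alloc() plus devres_add() into a single call (an assumption for the sketch, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*dr_release_t)(void *res);

struct devres {
	struct devres *next;
	dr_release_t release;
	/* zeroed payload follows */
};

struct device { struct devres *devres_head; };

static void *devres_alloc(struct device *dev, dr_release_t release, size_t size)
{
	struct devres *dr = calloc(1, sizeof(*dr) + size);

	if (!dr)
		return NULL;
	dr->release = release;
	dr->next = dev->devres_head;	/* newest first, released first */
	dev->devres_head = dr;
	return dr + 1;			/* payload, zeroed as in the kernel */
}

static void devres_release_all(struct device *dev)
{
	struct devres *dr = dev->devres_head;

	while (dr) {
		struct devres *next = dr->next;

		if (dr->release)
			dr->release(dr + 1);
		free(dr);
		dr = next;
	}
	dev->devres_head = NULL;
}

static void note_release(void *res) { printf("releasing %s\n", (char *)res); }

int main(void)
{
	struct device dev = { NULL };
	char *buf = devres_alloc(&dev, note_release, 32);

	strcpy(buf, "managed buffer");
	devres_release_all(&dev);	/* frees and runs release hooks */
	return 0;
}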
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eb84d9d44645..869ff8c00146 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -360,7 +360,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
360 * This function creates a simple platform device that requires minimal 360 * This function creates a simple platform device that requires minimal
361 * resource and memory management. Canned release function freeing 361 * resource and memory management. Canned release function freeing
362 * memory allocated for the device allows drivers using such devices 362 * memory allocated for the device allows drivers using such devices
363 * to be unloaded iwithout waiting for the last reference to the device 363 * to be unloaded without waiting for the last reference to the device
364 * to be dropped. 364 * to be dropped.
365 * 365 *
366 * This interface is primarily intended for use with legacy drivers 366 * This interface is primarily intended for use with legacy drivers
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 067a9e8bc377..8d8cdfec6529 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
126 126
127 switch (action) { 127 switch (action) {
128 case CPU_UP_PREPARE: 128 case CPU_UP_PREPARE:
129 case CPU_UP_PREPARE_FROZEN:
129 rc = topology_add_dev(cpu); 130 rc = topology_add_dev(cpu);
130 break; 131 break;
131 case CPU_UP_CANCELED: 132 case CPU_UP_CANCELED:
133 case CPU_UP_CANCELED_FROZEN:
132 case CPU_DEAD: 134 case CPU_DEAD:
135 case CPU_DEAD_FROZEN:
133 topology_remove_dev(cpu); 136 topology_remove_dev(cpu);
134 break; 137 break;
135 } 138 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7cc..18cdd8c77626 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
243 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 243 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
244 bvec->bv_page, bv_offs, size, IV); 244 bvec->bv_page, bv_offs, size, IV);
245 if (unlikely(transfer_result)) { 245 if (unlikely(transfer_result)) {
246 char *kaddr;
247
248 /* 246 /*
249 * The transfer failed, but we still write the data to 247 * The transfer failed, but we still write the data to
250 * keep prepare/commit calls balanced. 248 * keep prepare/commit calls balanced.
251 */ 249 */
252 printk(KERN_ERR "loop: transfer error block %llu\n", 250 printk(KERN_ERR "loop: transfer error block %llu\n",
253 (unsigned long long)index); 251 (unsigned long long)index);
254 kaddr = kmap_atomic(page, KM_USER0); 252 zero_user_page(page, offset, size, KM_USER0);
255 memset(kaddr + offset, 0, size);
256 kunmap_atomic(kaddr, KM_USER0);
257 } 253 }
258 flush_dcache_page(page); 254 flush_dcache_page(page);
259 ret = aops->commit_write(file, page, offset, 255 ret = aops->commit_write(file, page, offset,
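The loop.c hunk replaces the open-coded kmap_atomic()/memset()/kunmap_atomic() triple with the new zero_user_page() helper. A user-space model showing the equivalence, where the kmap slot argument is kept only for shape:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
typedef int km_type;			/* stand-in for KM_USER0 */

/* model of the new helper: zero `size` bytes at `offset` inside a page,
 * hiding the kmap/kunmap pair the old code spelled out */
static void zero_user_page(void *page, unsigned offset, unsigned size,
			   km_type km)
{
	(void)km;			/* kmap slot is irrelevant in user space */
	memset((char *)page + offset, 0, size);
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];

	memset(page, 0xff, sizeof(page));
	zero_user_page(page, 128, 256, 0);
	printf("page[127]=%u page[128]=%u page[384]=%u\n",
	       page[127], page[128], page[384]);
	return 0;
}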
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796bef78f..069ae39a9cd9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -366,20 +366,25 @@ static struct disk_attribute pid_attr = {
366 .show = pid_show, 366 .show = pid_show,
367}; 367};
368 368
369static void nbd_do_it(struct nbd_device *lo) 369static int nbd_do_it(struct nbd_device *lo)
370{ 370{
371 struct request *req; 371 struct request *req;
372 int ret;
372 373
373 BUG_ON(lo->magic != LO_MAGIC); 374 BUG_ON(lo->magic != LO_MAGIC);
374 375
375 lo->pid = current->pid; 376 lo->pid = current->pid;
376 sysfs_create_file(&lo->disk->kobj, &pid_attr.attr); 377 ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
378 if (ret) {
379 printk(KERN_ERR "nbd: sysfs_create_file failed!\n");
380 return ret;
381 }
377 382
378 while ((req = nbd_read_stat(lo)) != NULL) 383 while ((req = nbd_read_stat(lo)) != NULL)
379 nbd_end_request(req); 384 nbd_end_request(req);
380 385
381 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr); 386 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
382 return; 387 return 0;
383} 388}
384 389
385static void nbd_clear_que(struct nbd_device *lo) 390static void nbd_clear_que(struct nbd_device *lo)
@@ -569,7 +574,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
569 case NBD_DO_IT: 574 case NBD_DO_IT:
570 if (!lo->file) 575 if (!lo->file)
571 return -EINVAL; 576 return -EINVAL;
572 nbd_do_it(lo); 577 error = nbd_do_it(lo);
578 if (error)
579 return error;
573 /* on return tidy up in case we have a signal */ 580 /* on return tidy up in case we have a signal */
574 /* Forcibly shutdown the socket causing all listeners 581 /* Forcibly shutdown the socket causing all listeners
575 * to error 582 * to error
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 43d4ebcb3b44..a1512da32410 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -151,7 +151,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
151} 151}
152 152
153/* 153/*
154 * ->writepage to the the blockdev's mapping has to redirty the page so that the 154 * ->writepage to the blockdev's mapping has to redirty the page so that the
155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM 155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM
156 * won't try to (pointlessly) write the page again for a while. 156 * won't try to (pointlessly) write the page again for a while.
157 * 157 *
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1e32fb834eb8..2df42fdcdc91 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -631,7 +631,8 @@ config HVC_CONSOLE
631 631
632config HVC_ISERIES 632config HVC_ISERIES
633 bool "iSeries Hypervisor Virtual Console support" 633 bool "iSeries Hypervisor Virtual Console support"
634 depends on PPC_ISERIES && !VIOCONS 634 depends on PPC_ISERIES
635 default y
635 select HVC_DRIVER 636 select HVC_DRIVER
636 help 637 help
637 iSeries machines support a hypervisor virtual console. 638 iSeries machines support a hypervisor virtual console.
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 892db7096986..32ed19c9ec1c 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev)
65 * \param dev DRM device. 65 * \param dev DRM device.
66 * 66 *
67 * Free all pages associated with DMA buffers, the buffers and pages lists, and 67 * Free all pages associated with DMA buffers, the buffers and pages lists, and
68 * finally the the drm_device::dma structure itself. 68 * finally the drm_device::dma structure itself.
69 */ 69 */
70void drm_dma_takedown(drm_device_t * dev) 70void drm_dma_takedown(drm_device_t * dev)
71{ 71{
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 35540cfb43dd..b5c5b9fa84c3 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -157,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
157 * \param address access address. 157 * \param address access address.
158 * \return pointer to the page structure. 158 * \return pointer to the page structure.
159 * 159 *
160 * Get the the mapping, find the real physical page to map, get the page, and 160 * Get the mapping, find the real physical page to map, get the page, and
161 * return it. 161 * return it.
162 */ 162 */
163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, 163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a881f96c983e..ecda760ae8c0 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
294# define R300_PVS_CNTL_1_POS_END_SHIFT 10 294# define R300_PVS_CNTL_1_POS_END_SHIFT 10
295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
296/* Addresses are relative the the vertex program parameters area. */ 296/* Addresses are relative to the vertex program parameters area. */
297#define R300_VAP_PVS_CNTL_2 0x22D4 297#define R300_VAP_PVS_CNTL_2 0x22D4
298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 49f914e79216..9e1fc02967ff 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -12,7 +12,7 @@
12 * 12 *
13 * This driver allows use of the real time clock (built into 13 * This driver allows use of the real time clock (built into
14 * nearly all computers) from user space. It exports the /dev/rtc 14 * nearly all computers) from user space. It exports the /dev/rtc
15 * interface supporting various ioctl() and also the /proc/dev/rtc 15 * interface supporting various ioctl() and also the /proc/driver/rtc
16 * pseudo-file for status information. 16 * pseudo-file for status information.
17 * 17 *
18 * The ioctls can be used to set the interrupt behaviour where 18 * The ioctls can be used to set the interrupt behaviour where
@@ -377,7 +377,7 @@ static int gen_rtc_release(struct inode *inode, struct file *file)
377#ifdef CONFIG_PROC_FS 377#ifdef CONFIG_PROC_FS
378 378
379/* 379/*
380 * Info exported via "/proc/rtc". 380 * Info exported via "/proc/driver/rtc".
381 */ 381 */
382 382
383static int gen_rtc_proc_output(char *buf) 383static int gen_rtc_proc_output(char *buf)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5f3acd8e64b8..7cda04b33534 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -91,3 +91,17 @@ config HW_RANDOM_OMAP
91 module will be called omap-rng. 91 module will be called omap-rng.
92 92
93 If unsure, say Y. 93 If unsure, say Y.
94
95config HW_RANDOM_PASEMI
96 tristate "PA Semi HW Random Number Generator support"
97 depends on HW_RANDOM && PPC_PASEMI
98 default HW_RANDOM
99 ---help---
100 This driver provides kernel-side support for the Random Number
101 Generator hardware found on the PA6T-1682M processor.
102
103 To compile this driver as a module, choose M here: the
104 module will be called pasemi-rng.
105
106 If unsure, say Y.
107
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c41fa19454e3..c8b7300e2fb1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
13obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 000000000000..fa6040b6c8f2
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Maintained by: Olof Johansson <olof@lixom.net>
5 *
6 * Driver for the PWRficient on-chip RNG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/platform_device.h>
25#include <linux/hw_random.h>
26#include <asm/of_platform.h>
27#include <asm/io.h>
28
29#define SDCRNG_CTL_REG 0x00
30#define SDCRNG_CTL_FVLD_M 0x0000f000
31#define SDCRNG_CTL_FVLD_S 12
32#define SDCRNG_CTL_KSZ 0x00000800
33#define SDCRNG_CTL_RSRC_CRG 0x00000010
34#define SDCRNG_CTL_RSRC_RRG 0x00000000
35#define SDCRNG_CTL_CE 0x00000004
36#define SDCRNG_CTL_RE 0x00000002
37#define SDCRNG_CTL_DR 0x00000001
38#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
39#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
40#define SDCRNG_VAL_REG 0x20
41
42#define MODULE_NAME "pasemi_rng"
43
44static int pasemi_rng_data_present(struct hwrng *rng)
45{
46 void __iomem *rng_regs = (void __iomem *)rng->priv;
47
48 return (in_le32(rng_regs + SDCRNG_CTL_REG)
49 & SDCRNG_CTL_FVLD_M) ? 1 : 0;
50}
51
52static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
53{
54 void __iomem *rng_regs = (void __iomem *)rng->priv;
55 *data = in_le32(rng_regs + SDCRNG_VAL_REG);
56 return 4;
57}
58
59static int pasemi_rng_init(struct hwrng *rng)
60{
61 void __iomem *rng_regs = (void __iomem *)rng->priv;
62 u32 ctl;
63
64 ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
65 out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
66 out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
67
68 return 0;
69}
70
71static void pasemi_rng_cleanup(struct hwrng *rng)
72{
73 void __iomem *rng_regs = (void __iomem *)rng->priv;
74 u32 ctl;
75
76 ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
77 out_le32(rng_regs + SDCRNG_CTL_REG,
78 in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
79}
80
81static struct hwrng pasemi_rng = {
82 .name = MODULE_NAME,
83 .init = pasemi_rng_init,
84 .cleanup = pasemi_rng_cleanup,
85 .data_present = pasemi_rng_data_present,
86 .data_read = pasemi_rng_data_read,
87};
88
89static int __devinit rng_probe(struct of_device *ofdev,
90 const struct of_device_id *match)
91{
92 void __iomem *rng_regs;
93 struct device_node *rng_np = ofdev->node;
94 struct resource res;
95 int err = 0;
96
97 err = of_address_to_resource(rng_np, 0, &res);
98 if (err)
99 return -ENODEV;
100
101 rng_regs = ioremap(res.start, 0x100);
102
103 if (!rng_regs)
104 return -ENOMEM;
105
106 pasemi_rng.priv = (unsigned long)rng_regs;
107
108 printk(KERN_INFO "Registering PA Semi RNG\n");
109
110 err = hwrng_register(&pasemi_rng);
111
112 if (err)
113 iounmap(rng_regs);
114
115 return err;
116}
117
118static int __devexit rng_remove(struct of_device *dev)
119{
120 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
121
122 hwrng_unregister(&pasemi_rng);
123 iounmap(rng_regs);
124
125 return 0;
126}
127
128static struct of_device_id rng_match[] = {
129 {
130 .compatible = "1682m-rng",
131 },
132 {},
133};
134
135static struct of_platform_driver rng_driver = {
136 .name = "pasemi-rng",
137 .match_table = rng_match,
138 .probe = rng_probe,
139 .remove = rng_remove,
140};
141
142static int __init rng_init(void)
143{
144 return of_register_platform_driver(&rng_driver);
145}
146module_init(rng_init);
147
148static void __exit rng_exit(void)
149{
150 of_unregister_platform_driver(&rng_driver);
151}
152module_exit(rng_exit);
153
154MODULE_LICENSE("GPL");
155MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
156MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
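For context on the contract this new driver implements: the hwrng core polls data_present() and, once it reports data, pulls up to four bytes through data_read(). A user-space model of that producer interface, with a toy backend standing in for the PWRficient registers (the kernel's struct lives in linux/hw_random.h and uses u32 for the data word):

#include <stdio.h>

struct hwrng {
	const char *name;
	int (*data_present)(struct hwrng *rng);
	int (*data_read)(struct hwrng *rng, unsigned int *data);
	unsigned long priv;
};

static int toy_present(struct hwrng *rng)
{
	(void)rng;
	return 1;			/* pretend the FIFO is valid */
}

static int toy_read(struct hwrng *rng, unsigned int *data)
{
	(void)rng;
	*data = 0x12345678;		/* pretend hardware value */
	return 4;			/* bytes produced, like pasemi_rng_data_read */
}

static struct hwrng toy = {
	.name = "toy",
	.data_present = toy_present,
	.data_read = toy_read,
};

int main(void)
{
	unsigned int v;

	if (toy.data_present(&toy)) {
		int n = toy.data_read(&toy, &v);

		printf("%s produced %d bytes: %08x\n", toy.name, n, v);
	}
	return 0;
}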
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c09160383a53..6e55cfb9c65a 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -705,15 +705,13 @@ static int __init mmtimer_init(void)
705 maxn++; 705 maxn++;
706 706
707 /* Allocate list of node ptrs to mmtimer_t's */ 707 /* Allocate list of node ptrs to mmtimer_t's */
708 timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); 708 timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
709 if (timers == NULL) { 709 if (timers == NULL) {
710 printk(KERN_ERR "%s: failed to allocate memory for device\n", 710 printk(KERN_ERR "%s: failed to allocate memory for device\n",
711 MMTIMER_NAME); 711 MMTIMER_NAME);
712 goto out3; 712 goto out3;
713 } 713 }
714 714
715 memset(timers,0,(sizeof(mmtimer_t *)*maxn));
716
717 /* Allocate mmtimer_t's for each online node */ 715 /* Allocate mmtimer_t's for each online node */
718 for_each_online_node(node) { 716 for_each_online_node(node) {
719 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); 717 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 27c1179ee527..f25facd97bb4 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -21,6 +21,7 @@ config SYNCLINK_CS
21config CARDMAN_4000 21config CARDMAN_4000
22 tristate "Omnikey Cardman 4000 support" 22 tristate "Omnikey Cardman 4000 support"
23 depends on PCMCIA 23 depends on PCMCIA
24 select BITREVERSE
24 help 25 help
25 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard 26 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
26 reader. 27 reader.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index e91b43a014b0..fee58e03dbe2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/io.h> 36#include <asm/io.h>
36 37
@@ -194,41 +195,17 @@ static inline unsigned char xinb(unsigned short port)
194} 195}
195#endif 196#endif
196 197
197#define b_0000 15 198static inline unsigned char invert_revert(unsigned char ch)
198#define b_0001 14 199{
199#define b_0010 13 200 return bitrev8(~ch);
200#define b_0011 12 201}
201#define b_0100 11
202#define b_0101 10
203#define b_0110 9
204#define b_0111 8
205#define b_1000 7
206#define b_1001 6
207#define b_1010 5
208#define b_1011 4
209#define b_1100 3
210#define b_1101 2
211#define b_1110 1
212#define b_1111 0
213
214static unsigned char irtab[16] = {
215 b_0000, b_1000, b_0100, b_1100,
216 b_0010, b_1010, b_0110, b_1110,
217 b_0001, b_1001, b_0101, b_1101,
218 b_0011, b_1011, b_0111, b_1111
219};
220 202
221static void str_invert_revert(unsigned char *b, int len) 203static void str_invert_revert(unsigned char *b, int len)
222{ 204{
223 int i; 205 int i;
224 206
225 for (i = 0; i < len; i++) 207 for (i = 0; i < len; i++)
226 b[i] = (irtab[b[i] & 0x0f] << 4) | irtab[b[i] >> 4]; 208 b[i] = invert_revert(b[i]);
227}
228
229static unsigned char invert_revert(unsigned char ch)
230{
231 return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
232} 209}
233 210
234#define ATRLENCK(dev,pos) \ 211#define ATRLENCK(dev,pos) \
@@ -1114,7 +1091,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
1114 /* 1091 /*
1115 * wait for atr to become valid. 1092 * wait for atr to become valid.
1116 * note: it is important to lock this code. if we dont, the monitor 1093 * note: it is important to lock this code. if we dont, the monitor
1117 * could be run between test_bit and the the call the sleep on the 1094 * could be run between test_bit and the call to sleep on the
1118 * atr-queue. if *then* the monitor detects atr valid, it will wake up 1095 * atr-queue. if *then* the monitor detects atr valid, it will wake up
1119 * any process on the atr-queue, *but* since we have been interrupted, 1096 * any process on the atr-queue, *but* since we have been interrupted,
1120 * we do not yet sleep on this queue. this would result in a missed 1097 * we do not yet sleep on this queue. this would result in a missed
@@ -1881,8 +1858,11 @@ static int cm4000_probe(struct pcmcia_device *link)
1881 init_waitqueue_head(&dev->readq); 1858 init_waitqueue_head(&dev->readq);
1882 1859
1883 ret = cm4000_config(link, i); 1860 ret = cm4000_config(link, i);
1884 if (ret) 1861 if (ret) {
1862 dev_table[i] = NULL;
1863 kfree(dev);
1885 return ret; 1864 return ret;
1865 }
1886 1866
1887 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1867 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
1888 "cmm%d", i); 1868 "cmm%d", i);
@@ -1907,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1907 cm4000_release(link); 1887 cm4000_release(link);
1908 1888
1909 dev_table[devno] = NULL; 1889 dev_table[devno] = NULL;
1910 kfree(dev); 1890 kfree(dev);
1911 1891
1912 class_device_destroy(cmm_class, MKDEV(major, devno)); 1892 class_device_destroy(cmm_class, MKDEV(major, devno));
1913 1893
@@ -1956,12 +1936,14 @@ static int __init cmm_init(void)
1956 if (major < 0) { 1936 if (major < 0) {
1957 printk(KERN_WARNING MODULE_NAME 1937 printk(KERN_WARNING MODULE_NAME
1958 ": could not get major number\n"); 1938 ": could not get major number\n");
1939 class_destroy(cmm_class);
1959 return major; 1940 return major;
1960 } 1941 }
1961 1942
1962 rc = pcmcia_register_driver(&cm4000_driver); 1943 rc = pcmcia_register_driver(&cm4000_driver);
1963 if (rc < 0) { 1944 if (rc < 0) {
1964 unregister_chrdev(major, DEVICE_NAME); 1945 unregister_chrdev(major, DEVICE_NAME);
1946 class_destroy(cmm_class);
1965 return rc; 1947 return rc;
1966 } 1948 }
1967 1949
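The irtab removal earlier in this file works because the old table reversed the bits of the complemented nibble, which is exactly bitrev8(~ch) once linux/bitrev.h is pulled in. A stand-alone check of the equivalence, with a user-space bitrev8() standing in for the kernel's:

#include <stdio.h>

/* user-space model of linux/bitrev.h's bitrev8() */
static unsigned char bitrev8(unsigned char b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

/* the new one-liner from the hunk above */
static unsigned char invert_revert(unsigned char ch)
{
	return bitrev8(~ch);
}

/* the deleted b_XXXX table, expanded, kept here only to prove equivalence */
static const unsigned char irtab[16] = {
	15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0
};

static unsigned char invert_revert_old(unsigned char ch)
{
	return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
}

int main(void)
{
	int ch;

	for (ch = 0; ch < 256; ch++) {
		if (invert_revert(ch) != invert_revert_old(ch)) {
			printf("mismatch at %d\n", ch);
			return 1;
		}
	}
	printf("bitrev8(~ch) matches the old lookup table for all 256 bytes\n");
	return 0;
}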
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f2e4ec4fd407..af88181a17f4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -636,8 +636,11 @@ static int reader_probe(struct pcmcia_device *link)
636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0); 636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
637 637
638 ret = reader_config(link, i); 638 ret = reader_config(link, i);
639 if (ret) 639 if (ret) {
640 dev_table[i] = NULL;
641 kfree(dev);
640 return ret; 642 return ret;
643 }
641 644
642 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 645 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
643 "cmx%d", i); 646 "cmx%d", i);
@@ -708,12 +711,14 @@ static int __init cm4040_init(void)
708 if (major < 0) { 711 if (major < 0) {
709 printk(KERN_WARNING MODULE_NAME 712 printk(KERN_WARNING MODULE_NAME
710 ": could not get major number\n"); 713 ": could not get major number\n");
714 class_destroy(cmx_class);
711 return major; 715 return major;
712 } 716 }
713 717
714 rc = pcmcia_register_driver(&reader_driver); 718 rc = pcmcia_register_driver(&reader_driver);
715 if (rc < 0) { 719 if (rc < 0) {
716 unregister_chrdev(major, DEVICE_NAME); 720 unregister_chrdev(major, DEVICE_NAME);
721 class_destroy(cmx_class);
717 return rc; 722 return rc;
718 } 723 }
719 724
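Both cm4000_cs.c and cm4040_cs.c gain the same init error-path rule: each failing step must unwind everything registered before it, in reverse order. A sketch of the pattern with illustrative stubs (register_driver_stub() fails on purpose to exercise the unwind):

#include <stdio.h>

static int create_class(void)         { puts("class created");   return 0; }
static void destroy_class(void)       { puts("class destroyed"); }
static int register_chrdev_stub(void) { puts("chrdev ok");       return 0; }
static void unregister_chrdev_stub(void) { puts("chrdev gone"); }
static int register_driver_stub(void) { puts("driver failed");   return -1; }

static int module_init_sketch(void)
{
	int rc;

	if (create_class())
		return -1;
	if (register_chrdev_stub()) {
		destroy_class();		/* unwind step 1 */
		return -1;
	}
	rc = register_driver_stub();
	if (rc) {
		unregister_chrdev_stub();	/* unwind step 2 */
		destroy_class();		/* then step 1 */
		return rc;
	}
	return 0;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}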
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe00c7dfb649..11089be0691b 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -33,7 +33,7 @@ config TCG_NSC
33 tristate "National Semiconductor TPM Interface" 33 tristate "National Semiconductor TPM Interface"
34 depends on TCG_TPM && PNPACPI 34 depends on TCG_TPM && PNPACPI
35 ---help--- 35 ---help---
36 If you have a TPM security chip from National Semicondutor 36 If you have a TPM security chip from National Semiconductor
37 say Yes and it will be accessible from within Linux. To 37 say Yes and it will be accessible from within Linux. To
38 compile this driver as a module, choose M here; the module 38 compile this driver as a module, choose M here; the module
39 will be called tpm_nsc. 39 will be called tpm_nsc.
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 7710a6a77d97..fc662e4ce58a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -934,13 +934,6 @@ restart:
934 return -EINVAL; 934 return -EINVAL;
935 935
936 /* 936 /*
937 * No more input please, we are switching. The new ldisc
938 * will update this value in the ldisc open function
939 */
940
941 tty->receive_room = 0;
942
943 /*
944 * Problem: What do we do if this blocks ? 937 * Problem: What do we do if this blocks ?
945 */ 938 */
946 939
@@ -951,6 +944,13 @@ restart:
951 return 0; 944 return 0;
952 } 945 }
953 946
947 /*
948 * No more input please, we are switching. The new ldisc
949 * will update this value in the ldisc open function
950 */
951
952 tty->receive_room = 0;
953
954 o_ldisc = tty->ldisc; 954 o_ldisc = tty->ldisc;
955 o_tty = tty->link; 955 o_tty = tty->link;
956 956
@@ -1573,11 +1573,11 @@ void no_tty(void)
1573 1573
1574 1574
1575/** 1575/**
1576 * stop_tty - propogate flow control 1576 * stop_tty - propagate flow control
1577 * @tty: tty to stop 1577 * @tty: tty to stop
1578 * 1578 *
1579 * Perform flow control to the driver. For PTY/TTY pairs we 1579 * Perform flow control to the driver. For PTY/TTY pairs we
1580 * must also propogate the TIOCKPKT status. May be called 1580 * must also propagate the TIOCKPKT status. May be called
1581 * on an already stopped device and will not re-call the driver 1581 * on an already stopped device and will not re-call the driver
1582 * method. 1582 * method.
1583 * 1583 *
@@ -1607,11 +1607,11 @@ void stop_tty(struct tty_struct *tty)
1607EXPORT_SYMBOL(stop_tty); 1607EXPORT_SYMBOL(stop_tty);
1608 1608
1609/** 1609/**
1610 * start_tty - propogate flow control 1610 * start_tty - propagate flow control
1611 * @tty: tty to start 1611 * @tty: tty to start
1612 * 1612 *
1613 * Start a tty that has been stopped if at all possible. Perform 1613 * Start a tty that has been stopped if at all possible. Perform
1614 * any necessary wakeups and propogate the TIOCPKT status. If this 1614 * any necessary wakeups and propagate the TIOCPKT status. If this
1615 * tty was previously stopped and is being started then the 1615 * tty was previously stopped and is being started then the
1616 * driver start method is invoked and the line discipline woken. 1616 * driver start method is invoked and the line discipline woken.
1617 * 1617 *
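The receive_room move above matters because the old code shut off input before the "same ldisc, nothing to do" early return, so a no-op switch could leave the tty refusing input. A minimal sketch of the reorder, with illustrative names:

#include <stdio.h>

struct tty { int ldisc; int receive_room; };

static int set_ldisc(struct tty *tty, int new_ldisc)
{
	if (tty->ldisc == new_ldisc)
		return 0;		/* early return: input keeps flowing */

	tty->receive_room = 0;		/* only a real switch stops input */
	tty->ldisc = new_ldisc;		/* ...the ldisc open() restores room */
	return 1;
}

int main(void)
{
	struct tty tty = { .ldisc = 0, .receive_room = 128 };

	set_ldisc(&tty, 0);		/* no-op switch */
	printf("receive_room after no-op switch: %d\n", tty.receive_room);
	return 0;
}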
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 893dbaf386fb..eb37fba9b7ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1685 if (sys_dev) { 1685 if (sys_dev) {
1686 switch (action) { 1686 switch (action) {
1687 case CPU_ONLINE: 1687 case CPU_ONLINE:
1688 case CPU_ONLINE_FROZEN:
1688 cpufreq_add_dev(sys_dev); 1689 cpufreq_add_dev(sys_dev);
1689 break; 1690 break;
1690 case CPU_DOWN_PREPARE: 1691 case CPU_DOWN_PREPARE:
1692 case CPU_DOWN_PREPARE_FROZEN:
1691 if (unlikely(lock_policy_rwsem_write(cpu))) 1693 if (unlikely(lock_policy_rwsem_write(cpu)))
1692 BUG(); 1694 BUG();
1693 1695
@@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1699 __cpufreq_remove_dev(sys_dev); 1701 __cpufreq_remove_dev(sys_dev);
1700 break; 1702 break;
1701 case CPU_DOWN_FAILED: 1703 case CPU_DOWN_FAILED:
1704 case CPU_DOWN_FAILED_FROZEN:
1702 cpufreq_add_dev(sys_dev); 1705 cpufreq_add_dev(sys_dev);
1703 break; 1706 break;
1704 } 1707 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d1c7cac9316c..d2f0cbd8b8f3 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
313 313
314 switch (action) { 314 switch (action) {
315 case CPU_ONLINE: 315 case CPU_ONLINE:
316 case CPU_ONLINE_FROZEN:
316 cpufreq_update_policy(cpu); 317 cpufreq_update_policy(cpu);
317 break; 318 break;
318 case CPU_DEAD: 319 case CPU_DEAD:
320 case CPU_DEAD_FROZEN:
319 cpufreq_stats_free_table(cpu); 321 cpufreq_stats_free_table(cpu);
320 break; 322 break;
321 } 323 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f21fe66c9eef..f4c634504d1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,7 +51,7 @@ config CRYPTO_DEV_GEODE
51 default m 51 default m
52 help 52 help
53 Say 'Y' here to use the AMD Geode LX processor on-board AES 53 Say 'Y' here to use the AMD Geode LX processor on-board AES
54 engine for the CryptoAPI AES alogrithm. 54 engine for the CryptoAPI AES algorithm.
55 55
56 To compile this driver as a module, choose M here: the module 56 To compile this driver as a module, choose M here: the module
57 will be called geode-aes. 57 will be called geode-aes.
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 03b1f650d1c4..75e3911810a3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
309 309
310 switch (action) { 310 switch (action) {
311 case CPU_ONLINE: 311 case CPU_ONLINE:
312 case CPU_ONLINE_FROZEN:
312 coretemp_device_add(cpu); 313 coretemp_device_add(cpu);
313 break; 314 break;
314 case CPU_DEAD: 315 case CPU_DEAD:
316 case CPU_DEAD_FROZEN:
315 coretemp_device_remove(cpu); 317 coretemp_device_remove(cpu);
316 break; 318 break;
317 } 319 }
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 7ed92dc3d833..3c3f2ebf3fc9 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
354 * also needs to get error handling and probably 354 * also needs to get error handling and probably
355 * an #ifdef CONFIG_SOFTWARE_SUSPEND 355 * an #ifdef CONFIG_SOFTWARE_SUSPEND
356 */ 356 */
357 pm_suspend(PM_SUSPEND_DISK); 357 hibernate();
358#endif 358#endif
359 poll = 1; 359 poll = 1;
360 } 360 }
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 5314ec913724..d09e74c2996e 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -827,7 +827,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
 
 	/*
 	 *	Now set up the hw. We have to do this ourselves as
-	 *	the MMIO layout isnt the same as the the standard port
+	 *	the MMIO layout isnt the same as the standard port
 	 *	based I/O
 	 */
 
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 6a1a0572275e..835937e38529 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1702,7 +1702,7 @@ static int nodemgr_host_thread(void *__hi)
 			generation = get_hpsb_generation(host);
 
 			/* If we get a reset before we are done waiting, then
-			 * start the the waiting over again */
+			 * start the waiting over again */
 			if (generation != g)
 				g = generation, i = 0;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index f284be1c9166..82dda2faf4d0 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
 		if(!create_comp_task(pool, cpu)) {
 			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
@@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb,
 		}
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
 		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
 		destroy_comp_task(pool, cpu);
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
 		kthread_bind(cct->task, cpu);
 		wake_up_process(cct->task);
 		break;
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
 		break;
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
 		destroy_comp_task(pool, cpu);
 		take_over_work(pool, cpu);
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c921d6c522f5..c92f9d764fce 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,7 +17,7 @@ config CAPI_TRACE
 	help
 	  If you say Y here, the kernelcapi driver can make verbose traces
 	  of CAPI messages. This feature can be enabled/disabled via IOCTL for
-	  every controler (default disabled).
+	  every controller (default disabled).
 	  This will increase the size of the kernelcapi module by 20 KB.
 	  If unsure, say Y.
 
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
index af3eb9e795b5..85784a7ffb25 100644
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ b/drivers/isdn/hardware/eicon/divasync.h
@@ -216,7 +216,7 @@ typedef struct
 #define SERIAL_HOOK_RING	0x85
 #define SERIAL_HOOK_DETACH	0x8f
 	unsigned char		Flags;			/* function refinements */
-	/* parameters passed by the the ATTACH request */
+	/* parameters passed by the ATTACH request */
 	SERIAL_INT_CB		InterruptHandler;	/* called on each interrupt */
 	SERIAL_DPC_CB		DeferredHandler;	/* called on hook state changes */
 	void			*HandlerContext;	/* context for both handlers */
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 99e70d4103b6..1f18f1993387 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1217,11 +1217,11 @@ usb_init(hfcusb_data * hfc)
 	/* aux = output, reset off */
 	write_usb(hfc, HFCUSB_CIRM, 0x10);
 
-	/* set USB_SIZE to match the the wMaxPacketSize for INT or BULK transfers */
+	/* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
 	write_usb(hfc, HFCUSB_USB_SIZE,
 		  (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4));
 
-	/* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */
+	/* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
 	write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size);
 
 	/* enable PCM/GCI master mode */
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index c8b8cfa332bb..0d892600ff00 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 
 	switch (val) {
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
 		       cpu);
 		decache_vcpus_on_cpu(cpu);
@@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 					 NULL, 0, 1);
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
 		smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1d49d2ade557..677c99325be5 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -1,5 +1,5 @@
 /*
- * drivers/leds/h1940-leds.c
+ * drivers/leds/leds-h1940.c
  * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
  *
  * This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a32c91e27b3c..58926da0ae18 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -237,7 +237,7 @@ config PMAC_RACKMETER
 	tristate "Support for Apple XServe front panel LEDs"
 	depends on PPC_PMAC
 	help
-	  This driver procides some support to control the front panel
+	  This driver provides some support to control the front panel
 	  blue LEDs "vu-meter" of the XServer macs.
 
 endif # MACINTOSH_DRIVERS
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
index da862e4632dd..67b8e9453b19 100644
--- a/drivers/mca/mca-bus.c
+++ b/drivers/mca/mca-bus.c
@@ -47,19 +47,25 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv)
 {
 	struct mca_device *mca_dev = to_mca_device (dev);
 	struct mca_driver *mca_drv = to_mca_driver (drv);
-	const short *mca_ids = mca_drv->id_table;
-	int i;
-
-	if (!mca_ids)
-		return 0;
-
-	for(i = 0; mca_ids[i]; i++) {
-		if (mca_ids[i] == mca_dev->pos_id) {
-			mca_dev->index = i;
-			return 1;
-		}
-	}
-
+	const unsigned short *mca_ids = mca_drv->id_table;
+	int i = 0;
+
+	if (mca_ids) {
+		for(i = 0; mca_ids[i]; i++) {
+			if (mca_ids[i] == mca_dev->pos_id) {
+				mca_dev->index = i;
+				return 1;
+			}
+		}
+	}
+	/* If the integrated id is present, treat it as though it were an
+	 * additional id in the id_table (it can't be because by definition,
+	 * integrated id's overflow a short */
+	if (mca_drv->integrated_id && mca_dev->pos_id ==
+	    mca_drv->integrated_id) {
+		mca_dev->index = i;
+		return 1;
+	}
 	return 0;
 }
 
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
index 2223466b3d8a..32cd39bcc715 100644
--- a/drivers/mca/mca-driver.c
+++ b/drivers/mca/mca-driver.c
@@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv)
 		mca_drv->driver.bus = &mca_bus_type;
 		if ((r = driver_register(&mca_drv->driver)) < 0)
 			return r;
+		mca_drv->integrated_id = 0;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL(mca_register_driver);
 
+int mca_register_driver_integrated(struct mca_driver *mca_driver,
+				   int integrated_id)
+{
+	int r = mca_register_driver(mca_driver);
+
+	if (!r)
+		mca_driver->integrated_id = integrated_id;
+
+	return r;
+}
+EXPORT_SYMBOL(mca_register_driver_integrated);
+
 void mca_unregister_driver(struct mca_driver *mca_drv)
 {
 	if (MCA_bus)
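The new mca_register_driver_integrated() lets a driver also match an on-board controller whose POS id is too wide for the unsigned short id_table. A hypothetical caller, for illustration only (both ids are invented):

	static unsigned short my_adapter_ids[] = { 0x0f1f, 0 };

	static struct mca_driver my_mca_driver = {
		.id_table = my_adapter_ids,
		/* .driver initialisation omitted */
	};

	static int __init my_adapter_init(void)
	{
		/* 0x10f1f cannot appear in the id_table, so pass it here */
		return mca_register_driver_integrated(&my_mca_driver, 0x10f1f);
	}

mca_bus_match() above then treats the integrated id as though it were one more entry in the table.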
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4540ade6b6b5..7df934d69134 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -262,6 +262,15 @@ config DM_MULTIPATH_EMC
 	---help---
 	  Multipath support for EMC CX/AX series hardware.
 
+config DM_DELAY
+	tristate "I/O delaying target (EXPERIMENTAL)"
+	depends on BLK_DEV_DM && EXPERIMENTAL
+	---help---
+	A target that delays reads and/or writes and can send
+	them to different devices.  Useful for testing.
+
+	If unsure, say N.
+
 endmenu
 
 endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 34957a68d921..38754084eac7 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
 obj-$(CONFIG_BLK_DEV_MD)	+= md-mod.o
 obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
+obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
 obj-$(CONFIG_DM_MULTIPATH_EMC)	+= dm-emc.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index da4349649f7f..c6be88826fae 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,17 +8,43 @@
 #define DM_BIO_LIST_H
 
 #include <linux/bio.h>
+#include <linux/prefetch.h>
 
 struct bio_list {
 	struct bio *head;
 	struct bio *tail;
 };
 
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+	return bl->head == NULL;
+}
+
+#define BIO_LIST_INIT { .head = NULL, .tail = NULL }
+
+#define BIO_LIST(bl) \
+	struct bio_list bl = BIO_LIST_INIT
+
 static inline void bio_list_init(struct bio_list *bl)
 {
 	bl->head = bl->tail = NULL;
 }
 
+#define bio_list_for_each(bio, bl) \
+	for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
+	     bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+	unsigned sz = 0;
+	struct bio *bio;
+
+	bio_list_for_each(bio, bl)
+		sz++;
+
+	return sz;
+}
+
 static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
 {
 	bio->bi_next = NULL;
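A sketch of how the new helpers combine (illustrative only; report_writes() is an invented name):

	static unsigned report_writes(struct bio_list *bl)
	{
		struct bio *bio;
		unsigned writes = 0;

		if (bio_list_empty(bl))
			return 0;

		bio_list_for_each(bio, bl)	/* walks; bios stay on the list */
			if (bio_data_dir(bio) == WRITE)
				writes++;

		return writes;
	}

dm-delay below uses BIO_LIST(flush_bios) to declare and initialise a list on the stack in one step, then detaches the whole chain with bio_list_get() for submission.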
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c347..7b0fcfc9eaa5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
 struct crypt_io {
 	struct dm_target *target;
 	struct bio *base_bio;
-	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
 	int error;
@@ -107,6 +106,8 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
+static void clone_init(struct crypt_io *, struct bio *);
+
 /*
  * Different IV generation algorithms:
  *
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
  * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
  *        (needed for LRW-32-AES and possible other narrow block modes)
  *
+ * null: the initial vector is always zero.  Provides compatibility with
+ *       obsolete loop_fish2 devices.  Do not use for new devices.
+ *
  * plumb: unimplemented, see:
  *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 	return 0;
 }
 
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	memset(iv, 0, cc->iv_size);
+
+	return 0;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
 	.generator = crypt_iv_benbi_gen
 };
 
+static struct crypt_iv_operations crypt_iv_null_ops = {
+	.generator = crypt_iv_null_gen
+};
+
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
-		   struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
+	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;
 
-	if (base_bio) {
-		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
-		__bio_clone(clone, base_bio);
-	} else
-		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
-	clone->bi_destructor = dm_crypt_bio_destructor;
-
-	/* if the last bio was not complete, continue where that one ended */
-	clone->bi_idx = *bio_vec_idx;
-	clone->bi_vcnt = *bio_vec_idx;
-	clone->bi_size = 0;
-	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-	/* clone->bi_idx pages have already been allocated */
-	size -= clone->bi_idx * PAGE_SIZE;
+	clone_init(io, clone);
 
-	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+	for (i = 0; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		return NULL;
 	}
 
-	/*
-	 * Remember the last bio_vec allocated to be able
-	 * to correctly continue after the splitting.
-	 */
-	*bio_vec_idx = clone->bi_vcnt;
-
 	return clone;
 }
 
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
 	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	if (io->first_clone)
-		bio_put(io->first_clone);
-
 	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
 
 	mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev   = cc->dev->bdev;
 	clone->bi_rw     = io->base_bio->bi_rw;
+	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
 	}
 
 	clone_init(io, clone);
-	clone->bi_destructor = dm_crypt_bio_destructor;
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
 	clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
 	struct convert_context ctx;
 	unsigned remaining = base_bio->bi_size;
 	sector_t sector = base_bio->bi_sector - io->target->begin;
-	unsigned bvec_idx = 0;
 
 	atomic_inc(&io->pending);
 
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
-					   io->first_clone, &bvec_idx);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
 		}
 
 		ctx.bio_out = clone;
+		ctx.idx_out = 0;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
-		clone_init(io, clone);
-		clone->bi_sector = cc->start + sector;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
 
+		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
 
-		/* prevent bio_put of first_clone */
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
 		if (remaining)
 			atomic_inc(&io->pending);
 
 		generic_make_request(clone);
 
+		/* Do not reference clone after this - it
+		 * may be gone already. */
+
 		/* out of memory -> run queues */
 		if (remaining)
-			congestion_wait(bio_data_dir(clone), HZ/100);
+			congestion_wait(WRITE, HZ/100);
 	}
 }
 
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
 	else if (strcmp(ivmode, "benbi") == 0)
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
+	else if (strcmp(ivmode, "null") == 0)
+		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else {
 		ti->error = "Invalid IV mode";
 		goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	struct crypt_config *cc = ti->private;
 	struct crypt_io *io;
 
+	if (bio_barrier(bio))
+		return -EOPNOTSUPP;
+
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->first_clone = NULL;
 	io->error = io->post_process = 0;
 	atomic_set(&io->pending, 0);
 	kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version= {1, 3, 0},
+	.version= {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
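With first_clone gone, the write path's lifetime rule is carried entirely by io->pending: each clone is an independent bio sized for the data still to be written, and the io struct holds one reference per clone in flight. In outline (a sketch of the loop above, not the literal code; error paths elided):

	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining); /* sized for what is left */

		remaining -= clone->bi_size;
		if (remaining)			/* a further clone follows, so hold */
			atomic_inc(&io->pending); /* an extra reference on the io */

		generic_make_request(clone);	/* clone may complete and be freed
						 * before this returns */
	}

Each completing clone drops a reference via dec_pending(), and the base bio is only ended once every reference is gone.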
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
new file mode 100644
index 000000000000..52c7cf9e5803
--- /dev/null
+++ b/drivers/md/dm-delay.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2005-2007 Red Hat GmbH
+ *
+ * A target that delays reads and/or writes and can send
+ * them to different devices.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+
+#include "dm.h"
+#include "dm-bio-list.h"
+
+#define DM_MSG_PREFIX "delay"
+
+struct delay_c {
+	struct timer_list delay_timer;
+	struct semaphore timer_lock;
+	struct work_struct flush_expired_bios;
+	struct list_head delayed_bios;
+	atomic_t may_delay;
+	mempool_t *delayed_pool;
+
+	struct dm_dev *dev_read;
+	sector_t start_read;
+	unsigned read_delay;
+	unsigned reads;
+
+	struct dm_dev *dev_write;
+	sector_t start_write;
+	unsigned write_delay;
+	unsigned writes;
+};
+
+struct delay_info {
+	struct delay_c *context;
+	struct list_head list;
+	struct bio *bio;
+	unsigned long expires;
+};
+
+static DEFINE_MUTEX(delayed_bios_lock);
+
+static struct workqueue_struct *kdelayd_wq;
+static struct kmem_cache *delayed_cache;
+
+static void handle_delayed_timer(unsigned long data)
+{
+	struct delay_c *dc = (struct delay_c *)data;
+
+	queue_work(kdelayd_wq, &dc->flush_expired_bios);
+}
+
+static void queue_timeout(struct delay_c *dc, unsigned long expires)
+{
+	down(&dc->timer_lock);
+
+	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
+		mod_timer(&dc->delay_timer, expires);
+
+	up(&dc->timer_lock);
+}
+
+static void flush_bios(struct bio *bio)
+{
+	struct bio *n;
+
+	while (bio) {
+		n = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = n;
+	}
+}
+
+static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
+{
+	struct delay_info *delayed, *next;
+	unsigned long next_expires = 0;
+	int start_timer = 0;
+	BIO_LIST(flush_bios);
+
+	mutex_lock(&delayed_bios_lock);
+	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
+			list_del(&delayed->list);
+			bio_list_add(&flush_bios, delayed->bio);
+			if ((bio_data_dir(delayed->bio) == WRITE))
+				delayed->context->writes--;
+			else
+				delayed->context->reads--;
+			mempool_free(delayed, dc->delayed_pool);
+			continue;
+		}
+
+		if (!start_timer) {
+			start_timer = 1;
+			next_expires = delayed->expires;
+		} else
+			next_expires = min(next_expires, delayed->expires);
+	}
+
+	mutex_unlock(&delayed_bios_lock);
+
+	if (start_timer)
+		queue_timeout(dc, next_expires);
+
+	return bio_list_get(&flush_bios);
+}
+
+static void flush_expired_bios(struct work_struct *work)
+{
+	struct delay_c *dc;
+
+	dc = container_of(work, struct delay_c, flush_expired_bios);
+	flush_bios(flush_delayed_bios(dc, 0));
+}
+
+/*
+ * Mapping parameters:
+ *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
+ *
+ * With separate write parameters, the first set is only used for reads.
+ * Delays are specified in milliseconds.
+ */
+static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct delay_c *dc;
+	unsigned long long tmpll;
+
+	if (argc != 3 && argc != 6) {
+		ti->error = "requires exactly 3 or 6 arguments";
+		return -EINVAL;
+	}
+
+	dc = kmalloc(sizeof(*dc), GFP_KERNEL);
+	if (!dc) {
+		ti->error = "Cannot allocate context";
+		return -ENOMEM;
+	}
+
+	dc->reads = dc->writes = 0;
+
+	if (sscanf(argv[1], "%llu", &tmpll) != 1) {
+		ti->error = "Invalid device sector";
+		goto bad;
+	}
+	dc->start_read = tmpll;
+
+	if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
+		ti->error = "Invalid delay";
+		goto bad;
+	}
+
+	if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
+			  dm_table_get_mode(ti->table), &dc->dev_read)) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	if (argc == 3) {
+		dc->dev_write = NULL;
+		goto out;
+	}
+
+	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+		ti->error = "Invalid write device sector";
+		goto bad;
+	}
+	dc->start_write = tmpll;
+
+	if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
+		ti->error = "Invalid write delay";
+		goto bad;
+	}
+
+	if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
+			  dm_table_get_mode(ti->table), &dc->dev_write)) {
+		ti->error = "Write device lookup failed";
+		dm_put_device(ti, dc->dev_read);
+		goto bad;
+	}
+
+out:
+	dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
+	if (!dc->delayed_pool) {
+		DMERR("Couldn't create delayed bio pool.");
+		goto bad;
+	}
+
+	init_timer(&dc->delay_timer);
+	dc->delay_timer.function = handle_delayed_timer;
+	dc->delay_timer.data = (unsigned long)dc;
+
+	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+	INIT_LIST_HEAD(&dc->delayed_bios);
+	init_MUTEX(&dc->timer_lock);
+	atomic_set(&dc->may_delay, 1);
+
+	ti->private = dc;
+	return 0;
+
+bad:
+	kfree(dc);
+	return -EINVAL;
+}
+
+static void delay_dtr(struct dm_target *ti)
+{
+	struct delay_c *dc = ti->private;
+
+	flush_workqueue(kdelayd_wq);
+
+	dm_put_device(ti, dc->dev_read);
+
+	if (dc->dev_write)
+		dm_put_device(ti, dc->dev_write);
+
+	mempool_destroy(dc->delayed_pool);
+	kfree(dc);
+}
+
+static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
+{
+	struct delay_info *delayed;
+	unsigned long expires = 0;
+
+	if (!delay || !atomic_read(&dc->may_delay))
+		return 1;
+
+	delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
+
+	delayed->context = dc;
+	delayed->bio = bio;
+	delayed->expires = expires = jiffies + (delay * HZ / 1000);
+
+	mutex_lock(&delayed_bios_lock);
+
+	if (bio_data_dir(bio) == WRITE)
+		dc->writes++;
+	else
+		dc->reads++;
+
+	list_add_tail(&delayed->list, &dc->delayed_bios);
+
+	mutex_unlock(&delayed_bios_lock);
+
+	queue_timeout(dc, expires);
+
+	return 0;
+}
+
+static void delay_presuspend(struct dm_target *ti)
+{
+	struct delay_c *dc = ti->private;
+
+	atomic_set(&dc->may_delay, 0);
+	del_timer_sync(&dc->delay_timer);
+	flush_bios(flush_delayed_bios(dc, 1));
+}
+
+static void delay_resume(struct dm_target *ti)
+{
+	struct delay_c *dc = ti->private;
+
+	atomic_set(&dc->may_delay, 1);
+}
+
+static int delay_map(struct dm_target *ti, struct bio *bio,
+		     union map_info *map_context)
+{
+	struct delay_c *dc = ti->private;
+
+	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
+		bio->bi_bdev = dc->dev_write->bdev;
+		bio->bi_sector = dc->start_write +
+				 (bio->bi_sector - ti->begin);
+
+		return delay_bio(dc, dc->write_delay, bio);
+	}
+
+	bio->bi_bdev = dc->dev_read->bdev;
+	bio->bi_sector = dc->start_read +
+			 (bio->bi_sector - ti->begin);
+
+	return delay_bio(dc, dc->read_delay, bio);
+}
+
+static int delay_status(struct dm_target *ti, status_type_t type,
+			char *result, unsigned maxlen)
+{
+	struct delay_c *dc = ti->private;
+	int sz = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		DMEMIT("%u %u", dc->reads, dc->writes);
+		break;
+
+	case STATUSTYPE_TABLE:
+		DMEMIT("%s %llu %u", dc->dev_read->name,
+		       (unsigned long long) dc->start_read,
+		       dc->read_delay);
+		if (dc->dev_write)
+			DMEMIT("%s %llu %u", dc->dev_write->name,
+			       (unsigned long long) dc->start_write,
+			       dc->write_delay);
+		break;
+	}
+
+	return 0;
+}
+
+static struct target_type delay_target = {
+	.name	    = "delay",
+	.version    = {1, 0, 2},
+	.module     = THIS_MODULE,
+	.ctr	    = delay_ctr,
+	.dtr	    = delay_dtr,
+	.map	    = delay_map,
+	.presuspend = delay_presuspend,
+	.resume	    = delay_resume,
+	.status	    = delay_status,
+};
+
+static int __init dm_delay_init(void)
+{
+	int r = -ENOMEM;
+
+	kdelayd_wq = create_workqueue("kdelayd");
+	if (!kdelayd_wq) {
+		DMERR("Couldn't start kdelayd");
+		goto bad_queue;
+	}
+
+	delayed_cache = kmem_cache_create("dm-delay",
+					  sizeof(struct delay_info),
+					  __alignof__(struct delay_info),
+					  0, NULL, NULL);
+	if (!delayed_cache) {
+		DMERR("Couldn't create delayed bio cache.");
+		goto bad_memcache;
+	}
+
+	r = dm_register_target(&delay_target);
+	if (r < 0) {
+		DMERR("register failed %d", r);
+		goto bad_register;
+	}
+
+	return 0;
+
+bad_register:
+	kmem_cache_destroy(delayed_cache);
+bad_memcache:
+	destroy_workqueue(kdelayd_wq);
+bad_queue:
+	return r;
+}
+
+static void __exit dm_delay_exit(void)
+{
+	int r = dm_unregister_target(&delay_target);
+
+	if (r < 0)
+		DMERR("unregister failed %d", r);
+
+	kmem_cache_destroy(delayed_cache);
+	destroy_workqueue(kdelayd_wq);
+}
+
+/* Module hooks */
+module_init(dm_delay_init);
+module_exit(dm_delay_exit);
+
+MODULE_DESCRIPTION(DM_NAME " delay target");
+MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 99cdffa7fbfe..07e0a0c84f6e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,7 +1,8 @@
 /*
- * dm-snapshot.c
+ * dm-exception-store.c
  *
  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
+ * Copyright (C) 2006 Red Hat GmbH
  *
  * This file is released under the GPL.
  */
@@ -123,6 +124,7 @@ struct pstore {
 	atomic_t pending_count;
 	uint32_t callback_count;
 	struct commit_callback *callbacks;
+	struct dm_io_client *io_client;
 };
 
 static inline unsigned int sectors_to_pages(unsigned int sectors)
@@ -159,14 +161,20 @@ static void free_area(struct pstore *ps)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
 {
-	struct io_region where;
-	unsigned long bits;
-
-	where.bdev = ps->snap->cow->bdev;
-	where.sector = ps->snap->chunk_size * chunk;
-	where.count = ps->snap->chunk_size;
-
-	return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
+	struct io_region where = {
+		.bdev = ps->snap->cow->bdev,
+		.sector = ps->snap->chunk_size * chunk,
+		.count = ps->snap->chunk_size,
+	};
+	struct dm_io_request io_req = {
+		.bi_rw = rw,
+		.mem.type = DM_IO_VMA,
+		.mem.ptr.vma = ps->area,
+		.client = ps->io_client,
+		.notify.fn = NULL,
+	};
+
+	return dm_io(&io_req, 1, &where, NULL);
 }
 
 /*
@@ -213,17 +221,18 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		chunk_size_supplied = 0;
 	}
 
-	r = dm_io_get(sectors_to_pages(ps->snap->chunk_size));
-	if (r)
-		return r;
+	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
+							     chunk_size));
+	if (IS_ERR(ps->io_client))
+		return PTR_ERR(ps->io_client);
 
 	r = alloc_area(ps);
 	if (r)
-		goto bad1;
+		return r;
 
 	r = chunk_io(ps, 0, READ);
 	if (r)
-		goto bad2;
+		goto bad;
 
 	dh = (struct disk_header *) ps->area;
 
@@ -235,7 +244,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
 		DMWARN("Invalid or corrupt snapshot");
 		r = -ENXIO;
-		goto bad2;
+		goto bad;
 	}
 
 	*new_snapshot = 0;
@@ -252,27 +261,22 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		       (unsigned long long)ps->snap->chunk_size);
 
 		/* We had a bogus chunk_size. Fix stuff up. */
-		dm_io_put(sectors_to_pages(ps->snap->chunk_size));
 		free_area(ps);
 
 		ps->snap->chunk_size = chunk_size;
 		ps->snap->chunk_mask = chunk_size - 1;
 		ps->snap->chunk_shift = ffs(chunk_size) - 1;
 
-		r = dm_io_get(sectors_to_pages(chunk_size));
+		r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
+					ps->io_client);
 		if (r)
 			return r;
 
 		r = alloc_area(ps);
-		if (r)
-			goto bad1;
-
-		return 0;
+		return r;
 
-bad2:
+bad:
 	free_area(ps);
-bad1:
-	dm_io_put(sectors_to_pages(ps->snap->chunk_size));
 	return r;
 }
 
@@ -405,7 +409,7 @@ static void persistent_destroy(struct exception_store *store)
 {
 	struct pstore *ps = get_info(store);
 
-	dm_io_put(sectors_to_pages(ps->snap->chunk_size));
+	dm_io_client_destroy(ps->io_client);
 	vfree(ps->callbacks);
 	free_area(ps);
 	kfree(ps);
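The exception store now follows the client lifecycle that replaces dm_io_get()/dm_io_put(). In outline (a sketch with error handling trimmed; the names are as in the patch):

	/* at construction: size the pool for one chunk's worth of pages */
	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	/* if the on-disk chunk size turns out to differ, resize in place */
	r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
				ps->io_client);

	/* at destruction */
	dm_io_client_destroy(ps->io_client);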
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 32eff28e4adc..e0832e6fcf36 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -16,6 +16,7 @@
 struct hw_handler_type;
 struct hw_handler {
 	struct hw_handler_type *type;
+	struct mapped_device *md;
 	void *context;
 };
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8bdc8a87b249..352c6fbeac53 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2006 Red Hat GmbH
  *
  * This file is released under the GPL.
  */
@@ -12,13 +13,17 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-static struct bio_set *_bios;
+struct dm_io_client {
+	mempool_t *pool;
+	struct bio_set *bios;
+};
 
 /* FIXME: can we shrink this ? */
 struct io {
 	unsigned long error;
 	atomic_t count;
 	struct task_struct *sleeper;
+	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
 };
@@ -26,63 +31,58 @@ struct io {
 /*
  * io contexts are only dynamically allocated for asynchronous
  * io.  Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as buffer heads ! (FIXME:
- * must reduce this).
+ * have the same number of io contexts as bios! (FIXME: must reduce this).
  */
-static unsigned _num_ios;
-static mempool_t *_io_pool;
 
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages;	/* too many ? */
 }
 
-static int resize_pool(unsigned int new_ios)
+/*
+ * Create a client with mempool and bioset.
+ */
+struct dm_io_client *dm_io_client_create(unsigned num_pages)
 {
-	int r = 0;
-
-	if (_io_pool) {
-		if (new_ios == 0) {
-			/* free off the pool */
-			mempool_destroy(_io_pool);
-			_io_pool = NULL;
-			bioset_free(_bios);
-
-		} else {
-			/* resize the pool */
-			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
-		}
+	unsigned ios = pages_to_ios(num_pages);
+	struct dm_io_client *client;
 
-	} else {
-		/* create new pool */
-		_io_pool = mempool_create_kmalloc_pool(new_ios,
-						       sizeof(struct io));
-		if (!_io_pool)
-			return -ENOMEM;
-
-		_bios = bioset_create(16, 16);
-		if (!_bios) {
-			mempool_destroy(_io_pool);
-			_io_pool = NULL;
-			return -ENOMEM;
-		}
-	}
+	client = kmalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+	if (!client->pool)
+		goto bad;
 
-	if (!r)
-		_num_ios = new_ios;
+	client->bios = bioset_create(16, 16);
+	if (!client->bios)
+		goto bad;
 
-	return r;
+	return client;
+
+   bad:
+	if (client->pool)
+		mempool_destroy(client->pool);
+	kfree(client);
+	return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_get(unsigned int num_pages)
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
 {
-	return resize_pool(_num_ios + pages_to_ios(num_pages));
+	return mempool_resize(client->pool, pages_to_ios(num_pages),
+			      GFP_KERNEL);
 }
+EXPORT_SYMBOL(dm_io_client_resize);
 
-void dm_io_put(unsigned int num_pages)
+void dm_io_client_destroy(struct dm_io_client *client)
 {
-	resize_pool(_num_ios - pages_to_ios(num_pages));
+	mempool_destroy(client->pool);
+	bioset_free(client->bios);
+	kfree(client);
 }
+EXPORT_SYMBOL(dm_io_client_destroy);
 
 /*-----------------------------------------------------------------
  * We need to keep track of which region a bio is doing io for.
@@ -118,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
-			mempool_free(io, _io_pool);
+			mempool_free(io, io->client->pool);
 			fn(r, context);
 		}
 	}
@@ -126,7 +126,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
 
 static int endio(struct bio *bio, unsigned int done, int error)
 {
-	struct io *io = (struct io *) bio->bi_private;
+	struct io *io;
+	unsigned region;
 
 	/* keep going until we've finished */
 	if (bio->bi_size)
@@ -135,10 +136,17 @@ static int endio(struct bio *bio, unsigned int done, int error)
 	if (error && bio_data_dir(bio) == READ)
 		zero_fill_bio(bio);
 
-	dec_count(io, bio_get_region(bio), error);
+	/*
+	 * The bio destructor in bio_put() may use the io object.
+	 */
+	io = bio->bi_private;
+	region = bio_get_region(bio);
+
 	bio->bi_max_vecs++;
 	bio_put(bio);
 
+	dec_count(io, region, error);
+
 	return 0;
 }
 
@@ -209,6 +217,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
 	dp->context_ptr = bvec;
 }
 
+/*
+ * Functions for getting the pages from a VMA.
+ */
 static void vm_get_page(struct dpages *dp,
 		 struct page **p, unsigned long *len, unsigned *offset)
 {
@@ -233,7 +244,34 @@ static void vm_dp_init(struct dpages *dp, void *data)
 
 static void dm_bio_destructor(struct bio *bio)
 {
-	bio_free(bio, _bios);
+	struct io *io = bio->bi_private;
+
+	bio_free(bio, io->client->bios);
+}
+
+/*
+ * Functions for getting the pages from kernel memory.
+ */
+static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
+			unsigned *offset)
+{
+	*p = virt_to_page(dp->context_ptr);
+	*offset = dp->context_u;
+	*len = PAGE_SIZE - dp->context_u;
+}
+
+static void km_next_page(struct dpages *dp)
+{
+	dp->context_ptr += PAGE_SIZE - dp->context_u;
+	dp->context_u = 0;
+}
+
+static void km_dp_init(struct dpages *dp, void *data)
+{
+	dp->get_page = km_get_page;
+	dp->next_page = km_next_page;
+	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
+	dp->context_ptr = data;
 }
 
 /*-----------------------------------------------------------------
@@ -256,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 	 * to hide it from bio_add_page().
 	 */
 	num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
-	bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
+	bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
 	bio->bi_sector = where->sector + (where->count - remaining);
 	bio->bi_bdev = where->bdev;
 	bio->bi_end_io = endio;
@@ -311,8 +349,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
 		dec_count(io, 0, 0);
 }
 
-static int sync_io(unsigned int num_regions, struct io_region *where,
-		   int rw, struct dpages *dp, unsigned long *error_bits)
+static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+		   struct io_region *where, int rw, struct dpages *dp,
+		   unsigned long *error_bits)
 {
 	struct io io;
 
@@ -324,6 +363,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
 	io.error = 0;
 	atomic_set(&io.count, 1); /* see dispatch_io() */
 	io.sleeper = current;
+	io.client = client;
 
 	dispatch_io(rw, num_regions, where, dp, &io, 1);
 
@@ -340,12 +380,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
 	if (atomic_read(&io.count))
 		return -EINTR;
 
-	*error_bits = io.error;
+	if (error_bits)
+		*error_bits = io.error;
+
 	return io.error ? -EIO : 0;
 }
 
-static int async_io(unsigned int num_regions, struct io_region *where, int rw,
-		    struct dpages *dp, io_notify_fn fn, void *context)
+static int async_io(struct dm_io_client *client, unsigned int num_regions,
+		    struct io_region *where, int rw, struct dpages *dp,
+		    io_notify_fn fn, void *context)
 {
 	struct io *io;
 
@@ -355,10 +398,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
 		return -EIO;
 	}
 
-	io = mempool_alloc(_io_pool, GFP_NOIO);
+	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
+	io->client = client;
 	io->callback = fn;
 	io->context = context;
 
@@ -366,61 +410,51 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
 	return 0;
 }
 
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
-	       struct page_list *pl, unsigned int offset,
-	       unsigned long *error_bits)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 {
-	struct dpages dp;
-	list_dp_init(&dp, pl, offset);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
-}
+	/* Set up dpages based on memory type */
+	switch (io_req->mem.type) {
+	case DM_IO_PAGE_LIST:
+		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
+		break;
+
+	case DM_IO_BVEC:
+		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+		break;
+
+	case DM_IO_VMA:
+		vm_dp_init(dp, io_req->mem.ptr.vma);
+		break;
+
+	case DM_IO_KMEM:
+		km_dp_init(dp, io_req->mem.ptr.addr);
+		break;
+
+	default:
+		return -EINVAL;
+	}
 
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		    struct bio_vec *bvec, unsigned long *error_bits)
-{
-	struct dpages dp;
-	bvec_dp_init(&dp, bvec);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
+	return 0;
 }
 
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
-		  void *data, unsigned long *error_bits)
+/*
+ * New collapsed (a)synchronous interface
+ */
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+	  struct io_region *where, unsigned long *sync_error_bits)
 {
+	int r;
 	struct dpages dp;
-	vm_dp_init(&dp, data);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
-}
 
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
-		struct page_list *pl, unsigned int offset,
-		io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	list_dp_init(&dp, pl, offset);
-	return async_io(num_regions, where, rw, &dp, fn, context);
-}
+	r = dp_init(io_req, &dp);
+	if (r)
+		return r;
 
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		     struct bio_vec *bvec, io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	bvec_dp_init(&dp, bvec);
-	return async_io(num_regions, where, rw, &dp, fn, context);
-}
+	if (!io_req->notify.fn)
+		return sync_io(io_req->client, num_regions, where,
+			       io_req->bi_rw, &dp, sync_error_bits);
 
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
-		   void *data, io_notify_fn fn, void *context)
-{
-	struct dpages dp;
-	vm_dp_init(&dp, data);
-	return async_io(num_regions, where, rw, &dp, fn, context);
+	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
+			&dp, io_req->notify.fn, io_req->notify.context);
 }
-
-EXPORT_SYMBOL(dm_io_get);
-EXPORT_SYMBOL(dm_io_put);
-EXPORT_SYMBOL(dm_io_sync);
-EXPORT_SYMBOL(dm_io_async);
-EXPORT_SYMBOL(dm_io_sync_bvec);
-EXPORT_SYMBOL(dm_io_async_bvec);
-EXPORT_SYMBOL(dm_io_sync_vm);
-EXPORT_SYMBOL(dm_io_async_vm);
+EXPORT_SYMBOL(dm_io);
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9f..f647e2cceaa6 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
 struct io_region {
 	struct block_device *bdev;
 	sector_t sector;
-	sector_t count;
+	sector_t count;		/* If this is zero the region is ignored. */
 };
 
 struct page_list {
@@ -20,55 +20,60 @@ struct page_list {
 	struct page *page;
 };
 
-
-/*
- * 'error' is a bitset, with each bit indicating whether an error
- * occurred doing io to the corresponding region.
- */
 typedef void (*io_notify_fn)(unsigned long error, void *context);
 
+enum dm_io_mem_type {
+	DM_IO_PAGE_LIST,/* Page list */
+	DM_IO_BVEC,	/* Bio vector */
+	DM_IO_VMA,	/* Virtual memory area */
+	DM_IO_KMEM,	/* Kernel memory */
+};
+
+struct dm_io_memory {
+	enum dm_io_mem_type type;
+
+	union {
+		struct page_list *pl;
+		struct bio_vec *bvec;
+		void *vma;
+		void *addr;
+	} ptr;
+
+	unsigned offset;
+};
+
+struct dm_io_notify {
+	io_notify_fn fn;	/* Callback for asynchronous requests */
+	void *context;		/* Passed to callback */
+};
 
 /*
- * Before anyone uses the IO interface they should call
- * dm_io_get(), specifying roughly how many pages they are
- * expecting to perform io on concurrently.
- *
- * This function may block.
+ * IO request structure
  */
-int dm_io_get(unsigned int num_pages);
-void dm_io_put(unsigned int num_pages);
+struct dm_io_client;
+struct dm_io_request {
+	int bi_rw;			/* READ|WRITE - not READA */
+	struct dm_io_memory mem;	/* Memory to use for io */
+	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
+	struct dm_io_client *client;	/* Client memory handler */
+};
 
 /*
- * Synchronous IO.
+ * For async io calls, users can alternatively use the dm_io() function below
+ * and dm_io_client_create() to create private mempools for the client.
  *
- * Please ensure that the rw flag in the next two functions is
- * either READ or WRITE, ie. we don't take READA. Any
- * regions with a zero count field will be ignored.
+ * Create/destroy may block.
  */
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
-	       struct page_list *pl, unsigned int offset,
-	       unsigned long *error_bits);
-
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		    struct bio_vec *bvec, unsigned long *error_bits);
-
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
-		  void *data, unsigned long *error_bits);
+struct dm_io_client *dm_io_client_create(unsigned num_pages);
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+void dm_io_client_destroy(struct dm_io_client *client);
 
 /*
- * Aynchronous IO.
- *
- * The 'where' array may be safely allocated on the stack since
- * the function takes a copy.
+ * IO interface using private per-client pools.
+ * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+ * error occurred doing io to the corresponding region.
  */
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
-		struct page_list *pl, unsigned int offset,
-		io_notify_fn fn, void *context);
-
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
-		     struct bio_vec *bvec, io_notify_fn fn, void *context);
-
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
-		   void *data, io_notify_fn fn, void *context);
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+	  struct io_region *region, unsigned long *sync_error_bits);
 
 #endif
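Putting the new interface together: one dm_io() call covers all eight old entry points, with the memory type selected by dm_io_request.mem and synchronous behaviour selected by a NULL notify.fn. A sketch of a synchronous read into a vmalloc'ed buffer, mirroring chunk_io() in dm-exception-store.c above (read_block() is an invented name):

	static int read_block(struct dm_io_client *client,
			      struct block_device *bdev,
			      void *vma, sector_t sector, sector_t count)
	{
		struct io_region where = {
			.bdev = bdev,
			.sector = sector,
			.count = count,
		};
		struct dm_io_request io_req = {
			.bi_rw = READ,
			.mem.type = DM_IO_VMA,
			.mem.ptr.vma = vma,
			.notify.fn = NULL,	/* NULL selects the synchronous path */
			.client = client,
		};

		return dm_io(&io_req, 1, &where, NULL);
	}

Passing NULL for sync_error_bits is now allowed; sync_io() only fills the bitset when a pointer is supplied, and dm_io() still returns -EIO on failure.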
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6a9261351848..a66428d860fe 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -149,9 +149,12 @@ struct log_c {
 		FORCESYNC,	/* Force a sync to happen */
 	} sync;
 
+	struct dm_io_request io_req;
+
 	/*
 	 * Disk log fields
 	 */
+	int log_dev_failed;
 	struct dm_dev *log_dev;
 	struct log_header header;
 
@@ -199,13 +202,20 @@ static void header_from_disk(struct log_header *core, struct log_header *disk)
 	core->nr_regions = le64_to_cpu(disk->nr_regions);
 }
 
+static int rw_header(struct log_c *lc, int rw)
+{
+	lc->io_req.bi_rw = rw;
+	lc->io_req.mem.ptr.vma = lc->disk_header;
+	lc->io_req.notify.fn = NULL;
+
+	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+}
+
 static int read_header(struct log_c *log)
 {
 	int r;
-	unsigned long ebits;
 
-	r = dm_io_sync_vm(1, &log->header_location, READ,
-			  log->disk_header, &ebits);
+	r = rw_header(log, READ);
 	if (r)
 		return r;
 
@@ -233,11 +243,8 @@ static int read_header(struct log_c *log)
233 243
234static inline int write_header(struct log_c *log) 244static inline int write_header(struct log_c *log)
235{ 245{
236 unsigned long ebits;
237
238 header_to_disk(&log->header, log->disk_header); 246 header_to_disk(&log->header, log->disk_header);
239 return dm_io_sync_vm(1, &log->header_location, WRITE, 247 return rw_header(log, WRITE);
240 log->disk_header, &ebits);
241} 248}
242 249
243/*---------------------------------------------------------------- 250/*----------------------------------------------------------------
@@ -256,6 +263,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
256 uint32_t region_size; 263 uint32_t region_size;
257 unsigned int region_count; 264 unsigned int region_count;
258 size_t bitset_size, buf_size; 265 size_t bitset_size, buf_size;
266 int r;
259 267
260 if (argc < 1 || argc > 2) { 268 if (argc < 1 || argc > 2) {
261 DMWARN("wrong number of arguments to mirror log"); 269 DMWARN("wrong number of arguments to mirror log");
@@ -315,6 +323,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
315 lc->disk_header = NULL; 323 lc->disk_header = NULL;
316 } else { 324 } else {
317 lc->log_dev = dev; 325 lc->log_dev = dev;
326 lc->log_dev_failed = 0;
318 lc->header_location.bdev = lc->log_dev->bdev; 327 lc->header_location.bdev = lc->log_dev->bdev;
319 lc->header_location.sector = 0; 328 lc->header_location.sector = 0;
320 329
@@ -324,6 +333,15 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
324 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 333 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
325 bitset_size, ti->limits.hardsect_size); 334 bitset_size, ti->limits.hardsect_size);
326 lc->header_location.count = buf_size >> SECTOR_SHIFT; 335 lc->header_location.count = buf_size >> SECTOR_SHIFT;
336 lc->io_req.mem.type = DM_IO_VMA;
337 lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
338 PAGE_SIZE));
339 if (IS_ERR(lc->io_req.client)) {
340 r = PTR_ERR(lc->io_req.client);
341 DMWARN("couldn't allocate disk io client");
342 kfree(lc);
343			return r;
344 }
327 345
328 lc->disk_header = vmalloc(buf_size); 346 lc->disk_header = vmalloc(buf_size);
329 if (!lc->disk_header) { 347 if (!lc->disk_header) {
@@ -424,6 +442,7 @@ static void disk_dtr(struct dirty_log *log)
424 442
425 dm_put_device(lc->ti, lc->log_dev); 443 dm_put_device(lc->ti, lc->log_dev);
426 vfree(lc->disk_header); 444 vfree(lc->disk_header);
445 dm_io_client_destroy(lc->io_req.client);
427 destroy_log_context(lc); 446 destroy_log_context(lc);
428} 447}
429 448
@@ -437,6 +456,15 @@ static int count_bits32(uint32_t *addr, unsigned size)
437 return count; 456 return count;
438} 457}
439 458
459static void fail_log_device(struct log_c *lc)
460{
461 if (lc->log_dev_failed)
462 return;
463
464 lc->log_dev_failed = 1;
465 dm_table_event(lc->ti->table);
466}
467
440static int disk_resume(struct dirty_log *log) 468static int disk_resume(struct dirty_log *log)
441{ 469{
442 int r; 470 int r;
@@ -446,8 +474,19 @@ static int disk_resume(struct dirty_log *log)
446 474
447 /* read the disk header */ 475 /* read the disk header */
448 r = read_header(lc); 476 r = read_header(lc);
449 if (r) 477 if (r) {
450 return r; 478 DMWARN("%s: Failed to read header on mirror log device",
479 lc->log_dev->name);
480 fail_log_device(lc);
481 /*
482 * If the log device cannot be read, we must assume
483 * all regions are out-of-sync. If we simply return
484 * here, the state will be uninitialized and could
485 * lead us to return 'in-sync' status for regions
486 * that are actually 'out-of-sync'.
487 */
488 lc->header.nr_regions = 0;
489 }
451 490
452 /* set or clear any new bits -- device has grown */ 491 /* set or clear any new bits -- device has grown */
453 if (lc->sync == NOSYNC) 492 if (lc->sync == NOSYNC)
@@ -472,7 +511,14 @@ static int disk_resume(struct dirty_log *log)
472 lc->header.nr_regions = lc->region_count; 511 lc->header.nr_regions = lc->region_count;
473 512
474 /* write the new header */ 513 /* write the new header */
475 return write_header(lc); 514 r = write_header(lc);
515 if (r) {
516 DMWARN("%s: Failed to write header on mirror log device",
517 lc->log_dev->name);
518 fail_log_device(lc);
519 }
520
521 return r;
476} 522}
477 523
478static uint32_t core_get_region_size(struct dirty_log *log) 524static uint32_t core_get_region_size(struct dirty_log *log)
@@ -516,7 +562,9 @@ static int disk_flush(struct dirty_log *log)
516 return 0; 562 return 0;
517 563
518 r = write_header(lc); 564 r = write_header(lc);
519 if (!r) 565 if (r)
566 fail_log_device(lc);
567 else
520 lc->touched = 0; 568 lc->touched = 0;
521 569
522 return r; 570 return r;
@@ -591,6 +639,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
591 639
592 switch(status) { 640 switch(status) {
593 case STATUSTYPE_INFO: 641 case STATUSTYPE_INFO:
642 DMEMIT("1 %s", log->type->name);
594 break; 643 break;
595 644
596 case STATUSTYPE_TABLE: 645 case STATUSTYPE_TABLE:
@@ -606,17 +655,17 @@ static int disk_status(struct dirty_log *log, status_type_t status,
606 char *result, unsigned int maxlen) 655 char *result, unsigned int maxlen)
607{ 656{
608 int sz = 0; 657 int sz = 0;
609 char buffer[16];
610 struct log_c *lc = log->context; 658 struct log_c *lc = log->context;
611 659
612 switch(status) { 660 switch(status) {
613 case STATUSTYPE_INFO: 661 case STATUSTYPE_INFO:
662 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
663 lc->log_dev_failed ? 'D' : 'A');
614 break; 664 break;
615 665
616 case STATUSTYPE_TABLE: 666 case STATUSTYPE_TABLE:
617 format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
618 DMEMIT("%s %u %s %u ", log->type->name, 667 DMEMIT("%s %u %s %u ", log->type->name,
619 lc->sync == DEFAULTSYNC ? 2 : 3, buffer, 668 lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
620 lc->region_size); 669 lc->region_size);
621 DMEMIT_SYNC; 670 DMEMIT_SYNC;
622 } 671 }
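
With the new DMEMIT format strings, STATUSTYPE_INFO output becomes self-describing: the core log reports "1 core", while the disk log reports a line along the lines of "3 disk 253:0 A" (argument count, log type, log device, then 'A' while the device is alive or 'D' once fail_log_device() has marked it). The "253:0" device name here is illustrative, not taken from the patch.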
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3aa013506967..de54b39e6ffe 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -668,6 +668,9 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
668 return -EINVAL; 668 return -EINVAL;
669 } 669 }
670 670
671 m->hw_handler.md = dm_table_get_md(ti->table);
672 dm_put(m->hw_handler.md);
673
671 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); 674 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
672 if (r) { 675 if (r) {
673 dm_put_hw_handler(hwht); 676 dm_put_hw_handler(hwht);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 23a642619bed..ef124b71ccc8 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -21,15 +21,11 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22 22
23#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24#define DM_IO_PAGES 64
24 25
25static struct workqueue_struct *_kmirrord_wq; 26#define DM_RAID1_HANDLE_ERRORS 0x01
26static struct work_struct _kmirrord_work;
27static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
28 27
29static inline void wake(void) 28static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30{
31 queue_work(_kmirrord_wq, &_kmirrord_work);
32}
33 29
34/*----------------------------------------------------------------- 30/*-----------------------------------------------------------------
35 * Region hash 31 * Region hash
@@ -125,17 +121,23 @@ struct mirror_set {
125 struct list_head list; 121 struct list_head list;
126 struct region_hash rh; 122 struct region_hash rh;
127 struct kcopyd_client *kcopyd_client; 123 struct kcopyd_client *kcopyd_client;
124 uint64_t features;
128 125
129 spinlock_t lock; /* protects the next two lists */ 126 spinlock_t lock; /* protects the next two lists */
130 struct bio_list reads; 127 struct bio_list reads;
131 struct bio_list writes; 128 struct bio_list writes;
132 129
130 struct dm_io_client *io_client;
131
133 /* recovery */ 132 /* recovery */
134 region_t nr_regions; 133 region_t nr_regions;
135 int in_sync; 134 int in_sync;
136 135
137 struct mirror *default_mirror; /* Default mirror */ 136 struct mirror *default_mirror; /* Default mirror */
138 137
138 struct workqueue_struct *kmirrord_wq;
139 struct work_struct kmirrord_work;
140
139 unsigned int nr_mirrors; 141 unsigned int nr_mirrors;
140 struct mirror mirror[0]; 142 struct mirror mirror[0];
141}; 143};
@@ -153,6 +155,11 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
153 return region << rh->region_shift; 155 return region << rh->region_shift;
154} 156}
155 157
158static void wake(struct mirror_set *ms)
159{
160 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
161}
162
156/* FIXME move this */ 163/* FIXME move this */
157static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 164static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
158 165
@@ -398,8 +405,7 @@ static void rh_update_states(struct region_hash *rh)
398 mempool_free(reg, rh->region_pool); 405 mempool_free(reg, rh->region_pool);
399 } 406 }
400 407
401 if (!list_empty(&recovered)) 408 rh->log->type->flush(rh->log);
402 rh->log->type->flush(rh->log);
403 409
404 list_for_each_entry_safe (reg, next, &clean, list) 410 list_for_each_entry_safe (reg, next, &clean, list)
405 mempool_free(reg, rh->region_pool); 411 mempool_free(reg, rh->region_pool);
@@ -471,7 +477,7 @@ static void rh_dec(struct region_hash *rh, region_t region)
471 spin_unlock_irqrestore(&rh->region_lock, flags); 477 spin_unlock_irqrestore(&rh->region_lock, flags);
472 478
473 if (should_wake) 479 if (should_wake)
474 wake(); 480 wake(rh->ms);
475} 481}
476 482
477/* 483/*
@@ -558,7 +564,7 @@ static void rh_recovery_end(struct region *reg, int success)
558 list_add(&reg->list, &reg->rh->recovered_regions); 564 list_add(&reg->list, &reg->rh->recovered_regions);
559 spin_unlock_irq(&rh->region_lock); 565 spin_unlock_irq(&rh->region_lock);
560 566
561 wake(); 567 wake(rh->ms);
562} 568}
563 569
564static void rh_flush(struct region_hash *rh) 570static void rh_flush(struct region_hash *rh)
@@ -592,7 +598,7 @@ static void rh_start_recovery(struct region_hash *rh)
592 for (i = 0; i < MAX_RECOVERY; i++) 598 for (i = 0; i < MAX_RECOVERY; i++)
593 up(&rh->recovery_count); 599 up(&rh->recovery_count);
594 600
595 wake(); 601 wake(rh->ms);
596} 602}
597 603
598/* 604/*
@@ -735,7 +741,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
735 /* 741 /*
736 * We can only read balance if the region is in sync. 742 * We can only read balance if the region is in sync.
737 */ 743 */
738 if (rh_in_sync(&ms->rh, region, 0)) 744 if (rh_in_sync(&ms->rh, region, 1))
739 m = choose_mirror(ms, bio->bi_sector); 745 m = choose_mirror(ms, bio->bi_sector);
740 else 746 else
741 m = ms->default_mirror; 747 m = ms->default_mirror;
@@ -792,6 +798,14 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
792 unsigned int i; 798 unsigned int i;
793 struct io_region io[KCOPYD_MAX_REGIONS+1]; 799 struct io_region io[KCOPYD_MAX_REGIONS+1];
794 struct mirror *m; 800 struct mirror *m;
801 struct dm_io_request io_req = {
802 .bi_rw = WRITE,
803 .mem.type = DM_IO_BVEC,
804 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
805 .notify.fn = write_callback,
806 .notify.context = bio,
807 .client = ms->io_client,
808 };
795 809
796 for (i = 0; i < ms->nr_mirrors; i++) { 810 for (i = 0; i < ms->nr_mirrors; i++) {
797 m = ms->mirror + i; 811 m = ms->mirror + i;
@@ -802,9 +816,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
802 } 816 }
803 817
804 bio_set_ms(bio, ms); 818 bio_set_ms(bio, ms);
805 dm_io_async_bvec(ms->nr_mirrors, io, WRITE, 819
806 bio->bi_io_vec + bio->bi_idx, 820 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
807 write_callback, bio);
808} 821}
809 822
810static void do_writes(struct mirror_set *ms, struct bio_list *writes) 823static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -870,11 +883,10 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
870/*----------------------------------------------------------------- 883/*-----------------------------------------------------------------
871 * kmirrord 884 * kmirrord
872 *---------------------------------------------------------------*/ 885 *---------------------------------------------------------------*/
873static LIST_HEAD(_mirror_sets); 886static void do_mirror(struct work_struct *work)
874static DECLARE_RWSEM(_mirror_sets_lock);
875
876static void do_mirror(struct mirror_set *ms)
877{ 887{
888 struct mirror_set *ms = container_of(work, struct mirror_set,
889 kmirrord_work);
878 struct bio_list reads, writes; 890 struct bio_list reads, writes;
879 891
880 spin_lock(&ms->lock); 892 spin_lock(&ms->lock);
@@ -890,16 +902,6 @@ static void do_mirror(struct mirror_set *ms)
890 do_writes(ms, &writes); 902 do_writes(ms, &writes);
891} 903}
892 904
893static void do_work(struct work_struct *ignored)
894{
895 struct mirror_set *ms;
896
897 down_read(&_mirror_sets_lock);
898 list_for_each_entry (ms, &_mirror_sets, list)
899 do_mirror(ms);
900 up_read(&_mirror_sets_lock);
901}
902
903/*----------------------------------------------------------------- 905/*-----------------------------------------------------------------
904 * Target functions 906 * Target functions
905 *---------------------------------------------------------------*/ 907 *---------------------------------------------------------------*/
@@ -931,6 +933,13 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
931 ms->in_sync = 0; 933 ms->in_sync = 0;
932 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; 934 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
933 935
936 ms->io_client = dm_io_client_create(DM_IO_PAGES);
937 if (IS_ERR(ms->io_client)) {
938 ti->error = "Error creating dm_io client";
939 kfree(ms);
940 return NULL;
941 }
942
934 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 943 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
935 ti->error = "Error creating dirty region hash"; 944 ti->error = "Error creating dirty region hash";
936 kfree(ms); 945 kfree(ms);
@@ -946,6 +955,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
946 while (m--) 955 while (m--)
947 dm_put_device(ti, ms->mirror[m].dev); 956 dm_put_device(ti, ms->mirror[m].dev);
948 957
958 dm_io_client_destroy(ms->io_client);
949 rh_exit(&ms->rh); 959 rh_exit(&ms->rh);
950 kfree(ms); 960 kfree(ms);
951} 961}
@@ -978,23 +988,6 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
978 return 0; 988 return 0;
979} 989}
980 990
981static int add_mirror_set(struct mirror_set *ms)
982{
983 down_write(&_mirror_sets_lock);
984 list_add_tail(&ms->list, &_mirror_sets);
985 up_write(&_mirror_sets_lock);
986 wake();
987
988 return 0;
989}
990
991static void del_mirror_set(struct mirror_set *ms)
992{
993 down_write(&_mirror_sets_lock);
994 list_del(&ms->list);
995 up_write(&_mirror_sets_lock);
996}
997
998/* 991/*
999 * Create dirty log: log_type #log_params <log_params> 992 * Create dirty log: log_type #log_params <log_params>
1000 */ 993 */
@@ -1037,16 +1030,55 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1037 return dl; 1030 return dl;
1038} 1031}
1039 1032
1033static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1034 unsigned *args_used)
1035{
1036 unsigned num_features;
1037 struct dm_target *ti = ms->ti;
1038
1039 *args_used = 0;
1040
1041 if (!argc)
1042 return 0;
1043
1044 if (sscanf(argv[0], "%u", &num_features) != 1) {
1045 ti->error = "Invalid number of features";
1046 return -EINVAL;
1047 }
1048
1049 argc--;
1050 argv++;
1051 (*args_used)++;
1052
1053 if (num_features > argc) {
1054 ti->error = "Not enough arguments to support feature count";
1055 return -EINVAL;
1056 }
1057
1058 if (!strcmp("handle_errors", argv[0]))
1059 ms->features |= DM_RAID1_HANDLE_ERRORS;
1060 else {
1061 ti->error = "Unrecognised feature requested";
1062 return -EINVAL;
1063 }
1064
1065 (*args_used)++;
1066
1067 return 0;
1068}
1069
1040/* 1070/*
1041 * Construct a mirror mapping: 1071 * Construct a mirror mapping:
1042 * 1072 *
1043 * log_type #log_params <log_params> 1073 * log_type #log_params <log_params>
1044 * #mirrors [mirror_path offset]{2,} 1074 * #mirrors [mirror_path offset]{2,}
1075 * [#features <features>]
1045 * 1076 *
1046 * log_type is "core" or "disk" 1077 * log_type is "core" or "disk"
1047 * #log_params is between 1 and 3 1078 * #log_params is between 1 and 3
1079 *
1080 * If present, features must be "handle_errors".
1048 */ 1081 */
1049#define DM_IO_PAGES 64
1050static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1082static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1051{ 1083{
1052 int r; 1084 int r;
@@ -1070,8 +1102,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1070 1102
1071 argv++, argc--; 1103 argv++, argc--;
1072 1104
1073 if (argc != nr_mirrors * 2) { 1105 if (argc < nr_mirrors * 2) {
1074 ti->error = "Wrong number of mirror arguments"; 1106 ti->error = "Too few mirror arguments";
1075 dm_destroy_dirty_log(dl); 1107 dm_destroy_dirty_log(dl);
1076 return -EINVAL; 1108 return -EINVAL;
1077 } 1109 }
@@ -1096,13 +1128,37 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1096 ti->private = ms; 1128 ti->private = ms;
1097 ti->split_io = ms->rh.region_size; 1129 ti->split_io = ms->rh.region_size;
1098 1130
1131 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1132 if (!ms->kmirrord_wq) {
1133 DMERR("couldn't start kmirrord");
1134 free_context(ms, ti, m);
1135 return -ENOMEM;
1136 }
1137 INIT_WORK(&ms->kmirrord_work, do_mirror);
1138
1139 r = parse_features(ms, argc, argv, &args_used);
1140 if (r) {
1141 free_context(ms, ti, ms->nr_mirrors);
1142 return r;
1143 }
1144
1145 argv += args_used;
1146 argc -= args_used;
1147
1148 if (argc) {
1149 ti->error = "Too many mirror arguments";
1150 free_context(ms, ti, ms->nr_mirrors);
1151 return -EINVAL;
1152 }
1153
1099 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1154 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1100 if (r) { 1155 if (r) {
1156 destroy_workqueue(ms->kmirrord_wq);
1101 free_context(ms, ti, ms->nr_mirrors); 1157 free_context(ms, ti, ms->nr_mirrors);
1102 return r; 1158 return r;
1103 } 1159 }
1104 1160
1105 add_mirror_set(ms); 1161 wake(ms);
1106 return 0; 1162 return 0;
1107} 1163}
1108 1164
@@ -1110,8 +1166,9 @@ static void mirror_dtr(struct dm_target *ti)
1110{ 1166{
1111 struct mirror_set *ms = (struct mirror_set *) ti->private; 1167 struct mirror_set *ms = (struct mirror_set *) ti->private;
1112 1168
1113 del_mirror_set(ms); 1169 flush_workqueue(ms->kmirrord_wq);
1114 kcopyd_client_destroy(ms->kcopyd_client); 1170 kcopyd_client_destroy(ms->kcopyd_client);
1171 destroy_workqueue(ms->kmirrord_wq);
1115 free_context(ms, ti, ms->nr_mirrors); 1172 free_context(ms, ti, ms->nr_mirrors);
1116} 1173}
1117 1174
@@ -1127,7 +1184,7 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1127 spin_unlock(&ms->lock); 1184 spin_unlock(&ms->lock);
1128 1185
1129 if (should_wake) 1186 if (should_wake)
1130 wake(); 1187 wake(ms);
1131} 1188}
1132 1189
1133/* 1190/*
@@ -1222,11 +1279,9 @@ static void mirror_resume(struct dm_target *ti)
1222static int mirror_status(struct dm_target *ti, status_type_t type, 1279static int mirror_status(struct dm_target *ti, status_type_t type,
1223 char *result, unsigned int maxlen) 1280 char *result, unsigned int maxlen)
1224{ 1281{
1225 unsigned int m, sz; 1282 unsigned int m, sz = 0;
1226 struct mirror_set *ms = (struct mirror_set *) ti->private; 1283 struct mirror_set *ms = (struct mirror_set *) ti->private;
1227 1284
1228 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1229
1230 switch (type) { 1285 switch (type) {
1231 case STATUSTYPE_INFO: 1286 case STATUSTYPE_INFO:
1232 DMEMIT("%d ", ms->nr_mirrors); 1287 DMEMIT("%d ", ms->nr_mirrors);
@@ -1237,13 +1292,21 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1237 (unsigned long long)ms->rh.log->type-> 1292 (unsigned long long)ms->rh.log->type->
1238 get_sync_count(ms->rh.log), 1293 get_sync_count(ms->rh.log),
1239 (unsigned long long)ms->nr_regions); 1294 (unsigned long long)ms->nr_regions);
1295
1296 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1297
1240 break; 1298 break;
1241 1299
1242 case STATUSTYPE_TABLE: 1300 case STATUSTYPE_TABLE:
1301 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1302
1243 DMEMIT("%d", ms->nr_mirrors); 1303 DMEMIT("%d", ms->nr_mirrors);
1244 for (m = 0; m < ms->nr_mirrors; m++) 1304 for (m = 0; m < ms->nr_mirrors; m++)
1245 DMEMIT(" %s %llu", ms->mirror[m].dev->name, 1305 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1246 (unsigned long long)ms->mirror[m].offset); 1306 (unsigned long long)ms->mirror[m].offset);
1307
1308 if (ms->features & DM_RAID1_HANDLE_ERRORS)
1309 DMEMIT(" 1 handle_errors");
1247 } 1310 }
1248 1311
1249 return 0; 1312 return 0;
@@ -1251,7 +1314,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1251 1314
1252static struct target_type mirror_target = { 1315static struct target_type mirror_target = {
1253 .name = "mirror", 1316 .name = "mirror",
1254 .version = {1, 0, 2}, 1317 .version = {1, 0, 3},
1255 .module = THIS_MODULE, 1318 .module = THIS_MODULE,
1256 .ctr = mirror_ctr, 1319 .ctr = mirror_ctr,
1257 .dtr = mirror_dtr, 1320 .dtr = mirror_dtr,
@@ -1270,20 +1333,11 @@ static int __init dm_mirror_init(void)
1270 if (r) 1333 if (r)
1271 return r; 1334 return r;
1272 1335
1273 _kmirrord_wq = create_singlethread_workqueue("kmirrord");
1274 if (!_kmirrord_wq) {
1275 DMERR("couldn't start kmirrord");
1276 dm_dirty_log_exit();
1277 return r;
1278 }
1279 INIT_WORK(&_kmirrord_work, do_work);
1280
1281 r = dm_register_target(&mirror_target); 1336 r = dm_register_target(&mirror_target);
1282 if (r < 0) { 1337 if (r < 0) {
1283 DMERR("%s: Failed to register mirror target", 1338 DMERR("%s: Failed to register mirror target",
1284 mirror_target.name); 1339 mirror_target.name);
1285 dm_dirty_log_exit(); 1340 dm_dirty_log_exit();
1286 destroy_workqueue(_kmirrord_wq);
1287 } 1341 }
1288 1342
1289 return r; 1343 return r;
@@ -1297,7 +1351,6 @@ static void __exit dm_mirror_exit(void)
1297 if (r < 0) 1351 if (r < 0)
1298 DMERR("%s: unregister failed %d", mirror_target.name, r); 1352 DMERR("%s: unregister failed %d", mirror_target.name, r);
1299 1353
1300 destroy_workqueue(_kmirrord_wq);
1301 dm_dirty_log_exit(); 1354 dm_dirty_log_exit();
1302} 1355}
1303 1356
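
Putting the constructor changes together: under the extended syntax documented in mirror_ctr() above, a dmsetup table line for a two-way mirror with a core log that opts into the new error handling would look roughly like this (the start/length, region size and device paths are hypothetical):

	0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors

Omitting the trailing "1 handle_errors" preserves the old behaviour, since parse_features() treats a missing feature count as zero features.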
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 05befa91807a..2fc199b0016b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
425} 425}
426 426
427/* 427/*
428 * If possible (ie. blk_size[major] is set), this checks an area 428 * If possible, this checks an area of a destination device is valid.
429 * of a destination device is valid.
430 */ 429 */
431static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 430static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
432{ 431{
433 sector_t dev_size; 432 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
434 dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 433
434 if (!dev_size)
435 return 1;
436
435 return ((start < dev_size) && (len <= (dev_size - start))); 437 return ((start < dev_size) && (len <= (dev_size - start)));
436} 438}
437 439
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 11a98df298ec..2717a355dc5b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1236,6 +1236,7 @@ void dm_put(struct mapped_device *md)
1236 free_dev(md); 1236 free_dev(md);
1237 } 1237 }
1238} 1238}
1239EXPORT_SYMBOL_GPL(dm_put);
1239 1240
1240/* 1241/*
1241 * Process the deferred bios 1242 * Process the deferred bios
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index b46f6c575f7e..dbc234e3c69f 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 * 6 *
@@ -45,6 +46,8 @@ struct kcopyd_client {
45 unsigned int nr_pages; 46 unsigned int nr_pages;
46 unsigned int nr_free_pages; 47 unsigned int nr_free_pages;
47 48
49 struct dm_io_client *io_client;
50
48 wait_queue_head_t destroyq; 51 wait_queue_head_t destroyq;
49 atomic_t nr_jobs; 52 atomic_t nr_jobs;
50}; 53};
@@ -342,16 +345,20 @@ static void complete_io(unsigned long error, void *context)
342static int run_io_job(struct kcopyd_job *job) 345static int run_io_job(struct kcopyd_job *job)
343{ 346{
344 int r; 347 int r;
348 struct dm_io_request io_req = {
349 .bi_rw = job->rw,
350 .mem.type = DM_IO_PAGE_LIST,
351 .mem.ptr.pl = job->pages,
352 .mem.offset = job->offset,
353 .notify.fn = complete_io,
354 .notify.context = job,
355 .client = job->kc->io_client,
356 };
345 357
346 if (job->rw == READ) 358 if (job->rw == READ)
347 r = dm_io_async(1, &job->source, job->rw, 359 r = dm_io(&io_req, 1, &job->source, NULL);
348 job->pages,
349 job->offset, complete_io, job);
350
351 else 360 else
352 r = dm_io_async(job->num_dests, job->dests, job->rw, 361 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
353 job->pages,
354 job->offset, complete_io, job);
355 362
356 return r; 363 return r;
357} 364}
@@ -670,8 +677,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
670 return r; 677 return r;
671 } 678 }
672 679
673 r = dm_io_get(nr_pages); 680 kc->io_client = dm_io_client_create(nr_pages);
674 if (r) { 681 if (IS_ERR(kc->io_client)) {
682 r = PTR_ERR(kc->io_client);
675 client_free_pages(kc); 683 client_free_pages(kc);
676 kfree(kc); 684 kfree(kc);
677 kcopyd_exit(); 685 kcopyd_exit();
@@ -691,7 +699,7 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
691 /* Wait for completion of all jobs submitted by this client. */ 699 /* Wait for completion of all jobs submitted by this client. */
692 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); 700 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
693 701
694 dm_io_put(kc->nr_pages); 702 dm_io_client_destroy(kc->io_client);
695 client_free_pages(kc); 703 client_free_pages(kc);
696 client_del(kc); 704 client_del(kc);
697 kfree(kc); 705 kfree(kc);
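
The conversion changes the error convention as well as the allocation strategy: dm_io_get() returned an int, whereas dm_io_client_create() returns either a client pointer or an ERR_PTR-encoded errno, so callers must switch to the IS_ERR()/PTR_ERR() idiom. The pattern, as used by kcopyd above and by dm-log and dm-raid1 earlier in this patch:

	struct dm_io_client *client;

	client = dm_io_client_create(nr_pages);
	if (IS_ERR(client))
		return PTR_ERR(client);		/* typically -ENOMEM */

	/* ... issue dm_io() requests through 'client' ... */

	dm_io_client_destroy(client);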
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2b4315d7e5d6..2901d0c0ee9e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -33,6 +33,7 @@
33*/ 33*/
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/kernel.h>
36#include <linux/kthread.h> 37#include <linux/kthread.h>
37#include <linux/linkage.h> 38#include <linux/linkage.h>
38#include <linux/raid/md.h> 39#include <linux/raid/md.h>
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
273 atomic_set(&new->active, 1); 274 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock); 275 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait); 276 init_waitqueue_head(&new->sb_wait);
277 new->reshape_position = MaxSector;
276 278
277 new->queue = blk_alloc_queue(GFP_KERNEL); 279 new->queue = blk_alloc_queue(GFP_KERNEL);
278 if (!new->queue) { 280 if (!new->queue) {
@@ -589,14 +591,41 @@ abort:
589 return ret; 591 return ret;
590} 592}
591 593
594
595static u32 md_csum_fold(u32 csum)
596{
597 csum = (csum & 0xffff) + (csum >> 16);
598 return (csum & 0xffff) + (csum >> 16);
599}
600
592static unsigned int calc_sb_csum(mdp_super_t * sb) 601static unsigned int calc_sb_csum(mdp_super_t * sb)
593{ 602{
603 u64 newcsum = 0;
604 u32 *sb32 = (u32*)sb;
605 int i;
594 unsigned int disk_csum, csum; 606 unsigned int disk_csum, csum;
595 607
596 disk_csum = sb->sb_csum; 608 disk_csum = sb->sb_csum;
597 sb->sb_csum = 0; 609 sb->sb_csum = 0;
598 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 610
611 for (i = 0; i < MD_SB_BYTES/4 ; i++)
612 newcsum += sb32[i];
613 csum = (newcsum & 0xffffffff) + (newcsum>>32);
614
615
616#ifdef CONFIG_ALPHA
617 /* This used to use csum_partial, which was wrong for several
618 * reasons including that different results are returned on
619 * different architectures. It isn't critical that we get exactly
620 * the same return value as before (we always csum_fold before
621 * testing, and that removes any differences). However as we
622 * know that csum_partial always returned a 16bit value on
623 * alphas, do a fold to maximise conformity to previous behaviour.
624 */
625 sb->sb_csum = md_csum_fold(disk_csum);
626#else
599 sb->sb_csum = disk_csum; 627 sb->sb_csum = disk_csum;
628#endif
600 return csum; 629 return csum;
601} 630}
602 631
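
md_csum_fold() collapses a 32-bit sum into 16 bits by adding the two halves twice, so any carry out of the first addition is folded back in; calc_sb_csum() performs the analogous 64-to-32-bit fold on newcsum. A standalone check of the folding arithmetic (userspace sketch, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t fold16(uint32_t csum)	/* mirrors md_csum_fold() */
	{
		csum = (csum & 0xffff) + (csum >> 16);
		return (csum & 0xffff) + (csum >> 16);
	}

	int main(void)
	{
		/* 0x5678 + 0x1234 = 0x68ac; no carry, second pass is a no-op */
		printf("%#x\n", fold16(0x12345678));	/* prints 0x68ac */
		/* 0xffff + 0xffff = 0x1fffe; fold the carry: 0xfffe + 1 = 0xffff */
		printf("%#x\n", fold16(0xffffffff));	/* prints 0xffff */
		return 0;
	}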
@@ -684,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
684 if (sb->raid_disks <= 0) 713 if (sb->raid_disks <= 0)
685 goto abort; 714 goto abort;
686 715
687 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 716 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
688 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 717 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689 b); 718 b);
690 goto abort; 719 goto abort;
@@ -694,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
694 rdev->data_offset = 0; 723 rdev->data_offset = 0;
695 rdev->sb_size = MD_SB_BYTES; 724 rdev->sb_size = MD_SB_BYTES;
696 725
726 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
727 if (sb->level != 1 && sb->level != 4
728 && sb->level != 5 && sb->level != 6
729 && sb->level != 10) {
730 /* FIXME use a better test */
731 printk(KERN_WARNING
732 "md: bitmaps not supported for this level.\n");
733 goto abort;
734 }
735 }
736
697 if (sb->level == LEVEL_MULTIPATH) 737 if (sb->level == LEVEL_MULTIPATH)
698 rdev->desc_nr = -1; 738 rdev->desc_nr = -1;
699 else 739 else
@@ -792,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
792 mddev->max_disks = MD_SB_DISKS; 832 mddev->max_disks = MD_SB_DISKS;
793 833
794 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 834 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795 mddev->bitmap_file == NULL) { 835 mddev->bitmap_file == NULL)
796 if (mddev->level != 1 && mddev->level != 4
797 && mddev->level != 5 && mddev->level != 6
798 && mddev->level != 10) {
799 /* FIXME use a better test */
800 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801 return -EINVAL;
802 }
803 mddev->bitmap_offset = mddev->default_bitmap_offset; 836 mddev->bitmap_offset = mddev->default_bitmap_offset;
804 }
805 837
806 } else if (mddev->pers == NULL) { 838 } else if (mddev->pers == NULL) {
807 /* Insist on good event counter while assembling */ 839 /* Insist on good event counter while assembling */
@@ -1058,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 bdevname(rdev->bdev,b)); 1090 bdevname(rdev->bdev,b));
1059 return -EINVAL; 1091 return -EINVAL;
1060 } 1092 }
1093 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1094 if (sb->level != cpu_to_le32(1) &&
1095 sb->level != cpu_to_le32(4) &&
1096 sb->level != cpu_to_le32(5) &&
1097 sb->level != cpu_to_le32(6) &&
1098 sb->level != cpu_to_le32(10)) {
1099 printk(KERN_WARNING
1100 "md: bitmaps not supported for this level.\n");
1101 return -EINVAL;
1102 }
1103 }
1104
1061 rdev->preferred_minor = 0xffff; 1105 rdev->preferred_minor = 0xffff;
1062 rdev->data_offset = le64_to_cpu(sb->data_offset); 1106 rdev->data_offset = le64_to_cpu(sb->data_offset);
1063 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1107 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1141 mddev->max_disks = (4096-256)/2; 1185 mddev->max_disks = (4096-256)/2;
1142 1186
1143 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1187 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144 mddev->bitmap_file == NULL ) { 1188 mddev->bitmap_file == NULL )
1145 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146 && mddev->level != 10) {
1147 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1148 return -EINVAL;
1149 }
1150 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1189 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1151 } 1190
1152 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1191 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1192 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1193 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -2204,6 +2243,10 @@ static ssize_t
2204layout_show(mddev_t *mddev, char *page) 2243layout_show(mddev_t *mddev, char *page)
2205{ 2244{
2206 /* just a number, not meaningful for all levels */ 2245 /* just a number, not meaningful for all levels */
2246 if (mddev->reshape_position != MaxSector &&
2247 mddev->layout != mddev->new_layout)
2248 return sprintf(page, "%d (%d)\n",
2249 mddev->new_layout, mddev->layout);
2207 return sprintf(page, "%d\n", mddev->layout); 2250 return sprintf(page, "%d\n", mddev->layout);
2208} 2251}
2209 2252
@@ -2212,13 +2255,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
2212{ 2255{
2213 char *e; 2256 char *e;
2214 unsigned long n = simple_strtoul(buf, &e, 10); 2257 unsigned long n = simple_strtoul(buf, &e, 10);
2215 if (mddev->pers)
2216 return -EBUSY;
2217 2258
2218 if (!*buf || (*e && *e != '\n')) 2259 if (!*buf || (*e && *e != '\n'))
2219 return -EINVAL; 2260 return -EINVAL;
2220 2261
2221 mddev->layout = n; 2262 if (mddev->pers)
2263 return -EBUSY;
2264 if (mddev->reshape_position != MaxSector)
2265 mddev->new_layout = n;
2266 else
2267 mddev->layout = n;
2222 return len; 2268 return len;
2223} 2269}
2224static struct md_sysfs_entry md_layout = 2270static struct md_sysfs_entry md_layout =
@@ -2230,6 +2276,10 @@ raid_disks_show(mddev_t *mddev, char *page)
2230{ 2276{
2231 if (mddev->raid_disks == 0) 2277 if (mddev->raid_disks == 0)
2232 return 0; 2278 return 0;
2279 if (mddev->reshape_position != MaxSector &&
2280 mddev->delta_disks != 0)
2281 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2282 mddev->raid_disks - mddev->delta_disks);
2233 return sprintf(page, "%d\n", mddev->raid_disks); 2283 return sprintf(page, "%d\n", mddev->raid_disks);
2234} 2284}
2235 2285
@@ -2247,7 +2297,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2247 2297
2248 if (mddev->pers) 2298 if (mddev->pers)
2249 rv = update_raid_disks(mddev, n); 2299 rv = update_raid_disks(mddev, n);
2250 else 2300 else if (mddev->reshape_position != MaxSector) {
2301 int olddisks = mddev->raid_disks - mddev->delta_disks;
2302 mddev->delta_disks = n - olddisks;
2303 mddev->raid_disks = n;
2304 } else
2251 mddev->raid_disks = n; 2305 mddev->raid_disks = n;
2252 return rv ? rv : len; 2306 return rv ? rv : len;
2253} 2307}
@@ -2257,6 +2311,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2257static ssize_t 2311static ssize_t
2258chunk_size_show(mddev_t *mddev, char *page) 2312chunk_size_show(mddev_t *mddev, char *page)
2259{ 2313{
2314 if (mddev->reshape_position != MaxSector &&
2315 mddev->chunk_size != mddev->new_chunk)
2316 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2317 mddev->chunk_size);
2260 return sprintf(page, "%d\n", mddev->chunk_size); 2318 return sprintf(page, "%d\n", mddev->chunk_size);
2261} 2319}
2262 2320
@@ -2267,12 +2325,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2267 char *e; 2325 char *e;
2268 unsigned long n = simple_strtoul(buf, &e, 10); 2326 unsigned long n = simple_strtoul(buf, &e, 10);
2269 2327
2270 if (mddev->pers)
2271 return -EBUSY;
2272 if (!*buf || (*e && *e != '\n')) 2328 if (!*buf || (*e && *e != '\n'))
2273 return -EINVAL; 2329 return -EINVAL;
2274 2330
2275 mddev->chunk_size = n; 2331 if (mddev->pers)
2332 return -EBUSY;
2333 else if (mddev->reshape_position != MaxSector)
2334 mddev->new_chunk = n;
2335 else
2336 mddev->chunk_size = n;
2276 return len; 2337 return len;
2277} 2338}
2278static struct md_sysfs_entry md_chunk_size = 2339static struct md_sysfs_entry md_chunk_size =
@@ -2637,8 +2698,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
2637 minor = simple_strtoul(buf, &e, 10); 2698 minor = simple_strtoul(buf, &e, 10);
2638 if (e==buf || (*e && *e != '\n') ) 2699 if (e==buf || (*e && *e != '\n') )
2639 return -EINVAL; 2700 return -EINVAL;
2640 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2701 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2641 super_types[major].name == NULL)
2642 return -ENOENT; 2702 return -ENOENT;
2643 mddev->major_version = major; 2703 mddev->major_version = major;
2644 mddev->minor_version = minor; 2704 mddev->minor_version = minor;
@@ -2859,6 +2919,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2859static struct md_sysfs_entry md_suspend_hi = 2919static struct md_sysfs_entry md_suspend_hi =
2860__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2920__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2861 2921
2922static ssize_t
2923reshape_position_show(mddev_t *mddev, char *page)
2924{
2925 if (mddev->reshape_position != MaxSector)
2926 return sprintf(page, "%llu\n",
2927 (unsigned long long)mddev->reshape_position);
2928 strcpy(page, "none\n");
2929 return 5;
2930}
2931
2932static ssize_t
2933reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
2934{
2935 char *e;
2936 unsigned long long new = simple_strtoull(buf, &e, 10);
2937 if (mddev->pers)
2938 return -EBUSY;
2939 if (buf == e || (*e && *e != '\n'))
2940 return -EINVAL;
2941 mddev->reshape_position = new;
2942 mddev->delta_disks = 0;
2943 mddev->new_level = mddev->level;
2944 mddev->new_layout = mddev->layout;
2945 mddev->new_chunk = mddev->chunk_size;
2946 return len;
2947}
2948
2949static struct md_sysfs_entry md_reshape_position =
2950__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
2951 reshape_position_store);
2952
2862 2953
2863static struct attribute *md_default_attrs[] = { 2954static struct attribute *md_default_attrs[] = {
2864 &md_level.attr, 2955 &md_level.attr,
@@ -2871,6 +2962,7 @@ static struct attribute *md_default_attrs[] = {
2871 &md_new_device.attr, 2962 &md_new_device.attr,
2872 &md_safe_delay.attr, 2963 &md_safe_delay.attr,
2873 &md_array_state.attr, 2964 &md_array_state.attr,
2965 &md_reshape_position.attr,
2874 NULL, 2966 NULL,
2875}; 2967};
2876 2968
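
In use (the md0 path and values here are illustrative): reading /sys/block/md0/md/reshape_position prints "none" unless a reshape checkpoint has been recorded; writing a sector offset to it is only accepted while the array is inactive (the store returns -EBUSY once mddev->pers is set) and seeds new_level, new_layout and new_chunk from the current values. While such a checkpoint is pending, the layout, raid_disks and chunk_size files print the pending value with the previous one in parentheses, e.g. chunk_size might read "131072 (65536)" mid-conversion.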
@@ -3012,6 +3104,7 @@ static int do_md_run(mddev_t * mddev)
3012 struct gendisk *disk; 3104 struct gendisk *disk;
3013 struct mdk_personality *pers; 3105 struct mdk_personality *pers;
3014 char b[BDEVNAME_SIZE]; 3106 char b[BDEVNAME_SIZE];
3107 struct block_device *bdev;
3015 3108
3016 if (list_empty(&mddev->disks)) 3109 if (list_empty(&mddev->disks))
3017 /* cannot run an array with no devices.. */ 3110 /* cannot run an array with no devices.. */
@@ -3239,7 +3332,13 @@ static int do_md_run(mddev_t * mddev)
3239 md_wakeup_thread(mddev->thread); 3332 md_wakeup_thread(mddev->thread);
3240 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 3333 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3241 3334
3242 mddev->changed = 1; 3335 bdev = bdget_disk(mddev->gendisk, 0);
3336 if (bdev) {
3337 bd_set_size(bdev, mddev->array_size << 1);
3338 blkdev_ioctl(bdev->bd_inode, NULL, BLKRRPART, 0);
3339 bdput(bdev);
3340 }
3341
3243 md_new_event(mddev); 3342 md_new_event(mddev);
3244 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE); 3343 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3245 return 0; 3344 return 0;
@@ -3361,7 +3460,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
3361 mddev->pers = NULL; 3460 mddev->pers = NULL;
3362 3461
3363 set_capacity(disk, 0); 3462 set_capacity(disk, 0);
3364 mddev->changed = 1;
3365 3463
3366 if (mddev->ro) 3464 if (mddev->ro)
3367 mddev->ro = 0; 3465 mddev->ro = 0;
@@ -3409,6 +3507,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
3409 mddev->size = 0; 3507 mddev->size = 0;
3410 mddev->raid_disks = 0; 3508 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0; 3509 mddev->recovery_cp = 0;
3510 mddev->reshape_position = MaxSector;
3412 3511
3413 } else if (mddev->pers) 3512 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3513 printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4019,7 +4118,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) { 4118 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */ 4119 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 || 4120 if (info->major_version < 0 ||
4022 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 4121 info->major_version >= ARRAY_SIZE(super_types) ||
4023 super_types[info->major_version].name == NULL) { 4122 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */ 4123 /* maybe try to auto-load a module? */
4025 printk(KERN_INFO 4124 printk(KERN_INFO
@@ -4500,20 +4599,6 @@ static int md_release(struct inode *inode, struct file * file)
4500 return 0; 4599 return 0;
4501} 4600}
4502 4601
4503static int md_media_changed(struct gendisk *disk)
4504{
4505 mddev_t *mddev = disk->private_data;
4506
4507 return mddev->changed;
4508}
4509
4510static int md_revalidate(struct gendisk *disk)
4511{
4512 mddev_t *mddev = disk->private_data;
4513
4514 mddev->changed = 0;
4515 return 0;
4516}
4517static struct block_device_operations md_fops = 4602static struct block_device_operations md_fops =
4518{ 4603{
4519 .owner = THIS_MODULE, 4604 .owner = THIS_MODULE,
@@ -4521,8 +4606,6 @@ static struct block_device_operations md_fops =
4521 .release = md_release, 4606 .release = md_release,
4522 .ioctl = md_ioctl, 4607 .ioctl = md_ioctl,
4523 .getgeo = md_getgeo, 4608 .getgeo = md_getgeo,
4524 .media_changed = md_media_changed,
4525 .revalidate_disk= md_revalidate,
4526}; 4609};
4527 4610
4528static int md_thread(void * arg) 4611static int md_thread(void * arg)
@@ -4941,15 +5024,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
4941 return error; 5024 return error;
4942} 5025}
4943 5026
4944static int md_seq_release(struct inode *inode, struct file *file)
4945{
4946 struct seq_file *m = file->private_data;
4947 struct mdstat_info *mi = m->private;
4948 m->private = NULL;
4949 kfree(mi);
4950 return seq_release(inode, file);
4951}
4952
4953static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5027static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4954{ 5028{
4955 struct seq_file *m = filp->private_data; 5029 struct seq_file *m = filp->private_data;
@@ -4971,7 +5045,7 @@ static const struct file_operations md_seq_fops = {
4971 .open = md_seq_open, 5045 .open = md_seq_open,
4972 .read = seq_read, 5046 .read = seq_read,
4973 .llseek = seq_lseek, 5047 .llseek = seq_lseek,
4974 .release = md_seq_release, 5048 .release = seq_release_private,
4975 .poll = mdstat_poll, 5049 .poll = mdstat_poll,
4976}; 5050};
4977 5051
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265d..1b7130cad21f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2063,7 +2063,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
2063 */ 2063 */
2064 mddev->array_size = sectors>>1; 2064 mddev->array_size = sectors>>1;
2065 set_capacity(mddev->gendisk, mddev->array_size << 1); 2065 set_capacity(mddev->gendisk, mddev->array_size << 1);
2066 mddev->changed = 1;
2067 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { 2066 if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
2068 mddev->recovery_cp = mddev->size << 1; 2067 mddev->recovery_cp = mddev->size << 1;
2069 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2068 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d59914f2057..a72e70ad0975 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,8 +353,8 @@ static int grow_stripes(raid5_conf_t *conf, int num)
353 struct kmem_cache *sc; 353 struct kmem_cache *sc;
354 int devs = conf->raid_disks; 354 int devs = conf->raid_disks;
355 355
356 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); 356 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
357 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev)); 357 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
358 conf->active_name = 0; 358 conf->active_name = 0;
359 sc = kmem_cache_create(conf->cache_name[conf->active_name], 359 sc = kmem_cache_create(conf->cache_name[conf->active_name],
360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3864,7 +3864,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 3864 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 3865 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
3866 set_capacity(mddev->gendisk, mddev->array_size << 1); 3866 set_capacity(mddev->gendisk, mddev->array_size << 1);
3867 mddev->changed = 1;
3868 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 3867 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
3869 mddev->recovery_cp = mddev->size << 1; 3868 mddev->recovery_cp = mddev->size << 1;
3870 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3869 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -3999,7 +3998,6 @@ static void end_reshape(raid5_conf_t *conf)
3999 conf->mddev->array_size = conf->mddev->size * 3998 conf->mddev->array_size = conf->mddev->size *
4000 (conf->raid_disks - conf->max_degraded); 3999 (conf->raid_disks - conf->max_degraded);
4001 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4000 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
4002 conf->mddev->changed = 1;
4003 4001
4004 bdev = bdget_disk(conf->mddev->gendisk, 0); 4002 bdev = bdget_disk(conf->mddev->gendisk, 0);
4005 if (bdev) { 4003 if (bdev) {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 68ed3a788083..9200a30dd1b9 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -3,7 +3,7 @@
3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) 3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
4 * see dvb-usb-init.c for copyright information. 4 * see dvb-usb-init.c for copyright information.
5 * 5 *
6 * This file contains functions for initializing the the input-device and for handling remote-control-queries. 6 * This file contains functions for initializing the input-device and for handling remote-control-queries.
7 */ 7 */
8#include "dvb-usb-common.h" 8#include "dvb-usb-common.h"
9#include <linux/usb/input.h> 9#include <linux/usb/input.h>
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index f5d40aa3d27f..f64546c6aeb5 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -266,7 +266,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state)
266{ 266{
267 267
268/* internal */ 268/* internal */
269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0)); 270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096 271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
272 272
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0349a4b5da3f..aece458cfe12 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -223,7 +223,7 @@ static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx)
223static int dib7000p_sad_calib(struct dib7000p_state *state) 223static int dib7000p_sad_calib(struct dib7000p_state *state)
224{ 224{
225/* internal */ 225/* internal */
226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0)); 227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096 228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096
229 229
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 110536843e8e..e725f612a6b7 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -1,6 +1,6 @@
1/* 1/*
2 TDA10021 - Single Chip Cable Channel Receiver driver module 2 TDA10021 - Single Chip Cable Channel Receiver driver module
3 used on the the Siemens DVB-C cards 3 used on the Siemens DVB-C cards
4 4
5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> 5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de> 6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index 54d7b07571b8..23fd0303c91b 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -306,7 +306,7 @@ static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status)
306 * The ves1893 sometimes returns sync values that make no sense, 306 * The ves1893 sometimes returns sync values that make no sense,
307 * because, e.g., the SIGNAL bit is 0, while some of the higher 307 * because, e.g., the SIGNAL bit is 0, while some of the higher
308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?). 308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?).
309 * Tests showed that the the VITERBI and SYNC bits are returned 309 * Tests showed that the VITERBI and SYNC bits are returned
310 * reliably, while the SIGNAL and CARRIER bits are sometimes wrong. 310 * reliably, while the SIGNAL and CARRIER bits are sometimes wrong.
311 * If such a case occurs, we read the value again, until we get a 311 * If such a case occurs, we read the value again, until we get a
312 * valid value. 312 * valid value.
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 563a8319e608..54ccc6e1f92e 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -70,7 +70,7 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
70 70
71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len); 71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
72 if (ret != 2 + len) { 72 if (ret != 2 + len) {
73 em28xx_warn("writting to i2c device failed (error=%i)\n", ret); 73 em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
74 return -EIO; 74 return -EIO;
75 } 75 }
76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0; 76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index bec67609500f..2c7b158ce7e1 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1729,7 +1729,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1729 1729
1730 endpoint = &interface->cur_altsetting->endpoint[1].desc; 1730 endpoint = &interface->cur_altsetting->endpoint[1].desc;
1731 1731
1732 /* check if the the device has the iso in endpoint at the correct place */ 1732 /* check if the device has the iso in endpoint at the correct place */
1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
1734 USB_ENDPOINT_XFER_ISOC) { 1734 USB_ENDPOINT_XFER_ISOC) {
1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n"); 1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
diff --git a/drivers/media/video/pwc/philips.txt b/drivers/media/video/pwc/philips.txt
index f5e848410311..f9f3584281d8 100644
--- a/drivers/media/video/pwc/philips.txt
+++ b/drivers/media/video/pwc/philips.txt
@@ -54,9 +54,9 @@ fps
54 Specifies the desired framerate. Is an integer in the range of 4-30. 54 Specifies the desired framerate. Is an integer in the range of 4-30.
55 55
56fbufs 56fbufs
57 This paramter specifies the number of internal buffers to use for storing 57 This parameter specifies the number of internal buffers to use for storing
58 frames from the cam. This will help if the process that reads images from 58 frames from the cam. This will help if the process that reads images from
59 the cam is a bit slow or momentarely busy. However, on slow machines it 59 the cam is a bit slow or momentarily busy. However, on slow machines it
60 only introduces lag, so choose carefully. The default is 3, which is 60 only introduces lag, so choose carefully. The default is 3, which is
61 reasonable. You can set it between 2 and 5. 61 reasonable. You can set it between 2 and 5.
62 62
@@ -209,7 +209,7 @@ trace
209 209
210 128 0x80 PWCX debugging Off 210 128 0x80 PWCX debugging Off
211 211
212 For example, to trace the open() & read() fuctions, sum 8 + 4 = 12, 212 For example, to trace the open() & read() functions, sum 8 + 4 = 12,
213 so you would supply trace=12 during insmod or modprobe. If 213 so you would supply trace=12 during insmod or modprobe. If
214 you want to turn the initialization and probing tracing off, set trace=0. 214 you want to turn the initialization and probing tracing off, set trace=0.
215 The default value for trace is 35 (0x23). 215 The default value for trace is 35 (0x23).
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 876fd2768242..982b115193f8 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -28,7 +28,7 @@
28 * 28 *
29 * Portions of this code were also copied from usbvideo.c 29 * Portions of this code were also copied from usbvideo.c
30 * 30 *
31 * Special thanks to the the whole team at Sourceforge for help making 31 * Special thanks to the whole team at Sourceforge for help making
32 * this driver become a reality. Notably: 32 * this driver become a reality. Notably:
33 * Andy Armstrong who reverse engineered the color encoding and 33 * Andy Armstrong who reverse engineered the color encoding and
34 * Pavel Machek and Chris Cheney who worked on reverse engineering the 34 * Pavel Machek and Chris Cheney who worked on reverse engineering the
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index d6b4c607453b..ddc7ae029dd3 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -571,7 +571,7 @@ mpi_fc.h
  *  11-02-00  01.01.01  Original release for post 1.0 work
  *  12-04-00  01.01.02  Added messages for Common Transport Send and
  *                      Primitive Send.
- *  01-09-01  01.01.03  Modifed some of the new flags to have an MPI prefix
+ *  01-09-01  01.01.03  Modified some of the new flags to have an MPI prefix
  *                      and modified the FcPrimitiveSend flags.
  *  01-25-01  01.01.04  Move InitiatorIndex in LinkServiceRsp reply to a larger
  *                      field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 97471af4309c..5021d1a2a1d4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -3585,7 +3585,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
  *	index = chain_idx
  *
  *	Calculate the number of chain buffers needed(plus 1) per I/O
- *	then multiply the the maximum number of simultaneous cmds
+ *	then multiply the maximum number of simultaneous cmds
  *
  *	num_sge = num sge in request frame + last chain buffer
  *	scale = num sge per chain buffer if no chain element
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b6c16704aaab..7385acfa1dd9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -501,9 +501,9 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
 #ifdef CONFIG_MMC_DEBUG
 	unsigned long flags;
-	spin_lock_irqsave(host->lock, flags);
+	spin_lock_irqsave(&host->lock, flags);
 	BUG_ON(host->removed);
-	spin_unlock_irqrestore(host->lock, flags);
+	spin_unlock_irqrestore(&host->lock, flags);
 #endif
 
 	mmc_schedule_delayed_work(&host->detect, delay);
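
The hunk above is a correctness fix, not a cleanup: lock is a spinlock_t embedded in the host structure rather than a pointer, so the locking macros need its address. A minimal sketch of the pattern, with a made-up example_host standing in for the real mmc_host layout:

	#include <linux/spinlock.h>

	struct example_host {
		spinlock_t lock;	/* embedded, not a spinlock_t * */
		int removed;
	};

	static void check_removed(struct example_host *host)
	{
		unsigned long flags;

		/* spin_lock_irqsave() takes a spinlock_t *, hence the & */
		spin_lock_irqsave(&host->lock, flags);
		BUG_ON(host->removed);
		spin_unlock_irqrestore(&host->lock, flags);
	}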
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index d28e0fc85e12..479d32b57a1e 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,4 @@
 # drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.18 2005/11/07 11:14:22 gleixner Exp $
 
 menu "RAM/ROM/Flash chip drivers"
 	depends on MTD!=n
@@ -231,45 +230,6 @@ config MTD_ABSENT
 	  the system regardless of media presence. Device nodes created
 	  with this driver will return -ENODEV upon access.
 
-config MTD_OBSOLETE_CHIPS
-	bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
-	help
-	  This option does not enable any code directly, but will allow you to
-	  select some other chip drivers which are now considered obsolete,
-	  because the generic CONFIG_JEDECPROBE code above should now detect
-	  the chips which are supported by these drivers, and allow the generic
-	  CFI-compatible drivers to drive the chips. Say 'N' here unless you have
-	  already tried the CONFIG_JEDECPROBE method and reported its failure
-	  to the MTD mailing list at <linux-mtd@lists.infradead.org>
-
-config MTD_AMDSTD
-	tristate "AMD compatible flash chip support (non-CFI)"
-	depends on MTD_OBSOLETE_CHIPS && BROKEN
-	help
-	  This option enables support for flash chips using AMD-compatible
-	  commands, including some which are not CFI-compatible and hence
-	  cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
-
-	  It also works on AMD compatible chips that do conform to CFI.
-
-config MTD_SHARP
-	tristate "pre-CFI Sharp chip support"
-	depends on MTD_OBSOLETE_CHIPS
-	help
-	  This option enables support for flash chips using Sharp-compatible
-	  commands, including some which are not CFI-compatible and hence
-	  cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
-
-config MTD_JEDEC
-	tristate "JEDEC device support"
-	depends on MTD_OBSOLETE_CHIPS && BROKEN
-	help
-	  Enable older JEDEC flash interface devices for self
-	  programming flash. It is commonly used in older AMD chips. It is
-	  only called JEDEC because the JEDEC association
-	  <http://www.jedec.org/> distributes the identification codes for the
-	  chips.
-
 config MTD_XIP
 	bool "XIP aware MTD support"
 	depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 75bc1c2a0f43..36582412ccda 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,19 +1,15 @@
 #
 # linux/drivers/chips/Makefile
 #
-# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
 
 obj-$(CONFIG_MTD)		+= chipreg.o
-obj-$(CONFIG_MTD_AMDSTD)	+= amd_flash.o
 obj-$(CONFIG_MTD_CFI)		+= cfi_probe.o
 obj-$(CONFIG_MTD_CFI_UTIL)	+= cfi_util.o
 obj-$(CONFIG_MTD_CFI_STAA)	+= cfi_cmdset_0020.o
 obj-$(CONFIG_MTD_CFI_AMDSTD)	+= cfi_cmdset_0002.o
 obj-$(CONFIG_MTD_CFI_INTELEXT)	+= cfi_cmdset_0001.o
 obj-$(CONFIG_MTD_GEN_PROBE)	+= gen_probe.o
-obj-$(CONFIG_MTD_JEDEC)		+= jedec.o
 obj-$(CONFIG_MTD_JEDECPROBE)	+= jedec_probe.o
 obj-$(CONFIG_MTD_RAM)		+= map_ram.o
 obj-$(CONFIG_MTD_ROM)		+= map_rom.o
-obj-$(CONFIG_MTD_SHARP)		+= sharp.o
 obj-$(CONFIG_MTD_ABSENT)	+= map_absent.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
deleted file mode 100644
index e7999f15d85a..000000000000
--- a/drivers/mtd/chips/amd_flash.c
+++ /dev/null
@@ -1,1396 +0,0 @@
1/*
2 * MTD map driver for AMD compatible flash chips (non-CFI)
3 *
4 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5 *
6 * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
7 *
8 * Copyright (c) 2001 Axis Communications AB
9 *
10 * This file is under GPL.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/flashchip.h>
26
27/* There's no limit. It exists only to avoid realloc. */
28#define MAX_AMD_CHIPS 8
29
30#define DEVICE_TYPE_X8 (8 / 8)
31#define DEVICE_TYPE_X16 (16 / 8)
32#define DEVICE_TYPE_X32 (32 / 8)
33
34/* Addresses */
35#define ADDR_MANUFACTURER 0x0000
36#define ADDR_DEVICE_ID 0x0001
37#define ADDR_SECTOR_LOCK 0x0002
38#define ADDR_HANDSHAKE 0x0003
39#define ADDR_UNLOCK_1 0x0555
40#define ADDR_UNLOCK_2 0x02AA
41
42/* Commands */
43#define CMD_UNLOCK_DATA_1 0x00AA
44#define CMD_UNLOCK_DATA_2 0x0055
45#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
46#define CMD_UNLOCK_BYPASS_MODE 0x0020
47#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
48#define CMD_RESET_DATA 0x00F0
49#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
50#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
51
52#define CMD_UNLOCK_SECTOR 0x0060
53
54/* Manufacturers */
55#define MANUFACTURER_AMD 0x0001
56#define MANUFACTURER_ATMEL 0x001F
57#define MANUFACTURER_FUJITSU 0x0004
58#define MANUFACTURER_ST 0x0020
59#define MANUFACTURER_SST 0x00BF
60#define MANUFACTURER_TOSHIBA 0x0098
61
62/* AMD */
63#define AM29F800BB 0x2258
64#define AM29F800BT 0x22D6
65#define AM29LV800BB 0x225B
66#define AM29LV800BT 0x22DA
67#define AM29LV160DT 0x22C4
68#define AM29LV160DB 0x2249
69#define AM29BDS323D 0x22D1
70
71/* Atmel */
72#define AT49xV16x 0x00C0
73#define AT49xV16xT 0x00C2
74
75/* Fujitsu */
76#define MBM29LV160TE 0x22C4
77#define MBM29LV160BE 0x2249
78#define MBM29LV800BB 0x225B
79
80/* ST - www.st.com */
81#define M29W800T 0x00D7
82#define M29W160DT 0x22C4
83#define M29W160DB 0x2249
84
85/* SST */
86#define SST39LF800 0x2781
87#define SST39LF160 0x2782
88
89/* Toshiba */
90#define TC58FVT160 0x00C2
91#define TC58FVB160 0x0043
92
93#define D6_MASK 0x40
94
95struct amd_flash_private {
96 int device_type;
97 int interleave;
98 int numchips;
99 unsigned long chipshift;
100 struct flchip chips[0];
101};
102
103struct amd_flash_info {
104 const __u16 mfr_id;
105 const __u16 dev_id;
106 const char *name;
107 const u_long size;
108 const int numeraseregions;
109 const struct mtd_erase_region_info regions[4];
110};
111
112
113
114static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
115 u_char *);
116static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
117 const u_char *);
118static int amd_flash_erase(struct mtd_info *, struct erase_info *);
119static void amd_flash_sync(struct mtd_info *);
120static int amd_flash_suspend(struct mtd_info *);
121static void amd_flash_resume(struct mtd_info *);
122static void amd_flash_destroy(struct mtd_info *);
123static struct mtd_info *amd_flash_probe(struct map_info *map);
124
125
126static struct mtd_chip_driver amd_flash_chipdrv = {
127 .probe = amd_flash_probe,
128 .destroy = amd_flash_destroy,
129 .name = "amd_flash",
130 .module = THIS_MODULE
131};
132
133static inline __u32 wide_read(struct map_info *map, __u32 addr)
134{
135 if (map->buswidth == 1) {
136 return map_read8(map, addr);
137 } else if (map->buswidth == 2) {
138 return map_read16(map, addr);
139 } else if (map->buswidth == 4) {
140 return map_read32(map, addr);
141 }
142
143 return 0;
144}
145
146static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
147{
148 if (map->buswidth == 1) {
149 map_write8(map, val, addr);
150 } else if (map->buswidth == 2) {
151 map_write16(map, val, addr);
152 } else if (map->buswidth == 4) {
153 map_write32(map, val, addr);
154 }
155}
156
157static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
158{
159 const struct amd_flash_private *private = map->fldrv_priv;
160 if ((private->interleave == 2) &&
161 (private->device_type == DEVICE_TYPE_X16)) {
162 cmd |= (cmd << 16);
163 }
164
165 return cmd;
166}
167
168static inline void send_unlock(struct map_info *map, unsigned long base)
169{
170 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
171 base + (map->buswidth * ADDR_UNLOCK_1));
172 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
173 base + (map->buswidth * ADDR_UNLOCK_2));
174}
175
176static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
177{
178 send_unlock(map, base);
179 wide_write(map, make_cmd(map, cmd),
180 base + (map->buswidth * ADDR_UNLOCK_1));
181}
182
183static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
184 __u32 cmd, unsigned long addr)
185{
186 send_unlock(map, base);
187 wide_write(map, make_cmd(map, cmd), addr);
188}
189
190static inline int flash_is_busy(struct map_info *map, unsigned long addr,
191 int interleave)
192{
193
194 if ((interleave == 2) && (map->buswidth == 4)) {
195 __u32 read1, read2;
196
197 read1 = wide_read(map, addr);
198 read2 = wide_read(map, addr);
199
200 return (((read1 >> 16) & D6_MASK) !=
201 ((read2 >> 16) & D6_MASK)) ||
202 (((read1 & 0xffff) & D6_MASK) !=
203 ((read2 & 0xffff) & D6_MASK));
204 }
205
206 return ((wide_read(map, addr) & D6_MASK) !=
207 (wide_read(map, addr) & D6_MASK));
208}
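
flash_is_busy() implements the AMD-style DQ6 toggle-bit status poll: while a program or erase operation is running, bit 6 of the status read (D6_MASK, 0x40) flips on every read and only settles once the operation completes; the interleaved case above checks both 16-bit halves independently. A reduced single-chip sketch of the same idea (hypothetical helper name, 8-bit reads assumed):

	/* Nonzero while an AMD-style operation is in progress: two
	 * back-to-back reads disagree in DQ6 (0x40) only while busy. */
	static int dq6_toggling(struct map_info *map, unsigned long addr)
	{
		__u8 a = map_read8(map, addr);
		__u8 b = map_read8(map, addr);

		return (a & 0x40) != (b & 0x40);
	}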
209
210static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
211 int unlock)
212{
213 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
214 int SLA = unlock ?
215 (sect_addr | (0x40 * map->buswidth)) :
216 (sect_addr & ~(0x40 * map->buswidth)) ;
217
218 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
219
220 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
221 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
222 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
223 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
224}
225
226static inline int is_sector_locked(struct map_info *map,
227 unsigned long sect_addr)
228{
229 int status;
230
231 wide_write(map, CMD_RESET_DATA, 0);
232 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
233
234 /* status is 0x0000 for unlocked and 0x0001 for locked */
235 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
236 wide_write(map, CMD_RESET_DATA, 0);
237 return status;
238}
239
240static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
241 int is_unlock)
242{
243 struct map_info *map;
244 struct mtd_erase_region_info *merip;
245 int eraseoffset, erasesize, eraseblocks;
246 int i;
247 int retval = 0;
248 int lock_status;
249
250 map = mtd->priv;
251
252 	/* Walk the whole chip sector by sector and check whether each
253 	   sector overlaps the given interval */
254 for(i = 0; i < mtd->numeraseregions; i++) {
255 merip = &mtd->eraseregions[i];
256
257 eraseoffset = merip->offset;
258 erasesize = merip->erasesize;
259 eraseblocks = merip->numblocks;
260
261 if (ofs > eraseoffset + erasesize)
262 continue;
263
264 while (eraseblocks > 0) {
265 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
266 unlock_sector(map, eraseoffset, is_unlock);
267
268 lock_status = is_sector_locked(map, eraseoffset);
269
270 if (is_unlock && lock_status) {
271 printk("Cannot unlock sector at address %x length %xx\n",
272 eraseoffset, merip->erasesize);
273 retval = -1;
274 } else if (!is_unlock && !lock_status) {
275 printk("Cannot lock sector at address %x length %x\n",
276 eraseoffset, merip->erasesize);
277 retval = -1;
278 }
279 }
280 eraseoffset += erasesize;
281 eraseblocks --;
282 }
283 }
284 return retval;
285}
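
The per-sector test in amd_flash_do_unlock() is the usual half-open interval overlap check: the request [ofs, ofs+len) and the sector [eraseoffset, eraseoffset+erasesize) intersect exactly when each one starts before the other ends. Pulled out as a sketch (hypothetical helper, same logic as above):

	static inline int ranges_overlap(loff_t ofs, size_t len,
					 int start, int size)
	{
		/* e.g. ofs=0x8000, len=0x10000 overlaps the 64 KiB sectors
		 * at 0x0000 and 0x10000, but not the one at 0x20000 */
		return ofs < start + size && ofs + len > start;
	}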
286
287static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
288{
289 return amd_flash_do_unlock(mtd, ofs, len, 1);
290}
291
292static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
293{
294 return amd_flash_do_unlock(mtd, ofs, len, 0);
295}
296
297
298/*
299 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
300 * matching table entry (-1 if not found or alias for already found chip).
301 */
302static int probe_new_chip(struct mtd_info *mtd, __u32 base,
303 struct flchip *chips,
304 struct amd_flash_private *private,
305 const struct amd_flash_info *table, int table_size)
306{
307 __u32 mfr_id;
308 __u32 dev_id;
309 struct map_info *map = mtd->priv;
310 struct amd_flash_private temp;
311 int i;
312
313 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
314 temp.interleave = 2;
315 map->fldrv_priv = &temp;
316
317 /* Enter autoselect mode. */
318 send_cmd(map, base, CMD_RESET_DATA);
319 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
320
321 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
322 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
323
324 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
325 ((dev_id >> 16) == (dev_id & 0xffff))) {
326 mfr_id &= 0xffff;
327 dev_id &= 0xffff;
328 } else {
329 temp.interleave = 1;
330 }
331
332 for (i = 0; i < table_size; i++) {
333 if ((mfr_id == table[i].mfr_id) &&
334 (dev_id == table[i].dev_id)) {
335 if (chips) {
336 int j;
337
338 /* Is this an alias for an already found chip?
339 * In that case that chip should be in
340 * autoselect mode now.
341 */
342 for (j = 0; j < private->numchips; j++) {
343 __u32 mfr_id_other;
344 __u32 dev_id_other;
345
346 mfr_id_other =
347 wide_read(map, chips[j].start +
348 (map->buswidth *
349 ADDR_MANUFACTURER
350 ));
351 dev_id_other =
352 wide_read(map, chips[j].start +
353 (map->buswidth *
354 ADDR_DEVICE_ID));
355 if (temp.interleave == 2) {
356 mfr_id_other &= 0xffff;
357 dev_id_other &= 0xffff;
358 }
359 if ((mfr_id_other == mfr_id) &&
360 (dev_id_other == dev_id)) {
361
362 /* Exit autoselect mode. */
363 send_cmd(map, base,
364 CMD_RESET_DATA);
365
366 return -1;
367 }
368 }
369
370 if (private->numchips == MAX_AMD_CHIPS) {
371 printk(KERN_WARNING
372 "%s: Too many flash chips "
373 "detected. Increase "
374 "MAX_AMD_CHIPS from %d.\n",
375 map->name, MAX_AMD_CHIPS);
376
377 return -1;
378 }
379
380 chips[private->numchips].start = base;
381 chips[private->numchips].state = FL_READY;
382 chips[private->numchips].mutex =
383 &chips[private->numchips]._spinlock;
384 private->numchips++;
385 }
386
387 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
388 temp.interleave, (table[i].size)/(1024*1024),
389 table[i].name, base);
390
391 mtd->size += table[i].size * temp.interleave;
392 mtd->numeraseregions += table[i].numeraseregions;
393
394 break;
395 }
396 }
397
398 /* Exit autoselect mode. */
399 send_cmd(map, base, CMD_RESET_DATA);
400
401 if (i == table_size) {
402 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
403 "mfr id 0x%x, dev id 0x%x\n", map->name,
404 base, mfr_id, dev_id);
405 map->fldrv_priv = NULL;
406
407 return -1;
408 }
409
410 private->device_type = temp.device_type;
411 private->interleave = temp.interleave;
412
413 return i;
414}
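
The interleave detection in probe_new_chip() leans on the bus layout: when two x16 chips are interleaved on a 32-bit bus, both answer the autoselect query, so the 16-bit manufacturer and device IDs come back duplicated in the high and low halves of each 32-bit read. Condensed into a sketch (hypothetical helper, same test as above):

	static int looks_interleaved_x16(__u32 mfr_id, __u32 dev_id)
	{
		/* a single chip would not mirror its ID in both halves */
		return ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
		       ((dev_id >> 16) == (dev_id & 0xffff));
	}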
415
416
417
418static struct mtd_info *amd_flash_probe(struct map_info *map)
419{
420 static const struct amd_flash_info table[] = {
421 {
422 .mfr_id = MANUFACTURER_AMD,
423 .dev_id = AM29LV160DT,
424 .name = "AMD AM29LV160DT",
425 .size = 0x00200000,
426 .numeraseregions = 4,
427 .regions = {
428 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
429 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
430 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
431 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
432 }
433 }, {
434 .mfr_id = MANUFACTURER_AMD,
435 .dev_id = AM29LV160DB,
436 .name = "AMD AM29LV160DB",
437 .size = 0x00200000,
438 .numeraseregions = 4,
439 .regions = {
440 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
441 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
442 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
443 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
444 }
445 }, {
446 .mfr_id = MANUFACTURER_TOSHIBA,
447 .dev_id = TC58FVT160,
448 .name = "Toshiba TC58FVT160",
449 .size = 0x00200000,
450 .numeraseregions = 4,
451 .regions = {
452 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
453 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
454 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
455 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
456 }
457 }, {
458 .mfr_id = MANUFACTURER_FUJITSU,
459 .dev_id = MBM29LV160TE,
460 .name = "Fujitsu MBM29LV160TE",
461 .size = 0x00200000,
462 .numeraseregions = 4,
463 .regions = {
464 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
465 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
466 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
467 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
468 }
469 }, {
470 .mfr_id = MANUFACTURER_TOSHIBA,
471 .dev_id = TC58FVB160,
472 .name = "Toshiba TC58FVB160",
473 .size = 0x00200000,
474 .numeraseregions = 4,
475 .regions = {
476 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
477 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
478 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
479 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
480 }
481 }, {
482 .mfr_id = MANUFACTURER_FUJITSU,
483 .dev_id = MBM29LV160BE,
484 .name = "Fujitsu MBM29LV160BE",
485 .size = 0x00200000,
486 .numeraseregions = 4,
487 .regions = {
488 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
489 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
490 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
491 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
492 }
493 }, {
494 .mfr_id = MANUFACTURER_AMD,
495 .dev_id = AM29LV800BB,
496 .name = "AMD AM29LV800BB",
497 .size = 0x00100000,
498 .numeraseregions = 4,
499 .regions = {
500 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
501 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
502 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
503 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
504 }
505 }, {
506 .mfr_id = MANUFACTURER_AMD,
507 .dev_id = AM29F800BB,
508 .name = "AMD AM29F800BB",
509 .size = 0x00100000,
510 .numeraseregions = 4,
511 .regions = {
512 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
513 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
514 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
515 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
516 }
517 }, {
518 .mfr_id = MANUFACTURER_AMD,
519 .dev_id = AM29LV800BT,
520 .name = "AMD AM29LV800BT",
521 .size = 0x00100000,
522 .numeraseregions = 4,
523 .regions = {
524 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
525 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
526 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
527 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
528 }
529 }, {
530 .mfr_id = MANUFACTURER_AMD,
531 .dev_id = AM29F800BT,
532 .name = "AMD AM29F800BT",
533 .size = 0x00100000,
534 .numeraseregions = 4,
535 .regions = {
536 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
537 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
538 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
539 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
540 }
541 }, {
542 .mfr_id = MANUFACTURER_AMD,
543 .dev_id = AM29LV800BB,
544 .name = "AMD AM29LV800BB",
545 .size = 0x00100000,
546 .numeraseregions = 4,
547 .regions = {
548 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
549 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
550 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
551 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
552 }
553 }, {
554 .mfr_id = MANUFACTURER_FUJITSU,
555 .dev_id = MBM29LV800BB,
556 .name = "Fujitsu MBM29LV800BB",
557 .size = 0x00100000,
558 .numeraseregions = 4,
559 .regions = {
560 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
561 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
562 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
563 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
564 }
565 }, {
566 .mfr_id = MANUFACTURER_ST,
567 .dev_id = M29W800T,
568 .name = "ST M29W800T",
569 .size = 0x00100000,
570 .numeraseregions = 4,
571 .regions = {
572 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
573 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
574 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
575 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
576 }
577 }, {
578 .mfr_id = MANUFACTURER_ST,
579 .dev_id = M29W160DT,
580 .name = "ST M29W160DT",
581 .size = 0x00200000,
582 .numeraseregions = 4,
583 .regions = {
584 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
585 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
586 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
587 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
588 }
589 }, {
590 .mfr_id = MANUFACTURER_ST,
591 .dev_id = M29W160DB,
592 .name = "ST M29W160DB",
593 .size = 0x00200000,
594 .numeraseregions = 4,
595 .regions = {
596 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
597 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
598 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
599 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
600 }
601 }, {
602 .mfr_id = MANUFACTURER_AMD,
603 .dev_id = AM29BDS323D,
604 .name = "AMD AM29BDS323D",
605 .size = 0x00400000,
606 .numeraseregions = 3,
607 .regions = {
608 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
609 { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
610 { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
611 }
612 }, {
613 .mfr_id = MANUFACTURER_ATMEL,
614 .dev_id = AT49xV16x,
615 .name = "Atmel AT49xV16x",
616 .size = 0x00200000,
617 .numeraseregions = 2,
618 .regions = {
619 { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
620 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
621 }
622 }, {
623 .mfr_id = MANUFACTURER_ATMEL,
624 .dev_id = AT49xV16xT,
625 .name = "Atmel AT49xV16xT",
626 .size = 0x00200000,
627 .numeraseregions = 2,
628 .regions = {
629 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
630 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
631 }
632 }
633 };
634
635 struct mtd_info *mtd;
636 struct flchip chips[MAX_AMD_CHIPS];
637 int table_pos[MAX_AMD_CHIPS];
638 struct amd_flash_private temp;
639 struct amd_flash_private *private;
640 u_long size;
641 unsigned long base;
642 int i;
643 int reg_idx;
644 int offset;
645
646 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
647 if (!mtd) {
648 printk(KERN_WARNING
649 "%s: kmalloc failed for info structure\n", map->name);
650 return NULL;
651 }
652 mtd->priv = map;
653
654 memset(&temp, 0, sizeof(temp));
655
656 printk("%s: Probing for AMD compatible flash...\n", map->name);
657
658 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
659 ARRAY_SIZE(table)))
660 == -1) {
661 printk(KERN_WARNING
662 "%s: Found no AMD compatible device at location zero\n",
663 map->name);
664 kfree(mtd);
665
666 return NULL;
667 }
668
669 chips[0].start = 0;
670 chips[0].state = FL_READY;
671 chips[0].mutex = &chips[0]._spinlock;
672 temp.numchips = 1;
673 for (size = mtd->size; size > 1; size >>= 1) {
674 temp.chipshift++;
675 }
676 switch (temp.interleave) {
677 case 2:
678 temp.chipshift += 1;
679 break;
680 case 4:
681 temp.chipshift += 2;
682 break;
683 }
684
685 /* Find out if there are any more chips in the map. */
686 for (base = (1 << temp.chipshift);
687 base < map->size;
688 base += (1 << temp.chipshift)) {
689 int numchips = temp.numchips;
690 table_pos[numchips] = probe_new_chip(mtd, base, chips,
691 &temp, table, ARRAY_SIZE(table));
692 }
693
694 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
695 mtd->numeraseregions, GFP_KERNEL);
696 if (!mtd->eraseregions) {
697 printk(KERN_WARNING "%s: Failed to allocate "
698 "memory for MTD erase region info\n", map->name);
699 kfree(mtd);
700 map->fldrv_priv = NULL;
701 return NULL;
702 }
703
704 reg_idx = 0;
705 offset = 0;
706 for (i = 0; i < temp.numchips; i++) {
707 int dev_size;
708 int j;
709
710 dev_size = 0;
711 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
712 mtd->eraseregions[reg_idx].offset = offset +
713 (table[table_pos[i]].regions[j].offset *
714 temp.interleave);
715 mtd->eraseregions[reg_idx].erasesize =
716 table[table_pos[i]].regions[j].erasesize *
717 temp.interleave;
718 mtd->eraseregions[reg_idx].numblocks =
719 table[table_pos[i]].regions[j].numblocks;
720 if (mtd->erasesize <
721 mtd->eraseregions[reg_idx].erasesize) {
722 mtd->erasesize =
723 mtd->eraseregions[reg_idx].erasesize;
724 }
725 dev_size += mtd->eraseregions[reg_idx].erasesize *
726 mtd->eraseregions[reg_idx].numblocks;
727 reg_idx++;
728 }
729 offset += dev_size;
730 }
731 mtd->type = MTD_NORFLASH;
732 mtd->writesize = 1;
733 mtd->flags = MTD_CAP_NORFLASH;
734 mtd->name = map->name;
735 mtd->erase = amd_flash_erase;
736 mtd->read = amd_flash_read;
737 mtd->write = amd_flash_write;
738 mtd->sync = amd_flash_sync;
739 mtd->suspend = amd_flash_suspend;
740 mtd->resume = amd_flash_resume;
741 mtd->lock = amd_flash_lock;
742 mtd->unlock = amd_flash_unlock;
743
744 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
745 temp.numchips), GFP_KERNEL);
746 if (!private) {
747 printk(KERN_WARNING
748 "%s: kmalloc failed for private structure\n", map->name);
749 kfree(mtd);
750 map->fldrv_priv = NULL;
751 return NULL;
752 }
753 memcpy(private, &temp, sizeof(temp));
754 memcpy(private->chips, chips,
755 sizeof(struct flchip) * private->numchips);
756 for (i = 0; i < private->numchips; i++) {
757 init_waitqueue_head(&private->chips[i].wq);
758 spin_lock_init(&private->chips[i]._spinlock);
759 }
760
761 map->fldrv_priv = private;
762
763 map->fldrv = &amd_flash_chipdrv;
764
765 __module_get(THIS_MODULE);
766 return mtd;
767}
768
769
770
771static inline int read_one_chip(struct map_info *map, struct flchip *chip,
772 loff_t adr, size_t len, u_char *buf)
773{
774 DECLARE_WAITQUEUE(wait, current);
775 unsigned long timeo = jiffies + HZ;
776
777retry:
778 spin_lock_bh(chip->mutex);
779
780 if (chip->state != FL_READY){
781 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
782 map->name, chip->state);
783 set_current_state(TASK_UNINTERRUPTIBLE);
784 add_wait_queue(&chip->wq, &wait);
785
786 spin_unlock_bh(chip->mutex);
787
788 schedule();
789 remove_wait_queue(&chip->wq, &wait);
790
791 if(signal_pending(current)) {
792 return -EINTR;
793 }
794
795 timeo = jiffies + HZ;
796
797 goto retry;
798 }
799
800 adr += chip->start;
801
802 chip->state = FL_READY;
803
804 map_copy_from(map, buf, adr, len);
805
806 wake_up(&chip->wq);
807 spin_unlock_bh(chip->mutex);
808
809 return 0;
810}
811
812
813
814static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
815 size_t *retlen, u_char *buf)
816{
817 struct map_info *map = mtd->priv;
818 struct amd_flash_private *private = map->fldrv_priv;
819 unsigned long ofs;
820 int chipnum;
821 int ret = 0;
822
823 if ((from + len) > mtd->size) {
824 printk(KERN_WARNING "%s: read request past end of device "
825 "(0x%lx)\n", map->name, (unsigned long)from + len);
826
827 return -EINVAL;
828 }
829
830 /* Offset within the first chip that the first read should start. */
831 chipnum = (from >> private->chipshift);
832 ofs = from - (chipnum << private->chipshift);
833
834 *retlen = 0;
835
836 while (len) {
837 unsigned long this_len;
838
839 if (chipnum >= private->numchips) {
840 break;
841 }
842
843 if ((len + ofs - 1) >> private->chipshift) {
844 this_len = (1 << private->chipshift) - ofs;
845 } else {
846 this_len = len;
847 }
848
849 ret = read_one_chip(map, &private->chips[chipnum], ofs,
850 this_len, buf);
851 if (ret) {
852 break;
853 }
854
855 *retlen += this_len;
856 len -= this_len;
857 buf += this_len;
858
859 ofs = 0;
860 chipnum++;
861 }
862
863 return ret;
864}
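
The chipnum/ofs split at the top of amd_flash_read() is the counterpart of the chipshift computed at probe time: the shift isolates which chip position an address falls in, and subtracting the shifted chip number back out leaves the in-chip offset. A worked example with assumed numbers (not additional driver code):

	/* With chipshift = 21 (a 2 MiB span per chip position):
	 *   from    = 0x250000
	 *   chipnum = 0x250000 >> 21        = 1
	 *   ofs     = 0x250000 - (1 << 21)  = 0x050000
	 * so the read is served from chip 1 at offset 0x50000. */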
865
866
867
868static int write_one_word(struct map_info *map, struct flchip *chip,
869 unsigned long adr, __u32 datum)
870{
871 unsigned long timeo = jiffies + HZ;
872 struct amd_flash_private *private = map->fldrv_priv;
873 DECLARE_WAITQUEUE(wait, current);
874 int ret = 0;
875 int times_left;
876
877retry:
878 spin_lock_bh(chip->mutex);
879
880 if (chip->state != FL_READY){
881 printk("%s: waiting for chip to write, state = %d\n",
882 map->name, chip->state);
883 set_current_state(TASK_UNINTERRUPTIBLE);
884 add_wait_queue(&chip->wq, &wait);
885
886 spin_unlock_bh(chip->mutex);
887
888 schedule();
889 remove_wait_queue(&chip->wq, &wait);
890 printk(KERN_INFO "%s: woke up to write\n", map->name);
891 if(signal_pending(current))
892 return -EINTR;
893
894 timeo = jiffies + HZ;
895
896 goto retry;
897 }
898
899 chip->state = FL_WRITING;
900
901 adr += chip->start;
902 ENABLE_VPP(map);
903 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
904 wide_write(map, datum, adr);
905
906 times_left = 500000;
907 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
908 if (need_resched()) {
909 spin_unlock_bh(chip->mutex);
910 schedule();
911 spin_lock_bh(chip->mutex);
912 }
913 }
914
915 if (!times_left) {
916 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
917 map->name, adr);
918 ret = -EIO;
919 } else {
920 __u32 verify;
921 if ((verify = wide_read(map, adr)) != datum) {
922 printk(KERN_WARNING "%s: write to 0x%lx failed. "
923 "datum = %x, verify = %x\n",
924 map->name, adr, datum, verify);
925 ret = -EIO;
926 }
927 }
928
929 DISABLE_VPP(map);
930 chip->state = FL_READY;
931 wake_up(&chip->wq);
932 spin_unlock_bh(chip->mutex);
933
934 return ret;
935}
936
937
938
939static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
940 size_t *retlen, const u_char *buf)
941{
942 struct map_info *map = mtd->priv;
943 struct amd_flash_private *private = map->fldrv_priv;
944 int ret = 0;
945 int chipnum;
946 unsigned long ofs;
947 unsigned long chipstart;
948
949 *retlen = 0;
950 if (!len) {
951 return 0;
952 }
953
954 chipnum = to >> private->chipshift;
955 ofs = to - (chipnum << private->chipshift);
956 chipstart = private->chips[chipnum].start;
957
958 /* If it's not bus-aligned, do the first byte write. */
959 if (ofs & (map->buswidth - 1)) {
960 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
961 int i = ofs - bus_ofs;
962 int n = 0;
963 u_char tmp_buf[4];
964 __u32 datum;
965
966 map_copy_from(map, tmp_buf,
967 bus_ofs + private->chips[chipnum].start,
968 map->buswidth);
969 while (len && i < map->buswidth)
970 tmp_buf[i++] = buf[n++], len--;
971
972 if (map->buswidth == 2) {
973 datum = *(__u16*)tmp_buf;
974 } else if (map->buswidth == 4) {
975 datum = *(__u32*)tmp_buf;
976 } else {
977 return -EINVAL; /* should never happen, but be safe */
978 }
979
980 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
981 datum);
982 if (ret) {
983 return ret;
984 }
985
986 ofs += n;
987 buf += n;
988 (*retlen) += n;
989
990 if (ofs >> private->chipshift) {
991 chipnum++;
992 ofs = 0;
993 if (chipnum == private->numchips) {
994 return 0;
995 }
996 }
997 }
998
999 /* We are now aligned, write as much as possible. */
1000 while(len >= map->buswidth) {
1001 __u32 datum;
1002
1003 if (map->buswidth == 1) {
1004 datum = *(__u8*)buf;
1005 } else if (map->buswidth == 2) {
1006 datum = *(__u16*)buf;
1007 } else if (map->buswidth == 4) {
1008 datum = *(__u32*)buf;
1009 } else {
1010 return -EINVAL;
1011 }
1012
1013 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1014
1015 if (ret) {
1016 return ret;
1017 }
1018
1019 ofs += map->buswidth;
1020 buf += map->buswidth;
1021 (*retlen) += map->buswidth;
1022 len -= map->buswidth;
1023
1024 if (ofs >> private->chipshift) {
1025 chipnum++;
1026 ofs = 0;
1027 if (chipnum == private->numchips) {
1028 return 0;
1029 }
1030 chipstart = private->chips[chipnum].start;
1031 }
1032 }
1033
1034 if (len & (map->buswidth - 1)) {
1035 int i = 0, n = 0;
1036 u_char tmp_buf[2];
1037 __u32 datum;
1038
1039 map_copy_from(map, tmp_buf,
1040 ofs + private->chips[chipnum].start,
1041 map->buswidth);
1042 while (len--) {
1043 tmp_buf[i++] = buf[n++];
1044 }
1045
1046 if (map->buswidth == 2) {
1047 datum = *(__u16*)tmp_buf;
1048 } else if (map->buswidth == 4) {
1049 datum = *(__u32*)tmp_buf;
1050 } else {
1051 return -EINVAL; /* should never happen, but be safe */
1052 }
1053
1054 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1055
1056 if (ret) {
1057 return ret;
1058 }
1059
1060 (*retlen) += n;
1061 }
1062
1063 return 0;
1064}
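
For a misaligned head, amd_flash_write() above does a read-modify-write: it fetches the containing bus word, patches in the caller's bytes, and programs the merged word back (NOR programming can only clear bits, so the untouched bytes must be rewritten with their current contents). Stripped to its essentials for buswidth == 2 (hypothetical helper, error paths omitted):

	static int write_unaligned_head(struct map_info *map,
					struct flchip *chip,
					unsigned long ofs, const u_char *buf)
	{
		unsigned long bus_ofs = ofs & ~1UL;	/* containing word */
		u_char tmp[2];

		map_copy_from(map, tmp, bus_ofs + chip->start, 2);
		tmp[ofs - bus_ofs] = *buf;	/* merge the caller's byte */

		/* reprogram the whole word; NOR writes only clear bits */
		return write_one_word(map, chip, bus_ofs, *(__u16 *)tmp);
	}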
1065
1066
1067
1068static inline int erase_one_block(struct map_info *map, struct flchip *chip,
1069 unsigned long adr, u_long size)
1070{
1071 unsigned long timeo = jiffies + HZ;
1072 struct amd_flash_private *private = map->fldrv_priv;
1073 DECLARE_WAITQUEUE(wait, current);
1074
1075retry:
1076 spin_lock_bh(chip->mutex);
1077
1078 if (chip->state != FL_READY){
1079 set_current_state(TASK_UNINTERRUPTIBLE);
1080 add_wait_queue(&chip->wq, &wait);
1081
1082 spin_unlock_bh(chip->mutex);
1083
1084 schedule();
1085 remove_wait_queue(&chip->wq, &wait);
1086
1087 if (signal_pending(current)) {
1088 return -EINTR;
1089 }
1090
1091 timeo = jiffies + HZ;
1092
1093 goto retry;
1094 }
1095
1096 chip->state = FL_ERASING;
1097
1098 adr += chip->start;
1099 ENABLE_VPP(map);
1100 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1101 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1102
1103 timeo = jiffies + (HZ * 20);
1104
1105 spin_unlock_bh(chip->mutex);
1106 msleep(1000);
1107 spin_lock_bh(chip->mutex);
1108
1109 while (flash_is_busy(map, adr, private->interleave)) {
1110
1111 if (chip->state != FL_ERASING) {
1112 /* Someone's suspended the erase. Sleep */
1113 set_current_state(TASK_UNINTERRUPTIBLE);
1114 add_wait_queue(&chip->wq, &wait);
1115
1116 spin_unlock_bh(chip->mutex);
1117 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1118 map->name);
1119 schedule();
1120 remove_wait_queue(&chip->wq, &wait);
1121
1122 if (signal_pending(current)) {
1123 return -EINTR;
1124 }
1125
1126 timeo = jiffies + (HZ*2); /* FIXME */
1127 spin_lock_bh(chip->mutex);
1128 continue;
1129 }
1130
1131 /* OK Still waiting */
1132 if (time_after(jiffies, timeo)) {
1133 chip->state = FL_READY;
1134 spin_unlock_bh(chip->mutex);
1135 printk(KERN_WARNING "%s: waiting for erase to complete "
1136 "timed out.\n", map->name);
1137 DISABLE_VPP(map);
1138
1139 return -EIO;
1140 }
1141
1142 /* Latency issues. Drop the lock, wait a while and retry */
1143 spin_unlock_bh(chip->mutex);
1144
1145 if (need_resched())
1146 schedule();
1147 else
1148 udelay(1);
1149
1150 spin_lock_bh(chip->mutex);
1151 }
1152
1153 /* Verify every single word */
1154 {
1155 int address;
1156 int error = 0;
1157 __u8 verify;
1158
1159 for (address = adr; address < (adr + size); address++) {
1160 if ((verify = map_read8(map, address)) != 0xFF) {
1161 error = 1;
1162 break;
1163 }
1164 }
1165 if (error) {
1166 chip->state = FL_READY;
1167 spin_unlock_bh(chip->mutex);
1168 printk(KERN_WARNING
1169 "%s: verify error at 0x%x, size %ld.\n",
1170 map->name, address, size);
1171 DISABLE_VPP(map);
1172
1173 return -EIO;
1174 }
1175 }
1176
1177 DISABLE_VPP(map);
1178 chip->state = FL_READY;
1179 wake_up(&chip->wq);
1180 spin_unlock_bh(chip->mutex);
1181
1182 return 0;
1183}
1184
1185
1186
1187static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1188{
1189 struct map_info *map = mtd->priv;
1190 struct amd_flash_private *private = map->fldrv_priv;
1191 unsigned long adr, len;
1192 int chipnum;
1193 int ret = 0;
1194 int i;
1195 int first;
1196 struct mtd_erase_region_info *regions = mtd->eraseregions;
1197
1198 if (instr->addr > mtd->size) {
1199 return -EINVAL;
1200 }
1201
1202 if ((instr->len + instr->addr) > mtd->size) {
1203 return -EINVAL;
1204 }
1205
1206 /* Check that both start and end of the requested erase are
1207 * aligned with the erasesize at the appropriate addresses.
1208 */
1209
1210 i = 0;
1211
1212 /* Skip all erase regions which are ended before the start of
1213 the requested erase. Actually, to save on the calculations,
1214 we skip to the first erase region which starts after the
1215 start of the requested erase, and then go back one.
1216 */
1217
1218 while ((i < mtd->numeraseregions) &&
1219 (instr->addr >= regions[i].offset)) {
1220 i++;
1221 }
1222 i--;
1223
1224 /* OK, now i is pointing at the erase region in which this
1225 * erase request starts. Check the start of the requested
1226 * erase range is aligned with the erase size which is in
1227 * effect here.
1228 */
1229
1230 if (instr->addr & (regions[i].erasesize-1)) {
1231 return -EINVAL;
1232 }
1233
1234 /* Remember the erase region we start on. */
1235
1236 first = i;
1237
1238 /* Next, check that the end of the requested erase is aligned
1239 * with the erase region at that address.
1240 */
1241
1242 while ((i < mtd->numeraseregions) &&
1243 ((instr->addr + instr->len) >= regions[i].offset)) {
1244 i++;
1245 }
1246
1247 /* As before, drop back one to point at the region in which
1248 * the address actually falls.
1249 */
1250
1251 i--;
1252
1253 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
1254 return -EINVAL;
1255 }
1256
1257 chipnum = instr->addr >> private->chipshift;
1258 adr = instr->addr - (chipnum << private->chipshift);
1259 len = instr->len;
1260
1261 i = first;
1262
1263 while (len) {
1264 ret = erase_one_block(map, &private->chips[chipnum], adr,
1265 regions[i].erasesize);
1266
1267 if (ret) {
1268 return ret;
1269 }
1270
1271 adr += regions[i].erasesize;
1272 len -= regions[i].erasesize;
1273
1274 if ((adr % (1 << private->chipshift)) ==
1275 ((regions[i].offset + (regions[i].erasesize *
1276 regions[i].numblocks))
1277 % (1 << private->chipshift))) {
1278 i++;
1279 }
1280
1281 if (adr >> private->chipshift) {
1282 adr = 0;
1283 chipnum++;
1284 if (chipnum >= private->numchips) {
1285 break;
1286 }
1287 }
1288 }
1289
1290 instr->state = MTD_ERASE_DONE;
1291 mtd_erase_callback(instr);
1292
1293 return 0;
1294}
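
A worked example of the region scan in amd_flash_erase(), using the AM29LV160DT layout from the table above (numbers only, nothing new assumed):

	/* regions: 0x000000/64K x31, 0x1F0000/32K x1,
	 *          0x1F8000/8K  x2, 0x1FC000/16K x1
	 * instr->addr = 0x1F8000: the scan advances past regions 0..2
	 * (their offsets are all <= addr), stops at region 3 (0x1FC000),
	 * steps back to i == 2, and the 8 KiB (0x2000) alignment check
	 * passes since 0x1F8000 & 0x1FFF == 0. */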
1295
1296
1297
1298static void amd_flash_sync(struct mtd_info *mtd)
1299{
1300 struct map_info *map = mtd->priv;
1301 struct amd_flash_private *private = map->fldrv_priv;
1302 int i;
1303 struct flchip *chip;
1304 int ret = 0;
1305 DECLARE_WAITQUEUE(wait, current);
1306
1307 for (i = 0; !ret && (i < private->numchips); i++) {
1308 chip = &private->chips[i];
1309
1310 retry:
1311 spin_lock_bh(chip->mutex);
1312
1313 switch(chip->state) {
1314 case FL_READY:
1315 case FL_STATUS:
1316 case FL_CFI_QUERY:
1317 case FL_JEDEC_QUERY:
1318 chip->oldstate = chip->state;
1319 chip->state = FL_SYNCING;
1320 /* No need to wake_up() on this state change -
1321 * as the whole point is that nobody can do anything
1322 * with the chip now anyway.
1323 */
1324 case FL_SYNCING:
1325 spin_unlock_bh(chip->mutex);
1326 break;
1327
1328 default:
1329 /* Not an idle state */
1330 add_wait_queue(&chip->wq, &wait);
1331
1332 spin_unlock_bh(chip->mutex);
1333
1334 schedule();
1335
1336 remove_wait_queue(&chip->wq, &wait);
1337
1338 goto retry;
1339 }
1340 }
1341
1342 /* Unlock the chips again */
1343 for (i--; i >= 0; i--) {
1344 chip = &private->chips[i];
1345
1346 spin_lock_bh(chip->mutex);
1347
1348 if (chip->state == FL_SYNCING) {
1349 chip->state = chip->oldstate;
1350 wake_up(&chip->wq);
1351 }
1352 spin_unlock_bh(chip->mutex);
1353 }
1354}
1355
1356
1357
1358static int amd_flash_suspend(struct mtd_info *mtd)
1359{
1360printk("amd_flash_suspend(): not implemented!\n");
1361 return -EINVAL;
1362}
1363
1364
1365
1366static void amd_flash_resume(struct mtd_info *mtd)
1367{
1368printk("amd_flash_resume(): not implemented!\n");
1369}
1370
1371
1372
1373static void amd_flash_destroy(struct mtd_info *mtd)
1374{
1375 struct map_info *map = mtd->priv;
1376 struct amd_flash_private *private = map->fldrv_priv;
1377 kfree(private);
1378}
1379
1380int __init amd_flash_init(void)
1381{
1382 register_mtd_chip_driver(&amd_flash_chipdrv);
1383 return 0;
1384}
1385
1386void __exit amd_flash_exit(void)
1387{
1388 unregister_mtd_chip_driver(&amd_flash_chipdrv);
1389}
1390
1391module_init(amd_flash_init);
1392module_exit(amd_flash_exit);
1393
1394MODULE_LICENSE("GPL");
1395MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1396MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
deleted file mode 100644
index 14e57b2bf842..000000000000
--- a/drivers/mtd/chips/jedec.c
+++ /dev/null
@@ -1,935 +0,0 @@
1
2/* JEDEC Flash Interface.
3 * This is an older type of interface for self programming flash. It is
4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips.
7 *
8 * See the AMD flash databook for information on how to operate the interface.
9 *
10 * This code does not support anything wider than 8 bit flash chips, I am
11 * not going to guess how to send commands to them, plus I expect they will
12 * all speak CFI..
13 *
14 * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/mtd/jedec.h>
22#include <linux/mtd/map.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/compatmac.h>
25
26static struct mtd_info *jedec_probe(struct map_info *);
27static int jedec_probe8(struct map_info *map,unsigned long base,
28 struct jedec_private *priv);
29static int jedec_probe16(struct map_info *map,unsigned long base,
30 struct jedec_private *priv);
31static int jedec_probe32(struct map_info *map,unsigned long base,
32 struct jedec_private *priv);
33static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
34 unsigned long len);
35static int flash_erase(struct mtd_info *mtd, struct erase_info *instr);
36static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
37 size_t *retlen, const u_char *buf);
38
39static unsigned long my_bank_size;
40
41/* Listing of parts and sizes. We need this table to learn the sector
42 size of the chip and the total length */
43static const struct JEDECTable JEDEC_table[] = {
44 {
45 .jedec = 0x013D,
46 .name = "AMD Am29F017D",
47 .size = 2*1024*1024,
48 .sectorsize = 64*1024,
49 .capabilities = MTD_CAP_NORFLASH
50 },
51 {
52 .jedec = 0x01AD,
53 .name = "AMD Am29F016",
54 .size = 2*1024*1024,
55 .sectorsize = 64*1024,
56 .capabilities = MTD_CAP_NORFLASH
57 },
58 {
59 .jedec = 0x01D5,
60 .name = "AMD Am29F080",
61 .size = 1*1024*1024,
62 .sectorsize = 64*1024,
63 .capabilities = MTD_CAP_NORFLASH
64 },
65 {
66 .jedec = 0x01A4,
67 .name = "AMD Am29F040",
68 .size = 512*1024,
69 .sectorsize = 64*1024,
70 .capabilities = MTD_CAP_NORFLASH
71 },
72 {
73 .jedec = 0x20E3,
74 .name = "AMD Am29W040B",
75 .size = 512*1024,
76 .sectorsize = 64*1024,
77 .capabilities = MTD_CAP_NORFLASH
78 },
79 {
80 .jedec = 0xC2AD,
81 .name = "Macronix MX29F016",
82 .size = 2*1024*1024,
83 .sectorsize = 64*1024,
84 .capabilities = MTD_CAP_NORFLASH
85 },
86 { .jedec = 0x0 }
87};
88
89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
90static void jedec_sync(struct mtd_info *mtd) {};
91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
92 size_t *retlen, u_char *buf);
93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, u_char *buf);
95
96static struct mtd_info *jedec_probe(struct map_info *map);
97
98
99
100static struct mtd_chip_driver jedec_chipdrv = {
101 .probe = jedec_probe,
102 .name = "jedec",
103 .module = THIS_MODULE
104};
105
106/* Probe entry point */
107
108static struct mtd_info *jedec_probe(struct map_info *map)
109{
110 struct mtd_info *MTD;
111 struct jedec_private *priv;
112 unsigned long Base;
113 unsigned long SectorSize;
114 unsigned count;
115 unsigned I,Uniq;
116 char Part[200];
117 memset(&priv,0,sizeof(priv));
118
119 MTD = kzalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
120 if (!MTD)
121 return NULL;
122
123 priv = (struct jedec_private *)&MTD[1];
124
125 my_bank_size = map->size;
126
127 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
128 {
129 printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
130 kfree(MTD);
131 return NULL;
132 }
133
134 for (Base = 0; Base < map->size; Base += my_bank_size)
135 {
136 // Perhaps zero could designate all tests?
137 if (map->buswidth == 0)
138 map->buswidth = 1;
139
140 if (map->buswidth == 1){
141 if (jedec_probe8(map,Base,priv) == 0) {
142 printk("did recognize jedec chip\n");
143 kfree(MTD);
144 return NULL;
145 }
146 }
147 if (map->buswidth == 2)
148 jedec_probe16(map,Base,priv);
149 if (map->buswidth == 4)
150 jedec_probe32(map,Base,priv);
151 }
152
153 // Get the biggest sector size
154 SectorSize = 0;
155 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
156 {
157 // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
158 // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
159 if (priv->chips[I].sectorsize > SectorSize)
160 SectorSize = priv->chips[I].sectorsize;
161 }
162
163 // Quickly ensure that the other sector sizes are factors of the largest
164 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
165 {
166 if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
167 {
168 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
169 kfree(MTD);
170 return NULL;
171 }
172 }
173
174 /* Generate a part name that includes the number of different chips and
175 other configuration information */
176 count = 1;
177 strlcpy(Part,map->name,sizeof(Part)-10);
178 strcat(Part," ");
179 Uniq = 0;
180 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
181 {
182 const struct JEDECTable *JEDEC;
183
184 if (priv->chips[I+1].jedec == priv->chips[I].jedec)
185 {
186 count++;
187 continue;
188 }
189
190 // Locate the chip in the jedec table
191 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
192 if (JEDEC == 0)
193 {
194 printk("mtd: Internal Error, JEDEC not set\n");
195 kfree(MTD);
196 return NULL;
197 }
198
199 if (Uniq != 0)
200 strcat(Part,",");
201 Uniq++;
202
203 if (count != 1)
204 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
205 else
206 sprintf(Part+strlen(Part),"%s",JEDEC->name);
207 if (strlen(Part) > sizeof(Part)*2/3)
208 break;
209 count = 1;
210 }
211
212 /* Determine if the chips are organized in a linear fashion, or if there
213 are empty banks. Note, the last bank does not count here, only the
214 first banks are important. Holes on non-bank boundaries can not exist
215 due to the way the detection algorithm works. */
216 if (priv->size < my_bank_size)
217 my_bank_size = priv->size;
218 priv->is_banked = 0;
219 //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
220 //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
221 if (!priv->size) {
222 printk("priv->size is zero\n");
223 kfree(MTD);
224 return NULL;
225 }
226 if (priv->size/my_bank_size) {
227 if (priv->size/my_bank_size == 1) {
228 priv->size = my_bank_size;
229 }
230 else {
231 for (I = 0; I != priv->size/my_bank_size - 1; I++)
232 {
233 if (priv->bank_fill[I] != my_bank_size)
234 priv->is_banked = 1;
235
236 			/* This could even be eliminated, but new de-optimized read/write
237 			   functions would have to be written */
238 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
239 if (priv->bank_fill[I] != priv->bank_fill[0])
240 {
241 printk("mtd: Failed. Cannot handle unsymmetric banking\n");
242 kfree(MTD);
243 return NULL;
244 }
245 }
246 }
247 }
248 if (priv->is_banked == 1)
249 strcat(Part,", banked");
250
251 // printk("Part: '%s'\n",Part);
252
253 memset(MTD,0,sizeof(*MTD));
254 // strlcpy(MTD->name,Part,sizeof(MTD->name));
255 MTD->name = map->name;
256 MTD->type = MTD_NORFLASH;
257 MTD->flags = MTD_CAP_NORFLASH;
258 MTD->writesize = 1;
259 MTD->erasesize = SectorSize*(map->buswidth);
260 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
261 MTD->size = priv->size;
262 // printk("MTD->size is %x\n",(unsigned int)MTD->size);
263 //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
264 MTD->erase = flash_erase;
265 if (priv->is_banked == 1)
266 MTD->read = jedec_read_banked;
267 else
268 MTD->read = jedec_read;
269 MTD->write = flash_write;
270 MTD->sync = jedec_sync;
271 MTD->priv = map;
272 map->fldrv_priv = priv;
273 map->fldrv = &jedec_chipdrv;
274 __module_get(THIS_MODULE);
275 return MTD;
276}
277
278/* Helper for the JEDEC function: JEDEC numbers all have odd parity */
279static int checkparity(u_char C)
280{
281 u_char parity = 0;
282 while (C != 0)
283 {
284 parity ^= C & 1;
285 C >>= 1;
286 }
287
288 return parity == 1;
289}
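
checkparity() encodes the sanity rule stated above: valid JEDEC identification bytes carry odd parity. A few concrete values, consistent with the ID table earlier in this file:

	/* checkparity(0x01) == 1   AMD mfr code: one set bit (odd)
	 * checkparity(0xAD) == 1   Am29F016 id: 1010 1101b, five set bits
	 * checkparity(0x03) == 0   two set bits (even) -> rejected */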
290
291
292/* Take an array of JEDEC numbers that represent interleaved flash chips
293 and process them. Check to make sure they are good JEDEC numbers, look
294 them up and then add them to the chip list */
295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
296 unsigned long base,struct jedec_private *priv)
297{
298 unsigned I,J;
299 unsigned long Size;
300 unsigned long SectorSize;
301 const struct JEDECTable *JEDEC;
302
303 // Test #2 JEDEC numbers exhibit odd parity
304 for (I = 0; I != Count; I++)
305 {
306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
307 return 0;
308 }
309
310 // Finally, just make sure all the chip sizes are the same
311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
312
313 if (JEDEC == 0)
314 {
315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
316 return 0;
317 }
318
319 Size = JEDEC->size;
320 SectorSize = JEDEC->sectorsize;
321 for (I = 0; I != Count; I++)
322 {
323 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
324 if (JEDEC == 0)
325 {
326 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
327 return 0;
328 }
329
330 if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize)
331 {
332 printk("mtd: Failed. Interleved flash does not have matching characteristics\n");
333 return 0;
334 }
335 }
336
337 // Load the Chips
338 for (I = 0; I != MAX_JEDEC_CHIPS; I++)
339 {
340 if (priv->chips[I].jedec == 0)
341 break;
342 }
343
344 if (I + Count > MAX_JEDEC_CHIPS)
345 {
346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
347 return 0;
348 }
349
350 // Add them to the table
351 for (J = 0; J != Count; J++)
352 {
353 unsigned long Bank;
354
355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
357 priv->chips[I].size = JEDEC->size;
358 priv->chips[I].sectorsize = JEDEC->sectorsize;
359 priv->chips[I].base = base + J;
360 priv->chips[I].datashift = J*8;
361 priv->chips[I].capabilities = JEDEC->capabilities;
362 priv->chips[I].offset = priv->size + J;
363
364 // log2 n :|
365 priv->chips[I].addrshift = 0;
366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
367
368 // Determine how filled this bank is.
369 Bank = base & (~(my_bank_size-1));
370 if (priv->bank_fill[Bank/my_bank_size] < base +
371 (JEDEC->size << priv->chips[I].addrshift) - Bank)
372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
373 I++;
374 }
375
376 priv->size += priv->chips[I-1].size*Count;
377
378 return priv->chips[I-1].size;
379}
380
381/* Lookup the chip information from the JEDEC ID table. */
382static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
383{
384 __u16 Id = (mfr << 8) | id;
385 unsigned long I = 0;
386 for (I = 0; JEDEC_table[I].jedec != 0; I++)
387 if (JEDEC_table[I].jedec == Id)
388 return JEDEC_table + I;
389 return NULL;
390}
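
jedec_idtoinf() is the lookup the probe path relies on; the manufacturer and device bytes are folded into the 16-bit key stored in JEDEC_table. A usage example against the table above:

	const struct JEDECTable *inf = jedec_idtoinf(0x01, 0xA4);
	/* inf->name == "AMD Am29F040", inf->size == 512*1024,
	 * inf->sectorsize == 64*1024; an unknown pair yields NULL */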
391
392// Look for flash using an 8 bit bus interface
393static int jedec_probe8(struct map_info *map,unsigned long base,
394 struct jedec_private *priv)
395{
396 #define flread(x) map_read8(map,base+x)
397 #define flwrite(v,x) map_write8(map,v,base+x)
398
399 const unsigned long AutoSel1 = 0xAA;
400 const unsigned long AutoSel2 = 0x55;
401 const unsigned long AutoSel3 = 0x90;
402 const unsigned long Reset = 0xF0;
403 __u32 OldVal;
404 __u8 Mfg[1];
405 __u8 Id[1];
406 unsigned I;
407 unsigned long Size;
408
409 // Wait for any write/erase operation to settle
410 OldVal = flread(base);
411 for (I = 0; OldVal != flread(base) && I < 10000; I++)
412 OldVal = flread(base);
413
414 // Reset the chip
415 flwrite(Reset,0x555);
416
417 // Send the sequence
418 flwrite(AutoSel1,0x555);
419 flwrite(AutoSel2,0x2AA);
420 flwrite(AutoSel3,0x555);
421
422 // Get the JEDEC numbers
423 Mfg[0] = flread(0);
424 Id[0] = flread(1);
425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
426
427 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
429 if (Size == 0)
430 {
431 flwrite(Reset,0x555);
432 return 0;
433 }
434
435
436 // Reset.
437 flwrite(Reset,0x555);
438
439 return 1;
440
441 #undef flread
442 #undef flwrite
443}
444
445// Look for flash using a 16 bit bus interface (ie 2 8-bit chips)
446static int jedec_probe16(struct map_info *map,unsigned long base,
447 struct jedec_private *priv)
448{
449 return 0;
450}
451
452// Look for flash using a 32 bit bus interface (ie 4 8-bit chips)
453static int jedec_probe32(struct map_info *map,unsigned long base,
454 struct jedec_private *priv)
455{
456 #define flread(x) map_read32(map,base+((x)<<2))
457 #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
458
459 const unsigned long AutoSel1 = 0xAAAAAAAA;
460 const unsigned long AutoSel2 = 0x55555555;
461 const unsigned long AutoSel3 = 0x90909090;
462 const unsigned long Reset = 0xF0F0F0F0;
463 __u32 OldVal;
464 __u8 Mfg[4];
465 __u8 Id[4];
466 unsigned I;
467 unsigned long Size;
468
469 // Wait for any write/erase operation to settle
470 OldVal = flread(base);
471 for (I = 0; OldVal != flread(base) && I < 10000; I++)
472 OldVal = flread(base);
473
474 // Reset the chip
475 flwrite(Reset,0x555);
476
477 // Send the sequence
478 flwrite(AutoSel1,0x555);
479 flwrite(AutoSel2,0x2AA);
480 flwrite(AutoSel3,0x555);
481
482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
483 if (flread(0) != flread(0x100) ||
484 flread(1) != flread(0x101))
485 {
486 flwrite(Reset,0x555);
487 return 0;
488 }
489
490 // Split up the JEDEC numbers
491 OldVal = flread(0);
492 for (I = 0; I != 4; I++)
493 Mfg[I] = (OldVal >> (I*8));
494 OldVal = flread(1);
495 for (I = 0; I != 4; I++)
496 Id[I] = (OldVal >> (I*8));
497
498 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
499 if (Size == 0)
500 {
501 flwrite(Reset,0x555);
502 return 0;
503 }
504
505 /* Check for address wrap-around within a single bank: if this region
506 still returns JEDEC numbers we assume the address space has wrapped.
507 Note that we call this routine with the JEDEC autoselect mode still
508 enabled, so the probe test still works when two or more
509 flashes have a truncated address space */
510 if (base + (Size<<2)+0x555 < map->size &&
511 base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
512 {
513 if (flread(base+Size) != flread(base+Size + 0x100) ||
514 flread(base+Size + 1) != flread(base+Size + 0x101))
515 {
516 jedec_probe32(map,base+Size,priv);
517 }
518 }
519
520 // Reset.
521 flwrite(0xF0F0F0F0,0x555);
522
523 return 1;
524
525 #undef flread
526 #undef flwrite
527}
528
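With four 8-bit chips interleaved on a 32-bit bus, each byte lane of a bus word belongs to one chip, which is why the probe above reads one 32-bit autoselect word and shifts out four manufacturer/device bytes. A small self-contained sketch of that lane split (the example value is made up):

    #include <stdio.h>

    /* Slice a 32-bit bus word into per-lane bytes: lane J is bits
       J*8..J*8+7. With four interleaved 8-bit chips, each lane carries
       one chip's autoselect reply. */
    static void split_lanes(unsigned int word, unsigned char lane[4])
    {
            for (int j = 0; j < 4; j++)
                    lane[j] = (word >> (j * 8)) & 0xFF;
    }

    int main(void)
    {
            unsigned char mfg[4];
            split_lanes(0x01010101, mfg);  /* all four lanes answer 0x01 */
            for (int j = 0; j < 4; j++)
                    printf("chip %d: mfr 0x%02X\n", j, mfg[j]);
            return 0;
    }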
529/* Linear read. */
530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
531 size_t *retlen, u_char *buf)
532{
533 struct map_info *map = mtd->priv;
534
535 map_copy_from(map, buf, from, len);
536 *retlen = len;
537 return 0;
538}
539
540/* Banked read. Take special care to jump past the holes in the bank
541 mapping. This version assumes symmetry in the holes. */
542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
543 size_t *retlen, u_char *buf)
544{
545 struct map_info *map = mtd->priv;
546 struct jedec_private *priv = map->fldrv_priv;
547
548 *retlen = 0;
549 while (len > 0)
550 {
551 // Determine what bank and offset into that bank the first byte is
552 unsigned long bank = from & (~(priv->bank_fill[0]-1));
553 unsigned long offset = from & (priv->bank_fill[0]-1);
554 unsigned long get = len;
555 if (priv->bank_fill[0] - offset < len)
556 get = priv->bank_fill[0] - offset;
557
558 bank /= priv->bank_fill[0];
559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
560
561 len -= get;
562 *retlen += get;
563 from += get;
564 }
565 return 0;
566}
567
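The banked read translates a linear device offset into the sparse window by splitting it at the bank-fill boundary and re-basing each filled stretch at a my_bank_size stride. A compilable sketch of just that address arithmetic, assuming (as the driver does) that the fill size is a power of two; the concrete sizes are invented for the example:

    #include <stdio.h>

    #define MY_BANK_SIZE 0x80000UL          /* illustrative bank stride */

    /* Map a linear device offset into the sparse banked window.
       fill must be a power of two (the driver relies on this too). */
    static unsigned long banked_to_window(unsigned long from, unsigned long fill)
    {
            unsigned long bank   = from & ~(fill - 1);   /* which filled bank */
            unsigned long offset = from &  (fill - 1);   /* offset inside it  */
            return (bank / fill) * MY_BANK_SIZE + offset;
    }

    int main(void)
    {
            /* with 0x40000 filled per 0x80000 bank, 0x50000 -> 0x90000 */
            printf("0x%lx\n", banked_to_window(0x50000UL, 0x40000UL));
            return 0;
    }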
568/* Pass the flags value that the flash returned before it re-entered read
569 mode. */
570static void jedec_flash_failed(unsigned char code)
571{
572 /* Bit 5 being high indicates that there was an internal device
573 failure, e.g. the erasure time limit was exceeded */
574 if ((code & (1 << 5)) != 0)
575 {
576 printk("mtd: Internal Flash failure\n");
577 return;
578 }
579 printk("mtd: Programming didn't take\n");
580}
581
582/* This uses the erasure function described in the AMD Flash Handbook,
583 it will work for flashes with a fixed sector size only. Flashes with
584 a selection of sector sizes (e.g. the AMD Am29F800B) will need a different
585 routine. This routine tries to parallelize erasing multiple chips/sectors
586 where possible */
587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
588{
589 // Does IO to the currently selected chip
590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
592
593 unsigned long Time = 0;
594 unsigned long NoTime = 0;
595 unsigned long start = instr->addr, len = instr->len;
596 unsigned int I;
597 struct map_info *map = mtd->priv;
598 struct jedec_private *priv = map->fldrv_priv;
599
600 // Verify the arguments..
601 if (start + len > mtd->size ||
602 (start % mtd->erasesize) != 0 ||
603 (len % mtd->erasesize) != 0 ||
604 (len/mtd->erasesize) == 0)
605 return -EINVAL;
606
607 jedec_flash_chip_scan(priv,start,len);
608
609 // Start the erase sequence on each chip
610 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
611 {
612 unsigned long off;
613 struct jedec_flash_chip *chip = priv->chips + I;
614
615 if (chip->length == 0)
616 continue;
617
618 if (chip->start + chip->length > chip->size)
619 {
620 printk("DIE\n");
621 return -EIO;
622 }
623
624 flwrite(0xF0,chip->start + 0x555);
625 flwrite(0xAA,chip->start + 0x555);
626 flwrite(0x55,chip->start + 0x2AA);
627 flwrite(0x80,chip->start + 0x555);
628 flwrite(0xAA,chip->start + 0x555);
629 flwrite(0x55,chip->start + 0x2AA);
630
631 /* Once we start selecting the erase sectors the delay between each
632 command must not exceed 50us or it will immediately start erasing
633 and ignore the other sectors */
634 for (off = 0; off < len; off += chip->sectorsize)
635 {
636 // Check to make sure we didn't timeout
637 flwrite(0x30,chip->start + off);
638 if (off == 0)
639 continue;
640 if ((flread(chip->start + off) & (1 << 3)) != 0)
641 {
642 printk("mtd: Ack! We timed out the erase timer!\n");
643 return -EIO;
644 }
645 }
646 }
647
648 /* We could split this into a timer routine and return early, performing
649 background erasure. Maybe later, if the need warrants */
650
651 /* Poll the flash for erasure completion, specs say this can take as long
652 as 480 seconds to do all the sectors (for a 2 meg flash).
653 Erasure time is dependent on chip age, temp and wear.. */
654
655 /* This being a generic routine assumes a 32 bit bus. It does read32s
656 and bundles interleaved chips into the same grouping. This will work
657 for all bus widths */
658 Time = 0;
659 NoTime = 0;
660 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
661 {
662 struct jedec_flash_chip *chip = priv->chips + I;
663 unsigned long off = 0;
664 unsigned todo[4] = {0,0,0,0};
665 unsigned todo_left = 0;
666 unsigned J;
667
668 if (chip->length == 0)
669 continue;
670
671 /* Find all chips in this data line, realistically this is all
672 or nothing up to the interleave count */
673 for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
674 {
675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
676 (chip->base & (~((1<<chip->addrshift)-1))))
677 {
678 todo_left++;
679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
680 }
681 }
682
683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
684 (short)todo[2],(short)todo[3]);
685 */
686 while (1)
687 {
688 __u32 Last[4];
689 unsigned long Count = 0;
690
691 /* During erase bit 7 is held low and bit 6 toggles, we watch this,
692 should it stop toggling or go high then the erase is completed,
693 or this is not really flash ;> */
694 switch (map->buswidth) {
695 case 1:
696 Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
697 Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
698 Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
699 break;
700 case 2:
701 Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
702 Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
703 Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
704 break;
705 case 4:
706 Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
707 Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
708 Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
709 break;
710 }
711 Count = 3;
712 while (todo_left != 0)
713 {
714 for (J = 0; J != 4; J++)
715 {
716 __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF;
717 __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF;
718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
719 if (todo[J] == 0)
720 continue;
721
722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
723 {
724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
725 continue;
726 }
727
728 if (Byte1 == Byte2)
729 {
730 jedec_flash_failed(Byte3);
731 return -EIO;
732 }
733
734 todo[J] = 0;
735 todo_left--;
736 }
737
738/* if (NoTime == 0)
739 Time += HZ/10 - schedule_timeout(HZ/10);*/
740 NoTime = 0;
741
742 switch (map->buswidth) {
743 case 1:
744 Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
745 break;
746 case 2:
747 Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
748 break;
749 case 4:
750 Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
751 break;
752 }
753 Count++;
754
755/* // Count time, max of 15s per sector (according to AMD)
756 if (Time > 15*len/mtd->erasesize*HZ)
757 {
758 printk("mtd: Flash Erase Timed out\n");
759 return -EIO;
760 } */
761 }
762
763 // Skip to the next chip if we used chip erase
764 if (chip->length == chip->size)
765 off = chip->size;
766 else
767 off += chip->sectorsize;
768
769 if (off >= chip->length)
770 break;
771 NoTime = 1;
772 }
773
774 for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
775 {
776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
777 (chip->base & (~((1<<chip->addrshift)-1))))
778 priv->chips[J].length = 0;
779 }
780 }
781
782 //printk("done\n");
783 instr->state = MTD_ERASE_DONE;
784 mtd_erase_callback(instr);
785 return 0;
786
787 #undef flread
788 #undef flwrite
789}
790
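The completion poll above is the AMD toggle-bit algorithm: while an embedded erase runs, DQ6 inverts on every read and DQ5 goes high if the device's internal time limit is exceeded; two identical consecutive reads mean the operation finished. A self-contained sketch of the decision rule against a simulated status register (the simulation and spin limit are illustrative, not the driver's behavior; the real algorithm also re-reads to confirm after seeing DQ5):

    #include <stdio.h>

    /* Simulated part: DQ6 (bit 6) toggles while "busy", then data settles. */
    static int nreads;
    static unsigned char read_status(void)
    {
            if (nreads < 6)
                    return (nreads++ & 1) ? 0x40 : 0x00;
            return 0x80;
    }

    /* Simplified AMD toggle-bit completion check:
       two equal consecutive reads = finished; DQ5 high = device timeout. */
    static int wait_toggle_done(void)
    {
            unsigned char prev = read_status();
            for (int spins = 0; spins < 10000; spins++) {
                    unsigned char cur = read_status();
                    if (cur == prev)
                            return 0;        /* DQ6 stopped toggling */
                    if (cur & (1 << 5))
                            return -1;       /* DQ5: time limit exceeded */
                    prev = cur;
            }
            return -1;                       /* software timeout */
    }

    int main(void)
    {
            printf("erase %s\n", wait_toggle_done() == 0 ? "done" : "failed");
            return 0;
    }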
791/* This is the simple flash writing function. It writes to every byte, in
792 sequence. It takes care of how to properly address the flash if
793 the flash is interleaved. It can only be used if all the chips in the
794 array are identical! */
795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
796 size_t *retlen, const u_char *buf)
797{
798 /* Does IO to the currently selected chip. It takes the bank addressing
799 base (which is divisible by the chip size) adds the necessary lower bits
800 of addrshift (interleave index) and then adds the control register index. */
801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
803
804 struct map_info *map = mtd->priv;
805 struct jedec_private *priv = map->fldrv_priv;
806 unsigned long base;
807 unsigned long off;
808 size_t save_len = len;
809
810 if (start + len > mtd->size)
811 return -EIO;
812
813 //printk("Here");
814
815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
816 while (len != 0)
817 {
818 struct jedec_flash_chip *chip = priv->chips;
819 unsigned long bank;
820 unsigned long boffset;
821
822 // Compute the base of the flash.
823 off = ((unsigned long)start) % (chip->size << chip->addrshift);
824 base = start - off;
825
826 // Perform banked addressing translation.
827 bank = base & (~(priv->bank_fill[0]-1));
828 boffset = base & (priv->bank_fill[0]-1);
829 bank = (bank/priv->bank_fill[0])*my_bank_size;
830 base = bank + boffset;
831
832 // printk("Flasing %X %X %X\n",base,chip->size,len);
833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
834
835 // Loop over this page
836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
837 {
838 unsigned char oldbyte = map_read8(map,base+off);
839 unsigned char Last[4];
840 unsigned long Count = 0;
841
842 if (oldbyte == *buf) {
843 // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len);
844 continue;
845 }
846 if (((~oldbyte) & *buf) != 0)
847 printk("mtd: warn: Trying to set a 0 to a 1\n");
848
849 // Write
850 flwrite(0xAA,0x555);
851 flwrite(0x55,0x2AA);
852 flwrite(0xA0,0x555);
853 map_write8(map,*buf,base + off);
854 Last[0] = map_read8(map,base + off);
855 Last[1] = map_read8(map,base + off);
856 Last[2] = map_read8(map,base + off);
857
858 /* Wait for the flash to finish the operation. We store the last 4
859 status bytes that have been retrieved so we can determine why
860 it failed. The toggle bits keep toggling when there is a
861 failure */
862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
863 Count < 10000; Count++)
864 Last[Count % 4] = map_read8(map,base + off);
865 if (Last[(Count - 1) % 4] != *buf)
866 {
867 jedec_flash_failed(Last[(Count - 3) % 4]);
868 return -EIO;
869 }
870 }
871 }
872 *retlen = save_len;
873 return 0;
874}
875
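The "Trying to set a 0 to a 1" warning above reflects the basic NOR rule: a program operation can only clear bits, so any byte needing a 0-to-1 transition requires an erase first. The check reduces to one expression, sketched standalone here:

    #include <stdio.h>

    /* A program operation can only clear bits. Any bit that is 0 in the
       old value but 1 in the new one needs an erase first. */
    static int needs_erase(unsigned char oldbyte, unsigned char newbyte)
    {
            return ((~oldbyte) & newbyte) != 0;
    }

    int main(void)
    {
            printf("%d\n", needs_erase(0xF0, 0x0F));  /* 1: would set 0s to 1s */
            printf("%d\n", needs_erase(0xFF, 0x0F));  /* 0: only clears bits  */
            return 0;
    }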
876/* This is used to enhance the speed of the erase routine,
877 when things are being done to multiple chips it is possible to
878 parallelize the operations; full-memory erases of multi-chip
879 memories benefit in particular */
880static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
881 unsigned long len)
882{
883 unsigned int I;
884
885 // Zero the records
886 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
887 priv->chips[I].start = priv->chips[I].length = 0;
888
889 // Intersect the region with each chip
890 for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
891 {
892 struct jedec_flash_chip *chip = priv->chips + I;
893 unsigned long ByteStart;
894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
895
896 // End is before this chip or the start is after it
897 if (start+len < chip->offset ||
898 ChipEndByte - (1 << chip->addrshift) < start)
899 continue;
900
901 if (start < chip->offset)
902 {
903 ByteStart = chip->offset;
904 chip->start = 0;
905 }
906 else
907 {
908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
909 ByteStart = start;
910 }
911
912 if (start + len >= ChipEndByte)
913 chip->length = (ChipEndByte - ByteStart) >> chip->addrshift;
914 else
915 chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
916 }
917}
918
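The scan clips the byte range [start, start+len) against each chip's window and converts the overlap to chip-local units, where one unit spans 1 << addrshift bytes of the interleaved address space. A compilable sketch of that clipping for a single chip, simplified to exact multiples (the driver additionally rounds partial units up); the window and shift values are invented:

    #include <stdio.h>

    /* Clip [start, start+len) against one chip's byte window and convert
       the overlap to chip-local words (one word = 1 << shift bytes). */
    static void clip_region(unsigned long start, unsigned long len,
                            unsigned long chip_off, unsigned long chip_bytes,
                            unsigned shift,
                            unsigned long *cstart, unsigned long *clen)
    {
            unsigned long lo = start < chip_off ? chip_off : start;
            unsigned long hi = start + len;
            if (hi > chip_off + chip_bytes)
                    hi = chip_off + chip_bytes;
            if (hi <= lo) {                  /* no overlap with this chip */
                    *cstart = *clen = 0;
                    return;
            }
            *cstart = (lo - chip_off) >> shift;
            *clen   = (hi - lo) >> shift;
    }

    int main(void)
    {
            unsigned long s, l;
            /* chip covers bytes 0x20000..0x3FFFF, interleave shift 2 */
            clip_region(0x10000, 0x20000, 0x20000, 0x20000, 2, &s, &l);
            printf("start %lu words, len %lu words\n", s, l);  /* 0, 16384 */
            return 0;
    }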
919int __init jedec_init(void)
920{
921 register_mtd_chip_driver(&jedec_chipdrv);
922 return 0;
923}
924
925static void __exit jedec_exit(void)
926{
927 unregister_mtd_chip_driver(&jedec_chipdrv);
928}
929
930module_init(jedec_init);
931module_exit(jedec_exit);
932
933MODULE_LICENSE("GPL");
934MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
935MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
deleted file mode 100644
index c9cd3d21ccfa..000000000000
--- a/drivers/mtd/chips/sharp.c
+++ /dev/null
@@ -1,601 +0,0 @@
1/*
2 * MTD chip driver for pre-CFI Sharp flash chips
3 *
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc.
6 *
7 * $Id: sharp.c,v 1.17 2005/11/29 14:28:28 gleixner Exp $
8 *
9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
11 * LH28F008SCT Symmetrical block flash memory, 1Mx8
12 *
13 * Documentation:
14 * http://www.sharpmeg.com/datasheets/memic/flashcmp/
15 * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf
16 * 016sctl9.pdf
17 *
18 * Limitations:
19 * This driver only supports 4x1 arrangement of chips.
20 * Not tested on anything but PowerPC.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/mtd/map.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/cfi.h>
32#include <linux/delay.h>
33#include <linux/init.h>
34#include <linux/slab.h>
35
36#define CMD_RESET 0xffffffff
37#define CMD_READ_ID 0x90909090
38#define CMD_READ_STATUS 0x70707070
39#define CMD_CLEAR_STATUS 0x50505050
40#define CMD_BLOCK_ERASE_1 0x20202020
41#define CMD_BLOCK_ERASE_2 0xd0d0d0d0
42#define CMD_BYTE_WRITE 0x40404040
43#define CMD_SUSPEND 0xb0b0b0b0
44#define CMD_RESUME 0xd0d0d0d0
45#define CMD_SET_BLOCK_LOCK_1 0x60606060
46#define CMD_SET_BLOCK_LOCK_2 0x01010101
47#define CMD_SET_MASTER_LOCK_1 0x60606060
48#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1
49#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060
50#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0
51
52#define SR_READY 0x80808080 // 1 = ready
53#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended
54#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits
55#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit
56#define SR_VPP 0x08080808 // 1 = Vpp is low
57#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended
58#define SR_PROTECT 0x02020202 // 1 = lock bit set
59#define SR_RESERVED 0x01010101
60
61#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT)
62
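All the commands and status masks above are single-byte values replicated into each lane of the 32-bit bus word, because four 8-bit chips answer in parallel. "Ready" therefore means every lane is ready, and "error" means any lane has errored, as in this illustrative sketch (values invented):

    #include <stdio.h>

    #define SR_READY 0x80808080u   /* bit 7 of each of the four lanes */

    /* All four lanes must raise their ready bit. */
    static int all_ready(unsigned int status)
    {
            return (status & SR_READY) == SR_READY;
    }

    /* Any error bit on any lane fails the whole word. */
    static int any_error(unsigned int status, unsigned int errmask)
    {
            return (status & errmask) != 0;
    }

    int main(void)
    {
            printf("%d\n", all_ready(0x80808080));             /* 1 */
            printf("%d\n", all_ready(0x80800080));             /* 0: a lane busy */
            printf("%d\n", any_error(0x00200000, 0x20202020)); /* 1: lane erase error */
            return 0;
    }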
63/* Configuration options */
64
65#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
66
67static struct mtd_info *sharp_probe(struct map_info *);
68
69static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
70
71static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
72 size_t *retlen, u_char *buf);
73static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len,
74 size_t *retlen, const u_char *buf);
75static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr);
76static void sharp_sync(struct mtd_info *mtd);
77static int sharp_suspend(struct mtd_info *mtd);
78static void sharp_resume(struct mtd_info *mtd);
79static void sharp_destroy(struct mtd_info *mtd);
80
81static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
82 unsigned long adr, __u32 datum);
83static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
84 unsigned long adr);
85#ifdef AUTOUNLOCK
86static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
87 unsigned long adr);
88#endif
89
90
91struct sharp_info{
92 struct flchip *chip;
93 int bogus;
94 int chipshift;
95 int numchips;
96 struct flchip chips[1];
97};
98
99static void sharp_destroy(struct mtd_info *mtd);
100
101static struct mtd_chip_driver sharp_chipdrv = {
102 .probe = sharp_probe,
103 .destroy = sharp_destroy,
104 .name = "sharp",
105 .module = THIS_MODULE
106};
107
108
109static struct mtd_info *sharp_probe(struct map_info *map)
110{
111 struct mtd_info *mtd = NULL;
112 struct sharp_info *sharp = NULL;
113 int width;
114
115 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
116 if(!mtd)
117 return NULL;
118
119 sharp = kzalloc(sizeof(*sharp), GFP_KERNEL);
120 if(!sharp) {
121 kfree(mtd);
122 return NULL;
123 }
124
125 width = sharp_probe_map(map,mtd);
126 if(!width){
127 kfree(mtd);
128 kfree(sharp);
129 return NULL;
130 }
131
132 mtd->priv = map;
133 mtd->type = MTD_NORFLASH;
134 mtd->erase = sharp_erase;
135 mtd->read = sharp_read;
136 mtd->write = sharp_write;
137 mtd->sync = sharp_sync;
138 mtd->suspend = sharp_suspend;
139 mtd->resume = sharp_resume;
140 mtd->flags = MTD_CAP_NORFLASH;
141 mtd->writesize = 1;
142 mtd->name = map->name;
143
144 sharp->chipshift = 23;
145 sharp->numchips = 1;
146 sharp->chips[0].start = 0;
147 sharp->chips[0].state = FL_READY;
148 sharp->chips[0].mutex = &sharp->chips[0]._spinlock;
149 sharp->chips[0].word_write_time = 0;
150 init_waitqueue_head(&sharp->chips[0].wq);
151 spin_lock_init(&sharp->chips[0]._spinlock);
152
153 map->fldrv = &sharp_chipdrv;
154 map->fldrv_priv = sharp;
155
156 __module_get(THIS_MODULE);
157 return mtd;
158}
159
160static inline void sharp_send_cmd(struct map_info *map, unsigned long cmd, unsigned long adr)
161{
162 map_word map_cmd;
163 map_cmd.x[0] = cmd;
164 map_write(map, map_cmd, adr);
165}
166
167static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
168{
169 map_word tmp, read0, read4;
170 unsigned long base = 0;
171 int width = 4;
172
173 tmp = map_read(map, base+0);
174
175 sharp_send_cmd(map, CMD_READ_ID, base+0);
176
177 read0 = map_read(map, base+0);
178 read4 = map_read(map, base+4);
179 if(read0.x[0] == 0x89898989){
180 printk("Looks like sharp flash\n");
181 switch(read4.x[0]){
182 case 0xaaaaaaaa:
183 case 0xa0a0a0a0:
184 /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
185 /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/
186 mtd->erasesize = 0x10000 * width;
187 mtd->size = 0x200000 * width;
188 return width;
189 case 0xa6a6a6a6:
190 /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/
191 /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/
192 mtd->erasesize = 0x10000 * width;
193 mtd->size = 0x100000 * width;
194 return width;
195#if 0
196 case 0x00000000: /* unknown */
197 /* XX - LH28F004SCT 512kx8, 8 64k blocks*/
198 mtd->erasesize = 0x10000 * width;
199 mtd->size = 0x80000 * width;
200 return width;
201#endif
202 default:
203 printk("Sort-of looks like sharp flash, 0x%08lx 0x%08lx\n",
204 read0.x[0], read4.x[0]);
205 }
206 }else if((map_read(map, base+0).x[0] == CMD_READ_ID)){
207 /* RAM, probably */
208 printk("Looks like RAM\n");
209 map_write(map, tmp, base+0);
210 }else{
211 printk("Doesn't look like sharp flash, 0x%08lx 0x%08lx\n",
212 read0.x[0], read4.x[0]);
213 }
214
215 return 0;
216}
217
218/* This function returns with the chip->mutex lock held. */
219static int sharp_wait(struct map_info *map, struct flchip *chip)
220{
221 int i;
222 map_word status;
223 unsigned long timeo = jiffies + HZ;
224 DECLARE_WAITQUEUE(wait, current);
225 int adr = 0;
226
227retry:
228 spin_lock_bh(chip->mutex);
229
230 switch(chip->state){
231 case FL_READY:
232 sharp_send_cmd(map, CMD_READ_STATUS, adr);
233 chip->state = FL_STATUS;
234 case FL_STATUS:
235 for(i=0;i<100;i++){
236 status = map_read(map, adr);
237 if((status.x[0] & SR_READY)==SR_READY)
238 break;
239 udelay(1);
240 }
241 break;
242 default:
243 printk("Waiting for chip\n");
244
245 set_current_state(TASK_INTERRUPTIBLE);
246 add_wait_queue(&chip->wq, &wait);
247
248 spin_unlock_bh(chip->mutex);
249
250 schedule();
251 remove_wait_queue(&chip->wq, &wait);
252
253 if(signal_pending(current))
254 return -EINTR;
255
256 timeo = jiffies + HZ;
257
258 goto retry;
259 }
260
261 sharp_send_cmd(map, CMD_RESET, adr);
262
263 chip->state = FL_READY;
264
265 return 0;
266}
267
268static void sharp_release(struct flchip *chip)
269{
270 wake_up(&chip->wq);
271 spin_unlock_bh(chip->mutex);
272}
273
274static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
275 size_t *retlen, u_char *buf)
276{
277 struct map_info *map = mtd->priv;
278 struct sharp_info *sharp = map->fldrv_priv;
279 int chipnum;
280 int ret = 0;
281 int ofs = 0;
282
283 chipnum = (from >> sharp->chipshift);
284 ofs = from & ((1 << sharp->chipshift)-1);
285
286 *retlen = 0;
287
288 while(len){
289 unsigned long thislen;
290
291 if(chipnum>=sharp->numchips)
292 break;
293
294 thislen = len;
295 if(ofs+thislen >= (1<<sharp->chipshift))
296 thislen = (1<<sharp->chipshift) - ofs;
297
298 ret = sharp_wait(map,&sharp->chips[chipnum]);
299 if(ret<0)
300 break;
301
302 map_copy_from(map,buf,ofs,thislen);
303
304 sharp_release(&sharp->chips[chipnum]);
305
306 *retlen += thislen;
307 len -= thislen;
308 buf += thislen;
309
310 ofs = 0;
311 chipnum++;
312 }
313 return ret;
314}
315
316static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len,
317 size_t *retlen, const u_char *buf)
318{
319 struct map_info *map = mtd->priv;
320 struct sharp_info *sharp = map->fldrv_priv;
321 int ret = 0;
322 int i,j;
323 int chipnum;
324 unsigned long ofs;
325 union { u32 l; unsigned char uc[4]; } tbuf;
326
327 *retlen = 0;
328
329 while(len){
330 tbuf.l = 0xffffffff;
331 chipnum = to >> sharp->chipshift;
332 ofs = to & ((1<<sharp->chipshift)-1);
333
334 j=0;
335 for(i=ofs&3;i<4 && len;i++){
336 tbuf.uc[i] = *buf;
337 buf++;
338 to++;
339 len--;
340 j++;
341 }
342 ret = sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l);
343 if(ret<0)
344 return ret;
345 (*retlen)+=j;
346 }
347
348 return 0;
349}
350
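sharp_write above assembles each 32-bit program word in a union, pre-filling it with 0xFF so byte lanes outside the requested range are left unprogrammed (a NOR program can only clear bits). A standalone sketch of that packing step; pack_word and its example values are hypothetical:

    #include <stdio.h>
    #include <string.h>

    /* Pack the next bytes of buf into a 32-bit word aligned at (to & ~3),
       padding unused byte lanes with 0xFF so they stay unchanged.
       Returns how many input bytes were consumed. */
    static int pack_word(unsigned long to, const unsigned char *buf,
                         size_t len, unsigned char word[4])
    {
            int used = 0;
            memset(word, 0xFF, 4);
            for (int i = to & 3; i < 4 && len; i++, len--, used++)
                    word[i] = buf[used];
            return used;
    }

    int main(void)
    {
            unsigned char w[4];
            const unsigned char data[] = { 0x11, 0x22, 0x33 };
            int n = pack_word(0x1002, data, sizeof(data), w); /* lane 2 start */
            printf("consumed %d: %02X %02X %02X %02X\n", n, w[0], w[1], w[2], w[3]);
            /* consumed 2: FF FF 11 22 -- 0x33 goes into the next word */
            return 0;
    }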
351static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
352 unsigned long adr, __u32 datum)
353{
354 int ret;
355 int timeo;
356 int try;
357 int i;
358 map_word data, status;
359
360 status.x[0] = 0;
361 ret = sharp_wait(map,chip);
362
363 for(try=0;try<10;try++){
364 sharp_send_cmd(map, CMD_BYTE_WRITE, adr);
365 /* cpu_to_le32 -> hack to fix the writel be->le conversion */
366 data.x[0] = cpu_to_le32(datum);
367 map_write(map, data, adr);
368
369 chip->state = FL_WRITING;
370
371 timeo = jiffies + (HZ/2);
372
373 sharp_send_cmd(map, CMD_READ_STATUS, adr);
374 for(i=0;i<100;i++){
375 status = map_read(map, adr);
376 if((status.x[0] & SR_READY) == SR_READY)
377 break;
378 }
379 if(i==100){
380 printk("sharp: timed out writing\n");
381 }
382
383 if(!(status.x[0] & SR_ERRORS))
384 break;
385
386 printk("sharp: error writing byte at addr=%08lx status=%08lx\n", adr, status.x[0]);
387
388 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
389 }
390 sharp_send_cmd(map, CMD_RESET, adr);
391 chip->state = FL_READY;
392
393 wake_up(&chip->wq);
394 spin_unlock_bh(chip->mutex);
395
396 return 0;
397}
398
399static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
400{
401 struct map_info *map = mtd->priv;
402 struct sharp_info *sharp = map->fldrv_priv;
403 unsigned long adr,len;
404 int chipnum, ret=0;
405
406//printk("sharp_erase()\n");
407 if(instr->addr & (mtd->erasesize - 1))
408 return -EINVAL;
409 if(instr->len & (mtd->erasesize - 1))
410 return -EINVAL;
411 if(instr->len + instr->addr > mtd->size)
412 return -EINVAL;
413
414 chipnum = instr->addr >> sharp->chipshift;
415 adr = instr->addr & ((1<<sharp->chipshift)-1);
416 len = instr->len;
417
418 while(len){
419 ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr);
420 if(ret)return ret;
421
422 adr += mtd->erasesize;
423 len -= mtd->erasesize;
424 if(adr >> sharp->chipshift){
425 adr = 0;
426 chipnum++;
427 if(chipnum>=sharp->numchips)
428 break;
429 }
430 }
431
432 instr->state = MTD_ERASE_DONE;
433 mtd_erase_callback(instr);
434
435 return 0;
436}
437
438static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
439 unsigned long adr)
440{
441 int ret;
442 unsigned long timeo;
443 map_word status;
444 DECLARE_WAITQUEUE(wait, current);
445
446 sharp_send_cmd(map, CMD_READ_STATUS, adr);
447 status = map_read(map, adr);
448
449 timeo = jiffies + HZ;
450
451 while(time_before(jiffies, timeo)){
452 sharp_send_cmd(map, CMD_READ_STATUS, adr);
453 status = map_read(map, adr);
454 if((status.x[0] & SR_READY)==SR_READY){
455 ret = 0;
456 goto out;
457 }
458 set_current_state(TASK_INTERRUPTIBLE);
459 add_wait_queue(&chip->wq, &wait);
460
461 //spin_unlock_bh(chip->mutex);
462
463 schedule_timeout(1);
464 schedule();
465 remove_wait_queue(&chip->wq, &wait);
466
467 //spin_lock_bh(chip->mutex);
468
469 if (signal_pending(current)){
470 ret = -EINTR;
471 goto out;
472 }
473
474 }
475 ret = -ETIME;
476out:
477 return ret;
478}
479
480static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
481 unsigned long adr)
482{
483 int ret;
484 //int timeo;
485 map_word status;
486 //int i;
487
488//printk("sharp_erase_oneblock()\n");
489
490#ifdef AUTOUNLOCK
491 /* This seems like a good place to do an unlock */
492 sharp_unlock_oneblock(map,chip,adr);
493#endif
494
495 sharp_send_cmd(map, CMD_BLOCK_ERASE_1, adr);
496 sharp_send_cmd(map, CMD_BLOCK_ERASE_2, adr);
497
498 chip->state = FL_ERASING;
499
500 ret = sharp_do_wait_for_ready(map,chip,adr);
501 if(ret<0)return ret;
502
503 sharp_send_cmd(map, CMD_READ_STATUS, adr);
504 status = map_read(map, adr);
505
506 if(!(status.x[0] & SR_ERRORS)){
507 sharp_send_cmd(map, CMD_RESET, adr);
508 chip->state = FL_READY;
509 //spin_unlock_bh(chip->mutex);
510 return 0;
511 }
512
513 printk("sharp: error erasing block at addr=%08lx status=%08lx\n", adr, status.x[0]);
514 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
515
516 //spin_unlock_bh(chip->mutex);
517
518 return -EIO;
519}
520
521#ifdef AUTOUNLOCK
522static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
523 unsigned long adr)
524{
525 int i;
526 map_word status;
527
528 sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_1, adr);
529 sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_2, adr);
530
531 udelay(100);
532
533 status = map_read(map, adr);
534 printk("status=%08lx\n", status.x[0]);
535
536 for(i=0;i<1000;i++){
537 //sharp_send_cmd(map, CMD_READ_STATUS, adr);
538 status = map_read(map, adr);
539 if((status.x[0] & SR_READY) == SR_READY)
540 break;
541 udelay(100);
542 }
543 if(i==1000){
544 printk("sharp: timed out unlocking block\n");
545 }
546
547 if(!(status.x[0] & SR_ERRORS)){
548 sharp_send_cmd(map, CMD_RESET, adr);
549 chip->state = FL_READY;
550 return;
551 }
552
553 printk("sharp: error unlocking block at addr=%08lx status=%08lx\n", adr, status.x[0]);
554 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
555}
556#endif
557
558static void sharp_sync(struct mtd_info *mtd)
559{
560 //printk("sharp_sync()\n");
561}
562
563static int sharp_suspend(struct mtd_info *mtd)
564{
565 printk("sharp_suspend()\n");
566 return -EINVAL;
567}
568
569static void sharp_resume(struct mtd_info *mtd)
570{
571 printk("sharp_resume()\n");
572
573}
574
575static void sharp_destroy(struct mtd_info *mtd)
576{
577 printk("sharp_destroy()\n");
578
579}
580
581static int __init sharp_probe_init(void)
582{
583 printk("MTD Sharp chip driver <ds@lineo.com>\n");
584
585 register_mtd_chip_driver(&sharp_chipdrv);
586
587 return 0;
588}
589
590static void __exit sharp_probe_exit(void)
591{
592 unregister_mtd_chip_driver(&sharp_chipdrv);
593}
594
595module_init(sharp_probe_init);
596module_exit(sharp_probe_exit);
597
598
599MODULE_LICENSE("GPL");
600MODULE_AUTHOR("David Schleef <ds@schleef.org>");
601MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index fc4cc8ba9e29..be4b9948c762 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -373,7 +373,7 @@ static inline void kill_final_newline(char *str)
373 373
374#ifndef MODULE 374#ifndef MODULE
375static int block2mtd_init_called = 0; 375static int block2mtd_init_called = 0;
376static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */ 376static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
377#endif 377#endif
378 378
379 379
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d990d8141ef5..b665e4ac2208 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -60,7 +60,7 @@ config MTD_PHYSMAP_BANKWIDTH
60 (i.e., run-time calling physmap_configure()). 60 (i.e., run-time calling physmap_configure()).
61 61
62config MTD_PHYSMAP_OF 62config MTD_PHYSMAP_OF
63 tristate "Flash device in physical memory map based on OF descirption" 63 tristate "Flash device in physical memory map based on OF description"
64 depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM) 64 depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
65 help 65 help
66 This provides a 'mapping' driver which allows the NOR Flash and 66 This provides a 'mapping' driver which allows the NOR Flash and
@@ -358,22 +358,6 @@ config MTD_CFI_FLAGADM
358 Mapping for the Flaga digital module. If you don't have one, ignore 358 Mapping for the Flaga digital module. If you don't have one, ignore
359 this setting. 359 this setting.
360 360
361config MTD_BEECH
362 tristate "CFI Flash device mapped on IBM 405LP Beech"
363 depends on MTD_CFI && BEECH
364 help
365 This enables access routines for the flash chips on the IBM
366 405LP Beech board. If you have one of these boards and would like
367 to use the flash chips on it, say 'Y'.
368
369config MTD_ARCTIC
370 tristate "CFI Flash device mapped on IBM 405LP Arctic"
371 depends on MTD_CFI && ARCTIC2
372 help
373 This enables access routines for the flash chips on the IBM 405LP
374 Arctic board. If you have one of these boards and would like to
375 use the flash chips on it, say 'Y'.
376
377config MTD_WALNUT 361config MTD_WALNUT
378 tristate "Flash device mapped on IBM 405GP Walnut" 362 tristate "Flash device mapped on IBM 405GP Walnut"
379 depends on MTD_JEDECPROBE && WALNUT 363 depends on MTD_JEDECPROBE && WALNUT
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index de036c5e6139..3acbb5d01ca4 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,8 +58,6 @@ obj-$(CONFIG_MTD_NETtel) += nettel.o
58obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o 58obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
59obj-$(CONFIG_MTD_EBONY) += ebony.o 59obj-$(CONFIG_MTD_EBONY) += ebony.o
60obj-$(CONFIG_MTD_OCOTEA) += ocotea.o 60obj-$(CONFIG_MTD_OCOTEA) += ocotea.o
61obj-$(CONFIG_MTD_BEECH) += beech-mtd.o
62obj-$(CONFIG_MTD_ARCTIC) += arctic-mtd.o
63obj-$(CONFIG_MTD_WALNUT) += walnut.o 61obj-$(CONFIG_MTD_WALNUT) += walnut.o
64obj-$(CONFIG_MTD_H720X) += h720x-flash.o 62obj-$(CONFIG_MTD_H720X) += h720x-flash.o
65obj-$(CONFIG_MTD_SBC8240) += sbc8240.o 63obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
deleted file mode 100644
index 2cc902436275..000000000000
--- a/drivers/mtd/maps/arctic-mtd.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * $Id: arctic-mtd.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Arctic boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 * modified for Arctic by,
30 * David Gibson
31 * IBM OzLabs, Canberra, Australia
32 * <arctic@gibson.dropbear.id.au>
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/init.h>
39
40#include <linux/mtd/mtd.h>
41#include <linux/mtd/map.h>
42#include <linux/mtd/partitions.h>
43
44#include <asm/io.h>
45#include <asm/ibm4xx.h>
46
47/*
48 * 0 : 0xFE00 0000 - 0xFEFF FFFF : Filesystem 1 (16MiB)
49 * 1 : 0xFF00 0000 - 0xFF4F FFFF : kernel (5.12MiB)
50 * 2 : 0xFF50 0000 - 0xFFF5 FFFF : Filesystem 2 (10.624MiB) (if non-XIP)
51 * 3 : 0xFFF6 0000 - 0xFFFF FFFF : PIBS Firmware (640KiB)
52 */
53
54#define FFS1_SIZE 0x01000000 /* 16MiB */
55#define KERNEL_SIZE 0x00500000 /* 5.12MiB */
56#define FFS2_SIZE 0x00a60000 /* 10.624MiB */
57#define FIRMWARE_SIZE 0x000a0000 /* 640KiB */
58
59
60#define NAME "Arctic Linux Flash"
61#define PADDR SUBZERO_BOOTFLASH_PADDR
62#define BUSWIDTH 2
63#define SIZE SUBZERO_BOOTFLASH_SIZE
64#define PARTITIONS 4
65
66/* Flash memories on these boards are memory resources, accessed big-endian. */
67
68{
69 /* do nothing for now */
70}
71
72static struct map_info arctic_mtd_map = {
73 .name = NAME,
74 .size = SIZE,
75 .bankwidth = BUSWIDTH,
76 .phys = PADDR,
77};
78
79static struct mtd_info *arctic_mtd;
80
81static struct mtd_partition arctic_partitions[PARTITIONS] = {
82 { .name = "Filesystem",
83 .size = FFS1_SIZE,
84 .offset = 0,},
85 { .name = "Kernel",
86 .size = KERNEL_SIZE,
87 .offset = FFS1_SIZE,},
88 { .name = "Filesystem",
89 .size = FFS2_SIZE,
90 .offset = FFS1_SIZE + KERNEL_SIZE,},
91 { .name = "Firmware",
92 .size = FIRMWARE_SIZE,
93 .offset = SUBZERO_BOOTFLASH_SIZE - FIRMWARE_SIZE,},
94};
95
96static int __init
97init_arctic_mtd(void)
98{
99 int err;
100
101 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
102
103 arctic_mtd_map.virt = ioremap(PADDR, SIZE);
104
105 if (!arctic_mtd_map.virt) {
106 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
107 return -EIO;
108 }
109 simple_map_init(&arctic_mtd_map);
110
111 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
112 arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
113
114 if (!arctic_mtd) {
115 iounmap(arctic_mtd_map.virt);
116 return -ENXIO;
117 }
118
119 arctic_mtd->owner = THIS_MODULE;
120
121 err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
122 if (err) {
123 printk("%s: add_mtd_partitions failed\n", NAME);
124 iounmap(arctic_mtd_map.virt);
125 }
126
127 return err;
128}
129
130static void __exit
131cleanup_arctic_mtd(void)
132{
133 if (arctic_mtd) {
134 del_mtd_partitions(arctic_mtd);
135 map_destroy(arctic_mtd);
136 iounmap((void *) arctic_mtd_map.virt);
137 }
138}
139
140module_init(init_arctic_mtd);
141module_exit(cleanup_arctic_mtd);
142
143MODULE_LICENSE("GPL");
144MODULE_AUTHOR("David Gibson <arctic@gibson.dropbear.id.au>");
145MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Arctic boards");
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
deleted file mode 100644
index d76d5981b863..000000000000
--- a/drivers/mtd/maps/beech-mtd.c
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * $Id: beech-mtd.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Beech boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/init.h>
35
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h>
39
40#include <asm/io.h>
41#include <asm/ibm4xx.h>
42
43#define NAME "Beech Linux Flash"
44#define PADDR BEECH_BIGFLASH_PADDR
45#define SIZE BEECH_BIGFLASH_SIZE
46#define BUSWIDTH 1
47
48/* Flash memories on these boards are memory resources, accessed big-endian. */
49
50
51static struct map_info beech_mtd_map = {
52 .name = NAME,
53 .size = SIZE,
54 .bankwidth = BUSWIDTH,
55 .phys = PADDR
56};
57
58static struct mtd_info *beech_mtd;
59
60static struct mtd_partition beech_partitions[2] = {
61 {
62 .name = "Linux Kernel",
63 .size = BEECH_KERNEL_SIZE,
64 .offset = BEECH_KERNEL_OFFSET
65 }, {
66 .name = "Free Area",
67 .size = BEECH_FREE_AREA_SIZE,
68 .offset = BEECH_FREE_AREA_OFFSET
69 }
70};
71
72static int __init
73init_beech_mtd(void)
74{
75 int err;
76
77 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
78
79 beech_mtd_map.virt = ioremap(PADDR, SIZE);
80
81 if (!beech_mtd_map.virt) {
82 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
83 return -EIO;
84 }
85
86 simple_map_init(&beech_mtd_map);
87
88 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
89 beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
90
91 if (!beech_mtd) {
92 iounmap(beech_mtd_map.virt);
93 return -ENXIO;
94 }
95
96 beech_mtd->owner = THIS_MODULE;
97
98 err = add_mtd_partitions(beech_mtd, beech_partitions, 2);
99 if (err) {
100 printk("%s: add_mtd_partitions failed\n", NAME);
101 iounmap(beech_mtd_map.virt);
102 }
103
104 return err;
105}
106
107static void __exit
108cleanup_beech_mtd(void)
109{
110 if (beech_mtd) {
111 del_mtd_partitions(beech_mtd);
112 map_destroy(beech_mtd);
113 iounmap((void *) beech_mtd_map.virt);
114 }
115}
116
117module_init(init_beech_mtd);
118module_exit(cleanup_beech_mtd);
119
120MODULE_LICENSE("GPL");
121MODULE_AUTHOR("Bishop Brock <bcbrock@us.ibm.com>");
122MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Beech boards");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 9f53c655af3a..7b96cd02f82b 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -358,7 +358,7 @@ int __init nettel_init(void)
358 /* Turn other PAR off so the first probe doesn't find it */ 358 /* Turn other PAR off so the first probe doesn't find it */
359 *intel1par = 0; 359 *intel1par = 0;
360 360
361 /* Probe for the the size of the first Intel flash */ 361 /* Probe for the size of the first Intel flash */
362 nettel_intel_map.size = maxsize; 362 nettel_intel_map.size = maxsize;
363 nettel_intel_map.phys = intel0addr; 363 nettel_intel_map.phys = intel0addr;
364 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); 364 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 72107dc06d67..bbb42c35b69b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int __devinit of_physmap_probe(struct of_device *dev, const struct of_dev
186 else { 186 else {
187 if (strcmp(of_probe, "ROM")) 187 if (strcmp(of_probe, "ROM"))
188 dev_dbg(&dev->dev, "map_probe: don't know probe type " 188 dev_dbg(&dev->dev, "map_probe: don't know probe type "
189 "'%s', mapping as rom\n"); 189 "'%s', mapping as rom\n", of_probe);
190 info->mtd = do_map_probe("mtd_rom", &info->map); 190 info->mtd = do_map_probe("mtd_rom", &info->map);
191 } 191 }
192 if (info->mtd == NULL) { 192 if (info->mtd == NULL) {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1af989023c66..9c6236852942 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -347,7 +347,6 @@ int add_mtd_partitions(struct mtd_info *master,
347 slave->mtd.subpage_sft = master->subpage_sft; 347 slave->mtd.subpage_sft = master->subpage_sft;
348 348
349 slave->mtd.name = parts[i].name; 349 slave->mtd.name = parts[i].name;
350 slave->mtd.bank_size = master->bank_size;
351 slave->mtd.owner = master->owner; 350 slave->mtd.owner = master->owner;
352 351
353 slave->mtd.read = part_read; 352 slave->mtd.read = part_read;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d05873b8c155..f1d60b6f048e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -232,11 +232,13 @@ config MTD_NAND_BASLER_EXCITE
232 will be named "excite_nandflash.ko". 232 will be named "excite_nandflash.ko".
233 233
234config MTD_NAND_CAFE 234config MTD_NAND_CAFE
235 tristate "NAND support for OLPC CAFÉ chip" 235 tristate "NAND support for OLPC CAFÉ chip"
236 depends on PCI 236 depends on PCI
237 help 237 select REED_SOLOMON
238 Use NAND flash attached to the CAFÉ chip designed for the $100 238 select REED_SOLOMON_DEC16
239 laptop. 239 help
240 Use NAND flash attached to the CAFÉ chip designed for the $100
241 laptop.
240 242
241config MTD_NAND_CS553X 243config MTD_NAND_CS553X
242 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" 244 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
@@ -270,4 +272,13 @@ config MTD_NAND_NANDSIM
270 The simulator may simulate various NAND flash chips for the 272 The simulator may simulate various NAND flash chips for the
271 MTD nand layer. 273 MTD nand layer.
272 274
275config MTD_NAND_PLATFORM
276 tristate "Support for generic platform NAND driver"
277 depends on MTD_NAND
278 help
279 This implements a generic NAND driver for on-SOC platform
280 devices. You will need to provide platform-specific functions
281 via platform_data.
282
283
273endif # MTD_NAND 284endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6872031a3fb2..edba1db14bfa 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -26,6 +26,6 @@ obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
26obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o 26obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
29obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
29 30
30nand-objs := nand_base.o nand_bbt.o 31nand-objs := nand_base.o nand_bbt.o
31cafe_nand-objs := cafe.o cafe_ecc.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index 14b80cc90a7b..512e999177f7 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -82,6 +82,10 @@ static void at91_nand_disable(struct at91_nand_host *host)
82 at91_set_gpio_value(host->board->enable_pin, 1); 82 at91_set_gpio_value(host->board->enable_pin, 1);
83} 83}
84 84
85#ifdef CONFIG_MTD_PARTITIONS
86const char *part_probes[] = { "cmdlinepart", NULL };
87#endif
88
85/* 89/*
86 * Probe for the NAND device. 90 * Probe for the NAND device.
87 */ 91 */
@@ -151,6 +155,12 @@ static int __init at91_nand_probe(struct platform_device *pdev)
151#ifdef CONFIG_MTD_PARTITIONS 155#ifdef CONFIG_MTD_PARTITIONS
152 if (host->board->partition_info) 156 if (host->board->partition_info)
153 partitions = host->board->partition_info(mtd->size, &num_partitions); 157 partitions = host->board->partition_info(mtd->size, &num_partitions);
158#ifdef CONFIG_MTD_CMDLINE_PARTS
159 else {
160 mtd->name = "at91_nand";
161 num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
162 }
163#endif
154 164
155 if ((!partitions) || (num_partitions == 0)) { 165 if ((!partitions) || (num_partitions == 0)) {
156 printk(KERN_ERR "at91_nand: No partitions defined, or unsupported device.\n"); 166 printk(KERN_ERR "at91_nand: No partitions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/cafe_ecc.c b/drivers/mtd/nand/cafe_ecc.c
deleted file mode 100644
index ea5c8491d2c5..000000000000
--- a/drivers/mtd/nand/cafe_ecc.c
+++ /dev/null
@@ -1,1381 +0,0 @@
1/* Error correction for CAFÉ NAND controller
2 *
3 * © 2006 Marvell, Inc.
4 * Author: Tom Chiou
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59
18 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24
25static unsigned short gf4096_mul(unsigned short, unsigned short);
26static unsigned short gf64_mul(unsigned short, unsigned short);
27static unsigned short gf4096_inv(unsigned short);
28static unsigned short err_pos(unsigned short);
29static void find_4bit_err_coefs(unsigned short, unsigned short, unsigned short,
30 unsigned short, unsigned short, unsigned short,
31 unsigned short, unsigned short, unsigned short *);
32static void zero_4x5_col3(unsigned short[4][5]);
33static void zero_4x5_col2(unsigned short[4][5]);
34static void zero_4x5_col1(unsigned short[4][5]);
35static void swap_4x5_rows(unsigned short[4][5], int, int, int);
36static void swap_2x3_rows(unsigned short m[2][3]);
37static void solve_4x5(unsigned short m[4][5], unsigned short *, int *);
38static void sort_coefs(int *, unsigned short *, int);
39static void find_4bit_err_pats(unsigned short, unsigned short, unsigned short,
40 unsigned short, unsigned short, unsigned short,
41 unsigned short, unsigned short, unsigned short *);
42static void find_3bit_err_coefs(unsigned short, unsigned short, unsigned short,
43 unsigned short, unsigned short, unsigned short,
44 unsigned short *);
45static void zero_3x4_col2(unsigned short[3][4]);
46static void zero_3x4_col1(unsigned short[3][4]);
47static void swap_3x4_rows(unsigned short[3][4], int, int, int);
48static void solve_3x4(unsigned short[3][4], unsigned short *, int *);
49static void find_3bit_err_pats(unsigned short, unsigned short, unsigned short,
50 unsigned short, unsigned short, unsigned short,
51 unsigned short *);
52
53static void find_2bit_err_pats(unsigned short, unsigned short, unsigned short,
54 unsigned short, unsigned short *);
55static void find_2x2_soln(unsigned short, unsigned short, unsigned short,
56 unsigned short, unsigned short, unsigned short,
57 unsigned short *);
58static void solve_2x3(unsigned short[2][3], unsigned short *);
59static int chk_no_err_only(unsigned short *, unsigned short *);
60static int chk_1_err_only(unsigned short *, unsigned short *);
61static int chk_2_err_only(unsigned short *, unsigned short *);
62static int chk_3_err_only(unsigned short *, unsigned short *);
63static int chk_4_err_only(unsigned short *, unsigned short *);
64
65static unsigned short gf64_mul(unsigned short a, unsigned short b)
66{
67 unsigned short tmp1, tmp2, tmp3, tmp4, tmp5;
68 unsigned short c_bit0, c_bit1, c_bit2, c_bit3, c_bit4, c_bit5, c;
69
70 tmp1 = ((a) ^ (a >> 5));
71 tmp2 = ((a >> 4) ^ (a >> 5));
72 tmp3 = ((a >> 3) ^ (a >> 4));
73 tmp4 = ((a >> 2) ^ (a >> 3));
74 tmp5 = ((a >> 1) ^ (a >> 2));
75
76 c_bit0 = ((a & b) ^ ((a >> 5) & (b >> 1)) ^ ((a >> 4) & (b >> 2)) ^
77 ((a >> 3) & (b >> 3)) ^ ((a >> 2) & (b >> 4)) ^ ((a >> 1) & (b >> 5))) & 0x1;
78
79 c_bit1 = (((a >> 1) & b) ^ (tmp1 & (b >> 1)) ^ (tmp2 & (b >> 2)) ^
80 (tmp3 & (b >> 3)) ^ (tmp4 & (b >> 4)) ^ (tmp5 & (b >> 5))) & 0x1;
81
82 c_bit2 = (((a >> 2) & b) ^ ((a >> 1) & (b >> 1)) ^ (tmp1 & (b >> 2)) ^
83 (tmp2 & (b >> 3)) ^ (tmp3 & (b >> 4)) ^ (tmp4 & (b >> 5))) & 0x1;
84
85 c_bit3 = (((a >> 3) & b) ^ ((a >> 2) & (b >> 1)) ^ ((a >> 1) & (b >> 2)) ^
86 (tmp1 & (b >> 3)) ^ (tmp2 & (b >> 4)) ^ (tmp3 & (b >> 5))) & 0x1;
87
88 c_bit4 = (((a >> 4) & b) ^ ((a >> 3) & (b >> 1)) ^ ((a >> 2) & (b >> 2)) ^
89 ((a >> 1) & (b >> 3)) ^ (tmp1 & (b >> 4)) ^ (tmp2 & (b >> 5))) & 0x1;
90
91 c_bit5 = (((a >> 5) & b) ^ ((a >> 4) & (b >> 1)) ^ ((a >> 3) & (b >> 2)) ^
92 ((a >> 2) & (b >> 3)) ^ ((a >> 1) & (b >> 4)) ^ (tmp1 & (b >> 5))) & 0x1;
93
94 c = c_bit0 | (c_bit1 << 1) | (c_bit2 << 2) | (c_bit3 << 3) | (c_bit4 << 4) | (c_bit5 << 5);
95
96 return c;
97}
98
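gf64_mul unrolls a GF(2^6) multiplication into per-bit XOR terms; expanding the expressions, they appear to match reduction by the primitive polynomial x^6 + x + 1. A compact shift-and-add reference with the same reduction, useful for cross-checking the unrolled form (the test value in main is arbitrary):

    #include <stdio.h>

    /* Reference GF(2^6) multiply: shift-and-add, reducing by
       x^6 + x + 1 (0x43). */
    static unsigned short gf64_mul_ref(unsigned short a, unsigned short b)
    {
            unsigned short r = 0;
            for (int i = 0; i < 6; i++) {
                    if (b & 1)
                            r ^= a;
                    b >>= 1;
                    a <<= 1;
                    if (a & 0x40)       /* degree 6 appeared: fold x^6 -> x+1 */
                            a ^= 0x43;  /* also clears bit 6 */
            }
            return r & 0x3f;
    }

    int main(void)
    {
            /* x * x^5 = x^6 = x + 1 = 0x03 under this reduction */
            printf("0x%02x\n", gf64_mul_ref(0x02, 0x20));
            return 0;
    }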
99static unsigned short gf4096_mul(unsigned short a, unsigned short b)
100{
101 unsigned short ah, al, bh, bl, alxah, blxbh, ablh, albl, ahbh, ahbhB, c;
102
103 ah = (a >> 6) & 0x3f;
104 al = a & 0x3f;
105 bh = (b >> 6) & 0x3f;
106 bl = b & 0x3f;
107 alxah = al ^ ah;
108 blxbh = bl ^ bh;
109
110 ablh = gf64_mul(alxah, blxbh);
111 albl = gf64_mul(al, bl);
112 ahbh = gf64_mul(ah, bh);
113
114 ahbhB = ((ahbh & 0x1) << 5) |
115 ((ahbh & 0x20) >> 1) |
116 ((ahbh & 0x10) >> 1) | ((ahbh & 0x8) >> 1) | ((ahbh & 0x4) >> 1) | (((ahbh >> 1) ^ ahbh) & 0x1);
117
118 c = ((ablh ^ albl) << 6) | (ahbhB ^ albl);
119 return c;
120}
121
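gf4096_mul treats GF(2^12) as a degree-2 extension GF(64)[Y]/(Y^2 + Y + B): an element is ah*Y + al, and the Karatsuba split above ((al^ah)*(bl^bh), al*bl, ah*bh) yields the product from three base-field multiplies, the ahbhB bit shuffle being the constant multiply by B. A reference formulation under that reading; the constant 0x21 is derived from the shuffle rather than documented, so treat it as an assumption:

    #include <stdio.h>

    /* GF(2^6) multiply, reduction by x^6 + x + 1 (as in the sketch above). */
    static unsigned short gf64(unsigned short a, unsigned short b)
    {
            unsigned short r = 0;
            for (int i = 0; i < 6; i++) {
                    if (b & 1)
                            r ^= a;
                    b >>= 1;
                    a <<= 1;
                    if (a & 0x40)
                            a ^= 0x43;
            }
            return r & 0x3f;
    }

    /* Karatsuba in GF(64)[Y]/(Y^2 + Y + B):
         hi = (al^ah)(bl^bh) ^ al*bl
         lo = al*bl ^ (ah*bh)*B
       B = 0x21 reproduces the driver's ahbhB shuffle (derived, unverified). */
    static unsigned short gf4096_ref(unsigned short a, unsigned short b)
    {
            unsigned short ah = (a >> 6) & 0x3f, al = a & 0x3f;
            unsigned short bh = (b >> 6) & 0x3f, bl = b & 0x3f;
            unsigned short mid = gf64(al ^ ah, bl ^ bh);
            unsigned short lo  = gf64(al, bl);
            unsigned short hh  = gf64(ah, bh);
            return (unsigned short)(((mid ^ lo) << 6) | (gf64(hh, 0x21) ^ lo));
    }

    int main(void)
    {
            printf("0x%03x\n", gf4096_ref(0x001, 0xabc));  /* identity: 0xabc */
            return 0;
    }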
122static void find_2bit_err_pats(unsigned short s0, unsigned short s1, unsigned short r0, unsigned short r1, unsigned short *pats)
123{
124 find_2x2_soln(0x1, 0x1, r0, r1, s0, s1, pats);
125}
126
127static void find_3bit_err_coefs(unsigned short s0, unsigned short s1,
128 unsigned short s2, unsigned short s3, unsigned short s4, unsigned short s5, unsigned short *coefs)
129{
130 unsigned short m[3][4];
131 int row_order[3];
132
133 row_order[0] = 0;
134 row_order[1] = 1;
135 row_order[2] = 2;
136 m[0][0] = s2;
137 m[0][1] = s1;
138 m[0][2] = s0;
139 m[0][3] = s3;
140 m[1][0] = s3;
141 m[1][1] = s2;
142 m[1][2] = s1;
143 m[1][3] = s4;
144 m[2][0] = s4;
145 m[2][1] = s3;
146 m[2][2] = s2;
147 m[2][3] = s5;
148
149 if (m[0][2] != 0x0) {
150 zero_3x4_col2(m);
151 } else if (m[1][2] != 0x0) {
152 swap_3x4_rows(m, 0, 1, 4);
153 zero_3x4_col2(m);
154 } else if (m[2][2] != 0x0) {
155 swap_3x4_rows(m, 0, 2, 4);
156 zero_3x4_col2(m);
157 } else {
158 printk(KERN_ERR "Error: find_3bit_err_coefs, s0,s1,s2 all zeros!\n");
159 }
160
161 if (m[1][1] != 0x0) {
162 zero_3x4_col1(m);
163 } else if (m[2][1] != 0x0) {
164 swap_3x4_rows(m, 1, 2, 4);
165 zero_3x4_col1(m);
166 } else {
167 printk(KERN_ERR "Error: find_3bit_err_coefs, cannot resolve col 1!\n");
168 }
169
170 /* solve coefs */
171 solve_3x4(m, coefs, row_order);
172}
173
174static void zero_3x4_col2(unsigned short m[3][4])
175{
176 unsigned short minv1, minv2;
177
178 minv1 = gf4096_mul(m[1][2], gf4096_inv(m[0][2]));
179 minv2 = gf4096_mul(m[2][2], gf4096_inv(m[0][2]));
180 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
181 m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
182 m[1][3] = m[1][3] ^ gf4096_mul(m[0][3], minv1);
183 m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
184 m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
185 m[2][3] = m[2][3] ^ gf4096_mul(m[0][3], minv2);
186}
187
188static void zero_3x4_col1(unsigned short m[3][4])
189{
190 unsigned short minv;
191 minv = gf4096_mul(m[2][1], gf4096_inv(m[1][1]));
192 m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv);
193 m[2][3] = m[2][3] ^ gf4096_mul(m[1][3], minv);
194}
195
196static void swap_3x4_rows(unsigned short m[3][4], int i, int j, int col_width)
197{
198 unsigned short tmp0;
199 int cnt;
200 for (cnt = 0; cnt < col_width; cnt++) {
201 tmp0 = m[i][cnt];
202 m[i][cnt] = m[j][cnt];
203 m[j][cnt] = tmp0;
204 }
205}
206
207static void solve_3x4(unsigned short m[3][4], unsigned short *coefs, int *row_order)
208{
209 unsigned short tmp[3];
210 tmp[0] = gf4096_mul(m[2][3], gf4096_inv(m[2][0]));
211 tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ m[1][3]), gf4096_inv(m[1][1]));
212 tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^ gf4096_mul(tmp[1], m[0][1]) ^ m[0][3]), gf4096_inv(m[0][2]));
213 sort_coefs(row_order, tmp, 3);
214 coefs[0] = tmp[0];
215 coefs[1] = tmp[1];
216 coefs[2] = tmp[2];
217}
218
219static void find_3bit_err_pats(unsigned short s0, unsigned short s1,
220 unsigned short s2, unsigned short r0,
221 unsigned short r1, unsigned short r2,
222 unsigned short *pats)
223{
224 find_2x2_soln(r0 ^ r2, r1 ^ r2,
225 gf4096_mul(r0, r0 ^ r2), gf4096_mul(r1, r1 ^ r2),
226 gf4096_mul(s0, r2) ^ s1, gf4096_mul(s1, r2) ^ s2, pats);
227 pats[2] = s0 ^ pats[0] ^ pats[1];
228}
229
230static void find_4bit_err_coefs(unsigned short s0, unsigned short s1,
231 unsigned short s2, unsigned short s3,
232 unsigned short s4, unsigned short s5,
233 unsigned short s6, unsigned short s7,
234 unsigned short *coefs)
235{
236 unsigned short m[4][5];
237 int row_order[4];
238
239 row_order[0] = 0;
240 row_order[1] = 1;
241 row_order[2] = 2;
242 row_order[3] = 3;
243
244 m[0][0] = s3;
245 m[0][1] = s2;
246 m[0][2] = s1;
247 m[0][3] = s0;
248 m[0][4] = s4;
249 m[1][0] = s4;
250 m[1][1] = s3;
251 m[1][2] = s2;
252 m[1][3] = s1;
253 m[1][4] = s5;
254 m[2][0] = s5;
255 m[2][1] = s4;
256 m[2][2] = s3;
257 m[2][3] = s2;
258 m[2][4] = s6;
259 m[3][0] = s6;
260 m[3][1] = s5;
261 m[3][2] = s4;
262 m[3][3] = s3;
263 m[3][4] = s7;
264
265 if (m[0][3] != 0x0) {
266 zero_4x5_col3(m);
267 } else if (m[1][3] != 0x0) {
268 swap_4x5_rows(m, 0, 1, 5);
269 zero_4x5_col3(m);
270 } else if (m[2][3] != 0x0) {
271 swap_4x5_rows(m, 0, 2, 5);
272 zero_4x5_col3(m);
273 } else if (m[3][3] != 0x0) {
274 swap_4x5_rows(m, 0, 3, 5);
275 zero_4x5_col3(m);
276 } else {
277 printk(KERN_ERR "Error: find_4bit_err_coefs, s0,s1,s2,s3 all zeros!\n");
278 }
279
280 if (m[1][2] != 0x0) {
281 zero_4x5_col2(m);
282 } else if (m[2][2] != 0x0) {
283 swap_4x5_rows(m, 1, 2, 5);
284 zero_4x5_col2(m);
285 } else if (m[3][2] != 0x0) {
286 swap_4x5_rows(m, 1, 3, 5);
287 zero_4x5_col2(m);
288 } else {
289 printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 2!\n");
290 }
291
292 if (m[2][1] != 0x0) {
293 zero_4x5_col1(m);
294 } else if (m[3][1] != 0x0) {
295 swap_4x5_rows(m, 2, 3, 5);
296 zero_4x5_col1(m);
297 } else {
298 printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 1!\n");
299 }
300
301 solve_4x5(m, coefs, row_order);
302}
303
304static void zero_4x5_col3(unsigned short m[4][5])
305{
306 unsigned short minv1, minv2, minv3;
307
308 minv1 = gf4096_mul(m[1][3], gf4096_inv(m[0][3]));
309 minv2 = gf4096_mul(m[2][3], gf4096_inv(m[0][3]));
310 minv3 = gf4096_mul(m[3][3], gf4096_inv(m[0][3]));
311
312 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
313 m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
314 m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv1);
315 m[1][4] = m[1][4] ^ gf4096_mul(m[0][4], minv1);
316 m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
317 m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
318 m[2][2] = m[2][2] ^ gf4096_mul(m[0][2], minv2);
319 m[2][4] = m[2][4] ^ gf4096_mul(m[0][4], minv2);
320 m[3][0] = m[3][0] ^ gf4096_mul(m[0][0], minv3);
321 m[3][1] = m[3][1] ^ gf4096_mul(m[0][1], minv3);
322 m[3][2] = m[3][2] ^ gf4096_mul(m[0][2], minv3);
323 m[3][4] = m[3][4] ^ gf4096_mul(m[0][4], minv3);
324}
325
326static void zero_4x5_col2(unsigned short m[4][5])
327{
328 unsigned short minv2, minv3;
329
330 minv2 = gf4096_mul(m[2][2], gf4096_inv(m[1][2]));
331 minv3 = gf4096_mul(m[3][2], gf4096_inv(m[1][2]));
332
333 m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv2);
334 m[2][1] = m[2][1] ^ gf4096_mul(m[1][1], minv2);
335 m[2][4] = m[2][4] ^ gf4096_mul(m[1][4], minv2);
336 m[3][0] = m[3][0] ^ gf4096_mul(m[1][0], minv3);
337 m[3][1] = m[3][1] ^ gf4096_mul(m[1][1], minv3);
338 m[3][4] = m[3][4] ^ gf4096_mul(m[1][4], minv3);
339}
340
341static void zero_4x5_col1(unsigned short m[4][5])
342{
343 unsigned short minv;
344
345 minv = gf4096_mul(m[3][1], gf4096_inv(m[2][1]));
346
347 m[3][0] = m[3][0] ^ gf4096_mul(m[2][0], minv);
348 m[3][4] = m[3][4] ^ gf4096_mul(m[2][4], minv);
349}
350
351static void swap_4x5_rows(unsigned short m[4][5], int i, int j, int col_width)
352{
353 unsigned short tmp0;
354 int cnt;
355
356 for (cnt = 0; cnt < col_width; cnt++) {
357 tmp0 = m[i][cnt];
358 m[i][cnt] = m[j][cnt];
359 m[j][cnt] = tmp0;
360 }
361}
362
363static void solve_4x5(unsigned short m[4][5], unsigned short *coefs, int *row_order)
364{
365 unsigned short tmp[4];
366
367 tmp[0] = gf4096_mul(m[3][4], gf4096_inv(m[3][0]));
368 tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[2][0]) ^ m[2][4]), gf4096_inv(m[2][1]));
369 tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ gf4096_mul(tmp[1], m[1][1]) ^ m[1][4]), gf4096_inv(m[1][2]));
370 tmp[3] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^
371 gf4096_mul(tmp[1], m[0][1]) ^ gf4096_mul(tmp[2], m[0][2]) ^ m[0][4]), gf4096_inv(m[0][3]));
372 sort_coefs(row_order, tmp, 4);
373 coefs[0] = tmp[0];
374 coefs[1] = tmp[1];
375 coefs[2] = tmp[2];
376 coefs[3] = tmp[3];
377}
378
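/*
 * Selection sort on the row-order tags, swapping the paired solution
 * values in tandem so the coefficients stay matched to their tags.
 */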
379static void sort_coefs(int *order, unsigned short *soln, int len)
380{
381 int cnt, start_cnt, least_ord, least_cnt;
382 unsigned short tmp0;
383 for (start_cnt = 0; start_cnt < len; start_cnt++) {
384 for (cnt = start_cnt; cnt < len; cnt++) {
385 if (cnt == start_cnt) {
386 least_ord = order[cnt];
387 least_cnt = start_cnt;
388 } else {
389 if (least_ord > order[cnt]) {
390 least_ord = order[cnt];
391 least_cnt = cnt;
392 }
393 }
394 }
395 if (least_cnt != start_cnt) {
396 tmp0 = order[least_cnt];
397 order[least_cnt] = order[start_cnt];
398 order[start_cnt] = tmp0;
399 tmp0 = soln[least_cnt];
400 soln[least_cnt] = soln[start_cnt];
401 soln[start_cnt] = tmp0;
402 }
403 }
404}
405
406static void find_4bit_err_pats(unsigned short s0, unsigned short s1,
407 unsigned short s2, unsigned short s3,
408 unsigned short z1, unsigned short z2,
409 unsigned short z3, unsigned short z4,
410 unsigned short *pats)
411{
412 unsigned short z4_z1, z3z4_z3z3, z4_z2, s0z4_s1, z1z4_z1z1,
413 z4_z3, z2z4_z2z2, s1z4_s2, z3z3z4_z3z3z3, z1z1z4_z1z1z1, z2z2z4_z2z2z2, s2z4_s3;
414 unsigned short tmp0, tmp1, tmp2, tmp3;
415
416 z4_z1 = z4 ^ z1;
417 z3z4_z3z3 = gf4096_mul(z3, z4) ^ gf4096_mul(z3, z3);
418 z4_z2 = z4 ^ z2;
419 s0z4_s1 = gf4096_mul(s0, z4) ^ s1;
420 z1z4_z1z1 = gf4096_mul(z1, z4) ^ gf4096_mul(z1, z1);
421 z4_z3 = z4 ^ z3;
422 z2z4_z2z2 = gf4096_mul(z2, z4) ^ gf4096_mul(z2, z2);
423 s1z4_s2 = gf4096_mul(s1, z4) ^ s2;
424 z3z3z4_z3z3z3 = gf4096_mul(gf4096_mul(z3, z3), z4) ^ gf4096_mul(gf4096_mul(z3, z3), z3);
425 z1z1z4_z1z1z1 = gf4096_mul(gf4096_mul(z1, z1), z4) ^ gf4096_mul(gf4096_mul(z1, z1), z1);
426 z2z2z4_z2z2z2 = gf4096_mul(gf4096_mul(z2, z2), z4) ^ gf4096_mul(gf4096_mul(z2, z2), z2);
427 s2z4_s3 = gf4096_mul(s2, z4) ^ s3;
428
429 /* find error patterns 0 and 1 */
430 find_2x2_soln(gf4096_mul(z4_z1, z3z4_z3z3) ^
431 gf4096_mul(z1z4_z1z1, z4_z3),
432 gf4096_mul(z4_z2, z3z4_z3z3) ^
433 gf4096_mul(z2z4_z2z2, z4_z3),
434 gf4096_mul(z1z4_z1z1, z3z3z4_z3z3z3) ^
435 gf4096_mul(z1z1z4_z1z1z1, z3z4_z3z3),
436 gf4096_mul(z2z4_z2z2, z3z3z4_z3z3z3) ^
437 gf4096_mul(z2z2z4_z2z2z2, z3z4_z3z3),
438 gf4096_mul(s0z4_s1, z3z4_z3z3) ^
439 gf4096_mul(s1z4_s2, z4_z3),
440 gf4096_mul(s1z4_s2, z3z3z4_z3z3z3) ^
441 gf4096_mul(s2z4_s3, z3z4_z3z3), pats);
442 tmp0 = pats[0];
443 tmp1 = pats[1];
444 tmp2 = pats[0] ^ pats[1] ^ s0;
445 tmp3 = gf4096_mul(pats[0], z1) ^ gf4096_mul(pats[1], z2) ^ s1;
446
447 /* find error patterns 2 and 3 */
448 find_2x2_soln(0x1, 0x1, z3, z4, tmp2, tmp3, pats);
449 pats[2] = pats[0];
450 pats[3] = pats[1];
451 pats[0] = tmp0;
452 pats[1] = tmp1;
453}
454
455static void find_2x2_soln(unsigned short c00, unsigned short c01,
456 unsigned short c10, unsigned short c11,
457 unsigned short lval0, unsigned short lval1,
458 unsigned short *soln)
459{
460 unsigned short m[2][3];
461 m[0][0] = c00;
462 m[0][1] = c01;
463 m[0][2] = lval0;
464 m[1][0] = c10;
465 m[1][1] = c11;
466 m[1][2] = lval1;
467
468 if (m[0][1] != 0x0) {
469 /* pivot already non-zero, nothing to do */
470 } else if (m[1][1] != 0x0) {
471 swap_2x3_rows(m);
472 } else {
473 printk(KERN_ERR "Error: find_2x2_soln, both pivots zero, matrix is singular!\n");
474 }
475
476 solve_2x3(m, soln);
477}
478
479static void swap_2x3_rows(unsigned short m[2][3])
480{
481 unsigned short tmp0;
482 int cnt;
483
484 for (cnt = 0; cnt < 3; cnt++) {
485 tmp0 = m[0][cnt];
486 m[0][cnt] = m[1][cnt];
487 m[1][cnt] = tmp0;
488 }
489}
490
491static void solve_2x3(unsigned short m[2][3], unsigned short *coefs)
492{
493 unsigned short minv;
494
495 minv = gf4096_mul(m[1][1], gf4096_inv(m[0][1]));
496 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv);
497 m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv);
498 coefs[0] = gf4096_mul(m[1][2], gf4096_inv(m[1][0]));
499 coefs[1] = gf4096_mul((gf4096_mul(coefs[0], m[0][0]) ^ m[0][2]), gf4096_inv(m[0][1]));
500}
501
502static unsigned char gf64_inv[64] = {
503 0, 1, 33, 62, 49, 43, 31, 44, 57, 37, 52, 28, 46, 40, 22, 25,
504 61, 54, 51, 39, 26, 35, 14, 24, 23, 15, 20, 34, 11, 53, 45, 6,
505 63, 2, 27, 21, 56, 9, 50, 19, 13, 47, 48, 5, 7, 30, 12, 41,
506 42, 4, 38, 18, 10, 29, 17, 60, 36, 8, 59, 58, 55, 16, 3, 32
507};
508
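/*
 * Inversion in GF(4096) viewed as GF(64)[Y]/(Y^2 + Y + A^-1): for
 * din = ah*Y + al, the inverse is (ah*Y + (ah ^ al)) / D, where
 * D = al*(ah ^ al) ^ ah^2 * A^-1.  ah2B computes ah^2 * A^-1 bit-wise,
 * and gf64_inv[] supplies 1/D in the base field.
 */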
509static unsigned short gf4096_inv(unsigned short din)
510{
511 unsigned short alahxal, ah2B, deno, inv, bl, bh;
512 unsigned short ah, al, ahxal;
513 unsigned short dout;
514
515 ah = (din >> 6) & 0x3f;
516 al = din & 0x3f;
517 ahxal = ah ^ al;
518 ah2B = (((ah ^ (ah >> 3)) & 0x1) << 5) |
519 ((ah >> 1) & 0x10) |
520 ((((ah >> 5) ^ (ah >> 2)) & 0x1) << 3) |
521 ((ah >> 2) & 0x4) | ((((ah >> 4) ^ (ah >> 1)) & 0x1) << 1) | (ah & 0x1);
522 alahxal = gf64_mul(ahxal, al);
523 deno = alahxal ^ ah2B;
524 inv = gf64_inv[deno];
525 bl = gf64_mul(inv, ahxal);
526 bh = gf64_mul(inv, ah);
527 dout = ((bh & 0x3f) << 6) | (bl & 0x3f);
528 return dout;
529}
530
531static unsigned short err_pos_lut[4096] = {
532 0xfff, 0x000, 0x451, 0xfff, 0xfff, 0x3cf, 0xfff, 0x041,
533 0xfff, 0xfff, 0xfff, 0xfff, 0x28a, 0xfff, 0x492, 0xfff,
534 0x145, 0xfff, 0xfff, 0x514, 0xfff, 0x082, 0xfff, 0xfff,
535 0xfff, 0x249, 0x38e, 0x410, 0xfff, 0x104, 0x208, 0x1c7,
536 0xfff, 0xfff, 0xfff, 0xfff, 0x2cb, 0xfff, 0xfff, 0xfff,
537 0x0c3, 0x34d, 0x4d3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
538 0xfff, 0xfff, 0xfff, 0x186, 0xfff, 0xfff, 0xfff, 0xfff,
539 0xfff, 0x30c, 0x555, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
540 0xfff, 0xfff, 0xfff, 0x166, 0xfff, 0xfff, 0xfff, 0xfff,
541 0x385, 0x14e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e1,
542 0xfff, 0xfff, 0xfff, 0xfff, 0x538, 0xfff, 0x16d, 0xfff,
543 0xfff, 0xfff, 0x45b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
544 0xfff, 0xfff, 0xfff, 0x29c, 0x2cc, 0x30b, 0x2b3, 0xfff,
545 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0b3, 0xfff, 0x2f7,
546 0xfff, 0x32b, 0xfff, 0xfff, 0xfff, 0xfff, 0x0a7, 0xfff,
547 0xfff, 0x2da, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
548 0xfff, 0x07e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
549 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x11c, 0xfff, 0xfff,
550 0xfff, 0xfff, 0xfff, 0x22f, 0xfff, 0x1f4, 0xfff, 0xfff,
551 0x2b0, 0x504, 0xfff, 0x114, 0xfff, 0xfff, 0xfff, 0x21d,
552 0xfff, 0xfff, 0xfff, 0xfff, 0x00d, 0x3c4, 0x340, 0x10f,
553 0xfff, 0xfff, 0x266, 0x02e, 0xfff, 0xfff, 0xfff, 0x4f8,
554 0x337, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
555 0xfff, 0xfff, 0xfff, 0x07b, 0x168, 0xfff, 0xfff, 0x0fe,
556 0xfff, 0xfff, 0x51a, 0xfff, 0x458, 0xfff, 0x36d, 0xfff,
557 0xfff, 0xfff, 0xfff, 0x073, 0x37d, 0x415, 0x550, 0xfff,
558 0xfff, 0xfff, 0x23b, 0x4b4, 0xfff, 0xfff, 0xfff, 0x1a1,
559 0xfff, 0xfff, 0x3aa, 0xfff, 0x117, 0x04d, 0x341, 0xfff,
560 0xfff, 0xfff, 0xfff, 0x518, 0x03e, 0x0f2, 0xfff, 0xfff,
561 0xfff, 0xfff, 0xfff, 0x363, 0xfff, 0x0b9, 0xfff, 0xfff,
562 0x241, 0xfff, 0xfff, 0x049, 0xfff, 0xfff, 0xfff, 0xfff,
563 0x15f, 0x52d, 0xfff, 0xfff, 0xfff, 0x29e, 0xfff, 0xfff,
564 0xfff, 0xfff, 0x4cf, 0x0fc, 0xfff, 0x36f, 0x3d3, 0xfff,
565 0x228, 0xfff, 0xfff, 0x45e, 0xfff, 0xfff, 0xfff, 0xfff,
566 0x238, 0xfff, 0xfff, 0xfff, 0xfff, 0x47f, 0xfff, 0xfff,
567 0x43a, 0x265, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e8,
568 0xfff, 0xfff, 0x01a, 0xfff, 0xfff, 0xfff, 0xfff, 0x21e,
569 0x1fc, 0x40b, 0xfff, 0xfff, 0xfff, 0x2d0, 0x159, 0xfff,
570 0xfff, 0x313, 0xfff, 0xfff, 0x05c, 0x4cc, 0xfff, 0xfff,
571 0x0f6, 0x3d5, 0xfff, 0xfff, 0xfff, 0x54f, 0xfff, 0xfff,
572 0xfff, 0x172, 0x1e4, 0x07c, 0xfff, 0xfff, 0xfff, 0xfff,
573 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x53c, 0x1ad, 0x535,
574 0x19b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
575 0xfff, 0xfff, 0x092, 0xfff, 0x2be, 0xfff, 0xfff, 0x482,
576 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0e6, 0xfff, 0xfff,
577 0xfff, 0xfff, 0xfff, 0x476, 0xfff, 0x51d, 0xfff, 0xfff,
578 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
579 0xfff, 0xfff, 0x342, 0x2b5, 0x22e, 0x09a, 0xfff, 0x08d,
580 0x44f, 0x3ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d1, 0xfff,
581 0xfff, 0x543, 0xfff, 0x48f, 0xfff, 0x3d2, 0xfff, 0x0d5,
582 0x113, 0x0ec, 0x427, 0xfff, 0xfff, 0xfff, 0x4c4, 0xfff,
583 0xfff, 0x50a, 0xfff, 0x144, 0xfff, 0x105, 0x39f, 0x294,
584 0x164, 0xfff, 0x31a, 0xfff, 0xfff, 0x49a, 0xfff, 0x130,
585 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
586 0x1be, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
587 0xfff, 0xfff, 0x49e, 0x371, 0xfff, 0xfff, 0xfff, 0xfff,
588 0xfff, 0xfff, 0xfff, 0xfff, 0x0e8, 0x49c, 0x0f4, 0xfff,
589 0x338, 0x1a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
590 0xfff, 0x36c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
591 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
592 0xfff, 0x1ae, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
593 0xfff, 0x31b, 0xfff, 0xfff, 0x2dd, 0x522, 0xfff, 0xfff,
594 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f4,
595 0x3c6, 0x30d, 0xfff, 0xfff, 0xfff, 0xfff, 0x34c, 0x18f,
596 0x30a, 0xfff, 0x01f, 0x079, 0xfff, 0xfff, 0x54d, 0x46b,
597 0x28c, 0x37f, 0xfff, 0xfff, 0xfff, 0xfff, 0x355, 0xfff,
598 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x14f, 0xfff, 0xfff,
599 0xfff, 0xfff, 0xfff, 0x359, 0x3fe, 0x3c5, 0xfff, 0xfff,
600 0xfff, 0xfff, 0x423, 0xfff, 0xfff, 0x34a, 0x22c, 0xfff,
601 0x25a, 0xfff, 0xfff, 0x4ad, 0xfff, 0x28d, 0xfff, 0xfff,
602 0xfff, 0xfff, 0xfff, 0x547, 0xfff, 0xfff, 0xfff, 0xfff,
603 0x2e2, 0xfff, 0xfff, 0x1d5, 0xfff, 0x2a8, 0xfff, 0xfff,
604 0x03f, 0xfff, 0xfff, 0xfff, 0xfff, 0x3eb, 0x0fa, 0xfff,
605 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x55b, 0xfff,
606 0x08e, 0xfff, 0x3ae, 0xfff, 0x3a4, 0xfff, 0x282, 0x158,
607 0xfff, 0x382, 0xfff, 0xfff, 0x499, 0xfff, 0xfff, 0x08a,
608 0xfff, 0xfff, 0xfff, 0x456, 0x3be, 0xfff, 0x1e2, 0xfff,
609 0xfff, 0xfff, 0xfff, 0xfff, 0x559, 0xfff, 0x1a0, 0xfff,
610 0xfff, 0x0b4, 0xfff, 0xfff, 0xfff, 0x2df, 0xfff, 0xfff,
611 0xfff, 0x07f, 0x4f5, 0xfff, 0xfff, 0x27c, 0x133, 0x017,
612 0xfff, 0x3fd, 0xfff, 0xfff, 0xfff, 0x44d, 0x4cd, 0x17a,
613 0x0d7, 0x537, 0xfff, 0xfff, 0x353, 0xfff, 0xfff, 0x351,
614 0x366, 0xfff, 0x44a, 0xfff, 0x1a6, 0xfff, 0xfff, 0xfff,
615 0x291, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1e3,
616 0xfff, 0xfff, 0xfff, 0xfff, 0x389, 0xfff, 0x07a, 0xfff,
617 0x1b6, 0x2ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x24e, 0x074,
618 0xfff, 0xfff, 0x3dc, 0xfff, 0x4e3, 0xfff, 0xfff, 0xfff,
619 0xfff, 0x4eb, 0xfff, 0xfff, 0x3b8, 0x4de, 0xfff, 0x19c,
620 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x262,
621 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x076, 0x4e8, 0x3da,
622 0xfff, 0x531, 0xfff, 0xfff, 0x14a, 0xfff, 0x0a2, 0x433,
623 0x3df, 0x1e9, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e7, 0x285,
624 0x2d8, 0xfff, 0xfff, 0xfff, 0x349, 0x18d, 0x098, 0xfff,
625 0x0df, 0x4bf, 0xfff, 0xfff, 0x0b2, 0xfff, 0x346, 0x24d,
626 0xfff, 0xfff, 0xfff, 0x24f, 0x4fa, 0x2f9, 0xfff, 0xfff,
627 0x3c9, 0xfff, 0x2b4, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
628 0xfff, 0x056, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
629 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
630 0xfff, 0x179, 0xfff, 0x0e9, 0x3f0, 0x33d, 0xfff, 0xfff,
631 0xfff, 0xfff, 0xfff, 0x1fd, 0xfff, 0xfff, 0x526, 0xfff,
632 0xfff, 0xfff, 0x53d, 0xfff, 0xfff, 0xfff, 0x170, 0x331,
633 0xfff, 0x068, 0xfff, 0xfff, 0xfff, 0x3f7, 0xfff, 0x3d8,
634 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
635 0xfff, 0x09f, 0x556, 0xfff, 0xfff, 0x02d, 0xfff, 0xfff,
636 0x553, 0xfff, 0xfff, 0xfff, 0x1f0, 0xfff, 0xfff, 0x4d6,
637 0x41e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d5, 0xfff, 0xfff,
638 0xfff, 0xfff, 0xfff, 0x248, 0xfff, 0xfff, 0xfff, 0x0a3,
639 0xfff, 0x217, 0xfff, 0xfff, 0xfff, 0x4f1, 0x209, 0xfff,
640 0xfff, 0x475, 0x234, 0x52b, 0x398, 0xfff, 0x08b, 0xfff,
641 0xfff, 0xfff, 0xfff, 0x2c2, 0xfff, 0xfff, 0xfff, 0xfff,
642 0xfff, 0xfff, 0x268, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
643 0xfff, 0x4a3, 0xfff, 0x0aa, 0xfff, 0x1d9, 0xfff, 0xfff,
644 0xfff, 0xfff, 0x155, 0xfff, 0xfff, 0xfff, 0xfff, 0x0bf,
645 0x539, 0xfff, 0xfff, 0x2f1, 0x545, 0xfff, 0xfff, 0xfff,
646 0xfff, 0xfff, 0xfff, 0x2a7, 0x06f, 0xfff, 0x378, 0xfff,
647 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x25e, 0xfff,
648 0xfff, 0xfff, 0xfff, 0x15d, 0x02a, 0xfff, 0xfff, 0x0bc,
649 0x235, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
650 0x150, 0xfff, 0x1a9, 0xfff, 0xfff, 0xfff, 0xfff, 0x381,
651 0xfff, 0x04e, 0x270, 0x13f, 0xfff, 0xfff, 0x405, 0xfff,
652 0x3cd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
653 0xfff, 0x2ef, 0xfff, 0x06a, 0xfff, 0xfff, 0xfff, 0x34f,
654 0x212, 0xfff, 0xfff, 0x0e2, 0xfff, 0x083, 0x298, 0xfff,
655 0xfff, 0xfff, 0x0c2, 0xfff, 0xfff, 0x52e, 0xfff, 0x488,
656 0xfff, 0xfff, 0xfff, 0x36b, 0xfff, 0xfff, 0xfff, 0x442,
657 0x091, 0xfff, 0x41c, 0xfff, 0xfff, 0x3a5, 0xfff, 0x4e6,
658 0xfff, 0xfff, 0x40d, 0x31d, 0xfff, 0xfff, 0xfff, 0x4c1,
659 0x053, 0xfff, 0x418, 0x13c, 0xfff, 0x350, 0xfff, 0x0ae,
660 0xfff, 0xfff, 0x41f, 0xfff, 0x470, 0xfff, 0x4ca, 0xfff,
661 0xfff, 0xfff, 0x02b, 0x450, 0xfff, 0x1f8, 0xfff, 0xfff,
662 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x293, 0xfff,
663 0xfff, 0xfff, 0xfff, 0x411, 0xfff, 0xfff, 0xfff, 0xfff,
664 0xfff, 0xfff, 0xfff, 0xfff, 0x0b8, 0xfff, 0xfff, 0xfff,
665 0x3e1, 0xfff, 0xfff, 0xfff, 0xfff, 0x43c, 0xfff, 0x2b2,
666 0x2ab, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ec,
667 0xfff, 0xfff, 0xfff, 0x3f8, 0x034, 0xfff, 0xfff, 0xfff,
668 0xfff, 0xfff, 0xfff, 0x11a, 0xfff, 0x541, 0x45c, 0x134,
669 0x1cc, 0xfff, 0xfff, 0xfff, 0x469, 0xfff, 0xfff, 0x44b,
670 0x161, 0xfff, 0xfff, 0xfff, 0x055, 0xfff, 0xfff, 0xfff,
671 0xfff, 0x307, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d1, 0xfff,
672 0xfff, 0xfff, 0x124, 0x37b, 0x26b, 0x336, 0xfff, 0xfff,
673 0x2e4, 0x3cb, 0xfff, 0xfff, 0x0f8, 0x3c8, 0xfff, 0xfff,
674 0xfff, 0x461, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4b5,
675 0x2cf, 0xfff, 0xfff, 0xfff, 0x20f, 0xfff, 0x35a, 0xfff,
676 0x490, 0xfff, 0x185, 0xfff, 0xfff, 0xfff, 0xfff, 0x42e,
677 0xfff, 0xfff, 0xfff, 0xfff, 0x54b, 0xfff, 0xfff, 0xfff,
678 0x146, 0xfff, 0x412, 0xfff, 0xfff, 0xfff, 0x1ff, 0xfff,
679 0xfff, 0x3e0, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d5, 0xfff,
680 0x4df, 0x505, 0xfff, 0x413, 0xfff, 0x1a5, 0xfff, 0x3b2,
681 0xfff, 0xfff, 0xfff, 0x35b, 0xfff, 0x116, 0xfff, 0xfff,
682 0x171, 0x4d0, 0xfff, 0x154, 0x12d, 0xfff, 0xfff, 0xfff,
683 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x468, 0x4db, 0xfff,
684 0xfff, 0x1df, 0xfff, 0xfff, 0xfff, 0xfff, 0x05a, 0xfff,
685 0x0f1, 0x403, 0xfff, 0x22b, 0x2e0, 0xfff, 0xfff, 0xfff,
686 0x2b7, 0x373, 0xfff, 0xfff, 0xfff, 0xfff, 0x13e, 0xfff,
687 0xfff, 0xfff, 0x0d0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
688 0x329, 0x1d2, 0x3fa, 0x047, 0xfff, 0x2f2, 0xfff, 0xfff,
689 0x141, 0x0ac, 0x1d7, 0xfff, 0x07d, 0xfff, 0xfff, 0xfff,
690 0x1c1, 0xfff, 0x487, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
691 0xfff, 0xfff, 0xfff, 0x045, 0xfff, 0xfff, 0xfff, 0xfff,
692 0x288, 0x0cd, 0xfff, 0xfff, 0xfff, 0xfff, 0x226, 0x1d8,
693 0xfff, 0x153, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4cb,
694 0x528, 0xfff, 0xfff, 0xfff, 0x20a, 0x343, 0x3a1, 0xfff,
695 0xfff, 0xfff, 0x2d7, 0x2d3, 0x1aa, 0x4c5, 0xfff, 0xfff,
696 0xfff, 0x42b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
697 0xfff, 0xfff, 0xfff, 0xfff, 0x3e9, 0xfff, 0x20b, 0x260,
698 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x37c, 0x2fd,
699 0xfff, 0xfff, 0x2c8, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
700 0xfff, 0x31e, 0xfff, 0x335, 0xfff, 0xfff, 0xfff, 0xfff,
701 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
702 0xfff, 0xfff, 0x135, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
703 0xfff, 0xfff, 0x35c, 0x4dd, 0x129, 0xfff, 0xfff, 0xfff,
704 0xfff, 0xfff, 0x1ef, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
705 0xfff, 0x34e, 0xfff, 0xfff, 0xfff, 0xfff, 0x407, 0xfff,
706 0xfff, 0xfff, 0xfff, 0xfff, 0x3ad, 0xfff, 0xfff, 0xfff,
707 0x379, 0xfff, 0xfff, 0x1d0, 0x38d, 0xfff, 0xfff, 0x1e8,
708 0x184, 0x3c1, 0x1c4, 0xfff, 0x1f9, 0xfff, 0xfff, 0x424,
709 0xfff, 0xfff, 0xfff, 0xfff, 0x1d3, 0x0d4, 0xfff, 0x4e9,
710 0xfff, 0xfff, 0xfff, 0x530, 0x107, 0xfff, 0x106, 0x04f,
711 0xfff, 0xfff, 0x4c7, 0x503, 0xfff, 0xfff, 0xfff, 0xfff,
712 0xfff, 0x15c, 0xfff, 0x23f, 0xfff, 0xfff, 0xfff, 0xfff,
713 0xfff, 0xfff, 0xfff, 0xfff, 0x4f3, 0xfff, 0xfff, 0x3c7,
714 0xfff, 0x278, 0xfff, 0xfff, 0x0a6, 0xfff, 0xfff, 0xfff,
715 0x122, 0x1cf, 0xfff, 0x327, 0xfff, 0x2e5, 0xfff, 0x29d,
716 0xfff, 0xfff, 0x3f1, 0xfff, 0xfff, 0x48d, 0xfff, 0xfff,
717 0xfff, 0xfff, 0x054, 0xfff, 0xfff, 0xfff, 0xfff, 0x178,
718 0x27e, 0x4e0, 0x352, 0x02f, 0x09c, 0xfff, 0x2a0, 0xfff,
719 0xfff, 0x46a, 0x457, 0xfff, 0xfff, 0x501, 0xfff, 0x2ba,
720 0xfff, 0xfff, 0xfff, 0x54e, 0x2e7, 0xfff, 0xfff, 0xfff,
721 0xfff, 0xfff, 0x551, 0xfff, 0xfff, 0x1db, 0x2aa, 0xfff,
722 0xfff, 0x4bc, 0xfff, 0xfff, 0x395, 0xfff, 0x0de, 0xfff,
723 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x455, 0xfff, 0x17e,
724 0xfff, 0x221, 0x4a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
725 0x388, 0xfff, 0xfff, 0xfff, 0x308, 0xfff, 0xfff, 0xfff,
726 0x20e, 0x4b9, 0xfff, 0x273, 0x20c, 0x09e, 0xfff, 0x057,
727 0xfff, 0xfff, 0xfff, 0xfff, 0x3f2, 0xfff, 0x1a8, 0x3a6,
728 0x14c, 0xfff, 0xfff, 0x071, 0xfff, 0xfff, 0x53a, 0xfff,
729 0xfff, 0xfff, 0xfff, 0x109, 0xfff, 0xfff, 0x399, 0xfff,
730 0x061, 0x4f0, 0x39e, 0x244, 0xfff, 0x035, 0xfff, 0xfff,
731 0x305, 0x47e, 0x297, 0xfff, 0xfff, 0x2b8, 0xfff, 0xfff,
732 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1bc, 0xfff, 0x2fc,
733 0xfff, 0xfff, 0x554, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b6,
734 0xfff, 0xfff, 0xfff, 0x515, 0x397, 0xfff, 0xfff, 0x12f,
735 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e5,
736 0xfff, 0x4fc, 0xfff, 0xfff, 0x05e, 0xfff, 0xfff, 0xfff,
737 0xfff, 0xfff, 0x0a8, 0x3af, 0x015, 0xfff, 0xfff, 0xfff,
738 0xfff, 0x138, 0xfff, 0xfff, 0xfff, 0x540, 0xfff, 0xfff,
739 0xfff, 0x027, 0x523, 0x2f0, 0xfff, 0xfff, 0xfff, 0xfff,
740 0xfff, 0xfff, 0x16c, 0xfff, 0x27d, 0xfff, 0xfff, 0xfff,
741 0xfff, 0x04c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4dc,
742 0xfff, 0xfff, 0x059, 0x301, 0xfff, 0xfff, 0xfff, 0xfff,
743 0xfff, 0xfff, 0xfff, 0x1a3, 0xfff, 0x15a, 0xfff, 0xfff,
744 0x0a5, 0xfff, 0x435, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
745 0xfff, 0x051, 0xfff, 0xfff, 0x131, 0xfff, 0x4f4, 0xfff,
746 0xfff, 0xfff, 0xfff, 0x441, 0xfff, 0x4fb, 0xfff, 0x03b,
747 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ed, 0x274,
748 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d3, 0x55e, 0x1b3,
749 0xfff, 0x0bd, 0xfff, 0xfff, 0xfff, 0xfff, 0x225, 0xfff,
750 0xfff, 0xfff, 0xfff, 0xfff, 0x4b7, 0xfff, 0xfff, 0x2ff,
751 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c3, 0xfff,
752 0x383, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f6,
753 0xfff, 0xfff, 0x1ee, 0xfff, 0x03d, 0xfff, 0xfff, 0xfff,
754 0xfff, 0xfff, 0x26f, 0x1dc, 0xfff, 0x0db, 0xfff, 0xfff,
755 0xfff, 0xfff, 0xfff, 0x0ce, 0xfff, 0xfff, 0x127, 0x03a,
756 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x311, 0xfff,
757 0xfff, 0x13d, 0x09d, 0x47b, 0x2a6, 0x50d, 0x510, 0x19a,
758 0xfff, 0x354, 0x414, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
759 0xfff, 0xfff, 0x44c, 0x3b0, 0xfff, 0x23d, 0x429, 0xfff,
760 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
761 0x4c0, 0x416, 0xfff, 0x05b, 0xfff, 0xfff, 0x137, 0xfff,
762 0x25f, 0x49f, 0xfff, 0x279, 0x013, 0xfff, 0xfff, 0xfff,
763 0x269, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
764 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d0, 0xfff, 0xfff,
765 0xfff, 0xfff, 0xfff, 0xfff, 0x077, 0xfff, 0xfff, 0x3fb,
766 0xfff, 0xfff, 0xfff, 0xfff, 0x271, 0x3a0, 0xfff, 0xfff,
767 0x40f, 0xfff, 0xfff, 0x3de, 0xfff, 0xfff, 0xfff, 0xfff,
768 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ab, 0x26a,
769 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x489, 0xfff, 0xfff,
770 0x252, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b7, 0x42f, 0xfff,
771 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b7,
772 0xfff, 0x2bb, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
773 0xfff, 0xfff, 0xfff, 0x0f7, 0x01d, 0xfff, 0x067, 0xfff,
774 0xfff, 0xfff, 0xfff, 0x4e2, 0xfff, 0xfff, 0x4bb, 0xfff,
775 0xfff, 0xfff, 0x17b, 0xfff, 0x0ee, 0xfff, 0xfff, 0xfff,
776 0xfff, 0xfff, 0x36e, 0xfff, 0xfff, 0xfff, 0x533, 0xfff,
777 0xfff, 0xfff, 0x4d4, 0x356, 0xfff, 0xfff, 0x375, 0xfff,
778 0xfff, 0xfff, 0xfff, 0x4a4, 0x513, 0xfff, 0xfff, 0xfff,
779 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4ff, 0xfff, 0x2af,
780 0xfff, 0xfff, 0x026, 0xfff, 0x0ad, 0xfff, 0xfff, 0xfff,
781 0xfff, 0x26e, 0xfff, 0xfff, 0xfff, 0xfff, 0x493, 0xfff,
782 0x463, 0x4d2, 0x4be, 0xfff, 0xfff, 0xfff, 0xfff, 0x4f2,
783 0x0b6, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
784 0xfff, 0x32d, 0x315, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
785 0xfff, 0x13a, 0x4a1, 0xfff, 0x27a, 0xfff, 0xfff, 0xfff,
786 0x47a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
787 0x334, 0xfff, 0xfff, 0xfff, 0xfff, 0x54c, 0xfff, 0xfff,
788 0xfff, 0x0c9, 0x007, 0xfff, 0xfff, 0x12e, 0xfff, 0x0ff,
789 0xfff, 0xfff, 0x3f5, 0x509, 0xfff, 0xfff, 0xfff, 0xfff,
790 0x1c3, 0x2ad, 0xfff, 0xfff, 0x47c, 0x261, 0xfff, 0xfff,
791 0xfff, 0xfff, 0xfff, 0x152, 0xfff, 0xfff, 0xfff, 0x339,
792 0xfff, 0x243, 0x1c0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
793 0x063, 0xfff, 0xfff, 0x254, 0xfff, 0xfff, 0x173, 0xfff,
794 0x0c7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
795 0xfff, 0x362, 0x259, 0x485, 0x374, 0x0dc, 0x3ab, 0xfff,
796 0x1c5, 0x534, 0x544, 0xfff, 0xfff, 0x508, 0xfff, 0x402,
797 0x408, 0xfff, 0x0e7, 0xfff, 0xfff, 0x00a, 0x205, 0xfff,
798 0xfff, 0x2b9, 0xfff, 0xfff, 0xfff, 0x465, 0xfff, 0xfff,
799 0xfff, 0xfff, 0xfff, 0xfff, 0x23a, 0xfff, 0xfff, 0xfff,
800 0xfff, 0x147, 0x19d, 0x115, 0x214, 0xfff, 0x090, 0x368,
801 0xfff, 0x210, 0xfff, 0xfff, 0x280, 0x52a, 0x163, 0x148,
802 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x326, 0xfff, 0xfff,
803 0xfff, 0xfff, 0xfff, 0x2de, 0xfff, 0xfff, 0xfff, 0xfff,
804 0x206, 0x2c1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
805 0x189, 0xfff, 0xfff, 0xfff, 0xfff, 0x367, 0xfff, 0x1a4,
806 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x443, 0xfff, 0x27b,
807 0xfff, 0xfff, 0x251, 0x549, 0xfff, 0xfff, 0xfff, 0xfff,
808 0xfff, 0xfff, 0x188, 0x04b, 0xfff, 0xfff, 0xfff, 0x31f,
809 0x4a6, 0xfff, 0x246, 0x1de, 0x156, 0xfff, 0xfff, 0xfff,
810 0x3a9, 0xfff, 0xfff, 0xfff, 0x2fa, 0xfff, 0x128, 0x0d1,
811 0x449, 0x255, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
812 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
813 0xfff, 0xfff, 0xfff, 0xfff, 0x258, 0xfff, 0xfff, 0xfff,
814 0x532, 0xfff, 0xfff, 0xfff, 0x303, 0x517, 0xfff, 0xfff,
815 0x2a9, 0x24a, 0xfff, 0xfff, 0x231, 0xfff, 0xfff, 0xfff,
816 0xfff, 0xfff, 0x4b6, 0x516, 0xfff, 0xfff, 0x0e4, 0x0eb,
817 0xfff, 0x4e4, 0xfff, 0x275, 0xfff, 0xfff, 0x031, 0xfff,
818 0xfff, 0xfff, 0xfff, 0xfff, 0x025, 0x21a, 0xfff, 0x0cc,
819 0x45f, 0x3d9, 0x289, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
820 0xfff, 0xfff, 0x23e, 0xfff, 0xfff, 0xfff, 0x438, 0x097,
821 0x419, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
822 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
823 0xfff, 0xfff, 0x0a9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
824 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
825 0x37e, 0x0e0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x431,
826 0x372, 0xfff, 0xfff, 0xfff, 0x1ba, 0x06e, 0xfff, 0x1b1,
827 0xfff, 0xfff, 0x12a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
828 0xfff, 0xfff, 0x193, 0xfff, 0xfff, 0xfff, 0xfff, 0x10a,
829 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x048, 0x1b4,
830 0xfff, 0xfff, 0xfff, 0xfff, 0x295, 0x140, 0x108, 0xfff,
831 0xfff, 0xfff, 0xfff, 0x16f, 0xfff, 0x0a4, 0x37a, 0xfff,
832 0x29a, 0xfff, 0x284, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c6,
833 0x2a2, 0x3a3, 0xfff, 0x201, 0xfff, 0xfff, 0xfff, 0x4bd,
834 0x005, 0x54a, 0x3b5, 0x204, 0x2ee, 0x11d, 0x436, 0xfff,
835 0xfff, 0xfff, 0xfff, 0xfff, 0x3ec, 0xfff, 0xfff, 0xfff,
836 0xfff, 0xfff, 0xfff, 0xfff, 0x11f, 0x498, 0x21c, 0xfff,
837 0xfff, 0xfff, 0x3d6, 0xfff, 0x4ab, 0xfff, 0x432, 0x2eb,
838 0x542, 0x4fd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
839 0xfff, 0xfff, 0xfff, 0x4ce, 0xfff, 0xfff, 0x2fb, 0xfff,
840 0xfff, 0x2e1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
841 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b9, 0x037, 0x0dd,
842 0xfff, 0xfff, 0xfff, 0x2bf, 0x521, 0x496, 0x095, 0xfff,
843 0xfff, 0x328, 0x070, 0x1bf, 0xfff, 0x393, 0xfff, 0xfff,
844 0x102, 0xfff, 0xfff, 0x21b, 0xfff, 0x142, 0x263, 0x519,
845 0xfff, 0x2a5, 0x177, 0xfff, 0x14d, 0x471, 0x4ae, 0xfff,
846 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
847 0x1f6, 0xfff, 0x481, 0xfff, 0xfff, 0xfff, 0x151, 0xfff,
848 0xfff, 0xfff, 0x085, 0x33f, 0xfff, 0xfff, 0xfff, 0x084,
849 0xfff, 0xfff, 0xfff, 0x345, 0x3a2, 0xfff, 0xfff, 0x0a0,
850 0x0da, 0x024, 0xfff, 0xfff, 0xfff, 0x1bd, 0xfff, 0x55c,
851 0x467, 0x445, 0xfff, 0xfff, 0xfff, 0x052, 0xfff, 0xfff,
852 0xfff, 0xfff, 0x51e, 0xfff, 0xfff, 0x39d, 0xfff, 0x35f,
853 0xfff, 0x376, 0x3ee, 0xfff, 0xfff, 0xfff, 0xfff, 0x448,
854 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x16a,
855 0xfff, 0x036, 0x38f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
856 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x211,
857 0xfff, 0xfff, 0xfff, 0x230, 0xfff, 0xfff, 0x3ba, 0xfff,
858 0xfff, 0xfff, 0x3ce, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
859 0xfff, 0xfff, 0xfff, 0x229, 0xfff, 0x176, 0xfff, 0xfff,
860 0xfff, 0xfff, 0xfff, 0x00b, 0xfff, 0x162, 0x018, 0xfff,
861 0xfff, 0x233, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
862 0x400, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
863 0xfff, 0xfff, 0xfff, 0x12b, 0xfff, 0xfff, 0xfff, 0xfff,
864 0xfff, 0x3f4, 0xfff, 0x0f0, 0xfff, 0x1ac, 0xfff, 0xfff,
865 0x119, 0xfff, 0x2c0, 0xfff, 0xfff, 0xfff, 0x49b, 0xfff,
866 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x23c, 0xfff,
867 0x4b3, 0x010, 0x064, 0xfff, 0xfff, 0x4ba, 0xfff, 0xfff,
868 0xfff, 0xfff, 0xfff, 0x3c2, 0xfff, 0xfff, 0xfff, 0xfff,
869 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x006, 0x196, 0xfff,
870 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x100, 0x191, 0xfff,
871 0x1ea, 0x29f, 0xfff, 0xfff, 0xfff, 0x276, 0xfff, 0xfff,
872 0x2b1, 0x3b9, 0xfff, 0x03c, 0xfff, 0xfff, 0xfff, 0x180,
873 0xfff, 0x08f, 0xfff, 0xfff, 0x19e, 0x019, 0xfff, 0x0b0,
874 0x0fd, 0x332, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
875 0xfff, 0x06b, 0x2e8, 0xfff, 0x446, 0xfff, 0xfff, 0x004,
876 0x247, 0x197, 0xfff, 0x112, 0x169, 0x292, 0xfff, 0x302,
877 0xfff, 0xfff, 0x33b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
878 0xfff, 0xfff, 0xfff, 0x287, 0x21f, 0xfff, 0x3ea, 0xfff,
879 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e7, 0xfff, 0xfff,
880 0xfff, 0xfff, 0xfff, 0x3a8, 0xfff, 0xfff, 0x2bc, 0xfff,
881 0x484, 0x296, 0xfff, 0x1c9, 0x08c, 0x1e5, 0x48a, 0xfff,
882 0x360, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
883 0x1ca, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
884 0xfff, 0xfff, 0xfff, 0x10d, 0xfff, 0xfff, 0xfff, 0xfff,
885 0xfff, 0xfff, 0x066, 0x2ea, 0x28b, 0x25b, 0xfff, 0x072,
886 0xfff, 0xfff, 0xfff, 0xfff, 0x2b6, 0xfff, 0xfff, 0x272,
887 0xfff, 0xfff, 0x525, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
888 0x2ca, 0xfff, 0xfff, 0xfff, 0x299, 0xfff, 0xfff, 0xfff,
889 0x558, 0x41a, 0xfff, 0x4f7, 0x557, 0xfff, 0x4a0, 0x344,
890 0x12c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x125,
891 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
892 0x40e, 0xfff, 0xfff, 0x502, 0xfff, 0x103, 0x3e6, 0xfff,
893 0x527, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
894 0xfff, 0xfff, 0xfff, 0x45d, 0xfff, 0xfff, 0xfff, 0xfff,
895 0x44e, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d2, 0x4c9, 0x35e,
896 0x459, 0x2d9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x17d,
897 0x0c4, 0xfff, 0xfff, 0xfff, 0x3ac, 0x390, 0x094, 0xfff,
898 0x483, 0x0ab, 0xfff, 0x253, 0xfff, 0x391, 0xfff, 0xfff,
899 0xfff, 0xfff, 0x123, 0x0ef, 0xfff, 0xfff, 0xfff, 0x330,
900 0x38c, 0xfff, 0xfff, 0x2ae, 0xfff, 0xfff, 0xfff, 0x042,
901 0x012, 0x06d, 0xfff, 0xfff, 0xfff, 0x32a, 0x3db, 0x364,
902 0x2dc, 0xfff, 0x30f, 0x3d7, 0x4a5, 0x050, 0xfff, 0xfff,
903 0x029, 0xfff, 0xfff, 0xfff, 0xfff, 0x1d1, 0xfff, 0xfff,
904 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x480, 0xfff,
905 0x4ed, 0x081, 0x0a1, 0xfff, 0xfff, 0xfff, 0x30e, 0x52f,
906 0x257, 0xfff, 0xfff, 0x447, 0xfff, 0xfff, 0xfff, 0xfff,
907 0xfff, 0xfff, 0xfff, 0x401, 0x3cc, 0xfff, 0xfff, 0x0fb,
908 0x2c9, 0x42a, 0x314, 0x33e, 0x3bd, 0x318, 0xfff, 0x10e,
909 0x2a1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x24c,
910 0x506, 0xfff, 0x267, 0xfff, 0xfff, 0x219, 0xfff, 0x1eb,
911 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
912 0x309, 0x3e2, 0x46c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
913 0x384, 0xfff, 0xfff, 0xfff, 0xfff, 0x50c, 0xfff, 0x24b,
914 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x038,
915 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x194,
916 0x143, 0x3e3, 0xfff, 0xfff, 0xfff, 0x4c2, 0xfff, 0xfff,
917 0x0e1, 0x25c, 0xfff, 0x237, 0xfff, 0x1fe, 0xfff, 0xfff,
918 0xfff, 0x065, 0x2a4, 0xfff, 0x386, 0x55a, 0x11b, 0xfff,
919 0xfff, 0x192, 0xfff, 0x183, 0x00e, 0xfff, 0xfff, 0xfff,
920 0xfff, 0xfff, 0xfff, 0x4b2, 0x18e, 0xfff, 0xfff, 0xfff,
921 0xfff, 0x486, 0x4ef, 0x0c6, 0x380, 0xfff, 0x4a8, 0xfff,
922 0x0c5, 0xfff, 0xfff, 0xfff, 0xfff, 0x093, 0x1b8, 0xfff,
923 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e6,
924 0xfff, 0x0f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
925 0x28e, 0xfff, 0x53b, 0x420, 0x22a, 0x33a, 0xfff, 0x387,
926 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2a3, 0xfff, 0xfff,
927 0xfff, 0x428, 0x500, 0xfff, 0xfff, 0x120, 0x2c6, 0x290,
928 0x2f5, 0x0e3, 0xfff, 0x0b7, 0xfff, 0x319, 0x474, 0xfff,
929 0xfff, 0xfff, 0x529, 0x014, 0xfff, 0x41b, 0x40a, 0x18b,
930 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d9,
931 0xfff, 0x38a, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ce, 0xfff,
932 0xfff, 0xfff, 0xfff, 0xfff, 0x3b1, 0xfff, 0xfff, 0x05d,
933 0x2c4, 0xfff, 0xfff, 0x4af, 0xfff, 0x030, 0xfff, 0xfff,
934 0x203, 0xfff, 0x277, 0x256, 0xfff, 0xfff, 0xfff, 0x4f9,
935 0xfff, 0x2c7, 0xfff, 0x466, 0x016, 0x1cd, 0xfff, 0x167,
936 0xfff, 0xfff, 0x0c8, 0xfff, 0x43d, 0xfff, 0xfff, 0x020,
937 0xfff, 0xfff, 0x232, 0x1cb, 0x1e0, 0xfff, 0xfff, 0x347,
938 0xfff, 0x478, 0xfff, 0x365, 0xfff, 0xfff, 0xfff, 0xfff,
939 0x358, 0xfff, 0x10b, 0xfff, 0x35d, 0xfff, 0xfff, 0xfff,
940 0xfff, 0xfff, 0x452, 0x22d, 0xfff, 0xfff, 0x47d, 0xfff,
941 0x2f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x460, 0xfff,
942 0xfff, 0xfff, 0x50b, 0xfff, 0xfff, 0xfff, 0x2ec, 0xfff,
943 0xfff, 0xfff, 0xfff, 0xfff, 0x4b1, 0x422, 0xfff, 0xfff,
944 0xfff, 0x2d4, 0xfff, 0x239, 0xfff, 0xfff, 0xfff, 0x439,
945 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
946 0xfff, 0x491, 0x075, 0xfff, 0xfff, 0xfff, 0x06c, 0xfff,
947 0xfff, 0x0f9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
948 0xfff, 0x139, 0xfff, 0x4f6, 0xfff, 0xfff, 0x409, 0xfff,
949 0xfff, 0x15b, 0xfff, 0xfff, 0x348, 0xfff, 0xfff, 0xfff,
950 0xfff, 0x4a2, 0x49d, 0xfff, 0x033, 0x175, 0xfff, 0x039,
951 0xfff, 0x312, 0x40c, 0xfff, 0xfff, 0x325, 0xfff, 0xfff,
952 0xfff, 0xfff, 0xfff, 0xfff, 0x4aa, 0xfff, 0xfff, 0xfff,
953 0xfff, 0xfff, 0xfff, 0x165, 0x3bc, 0x48c, 0x310, 0x096,
954 0xfff, 0xfff, 0x250, 0x1a2, 0xfff, 0xfff, 0xfff, 0xfff,
955 0x20d, 0x2ac, 0xfff, 0xfff, 0x39b, 0xfff, 0x377, 0xfff,
956 0x512, 0x495, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
957 0xfff, 0xfff, 0xfff, 0xfff, 0x357, 0x4ea, 0xfff, 0xfff,
958 0xfff, 0xfff, 0x198, 0xfff, 0xfff, 0xfff, 0x434, 0x04a,
959 0xfff, 0xfff, 0xfff, 0xfff, 0x062, 0xfff, 0x1d6, 0x1c8,
960 0xfff, 0x1f3, 0x281, 0xfff, 0x462, 0xfff, 0xfff, 0xfff,
961 0x4b0, 0xfff, 0x207, 0xfff, 0xfff, 0xfff, 0xfff, 0x3dd,
962 0xfff, 0xfff, 0x55d, 0xfff, 0x552, 0x494, 0x1af, 0xfff,
963 0xfff, 0xfff, 0xfff, 0xfff, 0x227, 0xfff, 0xfff, 0x069,
964 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x43e,
965 0x0b5, 0xfff, 0x524, 0x2d2, 0xfff, 0xfff, 0xfff, 0x28f,
966 0xfff, 0x01b, 0x50e, 0xfff, 0xfff, 0x1bb, 0xfff, 0xfff,
967 0x41d, 0xfff, 0x32e, 0x48e, 0xfff, 0x1f7, 0x224, 0xfff,
968 0xfff, 0xfff, 0xfff, 0xfff, 0x394, 0xfff, 0xfff, 0xfff,
969 0xfff, 0x52c, 0xfff, 0xfff, 0xfff, 0x392, 0xfff, 0x1e7,
970 0xfff, 0xfff, 0x3f9, 0x3a7, 0xfff, 0x51f, 0xfff, 0x0bb,
971 0x118, 0x3ca, 0xfff, 0x1dd, 0xfff, 0x48b, 0xfff, 0xfff,
972 0xfff, 0xfff, 0x50f, 0xfff, 0x0d6, 0xfff, 0x1fa, 0xfff,
973 0x11e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d7, 0xfff, 0x078,
974 0x008, 0xfff, 0x25d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
975 0x032, 0x33c, 0xfff, 0x4d9, 0x160, 0xfff, 0xfff, 0x300,
976 0x0b1, 0xfff, 0x322, 0xfff, 0x4ec, 0xfff, 0xfff, 0x200,
977 0x00c, 0x369, 0x473, 0xfff, 0xfff, 0x32c, 0xfff, 0xfff,
978 0xfff, 0xfff, 0xfff, 0xfff, 0x53e, 0x3d4, 0x417, 0xfff,
979 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
980 0x34b, 0x001, 0x39a, 0x02c, 0xfff, 0xfff, 0x2ce, 0x00f,
981 0xfff, 0x0ba, 0xfff, 0xfff, 0xfff, 0xfff, 0x060, 0xfff,
982 0x406, 0xfff, 0xfff, 0xfff, 0x4ee, 0x4ac, 0xfff, 0x43f,
983 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x29b, 0xfff, 0xfff,
984 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x216,
985 0x190, 0xfff, 0x396, 0x464, 0xfff, 0xfff, 0x323, 0xfff,
986 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e9, 0xfff, 0x26d,
987 0x2cd, 0x040, 0xfff, 0xfff, 0xfff, 0xfff, 0x38b, 0x3c0,
988 0xfff, 0xfff, 0xfff, 0x1f2, 0xfff, 0x0ea, 0xfff, 0xfff,
989 0x472, 0xfff, 0x1fb, 0xfff, 0xfff, 0x0af, 0x27f, 0xfff,
990 0xfff, 0xfff, 0x479, 0x023, 0xfff, 0x0d8, 0x3b3, 0xfff,
991 0xfff, 0xfff, 0x121, 0xfff, 0xfff, 0x3bf, 0xfff, 0xfff,
992 0x16b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
993 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
994 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
995 0x45a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
996 0xfff, 0x0be, 0xfff, 0xfff, 0xfff, 0x111, 0xfff, 0x220,
997 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
998 0xfff, 0xfff, 0x09b, 0x218, 0xfff, 0x022, 0x202, 0xfff,
999 0x4c8, 0xfff, 0x0ed, 0xfff, 0xfff, 0x182, 0xfff, 0xfff,
1000 0xfff, 0x17f, 0x213, 0xfff, 0x321, 0x36a, 0xfff, 0x086,
1001 0xfff, 0xfff, 0xfff, 0x43b, 0x088, 0xfff, 0xfff, 0xfff,
1002 0xfff, 0x26c, 0xfff, 0x2f8, 0x3b4, 0xfff, 0xfff, 0xfff,
1003 0x132, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x333, 0x444,
1004 0x0c1, 0x4d8, 0x46d, 0x264, 0xfff, 0xfff, 0xfff, 0xfff,
1005 0x426, 0xfff, 0xfff, 0xfff, 0xfff, 0x2fe, 0xfff, 0xfff,
1006 0xfff, 0xfff, 0x011, 0xfff, 0x05f, 0xfff, 0xfff, 0xfff,
1007 0xfff, 0x10c, 0x101, 0xfff, 0xfff, 0xfff, 0xfff, 0x110,
1008 0xfff, 0x044, 0x304, 0x361, 0x404, 0xfff, 0x51b, 0x099,
1009 0xfff, 0x440, 0xfff, 0xfff, 0xfff, 0x222, 0xfff, 0xfff,
1010 0xfff, 0xfff, 0x1b5, 0xfff, 0x136, 0x430, 0xfff, 0x1da,
1011 0xfff, 0xfff, 0xfff, 0x043, 0xfff, 0x17c, 0xfff, 0xfff,
1012 0xfff, 0x01c, 0xfff, 0xfff, 0xfff, 0x425, 0x236, 0xfff,
1013 0x317, 0xfff, 0xfff, 0x437, 0x3fc, 0xfff, 0x1f1, 0xfff,
1014 0x324, 0xfff, 0xfff, 0x0ca, 0x306, 0xfff, 0x548, 0xfff,
1015 0x46e, 0xfff, 0xfff, 0xfff, 0x4b8, 0x1c2, 0x286, 0xfff,
1016 0xfff, 0x087, 0x18a, 0x19f, 0xfff, 0xfff, 0xfff, 0xfff,
1017 0x18c, 0xfff, 0x215, 0xfff, 0xfff, 0xfff, 0xfff, 0x283,
1018 0xfff, 0xfff, 0xfff, 0x126, 0xfff, 0xfff, 0x370, 0xfff,
1019 0x53f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x31c, 0xfff,
1020 0x4d1, 0xfff, 0xfff, 0xfff, 0x021, 0xfff, 0x157, 0xfff,
1021 0xfff, 0x028, 0x16e, 0xfff, 0x421, 0xfff, 0x1c6, 0xfff,
1022 0xfff, 0x511, 0xfff, 0xfff, 0x39c, 0x46f, 0x1b2, 0xfff,
1023 0xfff, 0x316, 0xfff, 0xfff, 0x009, 0xfff, 0xfff, 0x195,
1024 0xfff, 0x240, 0x546, 0xfff, 0xfff, 0x520, 0xfff, 0xfff,
1025 0xfff, 0xfff, 0xfff, 0xfff, 0x454, 0xfff, 0xfff, 0xfff,
1026 0x3f3, 0xfff, 0xfff, 0x187, 0xfff, 0x4a9, 0xfff, 0xfff,
1027 0xfff, 0xfff, 0xfff, 0xfff, 0x51c, 0x453, 0x1e6, 0xfff,
1028 0xfff, 0xfff, 0x1b0, 0xfff, 0x477, 0xfff, 0xfff, 0xfff,
1029 0x4fe, 0xfff, 0x32f, 0xfff, 0xfff, 0x15e, 0x1d4, 0xfff,
1030 0x0e5, 0xfff, 0xfff, 0xfff, 0x242, 0x14b, 0x046, 0xfff,
1031 0x3f6, 0x3bb, 0x3e4, 0xfff, 0xfff, 0x2e3, 0xfff, 0x245,
1032 0xfff, 0x149, 0xfff, 0xfff, 0xfff, 0x2db, 0xfff, 0xfff,
1033 0x181, 0xfff, 0x089, 0x2c5, 0xfff, 0x1f5, 0xfff, 0x2d6,
1034 0x507, 0xfff, 0x42d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1035 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1036 0x080, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1037 0xfff, 0xfff, 0xfff, 0xfff, 0x3c3, 0x320, 0xfff, 0x1e1,
1038 0xfff, 0x0f5, 0x13b, 0xfff, 0xfff, 0xfff, 0x003, 0x4da,
1039 0xfff, 0xfff, 0xfff, 0x42c, 0xfff, 0xfff, 0x0cb, 0xfff,
1040 0x536, 0x2c3, 0xfff, 0xfff, 0xfff, 0xfff, 0x199, 0xfff,
1041 0xfff, 0x0c0, 0xfff, 0x01e, 0x497, 0xfff, 0xfff, 0x3e5,
1042 0xfff, 0xfff, 0xfff, 0x0cf, 0xfff, 0x2bd, 0xfff, 0x223,
1043 0xfff, 0x3ff, 0xfff, 0x058, 0x174, 0x3ef, 0xfff, 0x002
1044};
1045
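/*
 * Partial logarithm table for GF(4096): maps a field element (a syndrome
 * ratio or an inverted locator root) to the exponent encoding its symbol
 * position; 0xfff marks elements with no valid position.  Callers convert
 * the exponent to a symbol number as 0x55e - err_pos(x).
 */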
1046static unsigned short err_pos(unsigned short din)
1047{
1048 BUG_ON(din >= ARRAY_SIZE(err_pos_lut));
1049 return err_pos_lut[din];
1050}
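
/* All eight syndromes zero means the data is error-free. */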
1051static int chk_no_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1052{
1053 if ((chk_syndrome_list[0] | chk_syndrome_list[1] |
1054 chk_syndrome_list[2] | chk_syndrome_list[3] |
1055 chk_syndrome_list[4] | chk_syndrome_list[5] |
1056 chk_syndrome_list[6] | chk_syndrome_list[7]) != 0x0) {
1057 return -EINVAL;
1058 } else {
1059 err_info[0] = 0x0;
1060 return 0;
1061 }
1062}
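
/*
 * A single symbol error makes every ratio S(i+1)/S(i) equal to the same
 * field element (the error locator), so check that all seven quotients
 * agree before accepting the 1-error hypothesis.
 */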
1063static int chk_1_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1064{
1065 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
1066 tmp0 = gf4096_mul(chk_syndrome_list[1], gf4096_inv(chk_syndrome_list[0]));
1067 tmp1 = gf4096_mul(chk_syndrome_list[2], gf4096_inv(chk_syndrome_list[1]));
1068 tmp2 = gf4096_mul(chk_syndrome_list[3], gf4096_inv(chk_syndrome_list[2]));
1069 tmp3 = gf4096_mul(chk_syndrome_list[4], gf4096_inv(chk_syndrome_list[3]));
1070 tmp4 = gf4096_mul(chk_syndrome_list[5], gf4096_inv(chk_syndrome_list[4]));
1071 tmp5 = gf4096_mul(chk_syndrome_list[6], gf4096_inv(chk_syndrome_list[5]));
1072 tmp6 = gf4096_mul(chk_syndrome_list[7], gf4096_inv(chk_syndrome_list[6]));
1073 if ((tmp0 == tmp1) && (tmp1 == tmp2) && (tmp2 == tmp3) &&
1074 (tmp3 == tmp4) && (tmp4 == tmp5) && (tmp5 == tmp6)) {
1075 err_info[0] = 0x1; /* one symbol in error */
1076 err_info[1] = (unsigned short)(0x55e - err_pos(tmp0));
1077 err_info[5] = chk_syndrome_list[0];
1078 return 0;
1079 } else
1080 return -EINVAL;
1081}
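
/*
 * Two errors: solve the 2x2 key equation from S0..S3 for the locator
 * polynomial, find its two roots by exhaustive search over GF(4096), then
 * verify the four remaining syndromes against the recovered error patterns.
 */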
1082static int chk_2_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1083{
1084 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
1085 unsigned short coefs[4];
1086 unsigned short err_pats[4];
1087 int found_num_root = 0;
1088 unsigned short bit2_root0, bit2_root1;
1089 unsigned short bit2_root0_inv, bit2_root1_inv;
1090 unsigned short err_loc_eqn, test_root;
1091 unsigned short bit2_loc0, bit2_loc1;
1092 unsigned short bit2_pat0, bit2_pat1;
1093
1094 find_2x2_soln(chk_syndrome_list[1], chk_syndrome_list[0],
1095 chk_syndrome_list[2], chk_syndrome_list[1],
1096 chk_syndrome_list[2], chk_syndrome_list[3], coefs);
1097 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1098 err_loc_eqn =
1099 gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^ gf4096_mul(coefs[0], test_root) ^ 0x1;
1100 if (err_loc_eqn == 0x0) {
1101 if (found_num_root == 0) {
1102 bit2_root0 = test_root;
1103 found_num_root = 1;
1104 } else if (found_num_root == 1) {
1105 bit2_root1 = test_root;
1106 found_num_root = 2;
1107 break;
1108 }
1109 }
1110 }
1111 if (found_num_root != 2)
1112 return -EINVAL;
1113 else {
1114 bit2_root0_inv = gf4096_inv(bit2_root0);
1115 bit2_root1_inv = gf4096_inv(bit2_root1);
1116 find_2bit_err_pats(chk_syndrome_list[0],
1117 chk_syndrome_list[1], bit2_root0_inv, bit2_root1_inv, err_pats);
1118 bit2_pat0 = err_pats[0];
1119 bit2_pat1 = err_pats[1];
1120 /* powers rinv^4..rinv^7 of the root inverses, for checking S4..S7 */
1121 tmp0 = gf4096_mul(gf4096_mul(bit2_root0_inv, bit2_root0_inv), gf4096_mul(bit2_root0_inv, bit2_root0_inv)); /* rinv0^4 */
1122 tmp1 = gf4096_mul(bit2_root0_inv, tmp0); /* rinv0^5 */
1123 tmp2 = gf4096_mul(bit2_root0_inv, tmp1); /* rinv0^6 */
1124 tmp3 = gf4096_mul(bit2_root0_inv, tmp2); /* rinv0^7 */
1125 tmp4 = gf4096_mul(gf4096_mul(bit2_root1_inv, bit2_root1_inv), gf4096_mul(bit2_root1_inv, bit2_root1_inv)); /* rinv1^4 */
1126 tmp5 = gf4096_mul(bit2_root1_inv, tmp4); /* rinv1^5 */
1127 tmp6 = gf4096_mul(bit2_root1_inv, tmp5); /* rinv1^6 */
1128 tmp7 = gf4096_mul(bit2_root1_inv, tmp6); /* rinv1^7 */
1129 /* check that S4..S7 are consistent with exactly two symbol errors */
1130 if ((chk_syndrome_list[4] ==
1131 (gf4096_mul(bit2_pat0, tmp0) ^
1132 gf4096_mul(bit2_pat1, tmp4))) &&
1133 (chk_syndrome_list[5] ==
1134 (gf4096_mul(bit2_pat0, tmp1) ^
1135 gf4096_mul(bit2_pat1, tmp5))) &&
1136 (chk_syndrome_list[6] ==
1137 (gf4096_mul(bit2_pat0, tmp2) ^
1138 gf4096_mul(bit2_pat1, tmp6))) &&
1139 (chk_syndrome_list[7] ==
1140 (gf4096_mul(bit2_pat0, tmp3) ^
1141 gf4096_mul(bit2_pat1, tmp7)))) {
1142 if ((err_pos(bit2_root0_inv) == 0xfff) || (err_pos(bit2_root1_inv) == 0xfff)) {
1143 return -EINVAL;
1144 } else {
1145 bit2_loc0 = 0x55e - err_pos(bit2_root0_inv);
1146 bit2_loc1 = 0x55e - err_pos(bit2_root1_inv);
1147 err_info[0] = 0x2; // encode 2-symbol error as 0x2
1148 err_info[1] = bit2_loc0;
1149 err_info[2] = bit2_loc1;
1150 err_info[5] = bit2_pat0;
1151 err_info[6] = bit2_pat1;
1152 return 0;
1153 }
1154 } else
1155 return -EINVAL;
1156 }
1157}
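
/*
 * Three errors: same structure as the 2-error case, with a cubic locator
 * polynomial solved from S0..S5 and two spare syndromes (S6, S7) left
 * over for the consistency check.
 */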
1158static int chk_3_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1159{
1160 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
1161 unsigned short coefs[4];
1162 unsigned short err_pats[4];
1163 int found_num_root = 0;
1164 unsigned short bit3_root0, bit3_root1, bit3_root2;
1165 unsigned short bit3_root0_inv, bit3_root1_inv, bit3_root2_inv;
1166 unsigned short err_loc_eqn, test_root;
1167
1168 find_3bit_err_coefs(chk_syndrome_list[0], chk_syndrome_list[1],
1169 chk_syndrome_list[2], chk_syndrome_list[3],
1170 chk_syndrome_list[4], chk_syndrome_list[5], coefs);
1171
1172 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1173 /* evaluate 1 ^ c0*x ^ c1*x^2 ^ c2*x^3 at x = test_root */
1174 err_loc_eqn = gf4096_mul(coefs[2], gf4096_mul(gf4096_mul(test_root, test_root), test_root)) ^
1175 gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^
1176 gf4096_mul(coefs[0], test_root) ^ 0x1;
1177
1178 if (err_loc_eqn == 0x0) {
1179 if (found_num_root == 0) {
1180 bit3_root0 = test_root;
1181 found_num_root = 1;
1182 } else if (found_num_root == 1) {
1183 bit3_root1 = test_root;
1184 found_num_root = 2;
1185 } else if (found_num_root == 2) {
1186 bit3_root2 = test_root;
1187 found_num_root = 3;
1188 break;
1189 }
1190 }
1191 }
1192 if (found_num_root != 3)
1193 return -EINVAL;
1194 else {
1195 bit3_root0_inv = gf4096_inv(bit3_root0);
1196 bit3_root1_inv = gf4096_inv(bit3_root1);
1197 bit3_root2_inv = gf4096_inv(bit3_root2);
1198
1199 find_3bit_err_pats(chk_syndrome_list[0], chk_syndrome_list[1],
1200 chk_syndrome_list[2], bit3_root0_inv,
1201 bit3_root1_inv, bit3_root2_inv, err_pats);
1202
1203 /* powers rinv^6 and rinv^7 of the three root inverses */
1204 tmp0 = gf4096_mul(bit3_root0_inv, bit3_root0_inv);
1205 tmp0 = gf4096_mul(tmp0, tmp0);
1206 tmp0 = gf4096_mul(tmp0, bit3_root0_inv);
1207 tmp0 = gf4096_mul(tmp0, bit3_root0_inv); /* rinv0^6 */
1208 tmp1 = gf4096_mul(tmp0, bit3_root0_inv); /* rinv0^7 */
1209 tmp2 = gf4096_mul(bit3_root1_inv, bit3_root1_inv);
1210 tmp2 = gf4096_mul(tmp2, tmp2);
1211 tmp2 = gf4096_mul(tmp2, bit3_root1_inv);
1212 tmp2 = gf4096_mul(tmp2, bit3_root1_inv); /* rinv1^6 */
1213 tmp3 = gf4096_mul(tmp2, bit3_root1_inv); /* rinv1^7 */
1214 tmp4 = gf4096_mul(bit3_root2_inv, bit3_root2_inv);
1215 tmp4 = gf4096_mul(tmp4, tmp4);
1216 tmp4 = gf4096_mul(tmp4, bit3_root2_inv);
1217 tmp4 = gf4096_mul(tmp4, bit3_root2_inv); /* rinv2^6 */
1218 tmp5 = gf4096_mul(tmp4, bit3_root2_inv); /* rinv2^7 */
1219
1220 /* check that S6 and S7 are consistent with exactly three symbol errors */
1221 if ((chk_syndrome_list[6] == (gf4096_mul(err_pats[0], tmp0) ^
1222 gf4096_mul(err_pats[1], tmp2) ^
1223 gf4096_mul(err_pats[2], tmp4))) &&
1224 (chk_syndrome_list[7] == (gf4096_mul(err_pats[0], tmp1) ^
1225 gf4096_mul(err_pats[1], tmp3) ^ gf4096_mul(err_pats[2], tmp5)))) {
1226 if ((err_pos(bit3_root0_inv) == 0xfff) ||
1227 (err_pos(bit3_root1_inv) == 0xfff) || (err_pos(bit3_root2_inv) == 0xfff)) {
1228 return -EINVAL;
1229 } else {
1230 err_info[0] = 0x3;
1231 err_info[1] = (0x55e - err_pos(bit3_root0_inv));
1232 err_info[2] = (0x55e - err_pos(bit3_root1_inv));
1233 err_info[3] = (0x55e - err_pos(bit3_root2_inv));
1234 err_info[5] = err_pats[0];
1235 err_info[6] = err_pats[1];
1236 err_info[7] = err_pats[2];
1237 return 0;
1238 }
1239 } else
1240 return -EINVAL;
1241 }
1242}
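
/*
 * Four errors consume all eight syndromes in the key equation, so no
 * spare syndromes remain for a consistency check; out-of-range positions
 * are only caught later by correct_12bit_symbol().
 */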
1243static int chk_4_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1244{
1245 unsigned short coefs[4];
1246 unsigned short err_pats[4];
1247 int found_num_root = 0;
1248 unsigned short bit4_root0, bit4_root1, bit4_root2, bit4_root3;
1249 unsigned short bit4_root0_inv, bit4_root1_inv, bit4_root2_inv, bit4_root3_inv;
1250 unsigned short err_loc_eqn, test_root;
1251
1252 find_4bit_err_coefs(chk_syndrome_list[0],
1253 chk_syndrome_list[1], chk_syndrome_list[2],
1254 chk_syndrome_list[3], chk_syndrome_list[4],
1255 chk_syndrome_list[5], chk_syndrome_list[6],
1256 chk_syndrome_list[7],
1257 coefs);
1258
1259 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1260 /* evaluate 1 ^ c0*x ^ c1*x^2 ^ c2*x^3 ^ c3*x^4 at x = test_root */
1261 err_loc_eqn =
1262 gf4096_mul(coefs[3],
1263 gf4096_mul(gf4096_mul(gf4096_mul(test_root,
1264 test_root),
1265 test_root), test_root)) ^
1266 gf4096_mul(coefs[2],
1267 gf4096_mul(gf4096_mul(test_root, test_root), test_root)) ^
1268 gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^
1269 gf4096_mul(coefs[0], test_root) ^ 0x1;
1270 if (err_loc_eqn == 0x0) {
1271 if (found_num_root == 0) {
1272 bit4_root0 = test_root;
1273 found_num_root = 1;
1274 } else if (found_num_root == 1) {
1275 bit4_root1 = test_root;
1276 found_num_root = 2;
1277 } else if (found_num_root == 2) {
1278 bit4_root2 = test_root;
1279 found_num_root = 3;
1280 } else {
1281 found_num_root = 4;
1282 bit4_root3 = test_root;
1283 break;
1284 }
1285 }
1286 }
1287 if (found_num_root != 4) {
1288 return -EINVAL;
1289 } else {
1290 bit4_root0_inv = gf4096_inv(bit4_root0);
1291 bit4_root1_inv = gf4096_inv(bit4_root1);
1292 bit4_root2_inv = gf4096_inv(bit4_root2);
1293 bit4_root3_inv = gf4096_inv(bit4_root3);
1294 find_4bit_err_pats(chk_syndrome_list[0],
1295 chk_syndrome_list[1],
1296 chk_syndrome_list[2], chk_syndrome_list[3],
1297 bit4_root0_inv, bit4_root1_inv,
1298 bit4_root2_inv, bit4_root3_inv, err_pats);
1299 err_info[0] = 0x4;
1300 err_info[1] = (0x55e - err_pos(bit4_root0_inv));
1301 err_info[2] = (0x55e - err_pos(bit4_root1_inv));
1302 err_info[3] = (0x55e - err_pos(bit4_root2_inv));
1303 err_info[4] = (0x55e - err_pos(bit4_root3_inv));
1304 err_info[5] = err_pats[0];
1305 err_info[6] = err_pats[1];
1306 err_info[7] = err_pats[2];
1307 err_info[8] = err_pats[3];
1308 return 0;
1309 }
1310}
1311
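/*
 * Apply one correction: 12-bit symbols are packed two per three bytes
 * across the 2048-byte sector and the OOB area.  Symbol 0 only touches
 * buf[0], and odd and even symbol indices unpack differently.
 */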
1312void correct_12bit_symbol(unsigned char *buf, unsigned short sym,
1313 unsigned short val)
1314{
1315 if (unlikely(sym > 1366)) {
1316 printk(KERN_ERR "Error: symbol %d out of range; cannot correct\n", sym);
1317 } else if (sym == 0) {
1318 buf[0] ^= val;
1319 } else if (sym & 1) {
1320 buf[1+(3*(sym-1))/2] ^= (val >> 4);
1321 buf[2+(3*(sym-1))/2] ^= ((val & 0xf) << 4);
1322 } else {
1323 buf[2+(3*(sym-2))/2] ^= (val >> 8);
1324 buf[3+(3*(sym-2))/2] ^= (val & 0xff);
1325 }
1326}
1327
1328static int debugecc = 0;
1329module_param(debugecc, int, 0644);
1330
1331int cafe_correct_ecc(unsigned char *buf,
1332 unsigned short *chk_syndrome_list)
1333{
1334 unsigned short err_info[9];
1335 int i;
1336
1337 if (debugecc) {
1338 printk(KERN_WARNING "cafe_correct_ecc invoked. Syndromes %x %x %x %x %x %x %x %x\n",
1339 chk_syndrome_list[0], chk_syndrome_list[1],
1340 chk_syndrome_list[2], chk_syndrome_list[3],
1341 chk_syndrome_list[4], chk_syndrome_list[5],
1342 chk_syndrome_list[6], chk_syndrome_list[7]);
1343 for (i=0; i < 2048; i+=16) {
1344 printk(KERN_WARNING "D %04x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1345 i,
1346 buf[i], buf[i+1], buf[i+2], buf[i+3],
1347 buf[i+4], buf[i+5], buf[i+6], buf[i+7],
1348 buf[i+8], buf[i+9], buf[i+10], buf[i+11],
1349 buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
1350 }
1351 for ( ; i < 2112; i+=16) {
1352 printk(KERN_WARNING "O %02x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1353 i - 2048,
1354 buf[i], buf[i+1], buf[i+2], buf[i+3],
1355 buf[i+4], buf[i+5], buf[i+6], buf[i+7],
1356 buf[i+8], buf[i+9], buf[i+10], buf[i+11],
1357 buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
1358 }
1359 }
1360
1363 if (chk_no_err_only(chk_syndrome_list, err_info) &&
1364 chk_1_err_only(chk_syndrome_list, err_info) &&
1365 chk_2_err_only(chk_syndrome_list, err_info) &&
1366 chk_3_err_only(chk_syndrome_list, err_info) &&
1367 chk_4_err_only(chk_syndrome_list, err_info)) {
1368 return -EIO;
1369 }
1370
1371 for (i=0; i < err_info[0]; i++) {
1372 if (debugecc)
1373 printk(KERN_WARNING "Correct symbol %d with 0x%03x\n",
1374 err_info[1+i], err_info[5+i]);
1375
1376 correct_12bit_symbol(buf, err_info[1+i], err_info[5+i]);
1377 }
1378
1379 return err_info[0];
1380}
1381
diff --git a/drivers/mtd/nand/cafe.c b/drivers/mtd/nand/cafe_nand.c
index c328a7514510..cff969d05d4a 100644
--- a/drivers/mtd/nand/cafe.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
11#undef DEBUG 11#undef DEBUG
12#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
13#include <linux/mtd/nand.h> 13#include <linux/mtd/nand.h>
14#include <linux/rslib.h>
14#include <linux/pci.h> 15#include <linux/pci.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -46,13 +47,14 @@
46#define CAFE_GLOBAL_IRQ_MASK 0x300c 47#define CAFE_GLOBAL_IRQ_MASK 0x300c
47#define CAFE_NAND_RESET 0x3034 48#define CAFE_NAND_RESET 0x3034
48 49
49int cafe_correct_ecc(unsigned char *buf, 50/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
50 unsigned short *chk_syndrome_list); 51#define CTRL1_CHIPSELECT (1<<19)
51 52
52struct cafe_priv { 53struct cafe_priv {
53 struct nand_chip nand; 54 struct nand_chip nand;
54 struct pci_dev *pdev; 55 struct pci_dev *pdev;
55 void __iomem *mmio; 56 void __iomem *mmio;
57 struct rs_control *rs;
56 uint32_t ctl1; 58 uint32_t ctl1;
57 uint32_t ctl2; 59 uint32_t ctl2;
58 int datalen; 60 int datalen;
@@ -195,8 +197,8 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
195 197
196 cafe->data_pos = cafe->datalen = 0; 198 cafe->data_pos = cafe->datalen = 0;
197 199
198 /* Set command valid bit */ 200 /* Set command valid bit, mask in the chip select bit */
199 ctl1 = 0x80000000 | command; 201 ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
200 202
201 /* Set RD or WR bits as appropriate */ 203 /* Set RD or WR bits as appropriate */
202 if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) { 204 if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
@@ -309,8 +311,16 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
309 311
310static void cafe_select_chip(struct mtd_info *mtd, int chipnr) 312static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
311{ 313{
312 //struct cafe_priv *cafe = mtd->priv; 314 struct cafe_priv *cafe = mtd->priv;
313 // cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr); 315
316 cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
317
318 /* Mask the appropriate bit into the stored value of ctl1
319 which will be used by cafe_nand_cmdfunc() */
320 if (chipnr)
321 cafe->ctl1 |= CTRL1_CHIPSELECT;
322 else
323 cafe->ctl1 &= ~CTRL1_CHIPSELECT;
314} 324}
315 325
316static int cafe_nand_interrupt(int irq, void *id) 326static int cafe_nand_interrupt(int irq, void *id)
@@ -374,28 +384,66 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
374 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 384 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
375 385
376 if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) { 386 if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
377 unsigned short syn[8]; 387 unsigned short syn[8], pat[4];
378 int i; 388 int pos[4];
389 u8 *oob = chip->oob_poi;
390 int i, n;
379 391
380 for (i=0; i<8; i+=2) { 392 for (i=0; i<8; i+=2) {
381 uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2)); 393 uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
382 syn[i] = tmp & 0xfff; 394 syn[i] = cafe->rs->index_of[tmp & 0xfff];
383 syn[i+1] = (tmp >> 16) & 0xfff; 395 syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
396 }
397
398 n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
399 pat);
400
401 for (i = 0; i < n; i++) {
402 int p = pos[i];
403
404 /* The 12-bit symbols are mapped to bytes here */
405
406 if (p > 1374) {
407 /* out of range */
408 n = -1374;
409 } else if (p == 0) {
410 /* high four bits do not correspond to data */
411 if (pat[i] > 0xff)
412 n = -2048;
413 else
414 buf[0] ^= pat[i];
415 } else if (p == 1365) {
416 buf[2047] ^= pat[i] >> 4;
417 oob[0] ^= pat[i] << 4;
418 } else if (p > 1365) {
419 if ((p & 1) == 1) {
420 oob[3*p/2 - 2048] ^= pat[i] >> 4;
421 oob[3*p/2 - 2047] ^= pat[i] << 4;
422 } else {
423 oob[3*p/2 - 2049] ^= pat[i] >> 8;
424 oob[3*p/2 - 2048] ^= pat[i];
425 }
426 } else if ((p & 1) == 1) {
427 buf[3*p/2] ^= pat[i] >> 4;
428 buf[3*p/2 + 1] ^= pat[i] << 4;
429 } else {
430 buf[3*p/2 - 1] ^= pat[i] >> 8;
431 buf[3*p/2] ^= pat[i];
432 }
384 } 433 }
385 434
386 if ((i = cafe_correct_ecc(buf, syn)) < 0) { 435 if (n < 0) {
387 dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n", 436 dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
388 cafe_readl(cafe, NAND_ADDR2) * 2048); 437 cafe_readl(cafe, NAND_ADDR2) * 2048);
389 for (i=0; i< 0x5c; i+=4) 438 for (i = 0; i < 0x5c; i += 4)
390 printk("Register %x: %08x\n", i, readl(cafe->mmio + i)); 439 printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
391 mtd->ecc_stats.failed++; 440 mtd->ecc_stats.failed++;
392 } else { 441 } else {
393 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", i); 442 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
394 mtd->ecc_stats.corrected += i; 443 mtd->ecc_stats.corrected += n;
395 } 444 }
396 } 445 }
397 446
398
399 return 0; 447 return 0;
400} 448}
401 449
@@ -416,7 +464,7 @@ static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
416 464
417static struct nand_bbt_descr cafe_bbt_main_descr_2048 = { 465static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
418 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 466 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
419 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 467 | NAND_BBT_2BIT | NAND_BBT_VERSION,
420 .offs = 14, 468 .offs = 14,
421 .len = 4, 469 .len = 4,
422 .veroffs = 18, 470 .veroffs = 18,
@@ -426,7 +474,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
426 474
427static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = { 475static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
428 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 476 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
429 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 477 | NAND_BBT_2BIT | NAND_BBT_VERSION,
430 .offs = 14, 478 .offs = 14,
431 .len = 4, 479 .len = 4,
432 .veroffs = 18, 480 .veroffs = 18,
@@ -442,7 +490,7 @@ static struct nand_ecclayout cafe_oobinfo_512 = {
442 490
443static struct nand_bbt_descr cafe_bbt_main_descr_512 = { 491static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
444 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 492 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
445 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 493 | NAND_BBT_2BIT | NAND_BBT_VERSION,
446 .offs = 14, 494 .offs = 14,
447 .len = 1, 495 .len = 1,
448 .veroffs = 15, 496 .veroffs = 15,
@@ -452,7 +500,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
452 500
453static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = { 501static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
454 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 502 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
455 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 503 | NAND_BBT_2BIT | NAND_BBT_VERSION,
456 .offs = 14, 504 .offs = 14,
457 .len = 1, 505 .len = 1,
458 .veroffs = 15, 506 .veroffs = 15,
@@ -525,6 +573,48 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
525 return 0; 573 return 0;
526} 574}
527 575
576/* F_2[X]/(X**6+X+1) */
577static unsigned short __devinit gf64_mul(u8 a, u8 b)
578{
579 u8 c;
580 unsigned int i;
581
582 c = 0;
583 for (i = 0; i < 6; i++) {
584 if (a & 1)
585 c ^= b;
586 a >>= 1;
587 b <<= 1;
588 if ((b & 0x40) != 0)
589 b ^= 0x43;
590 }
591
592 return c;
593}
594
595/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
596static u16 __devinit gf4096_mul(u16 a, u16 b)
597{
598 u8 ah, al, bh, bl, ch, cl;
599
600 ah = a >> 6;
601 al = a & 0x3f;
602 bh = b >> 6;
603 bl = b & 0x3f;
604
605 ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
606 cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
607
608 return (ch << 6) ^ cl;
609}
610
611static int __devinit cafe_mul(int x)
612{
613 if (x == 0)
614 return 1;
615 return gf4096_mul(x, 0xe01);
616}
617
528static int __devinit cafe_nand_probe(struct pci_dev *pdev, 618static int __devinit cafe_nand_probe(struct pci_dev *pdev,
529 const struct pci_device_id *ent) 619 const struct pci_device_id *ent)
530{ 620{
@@ -564,6 +654,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
564 } 654 }
565 cafe->nand.buffers = (void *)cafe->dmabuf + 2112; 655 cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
566 656
657 cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
658 if (!cafe->rs) {
659 err = -ENOMEM;
660 goto out_ior;
661 }
662
567 cafe->nand.cmdfunc = cafe_nand_cmdfunc; 663 cafe->nand.cmdfunc = cafe_nand_cmdfunc;
568 cafe->nand.dev_ready = cafe_device_ready; 664 cafe->nand.dev_ready = cafe_device_ready;
569 cafe->nand.read_byte = cafe_read_byte; 665 cafe->nand.read_byte = cafe_read_byte;
@@ -646,7 +742,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
646 cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); 742 cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
647 743
648 /* Scan to find existence of the device */ 744 /* Scan to find existence of the device */
649 if (nand_scan_ident(mtd, 1)) { 745 if (nand_scan_ident(mtd, 2)) {
650 err = -ENXIO; 746 err = -ENXIO;
651 goto out_irq; 747 goto out_irq;
652 } 748 }
@@ -713,6 +809,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
713 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); 809 cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
714 free_irq(pdev->irq, mtd); 810 free_irq(pdev->irq, mtd);
715 nand_release(mtd); 811 nand_release(mtd);
812 free_rs(cafe->rs);
716 pci_iounmap(pdev, cafe->mmio); 813 pci_iounmap(pdev, cafe->mmio);
717 dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr); 814 dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
718 kfree(mtd); 815 kfree(mtd);
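
The cafe_nand changes above replace an open-coded corrector with the kernel's generic Reed-Solomon decoder: syndromes read from NAND_ECC_SYN01 are converted to index form, handed to decode_rs16(), and the returned error positions and patterns are XORed into the data and OOB buffers. gf64_mul() and gf4096_mul() supply the field arithmetic that init_rs_non_canonical() needs. A minimal userspace sketch of the GF(2^6) multiply, assuming only that the inputs are 6-bit field elements (illustrative, not driver code):

    #include <stdio.h>

    typedef unsigned char u8;

    /* Multiply in F_2[X]/(X^6 + X + 1): shift-and-add with reduction */
    static u8 gf64_mul(u8 a, u8 b)
    {
        u8 c = 0;
        int i;

        for (i = 0; i < 6; i++) {
            if (a & 1)
                c ^= b;      /* add b * x^i when bit i of a is set */
            a >>= 1;
            b <<= 1;         /* b *= x */
            if (b & 0x40)
                b ^= 0x43;   /* reduce by x^6 + x + 1 (0x43) */
        }
        return c;
    }

    int main(void)
    {
        /* x * x^5 = x^6 = x + 1, i.e. 0x03 */
        printf("%#x\n", gf64_mul(0x02, 0x20));
        return 0;
    }

gf4096_mul() then builds GF(2^12) as a degree-2 extension of GF(2^6), which is why it needs only a handful of gf64_mul() calls per product.
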
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 04de315e4937..7e68203fe1ba 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -303,28 +303,27 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
303 struct nand_chip *chip = mtd->priv; 303 struct nand_chip *chip = mtd->priv;
304 u16 bad; 304 u16 bad;
305 305
306 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
307
306 if (getchip) { 308 if (getchip) {
307 page = (int)(ofs >> chip->page_shift);
308 chipnr = (int)(ofs >> chip->chip_shift); 309 chipnr = (int)(ofs >> chip->chip_shift);
309 310
310 nand_get_device(chip, mtd, FL_READING); 311 nand_get_device(chip, mtd, FL_READING);
311 312
312 /* Select the NAND device */ 313 /* Select the NAND device */
313 chip->select_chip(mtd, chipnr); 314 chip->select_chip(mtd, chipnr);
314 } else 315 }
315 page = (int)(ofs >> chip->page_shift);
316 316
317 if (chip->options & NAND_BUSWIDTH_16) { 317 if (chip->options & NAND_BUSWIDTH_16) {
318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE, 318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
319 page & chip->pagemask); 319 page);
320 bad = cpu_to_le16(chip->read_word(mtd)); 320 bad = cpu_to_le16(chip->read_word(mtd));
321 if (chip->badblockpos & 0x1) 321 if (chip->badblockpos & 0x1)
322 bad >>= 8; 322 bad >>= 8;
323 if ((bad & 0xFF) != 0xff) 323 if ((bad & 0xFF) != 0xff)
324 res = 1; 324 res = 1;
325 } else { 325 } else {
326 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, 326 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
327 page & chip->pagemask);
328 if (chip->read_byte(mtd) != 0xff) 327 if (chip->read_byte(mtd) != 0xff)
329 res = 1; 328 res = 1;
330 } 329 }
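
The nand_base hunk is a small refactor: the page number is now computed once, with the pagemask folded in up front, instead of separately in the getchip and !getchip branches and again at each cmdfunc() call. The arithmetic itself, spelled out in a standalone sketch (the geometry values are hypothetical; page_shift = 11 corresponds to 2048-byte pages):

    #include <stdio.h>

    int main(void)
    {
        long long ofs = 0x10000800LL; /* hypothetical byte offset into the flash */
        int page_shift = 11;          /* log2 of the 2048-byte page size */
        int pagemask = 0x3ffff;       /* pages per chip - 1 */

        /* same expression the patch now computes once, up front */
        int page = (int)(ofs >> page_shift) & pagemask;

        printf("page = %#x\n", page); /* (0x10000800 >> 11) & 0x3ffff = 0x20001 */
        return 0;
    }
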
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
new file mode 100644
index 000000000000..cd725fc5e813
--- /dev/null
+++ b/drivers/mtd/nand/plat_nand.c
@@ -0,0 +1,150 @@
1/*
2 * Generic NAND driver
3 *
4 * Author: Vitaly Wool <vitalywool@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/nand.h>
18#include <linux/mtd/partitions.h>
19
20struct plat_nand_data {
21 struct nand_chip chip;
22 struct mtd_info mtd;
23 void __iomem *io_base;
24#ifdef CONFIG_MTD_PARTITIONS
25 int nr_parts;
26 struct mtd_partition *parts;
27#endif
28};
29
30/*
31 * Probe for the NAND device.
32 */
33static int __init plat_nand_probe(struct platform_device *pdev)
34{
35 struct platform_nand_data *pdata = pdev->dev.platform_data;
36 struct plat_nand_data *data;
37 int res = 0;
38
39 /* Allocate memory for the device structure (and zero it) */
40 data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
41 if (!data) {
42 dev_err(&pdev->dev, "failed to allocate device structure.\n");
43 return -ENOMEM;
44 }
45
46 data->io_base = ioremap(pdev->resource[0].start,
47 pdev->resource[0].end - pdev->resource[0].start + 1);
48 if (data->io_base == NULL) {
49 dev_err(&pdev->dev, "ioremap failed\n");
50 kfree(data);
51 return -EIO;
52 }
53
 54	data->chip.priv = data;	/* not &data: that would store the address of a local */
55 data->mtd.priv = &data->chip;
56 data->mtd.owner = THIS_MODULE;
57
58 data->chip.IO_ADDR_R = data->io_base;
59 data->chip.IO_ADDR_W = data->io_base;
60 data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
61 data->chip.dev_ready = pdata->ctrl.dev_ready;
62 data->chip.select_chip = pdata->ctrl.select_chip;
63 data->chip.chip_delay = pdata->chip.chip_delay;
64 data->chip.options |= pdata->chip.options;
65
66 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
67 data->chip.ecc.layout = pdata->chip.ecclayout;
68 data->chip.ecc.mode = NAND_ECC_SOFT;
69
70 platform_set_drvdata(pdev, data);
71
 72	/* Scan to find existence of the device */
73 if (nand_scan(&data->mtd, 1)) {
74 res = -ENXIO;
75 goto out;
76 }
77
78#ifdef CONFIG_MTD_PARTITIONS
79 if (pdata->chip.part_probe_types) {
80 res = parse_mtd_partitions(&data->mtd,
81 pdata->chip.part_probe_types,
82 &data->parts, 0);
83 if (res > 0) {
84 add_mtd_partitions(&data->mtd, data->parts, res);
85 return 0;
86 }
87 }
88 if (pdata->chip.partitions) {
89 data->parts = pdata->chip.partitions;
90 res = add_mtd_partitions(&data->mtd, data->parts,
91 pdata->chip.nr_partitions);
92 } else
93#endif
94 res = add_mtd_device(&data->mtd);
95
96 if (!res)
97 return res;
98
99 nand_release(&data->mtd);
100out:
101 platform_set_drvdata(pdev, NULL);
102 iounmap(data->io_base);
103 kfree(data);
104 return res;
105}
106
107/*
108 * Remove a NAND device.
109 */
110static int __devexit plat_nand_remove(struct platform_device *pdev)
111{
112 struct plat_nand_data *data = platform_get_drvdata(pdev);
113 struct platform_nand_data *pdata = pdev->dev.platform_data;
114
115 nand_release(&data->mtd);
116#ifdef CONFIG_MTD_PARTITIONS
117 if (data->parts && data->parts != pdata->chip.partitions)
118 kfree(data->parts);
119#endif
120 iounmap(data->io_base);
121 kfree(data);
122
123 return 0;
124}
125
126static struct platform_driver plat_nand_driver = {
127 .probe = plat_nand_probe,
128 .remove = plat_nand_remove,
129 .driver = {
130 .name = "gen_nand",
131 .owner = THIS_MODULE,
132 },
133};
134
135static int __init plat_nand_init(void)
136{
137 return platform_driver_register(&plat_nand_driver);
138}
139
140static void __exit plat_nand_exit(void)
141{
142 platform_driver_unregister(&plat_nand_driver);
143}
144
145module_init(plat_nand_init);
146module_exit(plat_nand_exit);
147
148MODULE_LICENSE("GPL");
149MODULE_AUTHOR("Vitaly Wool");
150MODULE_DESCRIPTION("Simple generic NAND driver");
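
plat_nand is driven entirely by platform data, so board code only has to register a platform device named "gen_nand" with one memory resource and a filled-in struct platform_nand_data. A sketch of that board-side wiring, under stated assumptions: the base address, register offsets and the cmd_ctrl body are hypothetical, while the structures and field names are the ones plat_nand_probe() consumes above:

    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/mtd/nand.h>

    /* Board-specific: latch command/address bytes via CLE/ALE address lines */
    static void board_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                    unsigned int ctrl)
    {
        struct nand_chip *chip = mtd->priv;

        if (cmd == NAND_CMD_NONE)
            return;
        if (ctrl & NAND_CLE)
            writeb(cmd, chip->IO_ADDR_W + 0x10); /* hypothetical CLE offset */
        else
            writeb(cmd, chip->IO_ADDR_W + 0x08); /* hypothetical ALE offset */
    }

    static struct resource board_nand_resource = {
        .start = 0x40000000, /* hypothetical chip-select base */
        .end   = 0x40000fff,
        .flags = IORESOURCE_MEM,
    };

    static struct platform_nand_data board_nand_data = {
        .chip = {
            .chip_delay = 25, /* tR wait, microseconds */
        },
        .ctrl = {
            .cmd_ctrl = board_nand_cmd_ctrl,
        },
    };

    static struct platform_device board_nand_device = {
        .name          = "gen_nand", /* must match plat_nand_driver.driver.name */
        .id            = -1,
        .num_resources = 1,
        .resource      = &board_nand_resource,
        .dev           = {
            .platform_data = &board_nand_data,
        },
    };

Registering board_nand_device (e.g. via platform_add_devices() in board init code) is then enough for plat_nand_probe() to ioremap the resource and run nand_scan().
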
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 000794c6caf5..0537fac8de74 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2192,7 +2192,7 @@ static int onenand_check_maf(int manuf)
2192 * @param mtd MTD device structure 2192 * @param mtd MTD device structure
2193 * 2193 *
2194 * OneNAND detection method: 2194 * OneNAND detection method:
2195 * Compare the the values from command with ones from register 2195 * Compare the values from command with ones from register
2196 */ 2196 */
2197static int onenand_probe(struct mtd_info *mtd) 2197static int onenand_probe(struct mtd_info *mtd)
2198{ 2198{
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9588da3a30e7..127f60841b10 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -95,8 +95,7 @@ static int max_interrupt_work = 10;
95#include <asm/io.h> 95#include <asm/io.h>
96#include <asm/irq.h> 96#include <asm/irq.h>
97 97
98static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 98static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
99static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
100 99
101#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA)) 100#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
102#define EL3_SUSPEND 101#define EL3_SUSPEND
@@ -360,7 +359,7 @@ static int __init el3_common_init(struct net_device *dev)
360 printk(", IRQ %d.\n", dev->irq); 359 printk(", IRQ %d.\n", dev->irq);
361 360
362 if (el3_debug > 0) 361 if (el3_debug > 0)
363 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 362 printk(KERN_INFO "%s", version);
364 return 0; 363 return 0;
365 364
366} 365}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80924f76dee8..f26ca331615e 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -103,7 +103,7 @@ static int vortex_debug = 1;
103 103
104 104
105static char version[] __devinitdata = 105static char version[] __devinitdata =
106DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; 106DRV_NAME ": Donald Becker and others.\n";
107 107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); 109MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 18aba838c1ff..82d78ff8399b 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -31,10 +31,8 @@
31 31
32*/ 32*/
33 33
34static const char versionA[] = 34static const char version[] =
35"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n"; 35"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
36static const char versionB[] =
37" http://www.scyld.com/network/atp.html\n";
38 36
39/* The user-configurable values. 37/* The user-configurable values.
40 These may be modified when a driver module is loaded.*/ 38 These may be modified when a driver module is loaded.*/
@@ -324,7 +322,7 @@ static int __init atp_probe1(long ioaddr)
324 322
325#ifndef MODULE 323#ifndef MODULE
326 if (net_debug) 324 if (net_debug)
327 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 325 printk(KERN_INFO "%s", version);
328#endif 326#endif
329 327
330 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM " 328 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
@@ -926,7 +924,7 @@ static void set_rx_mode_8012(struct net_device *dev)
926 924
927static int __init atp_init_module(void) { 925static int __init atp_init_module(void) {
928 if (debug) /* Emit version even if no cards detected. */ 926 if (debug) /* Emit version even if no cards detected. */
929 printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB); 927 printk(KERN_INFO "%s", version);
930 return atp_init(); 928 return atp_init();
931} 929}
932 930
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 724bce51f936..223517dcbcfd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond)
3461/*---------------------------- Hashing Policies -----------------------------*/ 3461/*---------------------------- Hashing Policies -----------------------------*/
3462 3462
3463/* 3463/*
3464 * Hash for the the output device based upon layer 3 and layer 4 data. If 3464 * Hash for the output device based upon layer 3 and layer 4 data. If
3465 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3465 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3466 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3466 * altogether not IP, mimic bond_xmit_hash_policy_l2()
3467 */ 3467 */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c0609..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
1214 int i; 1214 int i;
1215#endif 1215#endif
1216 1216
1217 flush_scheduled_work(); 1217 cancel_work_sync(&adapter->reset_task);
1218 1218
1219 e1000_release_manageability(adapter); 1219 e1000_release_manageability(adapter);
1220 1220
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 39654e1e2bed..47680237f783 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev)
1126 printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, 1126 printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
1127 "network cable problem"); 1127 "network cable problem");
1128 /* This is not a duplicate. One message for the console, 1128 /* This is not a duplicate. One message for the console,
1129 one for the the log file */ 1129 one for the log file */
1130 printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, 1130 printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
1131 "network cable problem"); 1131 "network cable problem");
1132 eepro_complete_selreset(ioaddr); 1132 eepro_complete_selreset(ioaddr);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 6c267c38df97..9800341956a2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -28,7 +28,7 @@
28*/ 28*/
29 29
30static const char * const version = 30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n" 31"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"; 32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33 33
34/* A few user-configurable values that apply to all boards. 34/* A few user-configurable values that apply to all boards.
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4e3f14c9c717..5e517946f46a 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -93,8 +93,6 @@ static int rx_copybreak;
93static char version[] __devinitdata = 93static char version[] __devinitdata =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n"; 94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata = 95static char version2[] __devinitdata =
96" http://www.scyld.com/network/epic100.html\n";
97static char version3[] __devinitdata =
98" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 96" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
99 97
100MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -323,8 +321,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
323#ifndef MODULE 321#ifndef MODULE
324 static int printed_version; 322 static int printed_version;
325 if (!printed_version++) 323 if (!printed_version++)
326 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", 324 printk (KERN_INFO "%s" KERN_INFO "%s",
327 version, version2, version3); 325 version, version2);
328#endif 326#endif
329 327
330 card_idx++; 328 card_idx++;
@@ -1596,8 +1594,8 @@ static int __init epic_init (void)
1596{ 1594{
1597/* when a module, this is printed whether or not devices are found in probe */ 1595/* when a module, this is printed whether or not devices are found in probe */
1598#ifdef MODULE 1596#ifdef MODULE
1599 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", 1597 printk (KERN_INFO "%s" KERN_INFO "%s",
1600 version, version2, version3); 1598 version, version2);
1601#endif 1599#endif
1602 1600
1603 return pci_register_driver(&epic_driver); 1601 return pci_register_driver(&epic_driver);
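
The 3c509, atp and epic100 hunks above (and the ne2k-pci, sundance, natsemi and yellowfin ones below) are all the same cleanup: version banners split across several strings glued together with embedded KERN_INFO markers are collapsed into one string, since a log level is only meaningful at the start of a printk record and the extra pieces mostly existed to carry now-removed scyld.com URLs. The preferred shape, sketched with the same macro names the drivers use (DRV_NAME, DRV_VERSION and DRV_RELDATE are assumed defined by the driver):

    /* One level marker per record; the banner is a single string. */
    static char version[] __devinitdata =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " <maintainer>\n";

    static int __init demo_init(void)
    {
        printk(KERN_INFO "%s", version); /* not "%s" KERN_INFO "%s" */
        return 0;
    }
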
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 6e90619b3b41..36d2c7d4f4d0 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -140,7 +140,7 @@ config BAYCOM_SER_HDX
140 modems that connect to a serial interface. The driver supports the 140 modems that connect to a serial interface. The driver supports the
141 ser12 design in half-duplex mode. This is the old driver. It is 141 ser12 design in half-duplex mode. This is the old driver. It is
142 still provided in case your serial interface chip does not work with 142 still provided in case your serial interface chip does not work with
143 the full-duplex driver. This driver is depreciated. To configure 143 the full-duplex driver. This driver is deprecated. To configure
144 the driver, use the sethdlc utility available in the standard ax25 144 the driver, use the sethdlc utility available in the standard ax25
145 utilities package. For information on the modems, see 145 utilities package. For information on the modems, see
146 <http://www.baycom.de/> and 146 <http://www.baycom.de/> and
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 2ab173d9a0e4..1e67720f1066 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -113,7 +113,7 @@
113/* RxOver overflow in Recv FIFO */ 113/* RxOver overflow in Recv FIFO */
114/* SipRcv received serial gap (or other condition you set) */ 114/* SipRcv received serial gap (or other condition you set) */
115/* Interrupts are enabled by writing a one to the IER register */ 115/* Interrupts are enabled by writing a one to the IER register */
116/* Interrupts are cleared by writting a one to the ISR register */ 116/* Interrupts are cleared by writing a one to the ISR register */
117/* */ 117/* */
118/* 6. The remaining registers: 0x6 and 0x3 appear to be */ 118/* 6. The remaining registers: 0x6 and 0x3 appear to be */
119/* reserved parts of 16 or 32 bit registers; the remainder */ 119/* reserved parts of 16 or 32 bit registers; the remainder */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index f15aebde7b90..52c99d01d568 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
315 * hw - Struct containing variables accessed by shared code 315 * hw - Struct containing variables accessed by shared code
316 * 316 *
317 * Reads the first 64 16 bit words of the EEPROM and sums the values read. 317 * Reads the first 64 16 bit words of the EEPROM and sums the values read.
318 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 318 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
319 * valid. 319 * valid.
320 * 320 *
321 * Returns: 321 * Returns:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index 84960dae2a22..ea3b8fc86d1e 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -126,7 +126,7 @@ typedef struct rx_packet {
126 /* Note: when loopback is set this bit becomes collision control. Setting this bit will */ 126 /* Note: when loopback is set this bit becomes collision control. Setting this bit will */
127 /* cause a collision to be reported. */ 127 /* cause a collision to be reported. */
128 128
129 /* Bits 5 and 6 are used to determine the the Destination address filter mode */ 129 /* Bits 5 and 6 are used to determine the Destination address filter mode */
130#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */ 130#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
131#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */ 131#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
132#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */ 132#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 223e0e6264ba..4cf0d3fcb519 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -131,7 +131,6 @@ static const char version[] __devinitdata =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
134 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
135 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
136 135
137MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 589785d1e762..995c0a5d4066 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -63,8 +63,7 @@ static int options[MAX_UNITS];
63 63
64/* These identify the driver base version and may not be removed. */ 64/* These identify the driver base version and may not be removed. */
65static char version[] __devinitdata = 65static char version[] __devinitdata =
66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n" 66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n";
67KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
68 67
69#if defined(__powerpc__) 68#if defined(__powerpc__)
70#define inl_le(addr) le32_to_cpu(inl(addr)) 69#define inl_le(addr) le32_to_cpu(inl(addr))
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056a..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
662 phy_error(phydev); 662 phy_error(phydev);
663 663
664 /* 664 /*
665 * Finish any pending work; we might have been scheduled 665 * Finish any pending work; we might have been scheduled to be called
666 * to be called from keventd ourselves, though. 666 * from keventd ourselves, but cancel_work_sync() handles that.
667 */ 667 */
668 run_scheduled_work(&phydev->phy_queue); 668 cancel_work_sync(&phydev->phy_queue);
669 669
670 free_irq(phydev->irq, phydev); 670 free_irq(phydev->irq, phydev);
671 671
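
The e1000, phy and tg3 changes in this series share one pattern: teardown paths stop calling flush_scheduled_work() or spinning on hand-rolled in-flight flags, and instead cancel exactly the work item they own with cancel_work_sync(). That primitive waits for a running handler and removes a queued instance without flushing unrelated work, which is what made the old tg3 flush deadlock-prone under rtnl_lock and made the TG3_FLAG_IN_RESET_TASK bookkeeping (removed below) necessary. Minimal shape of the conversion (struct and field names are illustrative):

    #include <linux/workqueue.h>

    struct demo_adapter {
        struct work_struct reset_task; /* queued from interrupt/timer paths */
    };

    static void demo_remove(struct demo_adapter *adapter)
    {
        /* waits for a running handler, drops a queued one; nothing else */
        cancel_work_sync(&adapter->reset_task);
    }
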
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f51ba31970aa..e1f912d04043 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -110,8 +110,7 @@ static char *media[MAX_UNITS];
110 110
111/* These identify the driver base version and may not be removed. */ 111/* These identify the driver base version and may not be removed. */
112static char version[] = 112static char version[] =
113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115 114
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 115MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); 116MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd808..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
3716 unsigned int restart_timer; 3716 unsigned int restart_timer;
3717 3717
3718 tg3_full_lock(tp, 0); 3718 tg3_full_lock(tp, 0);
3719 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3720 3719
3721 if (!netif_running(tp->dev)) { 3720 if (!netif_running(tp->dev)) {
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723 tg3_full_unlock(tp); 3721 tg3_full_unlock(tp);
3724 return; 3722 return;
3725 } 3723 }
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
3750 mod_timer(&tp->timer, jiffies + 1); 3748 mod_timer(&tp->timer, jiffies + 1);
3751 3749
3752out: 3750out:
3753 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3754
3755 tg3_full_unlock(tp); 3751 tg3_full_unlock(tp);
3756} 3752}
3757 3753
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
7390{ 7386{
7391 struct tg3 *tp = netdev_priv(dev); 7387 struct tg3 *tp = netdev_priv(dev);
7392 7388
7393 /* Calling flush_scheduled_work() may deadlock because 7389 cancel_work_sync(&tp->reset_task);
7394 * linkwatch_event() may be on the workqueue and it will try to get
7395 * the rtnl_lock which we are holding.
7396 */
7397 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7398 msleep(1);
7399 7390
7400 netif_stop_queue(dev); 7391 netif_stop_queue(dev);
7401 7392
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a243..bd9f4f428e5b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2229#define TG3_FLAG_10_100_ONLY 0x01000000 2229#define TG3_FLAG_10_100_ONLY 0x01000000
2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2231#define TG3_FLAG_IN_RESET_TASK 0x04000000 2231
2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2234#define TG3_FLAG_SUPPORT_MSI 0x20000000 2234#define TG3_FLAG_SUPPORT_MSI 0x20000000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 9b08afbd1f65..ea896777bcaf 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -269,7 +269,7 @@ done:
269 This would turn on IM for devices that are not contributing 269 This would turn on IM for devices that are not contributing
270 to backlog congestion with unnecessary latency. 270 to backlog congestion with unnecessary latency.
271 271
272 We monitor the the device RX-ring and have: 272 We monitor the device RX-ring and have:
273 273
274 HW Interrupt Mitigation either ON or OFF. 274 HW Interrupt Mitigation either ON or OFF.
275 275
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index fa440706fb4a..38f3b99716b8 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1021 np->tx_ring[entry].length |= DescEndRing; 1021 np->tx_ring[entry].length |= DescEndRing;
1022 1022
1023 /* Now acquire the irq spinlock. 1023 /* Now acquire the irq spinlock.
1024 * The difficult race is the the ordering between 1024 * The difficult race is the ordering between
1025 * increasing np->cur_tx and setting DescOwned: 1025 * increasing np->cur_tx and setting DescOwned:
1026 * - if np->cur_tx is increased first the interrupt 1026 * - if np->cur_tx is increased first the interrupt
1027 * handler could consider the packet as transmitted 1027 * handler could consider the packet as transmitted
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 985a1810ca59..2470b1ee33c0 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card)
1043 1043
1044 1044
1045/* 1045/*
1046link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. 1046link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
1047 1047
1048Must be called in locked state with interrupts disabled 1048Must be called in locked state with interrupts disabled
1049*/ 1049*/
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index f2dd7763cd0b..f72573594121 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
639 639
640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); 640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641 641
642 /* "I feel a presence... another warrior is on the the mesa." 642 /* "I feel a presence... another warrior is on the mesa."
643 */ 643 */
644 wmb(); 644 wmb();
645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); 645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 38fac3bbcd82..7d5b8c2cc614 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv)
149 /* Vitally important. If we don't do this it seems we get an 149 /* Vitally important. If we don't do this it seems we get an
150 * interrupt somewhere during the power cycle, since 150 * interrupt somewhere during the power cycle, since
151 * hw_unavailable is already set it doesn't get ACKed, we get 151 * hw_unavailable is already set it doesn't get ACKed, we get
152 * into an interrupt loop and the the PMU decides to turn us 152 * into an interrupt loop and the PMU decides to turn us
153 * off. */ 153 * off. */
154 disable_irq(dev->irq); 154 disable_irq(dev->irq);
155 155
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 841b3c136ad9..283be4a70524 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3054,7 +3054,7 @@ static const iw_handler prism54_handler[] = {
3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */ 3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */
3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */ 3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */
3056 (iw_handler) NULL, /* -- hole -- */ 3056 (iw_handler) NULL, /* -- hole -- */
3057 (iw_handler) NULL, /* SIOCGIWAPLIST depreciated */ 3057 (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ 3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ 3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ 3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a037b11dac9d..084795355b74 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv)
115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len; 115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN; 116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
117 117
118 /* set the cards base address for writting the data */ 118 /* set the card's base address for writing the data */
119 isl38xx_w32_flush(device_base, reg, 119 isl38xx_w32_flush(device_base, reg,
120 ISL38XX_DIR_MEM_BASE_REG); 120 ISL38XX_DIR_MEM_BASE_REG);
121 wmb(); /* be paranoid */ 121 wmb(); /* be paranoid */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 67b867f837ca..5740d4d4267c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -176,7 +176,7 @@ psa_write(struct net_device * dev,
176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR + 176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
177 (psaoff(0, psa_comp_number) << 1); 177 (psaoff(0, psa_comp_number) << 1);
178 178
179 /* Authorize writting to PSA */ 179 /* Authorize writing to PSA */
180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN); 180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
181 181
182 while(n-- > 0) 182 while(n-- > 0)
@@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
1676 fee_write(base, 0x60, 1676 fee_write(base, 0x60,
1677 dac, 2); 1677 dac, 2);
1678 1678
1679 /* We now should verify here that the EEprom writting was ok */ 1679 /* We now should verify here that the EEprom writing was ok */
1680 1680
1681 /* ReRead the first area */ 1681 /* ReRead the first area */
1682 fee_read(base, 0x00, 1682 fee_read(base, 0x00,
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 4d1c4905c749..4b9de0093a7b 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -120,7 +120,7 @@
120 * the Wavelan itself (NCR -> AT&T -> Lucent). 120 * the Wavelan itself (NCR -> AT&T -> Lucent).
121 * 121 *
122 * All started with Anders Klemets <klemets@paul.rutgers.edu>, 122 * All started with Anders Klemets <klemets@paul.rutgers.edu>,
123 * writting a Wavelan ISA driver for the MACH microkernel. Girish 123 * writing a Wavelan ISA driver for the MACH microkernel. Girish
124 * Welling <welling@paul.rutgers.edu> had also worked on it. 124 * Welling <welling@paul.rutgers.edu> had also worked on it.
125 * Keith Moore modified this for the Pcmcia hardware. 125 * Keith Moore modified this for the Pcmcia hardware.
126 * 126 *
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3f4a7cf9efea..f2a90a7fa2d6 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,6 @@ static int gx_fix;
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 110static char version[] __devinitdata =
111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" 111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
112KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
113KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; 112KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
114 113
115MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 114MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bb7739d26a5..8e58ea3d95c0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -119,7 +119,7 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
119 * system is in its list of supported devices. Returns the matching 119 * system is in its list of supported devices. Returns the matching
120 * pci_device_id structure or %NULL if there is no match. 120 * pci_device_id structure or %NULL if there is no match.
121 * 121 *
122 * Depreciated, don't use this as it will not catch any dynamic ids 122 * Deprecated, don't use this as it will not catch any dynamic ids
123 * that a driver might want to check for. 123 * that a driver might want to check for.
124 */ 124 */
125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5e439836db2d..76422eded36e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -98,7 +98,7 @@ config RTC_INTF_DEV_UIE_EMUL
98 bool "RTC UIE emulation on dev interface" 98 bool "RTC UIE emulation on dev interface"
99 depends on RTC_INTF_DEV 99 depends on RTC_INTF_DEV
100 help 100 help
101 Provides an emulation for RTC_UIE if the underlaying rtc chip 101 Provides an emulation for RTC_UIE if the underlying rtc chip
102 driver does not expose RTC_UIE ioctls. Those requests generate 102 driver does not expose RTC_UIE ioctls. Those requests generate
103 once-per-second update interrupts, used for synchronization. 103 once-per-second update interrupts, used for synchronization.
104 104
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6abf4811958c..e0f91dfce0f5 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -104,7 +104,7 @@ static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
104 104
105 writeb(tmp, rtc->regbase + RCR1); 105 writeb(tmp, rtc->regbase + RCR1);
106 106
107 rtc_update_irq(&rtc->rtc_dev, 1, events); 107 rtc_update_irq(rtc->rtc_dev, 1, events);
108 108
109 spin_unlock(&rtc->lock); 109 spin_unlock(&rtc->lock);
110 110
@@ -139,7 +139,7 @@ static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
139 139
140 rtc->rearm_aie = 1; 140 rtc->rearm_aie = 1;
141 141
142 rtc_update_irq(&rtc->rtc_dev, 1, events); 142 rtc_update_irq(rtc->rtc_dev, 1, events);
143 } 143 }
144 144
145 spin_unlock(&rtc->lock); 145 spin_unlock(&rtc->lock);
@@ -153,7 +153,7 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
153 153
154 spin_lock(&rtc->lock); 154 spin_lock(&rtc->lock);
155 155
156 rtc_update_irq(&rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); 156 rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
157 157
158 spin_unlock(&rtc->lock); 158 spin_unlock(&rtc->lock);
159 159
@@ -341,7 +341,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
341 tm->tm_sec--; 341 tm->tm_sec--;
342#endif 342#endif
343 343
344 dev_dbg(&dev, "%s: tm is secs=%d, mins=%d, hours=%d, " 344 dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
345 "mday=%d, mon=%d, year=%d, wday=%d\n", 345 "mday=%d, mon=%d, year=%d, wday=%d\n",
346 __FUNCTION__, 346 __FUNCTION__,
347 tm->tm_sec, tm->tm_min, tm->tm_hour, 347 tm->tm_sec, tm->tm_min, tm->tm_hour,
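
The rtc-sh fixes above are pointer-type corrections: rtc->rtc_dev is already a struct rtc_device * and dev is already a struct device *, so the extra & passed a pointer-to-pointer where the APIs expect the object pointer. The corrected interrupt path in sketch form (struct sh_rtc here is a minimal stand-in for the driver's private container):

    #include <linux/interrupt.h>
    #include <linux/rtc.h>

    struct sh_rtc {                    /* minimal stand-in, assumed from context */
        struct rtc_device *rtc_dev;
    };

    static irqreturn_t demo_rtc_periodic(int irq, void *dev_id)
    {
        struct sh_rtc *rtc = dev_id;

        /* rtc_update_irq() takes the rtc_device pointer itself, not its address */
        rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
        return IRQ_HANDLED;
    }
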
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index bbd5b8b66f42..d6b06ab81188 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -23,7 +23,7 @@
23 23
24/* 24/*
25 * The room for the SCCB (only for writing) is not equal to a page's size 25 * The room for the SCCB (only for writing) is not equal to a page's size
26 * (as it is specified as the maximum size in the the SCLP documentation) 26 * (as it is specified as the maximum size in the SCLP documentation)
27 * because of the additional data structure described above. 27 * because of the additional data structure described above.
28 */ 28 */
29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) 29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 29d176036e5c..0b96d49dd636 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2860,7 +2860,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2860 if (!atomic_read(&queue->set_pci_flags_count)){ 2860 if (!atomic_read(&queue->set_pci_flags_count)){
2861 /* 2861 /*
2862 * there's no outstanding PCI any more, so we 2862 * there's no outstanding PCI any more, so we
2863 * have to request a PCI to be sure the the PCI 2863 * have to request a PCI to be sure that the PCI
2864 * will wake at some time in the future then we 2864 * will wake at some time in the future then we
2865 * can flush packed buffers that might still be 2865 * can flush packed buffers that might still be
2866 * hanging around, which can happen if no 2866 * hanging around, which can happen if no
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index cb08ca3cc0f9..bdf5782b8a7a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -222,7 +222,7 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
222 * Since we have been using this adapter, it is safe to assume 222 * Since we have been using this adapter, it is safe to assume
223 * that it is not failed but recoverable. The card seems to 223 * that it is not failed but recoverable. The card seems to
224 * report link-up events by self-initiated queue shutdown. 224 * report link-up events by self-initiated queue shutdown.
225 * That is why we need to clear the the link-down flag 225 * That is why we need to clear the link-down flag
226 * which is set again in case we have missed by a mile. 226 * which is set again in case we have missed by a mile.
227 */ 227 */
228 zfcp_erp_adapter_reopen( 228 zfcp_erp_adapter_reopen(
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 74b999d77bbf..4fab0c23814c 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -156,7 +156,7 @@ static unsigned short get_pins(unsigned minor)
156#define BPP_ICR 0x18 156#define BPP_ICR 0x18
157#define BPP_SIZE 0x1A 157#define BPP_SIZE 0x1A
158 158
159/* BPP_CSR. Bits of type RW1 are cleared with writting '1'. */ 159/* BPP_CSR. Bits of type RW1 are cleared with writing '1'. */
160#define P_DEV_ID_MASK 0xf0000000 /* R */ 160#define P_DEV_ID_MASK 0xf0000000 /* R */
161#define P_DEV_ID_ZEBRA 0x40000000 161#define P_DEV_ID_ZEBRA 0x40000000
162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */ 162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8d72bbae96ad..0bada0028aa0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -966,7 +966,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
966 | AHD_BUSFREEREV_BUG; 966 | AHD_BUSFREEREV_BUG;
967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; 967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
968 968
969 /* If the user requested the the SLOWCRC bit to be set. */ 969 /* If the user requested that the SLOWCRC bit be set. */
970 if (aic79xx_slowcrc) 970 if (aic79xx_slowcrc)
971 ahd->features |= AHD_AIC79XXB_SLOWCRC; 971 ahd->features |= AHD_AIC79XXB_SLOWCRC;
972 972
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index e6b70123940c..e78ce0fa44d2 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -6,7 +6,7 @@
6# 6#
7# This file is licensed under GPLv2. 7# This file is licensed under GPLv2.
8# 8#
9# This file is part of the the aic94xx driver. 9# This file is part of the aic94xx driver.
10# 10#
11# The aic94xx driver is free software; you can redistribute it and/or 11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as 12# modify it under the terms of the GNU General Public License as
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a965ed3548d5..564ea90ed3a0 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -541,7 +541,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
541 541
542 542
543/* 543/*
544 * Safe settings. If set to zero the the BIOS/default values with 544 * Safe settings. If set to zero the BIOS/default values with
545 * command line overrides will be used. If set to 1 then safe and 545 * command line overrides will be used. If set to 1 then safe and
546 * slow settings will be used. 546 * slow settings will be used.
547 */ 547 */
@@ -617,7 +617,7 @@ static void __devinit fix_settings(void)
617 617
618/* 618/*
619 * Mapping from the eeprom delay index value (index into this array) 619 * Mapping from the eeprom delay index value (index into this array)
620 * to the the number of actual seconds that the delay should be for. 620 * to the number of actual seconds that the delay should be for.
621 */ 621 */
622static char __devinitdata eeprom_index_to_delay_map[] = 622static char __devinitdata eeprom_index_to_delay_map[] =
623 { 1, 3, 5, 10, 16, 30, 60, 120 }; 623 { 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -4136,7 +4136,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
4136 * @io_port: base I/O address 4136 * @io_port: base I/O address
4137 * @addr: offset into SEEPROM 4137 * @addr: offset into SEEPROM
4138 * 4138 *
4139 * Returns the the byte read. 4139 * Returns the byte read.
4140 **/ 4140 **/
4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr) 4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
4142{ 4142{
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 61fbcdcbb009..1f5a07bf2a75 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -173,7 +173,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
173 * @retries: number of times to retry request 173 * @retries: number of times to retry request
174 * @flags: or into request flags; 174 * @flags: or into request flags;
175 * 175 *
176 * returns the req->errors value which is the the scsi_cmnd result 176 * returns the req->errors value which is the scsi_cmnd result
177 * field. 177 * field.
178 **/ 178 **/
179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 66e7bc985797..1d8a2f6bb8eb 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -22,10 +22,7 @@
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/arch/board.h> 23#include <asm/arch/board.h>
24#include <asm/arch/gpio.h> 24#include <asm/arch/gpio.h>
25
26#ifdef CONFIG_ARCH_AT91
27#include <asm/arch/cpu.h> 25#include <asm/arch/cpu.h>
28#endif
29 26
30#include "atmel_spi.h" 27#include "atmel_spi.h"
31 28
@@ -552,10 +549,8 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
552 goto out_free_buffer; 549 goto out_free_buffer;
553 as->irq = irq; 550 as->irq = irq;
554 as->clk = clk; 551 as->clk = clk;
555#ifdef CONFIG_ARCH_AT91
556 if (!cpu_is_at91rm9200()) 552 if (!cpu_is_at91rm9200())
557 as->new_1 = 1; 553 as->new_1 = 1;
558#endif
559 554
560 ret = request_irq(irq, atmel_spi_interrupt, 0, 555 ret = request_irq(irq, atmel_spi_interrupt, 0,
561 pdev->dev.bus_id, master); 556 pdev->dev.bus_id, master);
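
The atmel_spi cleanup assumes <asm/arch/cpu.h> is safe to include on every platform this driver builds for, with the cpu_is_*() tests defined even where they are constant-false, so the compiler discards the branch and the #ifdef CONFIG_ARCH_AT91 guards become redundant. The usual shape of such a stub, shown as an assumption about the header rather than a quote from it:

    /* In a non-AT91 <asm/arch/cpu.h> (assumed): constant-false CPU tests
     * let drivers use plain C conditionals instead of #ifdef blocks. */
    #ifndef CONFIG_ARCH_AT91
    #define cpu_is_at91rm9200() (0)
    #endif
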
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index b082d95bbbaa..11e9b15ca45a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1033,7 +1033,7 @@ static int usbatm_do_heavy_init(void *arg)
1033 1033
1034static int usbatm_heavy_init(struct usbatm_data *instance) 1034static int usbatm_heavy_init(struct usbatm_data *instance)
1035{ 1035{
1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_KERNEL); 1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_FS | CLONE_FILES);
1037 1037
1038 if (ret < 0) { 1038 if (ret < 0) {
1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret); 1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret);
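
The usbatm change spells the clone flags out instead of using CLONE_KERNEL, which expands to CLONE_FS | CLONE_FILES | CLONE_SIGHAND; dropping CLONE_SIGHAND means the heavy-init thread no longer shares signal handlers with its parent. The same call with the expansion written out (fragment, illustrative):

    /* before: kernel_thread(fn, data, CLONE_KERNEL)
     * after:  share only the filesystem context and the file table */
    int ret = kernel_thread(usbatm_do_heavy_init, instance,
                            CLONE_FS | CLONE_FILES);
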
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index b5332e679c46..88fb56d5db8f 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -1307,7 +1307,7 @@ static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
1307} 1307}
1308 1308
1309 1309
1310/* remove a service from the the device 1310/* remove a service from the device
1311 scp->id must be set! */ 1311 scp->id must be set! */
1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) 1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
1313{ 1313{
diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h
index cbb53e065d6c..82db5a8e528e 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/usb/net/usbnet.h
@@ -129,7 +129,7 @@ extern void usbnet_disconnect(struct usb_interface *);
129 129
130 130
131/* Drivers that reuse some of the standard USB CDC infrastructure 131/* Drivers that reuse some of the standard USB CDC infrastructure
132 * (notably, using multiple interfaces according to the the CDC 132 * (notably, using multiple interfaces according to the CDC
133 * union descriptor) get some helper code. 133 * union descriptor) get some helper code.
134 */ 134 */
135struct cdc_state { 135struct cdc_state {
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ba5d1dc03036..3efe67092f15 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -558,7 +558,7 @@ config USB_SERIAL_DEBUG
558 tristate "USB Debugging Device" 558 tristate "USB Debugging Device"
559 depends on USB_SERIAL 559 depends on USB_SERIAL
560 help 560 help
561 Say Y here if you have a USB debugging device used to recieve 561 Say Y here if you have a USB debugging device used to receive
562 debugging data from another machine. The most common of these 562 debugging data from another machine. The most common of these
563 devices is the NetChip TurboCONNECT device. 563 devices is the NetChip TurboCONNECT device.
564 564
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b675735bfbee..fbc8c27d5d99 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -9,7 +9,7 @@
 9 * The device works as a standard CDC device; it has 2 interfaces, the first 9 * The device works as a standard CDC device; it has 2 interfaces, the first
10 * one is for firmware access and the second is the serial one. 10 * one is for firmware access and the second is the serial one.
11 * The protocol is very simple; there are two possibilities: reading or writing. 11 * The protocol is very simple; there are two possibilities: reading or writing.
12 * When writting the first urb must have a Header that starts with 0x20 0x29 the 12 * When writing the first urb must have a Header that starts with 0x20 0x29 the
13 * next two bytes must say how much data will be sent. 13 * next two bytes must say how much data will be sent.
14 * When reading the process is almost equal except that the header starts with 14 * When reading the process is almost equal except that the header starts with
15 * 0x00 0x20. 15 * 0x00 0x20.
@@ -18,7 +18,7 @@
18 * buffer: The First and Second bytes are used for a Header, the Third and Fourth 18 * buffer: The First and Second bytes are used for a Header, the Third and Fourth
19 * tells the device the amount of information the package holds. 19 * tells the device the amount of information the package holds.
20 * Packages are 60 bytes long Header Stuff. 20 * Packages are 60 bytes long Header Stuff.
21 * When writting to the device the first two bytes of the header are 0x20 0x29 21 * When writing to the device the first two bytes of the header are 0x20 0x29
22 * When reading the bytes are 0x00 0x20, or 0x00 0x10; there is a strange 22 * When reading the bytes are 0x00 0x20, or 0x00 0x10; there is a strange
23 * situation when too much data arrives at the device, because it sends the data 23 * situation when too much data arrives at the device, because it sends the data
24 * but without the header. I will use a simple hack to work around this situation, 24 * but without the header. I will use a simple hack to work around this situation,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 18f74ac76565..4807f960150b 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2465,7 +2465,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2465 ((edge_serial->is_epic) && 2465 ((edge_serial->is_epic) &&
2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) && 2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
2467 (regNum == MCR))) { 2467 (regNum == MCR))) {
2468 dbg("SendCmdWriteUartReg - Not writting to MCR Register"); 2468 dbg("SendCmdWriteUartReg - Not writing to MCR Register");
2469 return 0; 2469 return 0;
2470 } 2470 }
2471 2471
@@ -2473,7 +2473,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2473 ((edge_serial->is_epic) && 2473 ((edge_serial->is_epic) &&
2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) && 2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
2475 (regNum == LCR))) { 2475 (regNum == LCR))) {
2476 dbg ("SendCmdWriteUartReg - Not writting to LCR Register"); 2476 dbg ("SendCmdWriteUartReg - Not writing to LCR Register");
2477 return 0; 2477 return 0;
2478 } 2478 }
2479 2479
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1132ba5ff391..9a256d2ff9dc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1348,6 +1348,20 @@ config FB_VOODOO1
1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported 1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported
1349 options and other important information. 1349 options and other important information.
1350 1350
1351config FB_VT8623
1352 tristate "VIA VT8623 support"
1353 depends on FB && PCI
1354 select FB_CFB_FILLRECT
1355 select FB_CFB_COPYAREA
1356 select FB_CFB_IMAGEBLIT
1357 select FB_TILEBLITTING
1358 select FB_SVGALIB
1359 select VGASTATE
1360 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1361 ---help---
1362 Driver for CastleRock integrated graphics core in the
1363 VIA VT8623 [Apollo CLE266] chipset.
1364
1351config FB_CYBLA 1365config FB_CYBLA
1352 tristate "Cyberblade/i1 support" 1366 tristate "Cyberblade/i1 support"
1353 depends on FB && PCI && X86_32 && !64BIT 1367 depends on FB && PCI && X86_32 && !64BIT
@@ -1401,6 +1415,20 @@ config FB_TRIDENT_ACCEL
1401 This will compile the Trident frame buffer device with 1415 This will compile the Trident frame buffer device with
1402 acceleration functions. 1416 acceleration functions.
1403 1417
1418config FB_ARK
1419 tristate "ARK 2000PV support"
1420 depends on FB && PCI
1421 select FB_CFB_FILLRECT
1422 select FB_CFB_COPYAREA
1423 select FB_CFB_IMAGEBLIT
1424 select FB_TILEBLITTING
1425 select FB_SVGALIB
1426 select VGASTATE
1427 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1428 ---help---
1429 Driver for PCI graphics boards with ARK 2000PV chip
1430 and ICS 5342 RAMDAC.
1431
1404config FB_PM3 1432config FB_PM3
1405 tristate "Permedia3 support" 1433 tristate "Permedia3 support"
1406 depends on FB && PCI && BROKEN 1434 depends on FB && PCI && BROKEN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a916c204274f..0b70567458fb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
54obj-$(CONFIG_FB_CT65550) += chipsfb.o 54obj-$(CONFIG_FB_CT65550) += chipsfb.o
55obj-$(CONFIG_FB_IMSTT) += imsttfb.o 55obj-$(CONFIG_FB_IMSTT) += imsttfb.o
56obj-$(CONFIG_FB_FM2) += fm2fb.o 56obj-$(CONFIG_FB_FM2) += fm2fb.o
57obj-$(CONFIG_FB_VT8623) += vt8623fb.o
57obj-$(CONFIG_FB_CYBLA) += cyblafb.o 58obj-$(CONFIG_FB_CYBLA) += cyblafb.o
58obj-$(CONFIG_FB_TRIDENT) += tridentfb.o 59obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
59obj-$(CONFIG_FB_LE80578) += vermilion/ 60obj-$(CONFIG_FB_LE80578) += vermilion/
60obj-$(CONFIG_FB_S3) += s3fb.o 61obj-$(CONFIG_FB_S3) += s3fb.o
62obj-$(CONFIG_FB_ARK) += arkfb.o
61obj-$(CONFIG_FB_STI) += stifb.o 63obj-$(CONFIG_FB_STI) += stifb.o
62obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o 64obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
63obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o 65obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
new file mode 100644
index 000000000000..ba6fede5c466
--- /dev/null
+++ b/drivers/video/arkfb.c
@@ -0,0 +1,1200 @@
1/*
2 * linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
3 * with ICS 5342 dac (it is easy to add support for different dacs).
4 *
5 * Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb
12 */
13
14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/tty.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/fb.h>
24#include <linux/svga.h>
25#include <linux/init.h>
26#include <linux/pci.h>
27#include <linux/console.h> /* for acquire_console_sem() and release_console_sem() */
28#include <video/vga.h>
29
30#ifdef CONFIG_MTRR
31#include <asm/mtrr.h>
32#endif
33
34struct arkfb_info {
35 int mclk_freq;
36 int mtrr_reg;
37
38 struct dac_info *dac;
39 struct vgastate state;
40 struct mutex open_lock;
41 unsigned int ref_count;
42 u32 pseudo_palette[16];
43};
44
45
46/* ------------------------------------------------------------------------- */
47
48
49static const struct svga_fb_format arkfb_formats[] = {
50 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
51 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
52 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
53 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
54 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
55 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
56 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
57 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
58 {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
59 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
60 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
61 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
62 {24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
63 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
64 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
65 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
66 SVGA_FORMAT_END
67};
68
69
70/* CRT timing register sets */
71
72static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
73static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
74static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
75static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
76static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
77static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
78
79static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
80static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
81static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
82// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
83static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
84static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
85static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
86
87static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
88static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
89static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
90
91static const struct svga_timing_regs ark_timing_regs = {
92 ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
93 ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
94 ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
95 ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
96};
97
98
99/* ------------------------------------------------------------------------- */
100
101
102/* Module parameters */
103
104static char *mode = "640x480-8@60";
105
106#ifdef CONFIG_MTRR
107static int mtrr = 1;
108#endif
109
110MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
111MODULE_LICENSE("GPL");
112MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
113
114module_param(mode, charp, 0444);
115MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
116
117#ifdef CONFIG_MTRR
118module_param(mtrr, int, 0444);
119MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
120#endif
121
122static int threshold = 4;
123
124module_param(threshold, int, 0644);
125MODULE_PARM_DESC(threshold, "FIFO threshold");
126
127
128/* ------------------------------------------------------------------------- */
129
130
131static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
132{
133 const u8 *font = map->data;
134 u8 __iomem *fb = (u8 __iomem *)info->screen_base;
135 int i, c;
136
137 if ((map->width != 8) || (map->height != 16) ||
138 (map->depth != 1) || (map->length != 256)) {
139 printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
140 "height %d, depth %d, length %d\n", info->node,
141 map->width, map->height, map->depth, map->length);
142 return;
143 }
144
145 fb += 2;
146 for (c = 0; c < map->length; c++) {
147 for (i = 0; i < map->height; i++) {
148 fb_writeb(font[i], &fb[i * 4]);
149 fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
150 }
151 fb += 128;
152
153 if ((c % 8) == 7)
154 fb += 128*8;
155
156 font += map->height;
157 }
158}
159
160static struct fb_tile_ops arkfb_tile_ops = {
161 .fb_settile = arkfb_settile,
162 .fb_tilecopy = svga_tilecopy,
163 .fb_tilefill = svga_tilefill,
164 .fb_tileblit = svga_tileblit,
165 .fb_tilecursor = svga_tilecursor,
166 .fb_get_tilemax = svga_get_tilemax,
167};
168
169
170/* ------------------------------------------------------------------------- */
171
172
173/* image data is MSB-first, fb structure is MSB-first too */
174static inline u32 expand_color(u32 c)
175{
176 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
177}
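A quick worked example, following directly from the shifts above:

/*
 * expand_color() turns each of the four plane bits of a 4bpp color
 * index into a full byte mask, so one 32-bit write covers an 8-pixel
 * group of the interleaved-plane layout:
 *
 *   expand_color(0x1) == 0x000000FF   (plane 0 only)
 *   expand_color(0x5) == 0x00FF00FF   (planes 0 and 2)
 *   expand_color(0xF) == 0xFFFFFFFF   (all four planes)
 */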
178
179/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
180static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
181{
182 u32 fg = expand_color(image->fg_color);
183 u32 bg = expand_color(image->bg_color);
184 const u8 *src1, *src;
185 u8 __iomem *dst1;
186 u32 __iomem *dst;
187 u32 val;
188 int x, y;
189
190 src1 = image->data;
191 dst1 = info->screen_base + (image->dy * info->fix.line_length)
192 + ((image->dx / 8) * 4);
193
194 for (y = 0; y < image->height; y++) {
195 src = src1;
196 dst = (u32 __iomem *) dst1;
197 for (x = 0; x < image->width; x += 8) {
198 val = *(src++) * 0x01010101;
199 val = (val & fg) | (~val & bg);
200 fb_writel(val, dst++);
201 }
202 src1 += image->width / 8;
203 dst1 += info->fix.line_length;
204 }
205
206}
207
208/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
209static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
210{
211 u32 fg = expand_color(rect->color);
212 u8 __iomem *dst1;
213 u32 __iomem *dst;
214 int x, y;
215
216 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
217 + ((rect->dx / 8) * 4);
218
219 for (y = 0; y < rect->height; y++) {
220 dst = (u32 __iomem *) dst1;
221 for (x = 0; x < rect->width; x += 8) {
222 fb_writel(fg, dst++);
223 }
224 dst1 += info->fix.line_length;
225 }
226
227}
228
229
230/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
231static inline u32 expand_pixel(u32 c)
232{
233 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
234 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
235}
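The same kind of worked example for the cfb4 variant, again derived from the shifts above:

/*
 * expand_pixel() spreads 8 mono pixels (MSB-first) into 8 nibble masks
 * in the "high nibble in low byte first" layout:
 *
 *   expand_pixel(0x80) == 0x000000F0   leftmost pixel -> high nibble
 *                                      of the lowest byte
 *   expand_pixel(0x01) == 0x0F000000   rightmost pixel -> low nibble
 *                                      of the highest byte
 *   expand_pixel(0xFF) == 0xFFFFFFFF
 */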
236
237/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
238static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
239{
240 u32 fg = image->fg_color * 0x11111111;
241 u32 bg = image->bg_color * 0x11111111;
242 const u8 *src1, *src;
243 u8 __iomem *dst1;
244 u32 __iomem *dst;
245 u32 val;
246 int x, y;
247
248 src1 = image->data;
249 dst1 = info->screen_base + (image->dy * info->fix.line_length)
250 + ((image->dx / 8) * 4);
251
252 for (y = 0; y < image->height; y++) {
253 src = src1;
254 dst = (u32 __iomem *) dst1;
255 for (x = 0; x < image->width; x += 8) {
256 val = expand_pixel(*(src++));
257 val = (val & fg) | (~val & bg);
258 fb_writel(val, dst++);
259 }
260 src1 += image->width / 8;
261 dst1 += info->fix.line_length;
262 }
263
264}
265
266static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
267{
268 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
269 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
270 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
271 arkfb_iplan_imageblit(info, image);
272 else
273 arkfb_cfb4_imageblit(info, image);
274 } else
275 cfb_imageblit(info, image);
276}
277
278static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
279{
280 if ((info->var.bits_per_pixel == 4)
281 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
282 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
283 arkfb_iplan_fillrect(info, rect);
284 else
285 cfb_fillrect(info, rect);
286}
287
288
289/* ------------------------------------------------------------------------- */
290
291
292enum
293{
294 DAC_PSEUDO8_8,
295 DAC_RGB1555_8,
296 DAC_RGB0565_8,
297 DAC_RGB0888_8,
298 DAC_RGB8888_8,
299 DAC_PSEUDO8_16,
300 DAC_RGB1555_16,
301 DAC_RGB0565_16,
302 DAC_RGB0888_16,
303 DAC_RGB8888_16,
304 DAC_MAX
305};
306
307struct dac_ops {
308 int (*dac_get_mode)(struct dac_info *info);
309 int (*dac_set_mode)(struct dac_info *info, int mode);
310 int (*dac_get_freq)(struct dac_info *info, int channel);
311 int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
312 void (*dac_release)(struct dac_info *info);
313};
314
315typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
316typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
317
318struct dac_info
319{
320 struct dac_ops *dacops;
321 dac_read_regs_t dac_read_regs;
322 dac_write_regs_t dac_write_regs;
323 void *data;
324};
325
326
327static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
328{
329 u8 code[2] = {reg, 0};
330 info->dac_read_regs(info->data, code, 1);
331 return code[1];
332}
333
334static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
335{
336 info->dac_read_regs(info->data, code, count);
337}
338
339static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
340{
341 u8 code[2] = {reg, val};
342 info->dac_write_regs(info->data, code, 1);
343}
344
345static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
346{
347 info->dac_write_regs(info->data, code, count);
348}
349
350static inline int dac_set_mode(struct dac_info *info, int mode)
351{
352 return info->dacops->dac_set_mode(info, mode);
353}
354
355static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
356{
357 return info->dacops->dac_set_freq(info, channel, freq);
358}
359
360static inline void dac_release(struct dac_info *info)
361{
362 info->dacops->dac_release(info);
363}
364
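The dac_info/dac_ops indirection keeps the rest of the driver independent of the particular RAMDAC. A minimal sketch, using hypothetical names that are not part of the patch:

/*
 * Sketch (hypothetical names): a second RAMDAC would only need its own
 * ops table and init function --
 *
 *	static struct dac_ops other_dac_ops = {
 *		.dac_set_mode = other_set_mode,
 *		.dac_set_freq = other_set_freq,
 *		.dac_release  = other_release,
 *	};
 *
 * -- everything else in the driver goes through the inline wrappers
 * above and an ics5342_init()-style constructor.
 */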
365
366/* ------------------------------------------------------------------------- */
367
368
369/* ICS5342 DAC */
370
371struct ics5342_info
372{
373 struct dac_info dac;
374 u8 mode;
375};
376
377#define DAC_PAR(info) ((struct ics5342_info *) info)
378
379/* LSB is set to distinguish unused slots */
380static const u8 ics5342_mode_table[DAC_MAX] = {
381 [DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
382 [DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
383 [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
384};
385
386static int ics5342_set_mode(struct dac_info *info, int mode)
387{
388 u8 code;
389
390 if (mode >= DAC_MAX)
391 return -EINVAL;
392
393 code = ics5342_mode_table[mode];
394
395 if (! code)
396 return -EINVAL;
397
398 dac_write_reg(info, 6, code & 0xF0);
399 DAC_PAR(info)->mode = mode;
400
401 return 0;
402}
403
404static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
405 60000, 250000, 14318};
406
407/* pd4 - allow only postdivider 4 (r=2) */
408static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
409 60000, 335000, 14318};
410
411/* 270 MHz should be the upper bound for the VCO clock according to the
412 specs, but that is too restrictive in the pd4 case */
413
414static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
415{
416 u16 m, n, r;
417
418 /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
419 int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
420 ? &ics5342_pll_pd4 : &ics5342_pll,
421 freq, &m, &n, &r, 0);
422
423 if (rv < 0) {
424 return -EINVAL;
425 } else {
426 u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
427 dac_write_regs(info, code, 3);
428 return 0;
429 }
430}
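dac_write_regs() consumes code[] as (register, value) pairs, so three pairs are sent here: {4, 3}, {5, m-2} and {5, (n-2) | (r << 5)}. Reading register 4 as the PLL address pointer (selecting entry 3) and register 5 as an auto-advancing PLL data port is an assumption based on the usual ICS534x programming model; the patch itself does not spell it out.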
431
432static void ics5342_release(struct dac_info *info)
433{
434 ics5342_set_mode(info, DAC_PSEUDO8_8);
435 kfree(info);
436}
437
438static struct dac_ops ics5342_ops = {
439 .dac_set_mode = ics5342_set_mode,
440 .dac_set_freq = ics5342_set_freq,
441 .dac_release = ics5342_release
442};
443
444
445static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
446{
447 struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
448
449 if (! info)
450 return NULL;
451
452 info->dacops = &ics5342_ops;
453 info->dac_read_regs = drr;
454 info->dac_write_regs = dwr;
455 info->data = data;
456 DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* assumed initial mode */
457 return info;
458}
459
460
461/* ------------------------------------------------------------------------- */
462
463
464static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
465
466static void ark_dac_read_regs(void *data, u8 *code, int count)
467{
468 u8 regval = vga_rseq(NULL, 0x1C);
469
470 while (count != 0)
471 {
472 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
473 code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
474 count--;
475 code += 2;
476 }
477
478 vga_wseq(NULL, 0x1C, regval);
479}
480
481static void ark_dac_write_regs(void *data, u8 *code, int count)
482{
483 u8 regval = vga_rseq(NULL, 0x1C);
484
485 while (count != 0)
486 {
487 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
488 vga_w(NULL, dac_regs[code[0] & 3], code[1]);
489 count--;
490 code += 2;
491 }
492
493 vga_wseq(NULL, 0x1C, regval);
494}
495
496
497static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
498{
499 struct arkfb_info *par = info->par;
500 u8 regval;
501
502 int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
503 if (rv < 0) {
504 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
505 return;
506 }
507
508 /* Set VGA misc register */
509 regval = vga_r(NULL, VGA_MIS_R);
510 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
511}
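var->pixclock is a pixel period in picoseconds, so 1000000000 / pixclock is the dot clock in kHz. A worked example:

/*
 * Example: the standard 640x480@60 mode has pixclock == 39722 ps,
 * so dac_set_freq() is asked for 1000000000 / 39722 == 25174 kHz,
 * i.e. the usual ~25.175 MHz VGA dot clock.
 */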
512
513
514/* Open framebuffer */
515
516static int arkfb_open(struct fb_info *info, int user)
517{
518 struct arkfb_info *par = info->par;
519
520 mutex_lock(&(par->open_lock));
521 if (par->ref_count == 0) {
522 memset(&(par->state), 0, sizeof(struct vgastate));
523 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
524 par->state.num_crtc = 0x60;
525 par->state.num_seq = 0x30;
526 save_vga(&(par->state));
527 }
528
529 par->ref_count++;
530 mutex_unlock(&(par->open_lock));
531
532 return 0;
533}
534
535/* Close framebuffer */
536
537static int arkfb_release(struct fb_info *info, int user)
538{
539 struct arkfb_info *par = info->par;
540
541 mutex_lock(&(par->open_lock));
542 if (par->ref_count == 0) {
543 mutex_unlock(&(par->open_lock));
544 return -EINVAL;
545 }
546
547 if (par->ref_count == 1) {
548 restore_vga(&(par->state));
549 dac_set_mode(par->dac, DAC_PSEUDO8_8);
550 }
551
552 par->ref_count--;
553 mutex_unlock(&(par->open_lock));
554
555 return 0;
556}
557
558/* Validate passed in var */
559
560static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
561{
562 int rv, mem, step;
563
564 /* Find appropriate format */
565 rv = svga_match_format (arkfb_formats, var, NULL);
566 if (rv < 0)
567 {
568 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
569 return rv;
570 }
571
572 /* Do not allow the real resolution to be larger than the virtual one */
573 if (var->xres > var->xres_virtual)
574 var->xres_virtual = var->xres;
575
576 if (var->yres > var->yres_virtual)
577 var->yres_virtual = var->yres;
578
579 /* Round up xres_virtual to have proper alignment of lines */
580 step = arkfb_formats[rv].xresstep - 1;
581 var->xres_virtual = (var->xres_virtual+step) & ~step;
582
583
584 /* Check whether we have enough memory */
585 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
586 if (mem > info->screen_size)
587 {
588 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
589 return -EINVAL;
590 }
591
592 rv = svga_check_timings (&ark_timing_regs, var, info->node);
593 if (rv < 0)
594 {
595 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
596 return rv;
597 }
598
599 /* Interlaced mode is broken */
600 if (var->vmode & FB_VMODE_INTERLACED)
601 return -EINVAL;
602
603 return 0;
604}
605
606/* Set video mode from par */
607
608static int arkfb_set_par(struct fb_info *info)
609{
610 struct arkfb_info *par = info->par;
611 u32 value, mode, hmul, hdiv, offset_value, screen_size;
612 u32 bpp = info->var.bits_per_pixel;
613 u8 regval;
614
615 if (bpp != 0) {
616 info->fix.ypanstep = 1;
617 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
618
619 info->flags &= ~FBINFO_MISC_TILEBLITTING;
620 info->tileops = NULL;
621
622 /* 4bpp supports only 8-pixel wide blits, any width otherwise */
623 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
624 info->pixmap.blit_y = ~(u32)0;
625
626 offset_value = (info->var.xres_virtual * bpp) / 64;
627 screen_size = info->var.yres_virtual * info->fix.line_length;
628 } else {
629 info->fix.ypanstep = 16;
630 info->fix.line_length = 0;
631
632 info->flags |= FBINFO_MISC_TILEBLITTING;
633 info->tileops = &arkfb_tile_ops;
634
635 /* supports 8x16 tiles only */
636 info->pixmap.blit_x = 1 << (8 - 1);
637 info->pixmap.blit_y = 1 << (16 - 1);
638
639 offset_value = info->var.xres_virtual / 16;
640 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
641 }
642
643 info->var.xoffset = 0;
644 info->var.yoffset = 0;
645 info->var.activate = FB_ACTIVATE_NOW;
646
647 /* Unlock registers */
648 svga_wcrt_mask(0x11, 0x00, 0x80);
649
650 /* Blank screen and turn off sync */
651 svga_wseq_mask(0x01, 0x20, 0x20);
652 svga_wcrt_mask(0x17, 0x00, 0x80);
653
654 /* Set default values */
655 svga_set_default_gfx_regs();
656 svga_set_default_atc_regs();
657 svga_set_default_seq_regs();
658 svga_set_default_crt_regs();
659 svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
660 svga_wcrt_multi(ark_start_address_regs, 0);
661
662 /* ARK specific initialization */
663 svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
664 svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
665
666 vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
667 vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
668 vga_wseq(NULL, 0x15, 0);
669 vga_wseq(NULL, 0x16, 0);
670
671 /* Set the FIFO threshold register */
672 /* The 5-bit threshold is scattered over the 8-bit register: bits 3:1 land in bits 2:0, bit 0 in bit 7, bit 4 in bit 5 */
673 regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
674 vga_wseq(NULL, 0x18, regval);
675
676 /* Set the offset register */
677 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
678 svga_wcrt_multi(ark_offset_regs, offset_value);
679
680 /* fix for hi-res textmode */
681 svga_wcrt_mask(0x40, 0x08, 0x08);
682
683 if (info->var.vmode & FB_VMODE_DOUBLE)
684 svga_wcrt_mask(0x09, 0x80, 0x80);
685 else
686 svga_wcrt_mask(0x09, 0x00, 0x80);
687
688 if (info->var.vmode & FB_VMODE_INTERLACED)
689 svga_wcrt_mask(0x44, 0x04, 0x04);
690 else
691 svga_wcrt_mask(0x44, 0x00, 0x04);
692
693 hmul = 1;
694 hdiv = 1;
695 mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
696
697 /* Set mode-specific register values */
698 switch (mode) {
699 case 0:
700 pr_debug("fb%d: text mode\n", info->node);
701 svga_set_textmode_vga_regs();
702
703 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
704 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
705 dac_set_mode(par->dac, DAC_PSEUDO8_8);
706
707 break;
708 case 1:
709 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
710 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
711
712 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
713 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
714 dac_set_mode(par->dac, DAC_PSEUDO8_8);
715 break;
716 case 2:
717 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
718
719 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
720 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
721 dac_set_mode(par->dac, DAC_PSEUDO8_8);
722 break;
723 case 3:
724 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
725
726 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
727
728 if (info->var.pixclock > 20000) {
729 pr_debug("fb%d: not using multiplex\n", info->node);
730 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
731 dac_set_mode(par->dac, DAC_PSEUDO8_8);
732 } else {
733 pr_debug("fb%d: using multiplex\n", info->node);
734 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
735 dac_set_mode(par->dac, DAC_PSEUDO8_16);
736 hdiv = 2;
737 }
738 break;
739 case 4:
740 pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
741
742 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
743 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
744 dac_set_mode(par->dac, DAC_RGB1555_16);
745 break;
746 case 5:
747 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
748
749 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
750 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
751 dac_set_mode(par->dac, DAC_RGB0565_16);
752 break;
753 case 6:
754 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
755
756 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
757 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
758 dac_set_mode(par->dac, DAC_RGB0888_16);
759 hmul = 3;
760 hdiv = 2;
761 break;
762 case 7:
763 pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
764
765 vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
766 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
767 dac_set_mode(par->dac, DAC_RGB8888_16);
768 hmul = 2;
769 break;
770 default:
771 printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
772 return -EINVAL;
773 }
774
775 ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
776 svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
777 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
778 (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
779 hmul, info->node);
780
781 /* Set interlaced mode start/end register */
782 value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
783 value = ((value * hmul / hdiv) / 8) - 5;
784 vga_wcrt(NULL, 0x42, (value + 1) / 2);
785
786 memset_io(info->screen_base, 0x00, screen_size);
787 /* Device and screen back on */
788 svga_wcrt_mask(0x17, 0x80, 0x80);
789 svga_wseq_mask(0x01, 0x00, 0x20);
790
791 return 0;
792}
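The hmul/hdiv pair rescales the pixel clock for modes where the DAC moves more or less than one pixel per transfer on the 16-bit path; e.g. for the 24bpp case above (hmul = 3, hdiv = 2):

/*
 * 24bpp over a 16-bit pixel path moves 3 bytes per pixel in 2-byte
 * transfers, so the DAC clock must run at 3/2 of the pixel rate:
 *
 *   ark_set_pixclock(info, (2 * pixclock) / 3)
 *
 * shrinking the period to 2/3 raises the frequency by 3/2.
 */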
793
794/* Set a colour register */
795
796static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
797 u_int transp, struct fb_info *fb)
798{
799 switch (fb->var.bits_per_pixel) {
800 case 0:
801 case 4:
802 if (regno >= 16)
803 return -EINVAL;
804
805 if ((fb->var.bits_per_pixel == 4) &&
806 (fb->var.nonstd == 0)) {
807 outb(0xF0, VGA_PEL_MSK);
808 outb(regno*16, VGA_PEL_IW);
809 } else {
810 outb(0x0F, VGA_PEL_MSK);
811 outb(regno, VGA_PEL_IW);
812 }
813 outb(red >> 10, VGA_PEL_D);
814 outb(green >> 10, VGA_PEL_D);
815 outb(blue >> 10, VGA_PEL_D);
816 break;
817 case 8:
818 if (regno >= 256)
819 return -EINVAL;
820
821 outb(0xFF, VGA_PEL_MSK);
822 outb(regno, VGA_PEL_IW);
823 outb(red >> 10, VGA_PEL_D);
824 outb(green >> 10, VGA_PEL_D);
825 outb(blue >> 10, VGA_PEL_D);
826 break;
827 case 16:
828 if (regno >= 16)
829 return 0;
830
831 if (fb->var.green.length == 5)
832 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
833 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
834 else if (fb->var.green.length == 6)
835 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
836 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
837 else
838 return -EINVAL;
839 break;
840 case 24:
841 case 32:
842 if (regno >= 16)
843 return 0;
844
845 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
846 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
847 break;
848 default:
849 return -EINVAL;
850 }
851
852 return 0;
853}
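For the truecolor depths the hardware palette is bypassed and the pseudo palette is filled instead. A worked example of the 5/6/5 packing above:

/*
 * Components arrive as 16-bit values; with green.length == 6 (RGB565),
 * pure green (red = 0, green = 0xFFFF, blue = 0) packs as
 *
 *   (0 & 0xF800) | ((0xFFFF & 0xFC00) >> 5) | ((0 & 0xF800) >> 11)
 *     == 0x07E0, the classic RGB565 green mask.
 */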
854
855/* Set the display blanking state */
856
857static int arkfb_blank(int blank_mode, struct fb_info *info)
858{
859 switch (blank_mode) {
860 case FB_BLANK_UNBLANK:
861 pr_debug("fb%d: unblank\n", info->node);
862 svga_wseq_mask(0x01, 0x00, 0x20);
863 svga_wcrt_mask(0x17, 0x80, 0x80);
864 break;
865 case FB_BLANK_NORMAL:
866 pr_debug("fb%d: blank\n", info->node);
867 svga_wseq_mask(0x01, 0x20, 0x20);
868 svga_wcrt_mask(0x17, 0x80, 0x80);
869 break;
870 case FB_BLANK_POWERDOWN:
871 case FB_BLANK_HSYNC_SUSPEND:
872 case FB_BLANK_VSYNC_SUSPEND:
873 pr_debug("fb%d: sync down\n", info->node);
874 svga_wseq_mask(0x01, 0x20, 0x20);
875 svga_wcrt_mask(0x17, 0x00, 0x80);
876 break;
877 }
878 return 0;
879}
880
881
882/* Pan the display */
883
884static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
885{
886 unsigned int offset;
887
888 /* Calculate the offset */
889 if (var->bits_per_pixel == 0) {
890 offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
891 offset = offset >> 2;
892 } else {
893 offset = (var->yoffset * info->fix.line_length) +
894 (var->xoffset * var->bits_per_pixel / 8);
895 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
896 }
897
898 /* Set the offset */
899 svga_wcrt_multi(ark_start_address_regs, offset);
900
901 return 0;
902}
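The final shifts convert the byte offset into the units of the start-address registers; as implied by the code, that is 8-byte units at 8 bpp and deeper, and 4-byte units for 4 bpp and text mode. For example:

/*
 * Example: 8 bpp, line_length == 1024, panning to yoffset == 16:
 *
 *   offset = 16 * 1024 + 0 == 16384 bytes
 *   offset >>= 3          -> 2048, the start address in 8-byte units
 */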
903
904
905/* ------------------------------------------------------------------------- */
906
907
908/* Frame buffer operations */
909
910static struct fb_ops arkfb_ops = {
911 .owner = THIS_MODULE,
912 .fb_open = arkfb_open,
913 .fb_release = arkfb_release,
914 .fb_check_var = arkfb_check_var,
915 .fb_set_par = arkfb_set_par,
916 .fb_setcolreg = arkfb_setcolreg,
917 .fb_blank = arkfb_blank,
918 .fb_pan_display = arkfb_pan_display,
919 .fb_fillrect = arkfb_fillrect,
920 .fb_copyarea = cfb_copyarea,
921 .fb_imageblit = arkfb_imageblit,
922 .fb_get_caps = svga_get_caps,
923};
924
925
926/* ------------------------------------------------------------------------- */
927
928
929/* PCI probe */
930static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
931{
932 struct fb_info *info;
933 struct arkfb_info *par;
934 int rc;
935 u8 regval;
936
937 /* Ignore secondary VGA device because there is no VGA arbitration */
938 if (! svga_primary_device(dev)) {
939 dev_info(&(dev->dev), "ignoring secondary device\n");
940 return -ENODEV;
941 }
942
943 /* Allocate and fill driver data structure */
944 info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
945 if (! info) {
946 dev_err(&(dev->dev), "cannot allocate memory\n");
947 return -ENOMEM;
948 }
949
950 par = info->par;
951 mutex_init(&par->open_lock);
952
953 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
954 info->fbops = &arkfb_ops;
955
956 /* Prepare PCI device */
957 rc = pci_enable_device(dev);
958 if (rc < 0) {
959 dev_err(&(dev->dev), "cannot enable PCI device\n");
960 goto err_enable_device;
961 }
962
963 rc = pci_request_regions(dev, "arkfb");
964 if (rc < 0) {
965 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
966 goto err_request_regions;
967 }
968
969 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
970 if (! par->dac) {
971 rc = -ENOMEM;
972 dev_err(&(dev->dev), "RAMDAC initialization failed\n");
973 goto err_dac;
974 }
975
976 info->fix.smem_start = pci_resource_start(dev, 0);
977 info->fix.smem_len = pci_resource_len(dev, 0);
978
979 /* Map physical IO memory address into kernel space */
980 info->screen_base = pci_iomap(dev, 0, 0);
981 if (! info->screen_base) {
982 rc = -ENOMEM;
983 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
984 goto err_iomap;
985 }
986
987 /* FIXME get memsize */
988 regval = vga_rseq(NULL, 0x10);
989 info->screen_size = (1 << (regval >> 6)) << 20;
990 info->fix.smem_len = info->screen_size;
991
992 strcpy(info->fix.id, "ARK 2000PV");
993 info->fix.mmio_start = 0;
994 info->fix.mmio_len = 0;
995 info->fix.type = FB_TYPE_PACKED_PIXELS;
996 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
997 info->fix.ypanstep = 0;
998 info->fix.accel = FB_ACCEL_NONE;
999 info->pseudo_palette = (void*) (par->pseudo_palette);
1000
1001 /* Prepare startup mode */
1002 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
1003 if (! ((rc == 1) || (rc == 2))) {
1004 rc = -EINVAL;
1005 dev_err(&(dev->dev), "mode %s not found\n", mode);
1006 goto err_find_mode;
1007 }
1008
1009 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1010 if (rc < 0) {
1011 dev_err(&(dev->dev), "cannot allocate colormap\n");
1012 goto err_alloc_cmap;
1013 }
1014
1015 rc = register_framebuffer(info);
1016 if (rc < 0) {
1017 dev_err(&(dev->dev), "cannot register framebuffer\n");
1018 goto err_reg_fb;
1019 }
1020
1021 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
1022 pci_name(dev), info->fix.smem_len >> 20);
1023
1024 /* Record a reference to the driver data */
1025 pci_set_drvdata(dev, info);
1026
1027#ifdef CONFIG_MTRR
1028 if (mtrr) {
1029 par->mtrr_reg = -1;
1030 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
1031 }
1032#endif
1033
1034 return 0;
1035
1036 /* Error handling */
1037err_reg_fb:
1038 fb_dealloc_cmap(&info->cmap);
1039err_alloc_cmap:
1040err_find_mode:
1041 pci_iounmap(dev, info->screen_base);
1042err_iomap:
1043 dac_release(par->dac);
1044err_dac:
1045 pci_release_regions(dev);
1046err_request_regions:
1047/* pci_disable_device(dev); */
1048err_enable_device:
1049 framebuffer_release(info);
1050 return rc;
1051}
1052
1053/* PCI remove */
1054
1055static void __devexit ark_pci_remove(struct pci_dev *dev)
1056{
1057 struct fb_info *info = pci_get_drvdata(dev);
1058
1059 if (info) {
1060 struct arkfb_info *par = info->par;
1061#ifdef CONFIG_MTRR
1062 if (par->mtrr_reg >= 0) {
1063 mtrr_del(par->mtrr_reg, 0, 0);
1064 par->mtrr_reg = -1;
1065 }
1066#endif
1067
1068 dac_release(par->dac);
1069 unregister_framebuffer(info);
1070 fb_dealloc_cmap(&info->cmap);
1071
1072 pci_iounmap(dev, info->screen_base);
1073 pci_release_regions(dev);
1074/* pci_disable_device(dev); */
1075
1076 pci_set_drvdata(dev, NULL);
1077 framebuffer_release(info);
1078 }
1079}
1080
1081
1082#ifdef CONFIG_PM
1083/* PCI suspend */
1084
1085static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1086{
1087 struct fb_info *info = pci_get_drvdata(dev);
1088 struct arkfb_info *par = info->par;
1089
1090 dev_info(&(dev->dev), "suspend\n");
1091
1092 acquire_console_sem();
1093 mutex_lock(&(par->open_lock));
1094
1095 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
1096 mutex_unlock(&(par->open_lock));
1097 release_console_sem();
1098 return 0;
1099 }
1100
1101 fb_set_suspend(info, 1);
1102
1103 pci_save_state(dev);
1104 pci_disable_device(dev);
1105 pci_set_power_state(dev, pci_choose_state(dev, state));
1106
1107 mutex_unlock(&(par->open_lock));
1108 release_console_sem();
1109
1110 return 0;
1111}
1112
1113
1114/* PCI resume */
1115
1116static int ark_pci_resume (struct pci_dev* dev)
1117{
1118 struct fb_info *info = pci_get_drvdata(dev);
1119 struct arkfb_info *par = info->par;
1120
1121 dev_info(&(dev->dev), "resume\n");
1122
1123 acquire_console_sem();
1124 mutex_lock(&(par->open_lock));
1125
1126 if (par->ref_count == 0) {
1127 mutex_unlock(&(par->open_lock));
1128 release_console_sem();
1129 return 0;
1130 }
1131
1132 pci_set_power_state(dev, PCI_D0);
1133 pci_restore_state(dev);
1134
1135 if (pci_enable_device(dev))
1136 goto fail;
1137
1138 pci_set_master(dev);
1139
1140 arkfb_set_par(info);
1141 fb_set_suspend(info, 0);
1142
1143fail:
1144 mutex_unlock(&(par->open_lock));
1145 release_console_sem();
1146 return 0;
1147}
1148#else
1149#define ark_pci_suspend NULL
1150#define ark_pci_resume NULL
1151#endif /* CONFIG_PM */
1152
1153/* List of boards that we are trying to support */
1154
1155static struct pci_device_id ark_devices[] __devinitdata = {
1156 {PCI_DEVICE(0xEDD8, 0xA099)},
1157 {0, 0, 0, 0, 0, 0, 0}
1158};
1159
1160
1161MODULE_DEVICE_TABLE(pci, ark_devices);
1162
1163static struct pci_driver arkfb_pci_driver = {
1164 .name = "arkfb",
1165 .id_table = ark_devices,
1166 .probe = ark_pci_probe,
1167 .remove = __devexit_p(ark_pci_remove),
1168 .suspend = ark_pci_suspend,
1169 .resume = ark_pci_resume,
1170};
1171
1172/* Cleanup */
1173
1174static void __exit arkfb_cleanup(void)
1175{
1176 pr_debug("arkfb: cleaning up\n");
1177 pci_unregister_driver(&arkfb_pci_driver);
1178}
1179
1180/* Driver Initialisation */
1181
1182static int __init arkfb_init(void)
1183{
1184
1185#ifndef MODULE
1186 char *option = NULL;
1187
1188 if (fb_get_options("arkfb", &option))
1189 return -ENODEV;
1190
1191 if (option && *option)
1192 mode = option;
1193#endif
1194
1195 pr_debug("arkfb: initializing\n");
1196 return pci_register_driver(&arkfb_pci_driver);
1197}
1198
1199module_init(arkfb_init);
1200module_exit(arkfb_cleanup);
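Since arkfb_init() consults fb_get_options("arkfb", ...) when built in, the default mode can be overridden the usual fbdev way, e.g. video=arkfb:800x600-16@75 on the kernel command line; built as a module, the same is done with mode=800x600-16@75 (and optionally threshold=...) as module parameters.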
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index f577bd80e020..03cfb7ac5733 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/video/softcursor.c 2 * linux/drivers/video/console/softcursor.c
3 * 3 *
4 * Generic software cursor for frame buffer devices 4 * Generic software cursor for frame buffer devices
5 * 5 *
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 08d4e11d9121..38c2e2558f5e 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__) 1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1239#elif defined(__avr32__)
1240 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
1241 & ~_PAGE_CACHABLE)
1242 | (_PAGE_BUFFER | _PAGE_DIRTY));
1239#elif defined(__ia64__) 1243#elif defined(__ia64__)
1240 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) 1244 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
1241 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 1245 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 7e760197cf29..1a7d7789d877 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1717,7 +1717,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
1717 * @info: pointer to device specific info structure 1717 * @info: pointer to device specific info structure
1718 * 1718 *
1719 * DESCRIPTION: 1719 * DESCRIPTION:
1720 * Sets the the user monitor's horizontal and vertical 1720 * Sets the user monitor's horizontal and vertical
1721 * frequency limits 1721 * frequency limits
1722 */ 1722 */
1723static void __devinit i810_init_monspecs(struct fb_info *info) 1723static void __devinit i810_init_monspecs(struct fb_info *info)
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index a5690a5f29d5..9445cdb759b1 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -72,7 +72,7 @@
72 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 72 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
73 * 73 *
74 * (following author is not in any relation with this code, but his ideas 74 * (following author is not in any relation with this code, but his ideas
75 * were used when writting this driver) 75 * were used when writing this driver)
76 * 76 *
77 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 77 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
78 * 78 *
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index a5c825d99466..c57aaadf410c 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -70,7 +70,7 @@
70 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 70 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
71 * 71 *
72 * (following author is not in any relation with this code, but his ideas 72 * (following author is not in any relation with this code, but his ideas
73 * were used when writting this driver) 73 * were used when writing this driver)
74 * 74 *
75 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 75 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
76 * 76 *
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index cb2aa402ddfd..c8559a756b75 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -93,7 +93,7 @@
93 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 93 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
94 * 94 *
95 * (following author is not in any relation with this code, but his ideas 95 * (following author is not in any relation with this code, but his ideas
96 * were used when writting this driver) 96 * were used when writing this driver)
97 * 97 *
98 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 98 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
99 * 99 *
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index 18886b629cb1..5948e54b9ef9 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -78,7 +78,7 @@
78 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> 78 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
79 * 79 *
80 * (following author is not in any relation with this code, but his ideas 80 * (following author is not in any relation with this code, but his ideas
81 * were used when writting this driver) 81 * were used when writing this driver)
82 * 82 *
83 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> 83 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
84 * 84 *
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index f297c7b14a41..c627955aa124 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -149,8 +149,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
149 pll = NV_RD32(par->PMC, 0x4024); 149 pll = NV_RD32(par->PMC, 0x4024);
150 M = pll & 0xFF; 150 M = pll & 0xFF;
151 N = (pll >> 8) & 0xFF; 151 N = (pll >> 8) & 0xFF;
152 if (((par->Chipset & 0xfff0) == 0x0290) || 152 if (((par->Chipset & 0xfff0) == 0x0290) || ((par->Chipset & 0xfff0) == 0x0390) || ((par->Chipset & 0xfff0) == 0x02E0)) {
153 ((par->Chipset & 0xfff0) == 0x0390)) {
154 MB = 1; 153 MB = 1;
155 NB = 1; 154 NB = 1;
156 } else { 155 } else {
@@ -963,6 +962,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
963 962
964 if (((par->Chipset & 0xfff0) == 0x0090) || 963 if (((par->Chipset & 0xfff0) == 0x0090) ||
965 ((par->Chipset & 0xfff0) == 0x01D0) || 964 ((par->Chipset & 0xfff0) == 0x01D0) ||
965 ((par->Chipset & 0xfff0) == 0x02E0) ||
966 ((par->Chipset & 0xfff0) == 0x0290)) 966 ((par->Chipset & 0xfff0) == 0x0290))
967 regions = 15; 967 regions = 15;
968 for(i = 0; i < regions; i++) { 968 for(i = 0; i < regions; i++) {
@@ -1275,6 +1275,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1275 0x00100000); 1275 0x00100000);
1276 break; 1276 break;
1277 case 0x0090: 1277 case 0x0090:
1278 case 0x02E0:
1278 case 0x0290: 1279 case 0x0290:
1279 NV_WR32(par->PRAMDAC, 0x0608, 1280 NV_WR32(par->PRAMDAC, 0x0608,
1280 NV_RD32(par->PRAMDAC, 0x0608) | 1281 NV_RD32(par->PRAMDAC, 0x0608) |
@@ -1352,6 +1353,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1352 } else { 1353 } else {
1353 if (((par->Chipset & 0xfff0) == 0x0090) || 1354 if (((par->Chipset & 0xfff0) == 0x0090) ||
1354 ((par->Chipset & 0xfff0) == 0x01D0) || 1355 ((par->Chipset & 0xfff0) == 0x01D0) ||
1356 ((par->Chipset & 0xfff0) == 0x02E0) ||
1355 ((par->Chipset & 0xfff0) == 0x0290)) { 1357 ((par->Chipset & 0xfff0) == 0x0290)) {
1356 for (i = 0; i < 60; i++) { 1358 for (i = 0; i < 60; i++) {
1357 NV_WR32(par->PGRAPH, 1359 NV_WR32(par->PGRAPH,
@@ -1403,6 +1405,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1403 } else { 1405 } else {
1404 if ((par->Chipset & 0xfff0) == 0x0090 || 1406 if ((par->Chipset & 0xfff0) == 0x0090 ||
1405 (par->Chipset & 0xfff0) == 0x01D0 || 1407 (par->Chipset & 0xfff0) == 0x01D0 ||
1408 (par->Chipset & 0xfff0) == 0x02E0 ||
1406 (par->Chipset & 0xfff0) == 0x0290) { 1409 (par->Chipset & 0xfff0) == 0x0290) {
1407 NV_WR32(par->PGRAPH, 0x0DF0, 1410 NV_WR32(par->PGRAPH, 0x0DF0,
1408 NV_RD32(par->PFB, 0x0200)); 1411 NV_RD32(par->PFB, 0x0200));
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 7c36b5fe582e..f85edf084da3 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1243,6 +1243,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
1243 case 0x0140: /* GeForce 6600 */ 1243 case 0x0140: /* GeForce 6600 */
1244 case 0x0160: /* GeForce 6200 */ 1244 case 0x0160: /* GeForce 6200 */
1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */ 1245 case 0x01D0: /* GeForce 7200, 7300, 7400 */
1246 case 0x02E0: /* GeForce 7300 GT */
1246 case 0x0090: /* GeForce 7800 */ 1247 case 0x0090: /* GeForce 7800 */
1247 case 0x0210: /* GeForce 6800 */ 1248 case 0x0210: /* GeForce 6800 */
1248 case 0x0220: /* GeForce 6200 */ 1249 case 0x0220: /* GeForce 6200 */
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 756fafb41d78..d11735895a01 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
796 return 0; 796 return 0;
797} 797}
798 798
799/* Get capabilities of accelerator based on the mode */
800
801static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
802 struct fb_var_screeninfo *var)
803{
804 if (var->bits_per_pixel == 0) {
805 /* can only support 256 8x16 bitmap */
806 caps->x = 1 << (8 - 1);
807 caps->y = 1 << (16 - 1);
808 caps->len = 256;
809 } else {
810 caps->x = ~(u32)0;
811 caps->y = ~(u32)0;
812 caps->len = ~(u32)0;
813 }
814}
815
816/* ------------------------------------------------------------------------- */ 799/* ------------------------------------------------------------------------- */
817 800
818/* Frame buffer operations */ 801/* Frame buffer operations */
@@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = {
829 .fb_fillrect = s3fb_fillrect, 812 .fb_fillrect = s3fb_fillrect,
830 .fb_copyarea = cfb_copyarea, 813 .fb_copyarea = cfb_copyarea,
831 .fb_imageblit = s3fb_imageblit, 814 .fb_imageblit = s3fb_imageblit,
832 .fb_get_caps = s3fb_get_caps, 815 .fb_get_caps = svga_get_caps,
833}; 816};
834 817
835/* ------------------------------------------------------------------------- */ 818/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 842b5cd054c6..836a612af977 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -14,7 +14,7 @@
14 * of it. 14 * of it.
15 * 15 *
16 * First the roles of struct fb_info and struct display have changed. Struct 16 * First the roles of struct fb_info and struct display have changed. Struct
17 * display will go away. The way the the new framebuffer console code will 17 * display will go away. The way the new framebuffer console code will
18 * work is that it will act to translate data about the tty/console in 18 * work is that it will act to translate data about the tty/console in
19 * struct vc_data to data in a device independent way in struct fb_info. Then 19 * struct vc_data to data in a device independent way in struct fb_info. Then
20 * various functions in struct fb_ops will be called to store the device 20 * various functions in struct fb_ops will be called to store the device
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 079cdc911e48..25df928d37d8 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info)
347 return 256; 347 return 256;
348} 348}
349 349
350/* Get capabilities of accelerator based on the mode */
351
352void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
353 struct fb_var_screeninfo *var)
354{
355 if (var->bits_per_pixel == 0) {
356 /* can only support 256 8x16 bitmap */
357 caps->x = 1 << (8 - 1);
358 caps->y = 1 << (16 - 1);
359 caps->len = 256;
360 } else {
361 caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
362 caps->y = ~(u32)0;
363 caps->len = ~(u32)0;
364 }
365}
366EXPORT_SYMBOL(svga_get_caps);
350 367
351/* ------------------------------------------------------------------------- */ 368/* ------------------------------------------------------------------------- */
352 369
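Moving this helper out of s3fb (see the s3fb hunk above) into shared svgalib code lets s3fb and the new arkfb and vt8623fb drivers all point .fb_get_caps at one implementation; the 4bpp case now advertises 8-pixel-wide blits only, matching the alignment the iplan/cfb4 blitters assume.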
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
new file mode 100644
index 000000000000..5e9755e464a1
--- /dev/null
+++ b/drivers/video/vt8623fb.c
@@ -0,0 +1,927 @@
1/*
2 * linux/drivers/video/vt8623fb.c - fbdev driver for
3 * integrated graphic core in VIA VT8623 [CLE266] chipset
4 *
5 * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb, some parts are from David Boucher's viafb
12 * (http://davesdomain.org.uk/viafb/)
13 */
14
15#include <linux/version.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/tty.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/fb.h>
25#include <linux/svga.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/console.h> /* for acquire_console_sem() and release_console_sem() */
29#include <video/vga.h>
30
31#ifdef CONFIG_MTRR
32#include <asm/mtrr.h>
33#endif
34
35struct vt8623fb_info {
36 char __iomem *mmio_base;
37 int mtrr_reg;
38 struct vgastate state;
39 struct mutex open_lock;
40 unsigned int ref_count;
41 u32 pseudo_palette[16];
42};
43
44
45
46/* ------------------------------------------------------------------------- */
47
48static const struct svga_fb_format vt8623fb_formats[] = {
49 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
50 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
51 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
52 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
53 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
54 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
55 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
56 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
57/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
58 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
59 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
60 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
61 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
62 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
63 SVGA_FORMAT_END
64};
65
66static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
67 60000, 300000, 14318};
68
69/* CRT timing register sets */
70
71struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
72struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
73struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
74struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
75struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
76struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
77
78struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
79struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
80struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
81struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
82struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
83struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
84
85struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
86struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
87struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
88struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
89
90struct svga_timing_regs vt8623_timing_regs = {
91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
94 vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
95};
96
97
98/* ------------------------------------------------------------------------- */
99
100
101/* Module parameters */
102
103static char *mode = "640x480-8@60";
104
105#ifdef CONFIG_MTRR
106static int mtrr = 1;
107#endif
108
109MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
110MODULE_LICENSE("GPL");
111MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
112
113module_param(mode, charp, 0644);
114MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
115
116#ifdef CONFIG_MTRR
117module_param(mtrr, int, 0444);
118MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
119#endif
120
121
122/* ------------------------------------------------------------------------- */
123
124
125static struct fb_tile_ops vt8623fb_tile_ops = {
126 .fb_settile = svga_settile,
127 .fb_tilecopy = svga_tilecopy,
128 .fb_tilefill = svga_tilefill,
129 .fb_tileblit = svga_tileblit,
130 .fb_tilecursor = svga_tilecursor,
131 .fb_get_tilemax = svga_get_tilemax,
132};
133
134
135/* ------------------------------------------------------------------------- */
136
137
138/* image data is MSB-first, fb structure is MSB-first too */
139static inline u32 expand_color(u32 c)
140{
141 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
142}
143
144/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
145static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
146{
147 u32 fg = expand_color(image->fg_color);
148 u32 bg = expand_color(image->bg_color);
149 const u8 *src1, *src;
150 u8 __iomem *dst1;
151 u32 __iomem *dst;
152 u32 val;
153 int x, y;
154
155 src1 = image->data;
156 dst1 = info->screen_base + (image->dy * info->fix.line_length)
157 + ((image->dx / 8) * 4);
158
159 for (y = 0; y < image->height; y++) {
160 src = src1;
161 dst = (u32 __iomem *) dst1;
162 for (x = 0; x < image->width; x += 8) {
163 val = *(src++) * 0x01010101;
164 val = (val & fg) | (~val & bg);
165 fb_writel(val, dst++);
166 }
167 src1 += image->width / 8;
168 dst1 += info->fix.line_length;
169 }
170}
171
172/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
173static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
174{
175 u32 fg = expand_color(rect->color);
176 u8 __iomem *dst1;
177 u32 __iomem *dst;
178 int x, y;
179
180 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
181 + ((rect->dx / 8) * 4);
182
183 for (y = 0; y < rect->height; y++) {
184 dst = (u32 __iomem *) dst1;
185 for (x = 0; x < rect->width; x += 8) {
186 fb_writel(fg, dst++);
187 }
188 dst1 += info->fix.line_length;
189 }
190}
191
192
193/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
194static inline u32 expand_pixel(u32 c)
195{
196 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
197 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
198}
199
200/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
201static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
202{
203 u32 fg = image->fg_color * 0x11111111;
204 u32 bg = image->bg_color * 0x11111111;
205 const u8 *src1, *src;
206 u8 __iomem *dst1;
207 u32 __iomem *dst;
208 u32 val;
209 int x, y;
210
211 src1 = image->data;
212 dst1 = info->screen_base + (image->dy * info->fix.line_length)
213 + ((image->dx / 8) * 4);
214
215 for (y = 0; y < image->height; y++) {
216 src = src1;
217 dst = (u32 __iomem *) dst1;
218 for (x = 0; x < image->width; x += 8) {
219 val = expand_pixel(*(src++));
220 val = (val & fg) | (~val & bg);
221 fb_writel(val, dst++);
222 }
223 src1 += image->width / 8;
224 dst1 += info->fix.line_length;
225 }
226}
227
228static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
229{
230 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
231 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
232 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
233 vt8623fb_iplan_imageblit(info, image);
234 else
235 vt8623fb_cfb4_imageblit(info, image);
236 } else
237 cfb_imageblit(info, image);
238}
239
240static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
241{
242 if ((info->var.bits_per_pixel == 4)
243 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
244 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
245 vt8623fb_iplan_fillrect(info, rect);
246 else
247 cfb_fillrect(info, rect);
248}
249
250
251/* ------------------------------------------------------------------------- */
252
253
254static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
255{
256 u16 m, n, r;
257 u8 regval;
258 int rv;
259
260 rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
261 if (rv < 0) {
262 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
263 return;
264 }
265
266 /* Set VGA misc register */
267 regval = vga_r(NULL, VGA_MIS_R);
268 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
269
270 /* Set clock registers */
271 vga_wseq(NULL, 0x46, (n | (r << 6)));
272 vga_wseq(NULL, 0x47, m);
273
274 udelay(1000);
275
276 /* PLL reset */
277 svga_wseq_mask(0x40, 0x02, 0x02);
278 svga_wseq_mask(0x40, 0x00, 0x02);
279}
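svga_compute_pll() chooses m, n and r for the requested frequency; assuming the usual svgalib convention, the output is roughly f_base * m / (n << r) with f_base = 14318 kHz here. The result lands in SR46/SR47:

/*
 * Assumed relation (svgalib convention, not spelled out in the patch):
 *
 *   f_out ~= 14318 kHz * m / (n << r)
 *
 * SR46 = n | (r << 6), SR47 = m; the SR40 bit 1 toggle above pulses
 * the PLL reset so the new values take effect.
 */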
280
281
282static int vt8623fb_open(struct fb_info *info, int user)
283{
284 struct vt8623fb_info *par = info->par;
285
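 /* Save the complete VGA state on first open so that it can be
  * restored when the last user closes the device (see
  * vt8623fb_release). */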
286 mutex_lock(&(par->open_lock));
287 if (par->ref_count == 0) {
288 memset(&(par->state), 0, sizeof(struct vgastate));
289 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
290 par->state.num_crtc = 0xA2;
291 par->state.num_seq = 0x50;
292 save_vga(&(par->state));
293 }
294
295 par->ref_count++;
296 mutex_unlock(&(par->open_lock));
297
298 return 0;
299}
300
301static int vt8623fb_release(struct fb_info *info, int user)
302{
303 struct vt8623fb_info *par = info->par;
304
305 mutex_lock(&(par->open_lock));
306 if (par->ref_count == 0) {
307 mutex_unlock(&(par->open_lock));
308 return -EINVAL;
309 }
310
311 if (par->ref_count == 1)
312 restore_vga(&(par->state));
313
314 par->ref_count--;
315 mutex_unlock(&(par->open_lock));
316
317 return 0;
318}
319
320static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
321{
322 int rv, mem, step;
323
324 /* Find appropriate format */
325 rv = svga_match_format (vt8623fb_formats, var, NULL);
326 if (rv < 0)
327 {
328 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
329 return rv;
330 }
331
 332 /* Do not allow the real resolution to exceed the virtual resolution */
333 if (var->xres > var->xres_virtual)
334 var->xres_virtual = var->xres;
335
336 if (var->yres > var->yres_virtual)
337 var->yres_virtual = var->yres;
338
339 /* Round up xres_virtual to have proper alignment of lines */
340 step = vt8623fb_formats[rv].xresstep - 1;
341 var->xres_virtual = (var->xres_virtual+step) & ~step;
342
 343 /* Check whether we have enough memory */
344 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
345 if (mem > info->screen_size)
346 {
 347 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
348 return -EINVAL;
349 }
350
351 /* Text mode is limited to 256 kB of memory */
352 if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
353 {
354 printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
355 return -EINVAL;
356 }
357
358 rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
359 if (rv < 0)
360 {
361 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
362 return rv;
363 }
364
365 /* Interlaced mode not supported */
366 if (var->vmode & FB_VMODE_INTERLACED)
367 return -EINVAL;
368
369 return 0;
370}
371
372
373static int vt8623fb_set_par(struct fb_info *info)
374{
375 u32 mode, offset_value, fetch_value, screen_size;
376 u32 bpp = info->var.bits_per_pixel;
377
378 if (bpp != 0) {
379 info->fix.ypanstep = 1;
380 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
381
382 info->flags &= ~FBINFO_MISC_TILEBLITTING;
383 info->tileops = NULL;
384
 385 /* in 4 bpp only 8-pixel-wide blits are supported, any width otherwise */
386 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
387 info->pixmap.blit_y = ~(u32)0;
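 /* blit_x/blit_y are bitmasks of supported blit sizes: bit (n - 1) set
  * means width/height n is supported, so 1 << (8 - 1) permits 8-pixel
  * blits only, while ~(u32)0 permits any size up to 32. */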
388
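 /* The CRTC offset (pitch) register counts scanlines in 8-byte units,
  * so offset_value equals line_length / 8; fetch_value is the per-line
  * fetch count with a little slack, in the units assumed by the svga
  * helper layer. */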
389 offset_value = (info->var.xres_virtual * bpp) / 64;
390 fetch_value = ((info->var.xres * bpp) / 128) + 4;
391
392 if (bpp == 4)
 393 fetch_value = (info->var.xres / 8) + 8; /* + 0 would be enough; + 8 adds slack */
394
395 screen_size = info->var.yres_virtual * info->fix.line_length;
396 } else {
397 info->fix.ypanstep = 16;
398 info->fix.line_length = 0;
399
400 info->flags |= FBINFO_MISC_TILEBLITTING;
401 info->tileops = &vt8623fb_tile_ops;
402
403 /* supports 8x16 tiles only */
404 info->pixmap.blit_x = 1 << (8 - 1);
405 info->pixmap.blit_y = 1 << (16 - 1);
406
407 offset_value = info->var.xres_virtual / 16;
408 fetch_value = (info->var.xres / 8) + 8;
409 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
410 }
411
412 info->var.xoffset = 0;
413 info->var.yoffset = 0;
414 info->var.activate = FB_ACTIVATE_NOW;
415
416 /* Unlock registers */
417 svga_wseq_mask(0x10, 0x01, 0x01);
418 svga_wcrt_mask(0x11, 0x00, 0x80);
419 svga_wcrt_mask(0x47, 0x00, 0x01);
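 /* CR11 bit 7 write-protects CRTC registers 0-7; SR10 bit 0 and CR47
  * bit 0 are assumed to be VIA-specific extended-register locks. */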
420
421 /* Device, screen and sync off */
422 svga_wseq_mask(0x01, 0x20, 0x20);
423 svga_wcrt_mask(0x36, 0x30, 0x30);
424 svga_wcrt_mask(0x17, 0x00, 0x80);
425
426 /* Set default values */
427 svga_set_default_gfx_regs();
428 svga_set_default_atc_regs();
429 svga_set_default_seq_regs();
430 svga_set_default_crt_regs();
431 svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
432 svga_wcrt_multi(vt8623_start_address_regs, 0);
433
434 svga_wcrt_multi(vt8623_offset_regs, offset_value);
435 svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
436
437 if (info->var.vmode & FB_VMODE_DOUBLE)
438 svga_wcrt_mask(0x09, 0x80, 0x80);
439 else
440 svga_wcrt_mask(0x09, 0x00, 0x80);
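 /* CR09 bit 7 is the standard VGA scan-doubling bit, implementing
  * FB_VMODE_DOUBLE. */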
441
442 svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
443 svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
 444 svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold
445 vga_wseq(NULL, 0x17, 0x1F); // FIFO depth
446 vga_wseq(NULL, 0x18, 0x4E);
447 svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?
448
449 vga_wcrt(NULL, 0x32, 0x00);
450 vga_wcrt(NULL, 0x34, 0x00);
451 vga_wcrt(NULL, 0x6A, 0x80);
452 vga_wcrt(NULL, 0x6A, 0xC0);
453
454 vga_wgfx(NULL, 0x20, 0x00);
455 vga_wgfx(NULL, 0x21, 0x00);
456 vga_wgfx(NULL, 0x22, 0x00);
457
458 /* Set SR15 according to number of bits per pixel */
459 mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
460 switch (mode) {
461 case 0:
462 pr_debug("fb%d: text mode\n", info->node);
463 svga_set_textmode_vga_regs();
464 svga_wseq_mask(0x15, 0x00, 0xFE);
465 svga_wcrt_mask(0x11, 0x60, 0x70);
466 break;
467 case 1:
468 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
469 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
470 svga_wseq_mask(0x15, 0x20, 0xFE);
471 svga_wcrt_mask(0x11, 0x00, 0x70);
472 break;
473 case 2:
474 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
475 svga_wseq_mask(0x15, 0x00, 0xFE);
476 svga_wcrt_mask(0x11, 0x00, 0x70);
477 break;
478 case 3:
479 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
480 svga_wseq_mask(0x15, 0x22, 0xFE);
481 break;
482 case 4:
483 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
484 svga_wseq_mask(0x15, 0xB6, 0xFE);
485 break;
486 case 5:
487 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
488 svga_wseq_mask(0x15, 0xAE, 0xFE);
489 break;
490 default:
491 printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
492 return (-EINVAL);
493 }
494
495 vt8623_set_pixclock(info, info->var.pixclock);
496 svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
497 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
498 1, info->node);
499
500 memset_io(info->screen_base, 0x00, screen_size);
501
502 /* Device and screen back on */
503 svga_wcrt_mask(0x17, 0x80, 0x80);
504 svga_wcrt_mask(0x36, 0x00, 0x30);
505 svga_wseq_mask(0x01, 0x00, 0x20);
506
507 return 0;
508}
509
510
511static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
512 u_int transp, struct fb_info *fb)
513{
514 switch (fb->var.bits_per_pixel) {
515 case 0:
516 case 4:
517 if (regno >= 16)
518 return -EINVAL;
519
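 /* Colour components arrive as 16-bit values while the VGA DAC takes
  * 6 bits per component, hence the >> 10. */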
520 outb(0x0F, VGA_PEL_MSK);
521 outb(regno, VGA_PEL_IW);
522 outb(red >> 10, VGA_PEL_D);
523 outb(green >> 10, VGA_PEL_D);
524 outb(blue >> 10, VGA_PEL_D);
525 break;
526 case 8:
527 if (regno >= 256)
528 return -EINVAL;
529
530 outb(0xFF, VGA_PEL_MSK);
531 outb(regno, VGA_PEL_IW);
532 outb(red >> 10, VGA_PEL_D);
533 outb(green >> 10, VGA_PEL_D);
534 outb(blue >> 10, VGA_PEL_D);
535 break;
536 case 16:
537 if (regno >= 16)
538 return 0;
539
540 if (fb->var.green.length == 5)
541 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
542 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
543 else if (fb->var.green.length == 6)
544 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
545 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
546 else
547 return -EINVAL;
548 break;
549 case 24:
550 case 32:
551 if (regno >= 16)
552 return 0;
553
554 /* ((transp & 0xFF00) << 16) */
555 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
556 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
557 break;
558 default:
559 return -EINVAL;
560 }
561
562 return 0;
563}
564
565
566static int vt8623fb_blank(int blank_mode, struct fb_info *info)
567{
568 switch (blank_mode) {
569 case FB_BLANK_UNBLANK:
570 pr_debug("fb%d: unblank\n", info->node);
571 svga_wcrt_mask(0x36, 0x00, 0x30);
572 svga_wseq_mask(0x01, 0x00, 0x20);
573 break;
574 case FB_BLANK_NORMAL:
575 pr_debug("fb%d: blank\n", info->node);
576 svga_wcrt_mask(0x36, 0x00, 0x30);
577 svga_wseq_mask(0x01, 0x20, 0x20);
578 break;
579 case FB_BLANK_HSYNC_SUSPEND:
580 pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
581 svga_wcrt_mask(0x36, 0x10, 0x30);
582 svga_wseq_mask(0x01, 0x20, 0x20);
583 break;
584 case FB_BLANK_VSYNC_SUSPEND:
585 pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
586 svga_wcrt_mask(0x36, 0x20, 0x30);
587 svga_wseq_mask(0x01, 0x20, 0x20);
588 break;
589 case FB_BLANK_POWERDOWN:
590 pr_debug("fb%d: DPMS off (no sync)\n", info->node);
591 svga_wcrt_mask(0x36, 0x30, 0x30);
592 svga_wseq_mask(0x01, 0x20, 0x20);
593 break;
594 }
595
596 return 0;
597}
598
599
600static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
601{
602 unsigned int offset;
603
604 /* Calculate the offset */
605 if (var->bits_per_pixel == 0) {
606 offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
607 offset = offset >> 3;
608 } else {
609 offset = (var->yoffset * info->fix.line_length) +
610 (var->xoffset * var->bits_per_pixel / 8);
611 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
612 }
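 /* The start address is programmed in character cells (8 pixels) in
  * text mode; in graphics modes the shift reflects the register
  * granularity assumed here: 32-bit units for 4 bpp planar modes and
  * 16-bit units otherwise. */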
613
614 /* Set the offset */
615 svga_wcrt_multi(vt8623_start_address_regs, offset);
616
617 return 0;
618}
619
620
621/* ------------------------------------------------------------------------- */
622
623
624/* Frame buffer operations */
625
626static struct fb_ops vt8623fb_ops = {
627 .owner = THIS_MODULE,
628 .fb_open = vt8623fb_open,
629 .fb_release = vt8623fb_release,
630 .fb_check_var = vt8623fb_check_var,
631 .fb_set_par = vt8623fb_set_par,
632 .fb_setcolreg = vt8623fb_setcolreg,
633 .fb_blank = vt8623fb_blank,
634 .fb_pan_display = vt8623fb_pan_display,
635 .fb_fillrect = vt8623fb_fillrect,
636 .fb_copyarea = cfb_copyarea,
637 .fb_imageblit = vt8623fb_imageblit,
638 .fb_get_caps = svga_get_caps,
639};
640
641
642/* PCI probe */
643
644static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
645{
646 struct fb_info *info;
647 struct vt8623fb_info *par;
648 unsigned int memsize1, memsize2;
649 int rc;
650
651 /* Ignore secondary VGA device because there is no VGA arbitration */
652 if (! svga_primary_device(dev)) {
653 dev_info(&(dev->dev), "ignoring secondary device\n");
654 return -ENODEV;
655 }
656
657 /* Allocate and fill driver data structure */
658 info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
659 if (! info) {
660 dev_err(&(dev->dev), "cannot allocate memory\n");
661 return -ENOMEM;
662 }
663
664 par = info->par;
665 mutex_init(&par->open_lock);
666
667 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
668 info->fbops = &vt8623fb_ops;
669
670 /* Prepare PCI device */
671
672 rc = pci_enable_device(dev);
673 if (rc < 0) {
674 dev_err(&(dev->dev), "cannot enable PCI device\n");
675 goto err_enable_device;
676 }
677
678 rc = pci_request_regions(dev, "vt8623fb");
679 if (rc < 0) {
680 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
681 goto err_request_regions;
682 }
683
684 info->fix.smem_start = pci_resource_start(dev, 0);
685 info->fix.smem_len = pci_resource_len(dev, 0);
686 info->fix.mmio_start = pci_resource_start(dev, 1);
687 info->fix.mmio_len = pci_resource_len(dev, 1);
688
689 /* Map physical IO memory address into kernel space */
690 info->screen_base = pci_iomap(dev, 0, 0);
691 if (! info->screen_base) {
692 rc = -ENOMEM;
693 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
694 goto err_iomap_1;
695 }
696
697 par->mmio_base = pci_iomap(dev, 1, 0);
698 if (! par->mmio_base) {
699 rc = -ENOMEM;
700 dev_err(&(dev->dev), "iomap for MMIO failed\n");
701 goto err_iomap_2;
702 }
703
 704 /* Find out how much physical memory there is on the card */
705 memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
706 memsize2 = vga_rseq(NULL, 0x39) << 2;
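 /* SR34 and SR39 are VIA-specific registers assumed to report the
  * amount of video RAM in megabytes in two redundant encodings; they
  * must agree and fall within 16..64 MB, otherwise a safe 16 MB is
  * assumed below. */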
707
708 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
709 info->screen_size = memsize1 << 20;
710 else {
 711 dev_err(&(dev->dev), "memory size detection failed (%x %x), assuming 16 MB\n", memsize1, memsize2);
712 info->screen_size = 16 << 20;
713 }
714
715 info->fix.smem_len = info->screen_size;
716 strcpy(info->fix.id, "VIA VT8623");
717 info->fix.type = FB_TYPE_PACKED_PIXELS;
718 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
719 info->fix.ypanstep = 0;
720 info->fix.accel = FB_ACCEL_NONE;
721 info->pseudo_palette = (void*)par->pseudo_palette;
722
723 /* Prepare startup mode */
724
725 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
726 if (! ((rc == 1) || (rc == 2))) {
727 rc = -EINVAL;
728 dev_err(&(dev->dev), "mode %s not found\n", mode);
729 goto err_find_mode;
730 }
731
732 rc = fb_alloc_cmap(&info->cmap, 256, 0);
733 if (rc < 0) {
734 dev_err(&(dev->dev), "cannot allocate colormap\n");
735 goto err_alloc_cmap;
736 }
737
738 rc = register_framebuffer(info);
739 if (rc < 0) {
 740 dev_err(&(dev->dev), "cannot register framebuffer\n");
741 goto err_reg_fb;
742 }
743
744 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
745 pci_name(dev), info->fix.smem_len >> 20);
746
747 /* Record a reference to the driver data */
748 pci_set_drvdata(dev, info);
749
750#ifdef CONFIG_MTRR
751 if (mtrr) {
752 par->mtrr_reg = -1;
753 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
754 }
755#endif
756
757 return 0;
758
759 /* Error handling */
760err_reg_fb:
761 fb_dealloc_cmap(&info->cmap);
762err_alloc_cmap:
763err_find_mode:
764 pci_iounmap(dev, par->mmio_base);
765err_iomap_2:
766 pci_iounmap(dev, info->screen_base);
767err_iomap_1:
768 pci_release_regions(dev);
769err_request_regions:
770/* pci_disable_device(dev); */
771err_enable_device:
772 framebuffer_release(info);
773 return rc;
774}
775
776/* PCI remove */
777
778static void __devexit vt8623_pci_remove(struct pci_dev *dev)
779{
 780 struct fb_info *info = pci_get_drvdata(dev);
 781
 782 if (info) {
 783 struct vt8623fb_info *par = info->par;
784#ifdef CONFIG_MTRR
785 if (par->mtrr_reg >= 0) {
786 mtrr_del(par->mtrr_reg, 0, 0);
787 par->mtrr_reg = -1;
788 }
789#endif
790
791 unregister_framebuffer(info);
792 fb_dealloc_cmap(&info->cmap);
793
794 pci_iounmap(dev, info->screen_base);
795 pci_iounmap(dev, par->mmio_base);
796 pci_release_regions(dev);
797/* pci_disable_device(dev); */
798
799 pci_set_drvdata(dev, NULL);
800 framebuffer_release(info);
801 }
802}
803
804
805#ifdef CONFIG_PM
806/* PCI suspend */
807
808static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
809{
810 struct fb_info *info = pci_get_drvdata(dev);
811 struct vt8623fb_info *par = info->par;
812
813 dev_info(&(dev->dev), "suspend\n");
814
815 acquire_console_sem();
816 mutex_lock(&(par->open_lock));
817
818 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
819 mutex_unlock(&(par->open_lock));
820 release_console_sem();
821 return 0;
822 }
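 /* For a freeze (hibernation snapshot) the hardware keeps running and
  * nothing needs to be reprogrammed; the same applies when nobody has
  * the framebuffer open. */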
823
824 fb_set_suspend(info, 1);
825
826 pci_save_state(dev);
827 pci_disable_device(dev);
828 pci_set_power_state(dev, pci_choose_state(dev, state));
829
830 mutex_unlock(&(par->open_lock));
831 release_console_sem();
832
833 return 0;
834}
835
836
837/* PCI resume */
838
839static int vt8623_pci_resume(struct pci_dev* dev)
840{
841 struct fb_info *info = pci_get_drvdata(dev);
842 struct vt8623fb_info *par = info->par;
843
844 dev_info(&(dev->dev), "resume\n");
845
846 acquire_console_sem();
847 mutex_lock(&(par->open_lock));
848
849 if (par->ref_count == 0) {
850 mutex_unlock(&(par->open_lock));
851 release_console_sem();
852 return 0;
853 }
854
855 pci_set_power_state(dev, PCI_D0);
856 pci_restore_state(dev);
857
858 if (pci_enable_device(dev))
859 goto fail;
860
861 pci_set_master(dev);
862
863 vt8623fb_set_par(info);
864 fb_set_suspend(info, 0);
865
 866fail:
 867 mutex_unlock(&(par->open_lock));
 868 release_console_sem();
869
870 return 0;
871}
872#else
873#define vt8623_pci_suspend NULL
874#define vt8623_pci_resume NULL
875#endif /* CONFIG_PM */
876
877/* List of boards that we are trying to support */
878
879static struct pci_device_id vt8623_devices[] __devinitdata = {
880 {PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
881 {0, 0, 0, 0, 0, 0, 0}
882};
883
884MODULE_DEVICE_TABLE(pci, vt8623_devices);
885
886static struct pci_driver vt8623fb_pci_driver = {
887 .name = "vt8623fb",
888 .id_table = vt8623_devices,
889 .probe = vt8623_pci_probe,
890 .remove = __devexit_p(vt8623_pci_remove),
891 .suspend = vt8623_pci_suspend,
892 .resume = vt8623_pci_resume,
893};
894
895/* Cleanup */
896
897static void __exit vt8623fb_cleanup(void)
898{
899 pr_debug("vt8623fb: cleaning up\n");
900 pci_unregister_driver(&vt8623fb_pci_driver);
901}
902
903/* Driver Initialisation */
904
905int __init vt8623fb_init(void)
906{
907
908#ifndef MODULE
909 char *option = NULL;
910
911 if (fb_get_options("vt8623fb", &option))
912 return -ENODEV;
913
914 if (option && *option)
915 mode = option;
916#endif
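 /* When built in, the initial mode can be overridden with a
  * video=vt8623fb:<mode> option on the kernel command line. */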
917
918 pr_debug("vt8623fb: initializing\n");
919 return pci_register_driver(&vt8623fb_pci_driver);
920}
921
922/* ------------------------------------------------------------------------- */
923
924/* Modularization */
925
926module_init(vt8623fb_init);
927module_exit(vt8623fb_cleanup);
diff --git a/fs/Kconfig b/fs/Kconfig
index 4622dabb2253..0fa0c1193e81 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -724,10 +724,6 @@ config FAT_FS
724 file system and use GNU tar's M option. GNU tar is a program 724 file system and use GNU tar's M option. GNU tar is a program
725 available for Unix and DOS ("man tar" or "info tar"). 725 available for Unix and DOS ("man tar" or "info tar").
726 726
727 It is now also becoming possible to read and write compressed FAT
728 file systems; read <file:Documentation/filesystems/fat_cvf.txt> for
729 details.
730
731 The FAT support will enlarge your kernel by about 37 KB. If unsure, 727 The FAT support will enlarge your kernel by about 37 KB. If unsure,
732 say Y. 728 say Y.
733 729
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 4aa8079e71be..c8796906f584 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -628,11 +628,7 @@ static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned
628 return err; 628 return err;
629 } 629 }
630 if (to < PAGE_CACHE_SIZE) { 630 if (to < PAGE_CACHE_SIZE) {
631 char *kaddr = kmap_atomic(page, KM_USER0); 631 zero_user_page(page, to, PAGE_CACHE_SIZE - to, KM_USER0);
632
633 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
634 flush_dcache_page(page);
635 kunmap_atomic(kaddr, KM_USER0);
636 if (size > offset + to) { 632 if (size > offset + to) {
637 if (size < offset + PAGE_CACHE_SIZE) 633 if (size < offset + PAGE_CACHE_SIZE)
638 tmp = size & ~PAGE_CACHE_MASK; 634 tmp = size & ~PAGE_CACHE_MASK;
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index cf83e5d63512..73ce561f3ea0 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -22,6 +22,7 @@ kafs-objs := \
22 vlclient.o \ 22 vlclient.o \
23 vlocation.o \ 23 vlocation.o \
24 vnode.o \ 24 vnode.o \
25 volume.o 25 volume.o \
26 write.o
26 27
27obj-$(CONFIG_AFS_FS) := kafs.o 28obj-$(CONFIG_AFS_FS) := kafs.o
diff --git a/fs/afs/afs_fs.h b/fs/afs/afs_fs.h
index 89e0d1650a72..2198006d2d03 100644
--- a/fs/afs/afs_fs.h
+++ b/fs/afs/afs_fs.h
@@ -18,6 +18,8 @@
18enum AFS_FS_Operations { 18enum AFS_FS_Operations {
19 FSFETCHDATA = 130, /* AFS Fetch file data */ 19 FSFETCHDATA = 130, /* AFS Fetch file data */
20 FSFETCHSTATUS = 132, /* AFS Fetch file status */ 20 FSFETCHSTATUS = 132, /* AFS Fetch file status */
21 FSSTOREDATA = 133, /* AFS Store file data */
22 FSSTORESTATUS = 135, /* AFS Store file status */
21 FSREMOVEFILE = 136, /* AFS Remove a file */ 23 FSREMOVEFILE = 136, /* AFS Remove a file */
22 FSCREATEFILE = 137, /* AFS Create a file */ 24 FSCREATEFILE = 137, /* AFS Create a file */
23 FSRENAME = 138, /* AFS Rename or move a file or directory */ 25 FSRENAME = 138, /* AFS Rename or move a file or directory */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 9bdbf36a9aa9..f64e40fefc02 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -44,7 +44,7 @@ void afs_init_callback_state(struct afs_server *server)
44 while (!RB_EMPTY_ROOT(&server->cb_promises)) { 44 while (!RB_EMPTY_ROOT(&server->cb_promises)) {
45 vnode = rb_entry(server->cb_promises.rb_node, 45 vnode = rb_entry(server->cb_promises.rb_node,
46 struct afs_vnode, cb_promise); 46 struct afs_vnode, cb_promise);
47 _debug("UNPROMISE { vid=%x vn=%u uq=%u}", 47 _debug("UNPROMISE { vid=%x:%u uq=%u}",
48 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 48 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
49 rb_erase(&vnode->cb_promise, &server->cb_promises); 49 rb_erase(&vnode->cb_promise, &server->cb_promises);
50 vnode->cb_promised = false; 50 vnode->cb_promised = false;
@@ -84,11 +84,8 @@ void afs_broken_callback_work(struct work_struct *work)
84 84
85 /* if the vnode's data version number changed then its contents 85 /* if the vnode's data version number changed then its contents
86 * are different */ 86 * are different */
87 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { 87 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
88 _debug("zap data {%x:%u}", 88 afs_zap_data(vnode);
89 vnode->fid.vid, vnode->fid.vnode);
90 invalidate_remote_inode(&vnode->vfs_inode);
91 }
92 } 89 }
93 90
94out: 91out:
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0c1e902f17a3..2fb31276196b 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -55,7 +55,8 @@ const struct inode_operations afs_dir_inode_operations = {
55 .rmdir = afs_rmdir, 55 .rmdir = afs_rmdir,
56 .rename = afs_rename, 56 .rename = afs_rename,
57 .permission = afs_permission, 57 .permission = afs_permission,
58 .getattr = afs_inode_getattr, 58 .getattr = afs_getattr,
59 .setattr = afs_setattr,
59}; 60};
60 61
61static struct dentry_operations afs_fs_dentry_operations = { 62static struct dentry_operations afs_fs_dentry_operations = {
@@ -491,7 +492,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
491 492
492 vnode = AFS_FS_I(dir); 493 vnode = AFS_FS_I(dir);
493 494
494 _enter("{%x:%d},%p{%s},", 495 _enter("{%x:%u},%p{%s},",
495 vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name); 496 vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name);
496 497
497 ASSERTCMP(dentry->d_inode, ==, NULL); 498 ASSERTCMP(dentry->d_inode, ==, NULL);
@@ -731,7 +732,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
731 732
732 dvnode = AFS_FS_I(dir); 733 dvnode = AFS_FS_I(dir);
733 734
734 _enter("{%x:%d},{%s},%o", 735 _enter("{%x:%u},{%s},%o",
735 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); 736 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
736 737
737 ret = -ENAMETOOLONG; 738 ret = -ENAMETOOLONG;
@@ -796,7 +797,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
796 797
797 dvnode = AFS_FS_I(dir); 798 dvnode = AFS_FS_I(dir);
798 799
799 _enter("{%x:%d},{%s}", 800 _enter("{%x:%u},{%s}",
800 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); 801 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
801 802
802 ret = -ENAMETOOLONG; 803 ret = -ENAMETOOLONG;
@@ -842,7 +843,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
842 843
843 dvnode = AFS_FS_I(dir); 844 dvnode = AFS_FS_I(dir);
844 845
845 _enter("{%x:%d},{%s}", 846 _enter("{%x:%u},{%s}",
846 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); 847 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
847 848
848 ret = -ENAMETOOLONG; 849 ret = -ENAMETOOLONG;
@@ -916,7 +917,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
916 917
917 dvnode = AFS_FS_I(dir); 918 dvnode = AFS_FS_I(dir);
918 919
919 _enter("{%x:%d},{%s},%o,", 920 _enter("{%x:%u},{%s},%o,",
920 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode); 921 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
921 922
922 ret = -ENAMETOOLONG; 923 ret = -ENAMETOOLONG;
@@ -983,7 +984,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
983 vnode = AFS_FS_I(from->d_inode); 984 vnode = AFS_FS_I(from->d_inode);
984 dvnode = AFS_FS_I(dir); 985 dvnode = AFS_FS_I(dir);
985 986
986 _enter("{%x:%d},{%x:%d},{%s}", 987 _enter("{%x:%u},{%x:%u},{%s}",
987 vnode->fid.vid, vnode->fid.vnode, 988 vnode->fid.vid, vnode->fid.vnode,
988 dvnode->fid.vid, dvnode->fid.vnode, 989 dvnode->fid.vid, dvnode->fid.vnode,
989 dentry->d_name.name); 990 dentry->d_name.name);
@@ -1032,7 +1033,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
1032 1033
1033 dvnode = AFS_FS_I(dir); 1034 dvnode = AFS_FS_I(dir);
1034 1035
1035 _enter("{%x:%d},{%s},%s", 1036 _enter("{%x:%u},{%s},%s",
1036 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, 1037 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name,
1037 content); 1038 content);
1038 1039
@@ -1104,7 +1105,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1104 orig_dvnode = AFS_FS_I(old_dir); 1105 orig_dvnode = AFS_FS_I(old_dir);
1105 new_dvnode = AFS_FS_I(new_dir); 1106 new_dvnode = AFS_FS_I(new_dir);
1106 1107
1107 _enter("{%x:%d},{%x:%d},{%x:%d},{%s}", 1108 _enter("{%x:%u},{%x:%u},{%x:%u},{%s}",
1108 orig_dvnode->fid.vid, orig_dvnode->fid.vnode, 1109 orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
1109 vnode->fid.vid, vnode->fid.vnode, 1110 vnode->fid.vid, vnode->fid.vnode,
1110 new_dvnode->fid.vid, new_dvnode->fid.vnode, 1111 new_dvnode->fid.vid, new_dvnode->fid.vnode,
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ae256498f4f7..3e25795e5a42 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -15,32 +15,43 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/pagemap.h> 17#include <linux/pagemap.h>
18#include <linux/writeback.h>
18#include "internal.h" 19#include "internal.h"
19 20
20static int afs_file_readpage(struct file *file, struct page *page); 21static int afs_readpage(struct file *file, struct page *page);
21static void afs_file_invalidatepage(struct page *page, unsigned long offset); 22static void afs_invalidatepage(struct page *page, unsigned long offset);
22static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); 23static int afs_releasepage(struct page *page, gfp_t gfp_flags);
24static int afs_launder_page(struct page *page);
23 25
24const struct file_operations afs_file_operations = { 26const struct file_operations afs_file_operations = {
25 .open = afs_open, 27 .open = afs_open,
26 .release = afs_release, 28 .release = afs_release,
27 .llseek = generic_file_llseek, 29 .llseek = generic_file_llseek,
28 .read = do_sync_read, 30 .read = do_sync_read,
31 .write = do_sync_write,
29 .aio_read = generic_file_aio_read, 32 .aio_read = generic_file_aio_read,
33 .aio_write = afs_file_write,
30 .mmap = generic_file_readonly_mmap, 34 .mmap = generic_file_readonly_mmap,
31 .sendfile = generic_file_sendfile, 35 .sendfile = generic_file_sendfile,
36 .fsync = afs_fsync,
32}; 37};
33 38
34const struct inode_operations afs_file_inode_operations = { 39const struct inode_operations afs_file_inode_operations = {
35 .getattr = afs_inode_getattr, 40 .getattr = afs_getattr,
41 .setattr = afs_setattr,
36 .permission = afs_permission, 42 .permission = afs_permission,
37}; 43};
38 44
39const struct address_space_operations afs_fs_aops = { 45const struct address_space_operations afs_fs_aops = {
40 .readpage = afs_file_readpage, 46 .readpage = afs_readpage,
41 .set_page_dirty = __set_page_dirty_nobuffers, 47 .set_page_dirty = afs_set_page_dirty,
42 .releasepage = afs_file_releasepage, 48 .launder_page = afs_launder_page,
43 .invalidatepage = afs_file_invalidatepage, 49 .releasepage = afs_releasepage,
50 .invalidatepage = afs_invalidatepage,
51 .prepare_write = afs_prepare_write,
52 .commit_write = afs_commit_write,
53 .writepage = afs_writepage,
54 .writepages = afs_writepages,
44}; 55};
45 56
46/* 57/*
@@ -52,7 +63,7 @@ int afs_open(struct inode *inode, struct file *file)
52 struct key *key; 63 struct key *key;
53 int ret; 64 int ret;
54 65
55 _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode); 66 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
56 67
57 key = afs_request_key(vnode->volume->cell); 68 key = afs_request_key(vnode->volume->cell);
58 if (IS_ERR(key)) { 69 if (IS_ERR(key)) {
@@ -78,7 +89,7 @@ int afs_release(struct inode *inode, struct file *file)
78{ 89{
79 struct afs_vnode *vnode = AFS_FS_I(inode); 90 struct afs_vnode *vnode = AFS_FS_I(inode);
80 91
81 _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode); 92 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
82 93
83 key_put(file->private_data); 94 key_put(file->private_data);
84 _leave(" = 0"); 95 _leave(" = 0");
@@ -89,10 +100,10 @@ int afs_release(struct inode *inode, struct file *file)
89 * deal with notification that a page was read from the cache 100 * deal with notification that a page was read from the cache
90 */ 101 */
91#ifdef AFS_CACHING_SUPPORT 102#ifdef AFS_CACHING_SUPPORT
92static void afs_file_readpage_read_complete(void *cookie_data, 103static void afs_readpage_read_complete(void *cookie_data,
93 struct page *page, 104 struct page *page,
94 void *data, 105 void *data,
95 int error) 106 int error)
96{ 107{
97 _enter("%p,%p,%p,%d", cookie_data, page, data, error); 108 _enter("%p,%p,%p,%d", cookie_data, page, data, error);
98 109
@@ -109,10 +120,10 @@ static void afs_file_readpage_read_complete(void *cookie_data,
109 * deal with notification that a page was written to the cache 120 * deal with notification that a page was written to the cache
110 */ 121 */
111#ifdef AFS_CACHING_SUPPORT 122#ifdef AFS_CACHING_SUPPORT
112static void afs_file_readpage_write_complete(void *cookie_data, 123static void afs_readpage_write_complete(void *cookie_data,
113 struct page *page, 124 struct page *page,
114 void *data, 125 void *data,
115 int error) 126 int error)
116{ 127{
117 _enter("%p,%p,%p,%d", cookie_data, page, data, error); 128 _enter("%p,%p,%p,%d", cookie_data, page, data, error);
118 129
@@ -121,9 +132,9 @@ static void afs_file_readpage_write_complete(void *cookie_data,
121#endif 132#endif
122 133
123/* 134/*
124 * AFS read page from file (or symlink) 135 * AFS read page from file, directory or symlink
125 */ 136 */
126static int afs_file_readpage(struct file *file, struct page *page) 137static int afs_readpage(struct file *file, struct page *page)
127{ 138{
128 struct afs_vnode *vnode; 139 struct afs_vnode *vnode;
129 struct inode *inode; 140 struct inode *inode;
@@ -219,39 +230,17 @@ error:
219} 230}
220 231
221/* 232/*
222 * get a page cookie for the specified page
223 */
224#ifdef AFS_CACHING_SUPPORT
225int afs_cache_get_page_cookie(struct page *page,
226 struct cachefs_page **_page_cookie)
227{
228 int ret;
229
230 _enter("");
231 ret = cachefs_page_get_private(page,_page_cookie, GFP_NOIO);
232
233 _leave(" = %d", ret);
234 return ret;
235}
236#endif
237
238/*
239 * invalidate part or all of a page 233 * invalidate part or all of a page
240 */ 234 */
241static void afs_file_invalidatepage(struct page *page, unsigned long offset) 235static void afs_invalidatepage(struct page *page, unsigned long offset)
242{ 236{
243 int ret = 1; 237 int ret = 1;
244 238
245 _enter("{%lu},%lu", page->index, offset); 239 kenter("{%lu},%lu", page->index, offset);
246 240
247 BUG_ON(!PageLocked(page)); 241 BUG_ON(!PageLocked(page));
248 242
249 if (PagePrivate(page)) { 243 if (PagePrivate(page)) {
250#ifdef AFS_CACHING_SUPPORT
251 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
252 cachefs_uncache_page(vnode->cache,page);
253#endif
254
255 /* We release buffers only if the entire page is being 244 /* We release buffers only if the entire page is being
256 * invalidated. 245 * invalidated.
257 * The get_block cached value has been unconditionally 246 * The get_block cached value has been unconditionally
@@ -272,25 +261,33 @@ static void afs_file_invalidatepage(struct page *page, unsigned long offset)
272} 261}
273 262
274/* 263/*
264 * write back a dirty page
265 */
266static int afs_launder_page(struct page *page)
267{
268 _enter("{%lu}", page->index);
269
270 return 0;
271}
272
273/*
275 * release a page and cleanup its private data 274 * release a page and cleanup its private data
276 */ 275 */
277static int afs_file_releasepage(struct page *page, gfp_t gfp_flags) 276static int afs_releasepage(struct page *page, gfp_t gfp_flags)
278{ 277{
279 struct cachefs_page *pageio; 278 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
279 struct afs_writeback *wb;
280 280
281 _enter("{%lu},%x", page->index, gfp_flags); 281 _enter("{{%x:%u}[%lu],%lx},%x",
282 vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
283 gfp_flags);
282 284
283 if (PagePrivate(page)) { 285 if (PagePrivate(page)) {
284#ifdef AFS_CACHING_SUPPORT 286 wb = (struct afs_writeback *) page_private(page);
285 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 287 ASSERT(wb != NULL);
286 cachefs_uncache_page(vnode->cache, page);
287#endif
288
289 pageio = (struct cachefs_page *) page_private(page);
290 set_page_private(page, 0); 288 set_page_private(page, 0);
291 ClearPagePrivate(page); 289 ClearPagePrivate(page);
292 290 afs_put_writeback(wb);
293 kfree(pageio);
294 } 291 }
295 292
296 _leave(" = 0"); 293 _leave(" = 0");
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index e54e6c2ad343..025b1903d9e1 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -33,8 +33,10 @@ static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
33 */ 33 */
34static void xdr_decode_AFSFetchStatus(const __be32 **_bp, 34static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
35 struct afs_file_status *status, 35 struct afs_file_status *status,
36 struct afs_vnode *vnode) 36 struct afs_vnode *vnode,
37 afs_dataversion_t *store_version)
37{ 38{
39 afs_dataversion_t expected_version;
38 const __be32 *bp = *_bp; 40 const __be32 *bp = *_bp;
39 umode_t mode; 41 umode_t mode;
40 u64 data_version, size; 42 u64 data_version, size;
@@ -101,7 +103,11 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
101 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; 103 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
102 } 104 }
103 105
104 if (status->data_version != data_version) { 106 expected_version = status->data_version;
107 if (store_version)
108 expected_version = *store_version;
109
110 if (expected_version != data_version) {
105 status->data_version = data_version; 111 status->data_version = data_version;
106 if (vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) { 112 if (vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
107 _debug("vnode modified %llx on {%x:%u}", 113 _debug("vnode modified %llx on {%x:%u}",
@@ -110,6 +116,8 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
110 set_bit(AFS_VNODE_MODIFIED, &vnode->flags); 116 set_bit(AFS_VNODE_MODIFIED, &vnode->flags);
111 set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); 117 set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
112 } 118 }
119 } else if (store_version) {
120 status->data_version = data_version;
113 } 121 }
114} 122}
115 123
@@ -156,6 +164,44 @@ static void xdr_decode_AFSVolSync(const __be32 **_bp,
156} 164}
157 165
158/* 166/*
167 * encode the requested attributes into an AFSStoreStatus block
168 */
169static void xdr_encode_AFS_StoreStatus(__be32 **_bp, struct iattr *attr)
170{
171 __be32 *bp = *_bp;
172 u32 mask = 0, mtime = 0, owner = 0, group = 0, mode = 0;
173
174 mask = 0;
175 if (attr->ia_valid & ATTR_MTIME) {
176 mask |= AFS_SET_MTIME;
177 mtime = attr->ia_mtime.tv_sec;
178 }
179
180 if (attr->ia_valid & ATTR_UID) {
181 mask |= AFS_SET_OWNER;
182 owner = attr->ia_uid;
183 }
184
185 if (attr->ia_valid & ATTR_GID) {
186 mask |= AFS_SET_GROUP;
187 group = attr->ia_gid;
188 }
189
190 if (attr->ia_valid & ATTR_MODE) {
191 mask |= AFS_SET_MODE;
192 mode = attr->ia_mode & S_IALLUGO;
193 }
194
195 *bp++ = htonl(mask);
196 *bp++ = htonl(mtime);
197 *bp++ = htonl(owner);
198 *bp++ = htonl(group);
199 *bp++ = htonl(mode);
200 *bp++ = 0; /* segment size */
201 *_bp = bp;
202}
203
204/*
159 * deliver reply data to an FS.FetchStatus 205 * deliver reply data to an FS.FetchStatus
160 */ 206 */
161static int afs_deliver_fs_fetch_status(struct afs_call *call, 207static int afs_deliver_fs_fetch_status(struct afs_call *call,
@@ -175,7 +221,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call,
175 221
176 /* unmarshall the reply once we've received all of it */ 222 /* unmarshall the reply once we've received all of it */
177 bp = call->buffer; 223 bp = call->buffer;
178 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 224 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
179 xdr_decode_AFSCallBack(&bp, vnode); 225 xdr_decode_AFSCallBack(&bp, vnode);
180 if (call->reply2) 226 if (call->reply2)
181 xdr_decode_AFSVolSync(&bp, call->reply2); 227 xdr_decode_AFSVolSync(&bp, call->reply2);
@@ -206,7 +252,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
206 struct afs_call *call; 252 struct afs_call *call;
207 __be32 *bp; 253 __be32 *bp;
208 254
209 _enter(",%x,{%x:%d},,", 255 _enter(",%x,{%x:%u},,",
210 key_serial(key), vnode->fid.vid, vnode->fid.vnode); 256 key_serial(key), vnode->fid.vid, vnode->fid.vnode);
211 257
212 call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4); 258 call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
@@ -265,25 +311,20 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
265 call->offset = 0; 311 call->offset = 0;
266 call->unmarshall++; 312 call->unmarshall++;
267 313
268 if (call->count < PAGE_SIZE) {
269 page = call->reply3;
270 buffer = kmap_atomic(page, KM_USER0);
271 memset(buffer + PAGE_SIZE - call->count, 0,
272 call->count);
273 kunmap_atomic(buffer, KM_USER0);
274 }
275
276 /* extract the returned data */ 314 /* extract the returned data */
277 case 2: 315 case 2:
278 _debug("extract data"); 316 _debug("extract data");
279 page = call->reply3; 317 if (call->count > 0) {
280 buffer = kmap_atomic(page, KM_USER0); 318 page = call->reply3;
281 ret = afs_extract_data(call, skb, last, buffer, call->count); 319 buffer = kmap_atomic(page, KM_USER0);
282 kunmap_atomic(buffer, KM_USER0); 320 ret = afs_extract_data(call, skb, last, buffer,
283 switch (ret) { 321 call->count);
284 case 0: break; 322 kunmap_atomic(buffer, KM_USER0);
285 case -EAGAIN: return 0; 323 switch (ret) {
286 default: return ret; 324 case 0: break;
325 case -EAGAIN: return 0;
326 default: return ret;
327 }
287 } 328 }
288 329
289 call->offset = 0; 330 call->offset = 0;
@@ -300,7 +341,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
300 } 341 }
301 342
302 bp = call->buffer; 343 bp = call->buffer;
303 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 344 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
304 xdr_decode_AFSCallBack(&bp, vnode); 345 xdr_decode_AFSCallBack(&bp, vnode);
305 if (call->reply2) 346 if (call->reply2)
306 xdr_decode_AFSVolSync(&bp, call->reply2); 347 xdr_decode_AFSVolSync(&bp, call->reply2);
@@ -318,6 +359,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
318 if (!last) 359 if (!last)
319 return 0; 360 return 0;
320 361
362 if (call->count < PAGE_SIZE) {
363 _debug("clear");
364 page = call->reply3;
365 buffer = kmap_atomic(page, KM_USER0);
366 memset(buffer + call->count, 0, PAGE_SIZE - call->count);
367 kunmap_atomic(buffer, KM_USER0);
368 }
369
321 _leave(" = 0 [done]"); 370 _leave(" = 0 [done]");
322 return 0; 371 return 0;
323} 372}
@@ -476,8 +525,8 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call,
476 /* unmarshall the reply once we've received all of it */ 525 /* unmarshall the reply once we've received all of it */
477 bp = call->buffer; 526 bp = call->buffer;
478 xdr_decode_AFSFid(&bp, call->reply2); 527 xdr_decode_AFSFid(&bp, call->reply2);
479 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL); 528 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL);
480 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 529 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
481 xdr_decode_AFSCallBack_raw(&bp, call->reply4); 530 xdr_decode_AFSCallBack_raw(&bp, call->reply4);
482 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ 531 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
483 532
@@ -574,7 +623,7 @@ static int afs_deliver_fs_remove(struct afs_call *call,
574 623
575 /* unmarshall the reply once we've received all of it */ 624 /* unmarshall the reply once we've received all of it */
576 bp = call->buffer; 625 bp = call->buffer;
577 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 626 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
578 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ 627 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
579 628
580 _leave(" = 0 [done]"); 629 _leave(" = 0 [done]");
@@ -657,8 +706,8 @@ static int afs_deliver_fs_link(struct afs_call *call,
657 706
658 /* unmarshall the reply once we've received all of it */ 707 /* unmarshall the reply once we've received all of it */
659 bp = call->buffer; 708 bp = call->buffer;
660 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 709 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
661 xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode); 710 xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode, NULL);
662 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ 711 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
663 712
664 _leave(" = 0 [done]"); 713 _leave(" = 0 [done]");
@@ -746,8 +795,8 @@ static int afs_deliver_fs_symlink(struct afs_call *call,
746 /* unmarshall the reply once we've received all of it */ 795 /* unmarshall the reply once we've received all of it */
747 bp = call->buffer; 796 bp = call->buffer;
748 xdr_decode_AFSFid(&bp, call->reply2); 797 xdr_decode_AFSFid(&bp, call->reply2);
749 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL); 798 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL);
750 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode); 799 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
751 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ 800 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
752 801
753 _leave(" = 0 [done]"); 802 _leave(" = 0 [done]");
@@ -852,9 +901,10 @@ static int afs_deliver_fs_rename(struct afs_call *call,
852 901
853 /* unmarshall the reply once we've received all of it */ 902 /* unmarshall the reply once we've received all of it */
854 bp = call->buffer; 903 bp = call->buffer;
855 xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode); 904 xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode, NULL);
856 if (new_dvnode != orig_dvnode) 905 if (new_dvnode != orig_dvnode)
857 xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode); 906 xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode,
907 NULL);
858 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ 908 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
859 909
860 _leave(" = 0 [done]"); 910 _leave(" = 0 [done]");
@@ -936,3 +986,262 @@ int afs_fs_rename(struct afs_server *server,
936 986
937 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode); 987 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
938} 988}
989
990/*
991 * deliver reply data to an FS.StoreData
992 */
993static int afs_deliver_fs_store_data(struct afs_call *call,
994 struct sk_buff *skb, bool last)
995{
996 struct afs_vnode *vnode = call->reply;
997 const __be32 *bp;
998
999 _enter(",,%u", last);
1000
1001 afs_transfer_reply(call, skb);
1002 if (!last) {
1003 _leave(" = 0 [more]");
1004 return 0;
1005 }
1006
1007 if (call->reply_size != call->reply_max) {
1008 _leave(" = -EBADMSG [%u != %u]",
1009 call->reply_size, call->reply_max);
1010 return -EBADMSG;
1011 }
1012
1013 /* unmarshall the reply once we've received all of it */
1014 bp = call->buffer;
1015 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode,
1016 &call->store_version);
1017 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
1018
1019 afs_pages_written_back(vnode, call);
1020
1021 _leave(" = 0 [done]");
1022 return 0;
1023}
1024
1025/*
1026 * FS.StoreData operation type
1027 */
1028static const struct afs_call_type afs_RXFSStoreData = {
1029 .name = "FS.StoreData",
1030 .deliver = afs_deliver_fs_store_data,
1031 .abort_to_error = afs_abort_to_error,
1032 .destructor = afs_flat_call_destructor,
1033};
1034
1035/*
1036 * store a set of pages
1037 */
1038int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
1039 pgoff_t first, pgoff_t last,
1040 unsigned offset, unsigned to,
1041 const struct afs_wait_mode *wait_mode)
1042{
1043 struct afs_vnode *vnode = wb->vnode;
1044 struct afs_call *call;
1045 loff_t size, pos, i_size;
1046 __be32 *bp;
1047
1048 _enter(",%x,{%x:%u},,",
1049 key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
1050
1051 size = to - offset;
1052 if (first != last)
1053 size += (loff_t)(last - first) << PAGE_SHIFT;
1054 pos = (loff_t)first << PAGE_SHIFT;
1055 pos += offset;
1056
1057 i_size = i_size_read(&vnode->vfs_inode);
1058 if (pos + size > i_size)
1059 i_size = size + pos;
1060
1061 _debug("size %llx, at %llx, i_size %llx",
1062 (unsigned long long) size, (unsigned long long) pos,
1063 (unsigned long long) i_size);
1064
1065 BUG_ON(i_size > 0xffffffff); // TODO: use 64-bit store
1066
1067 call = afs_alloc_flat_call(&afs_RXFSStoreData,
1068 (4 + 6 + 3) * 4,
1069 (21 + 6) * 4);
1070 if (!call)
1071 return -ENOMEM;
1072
1073 call->wb = wb;
1074 call->key = wb->key;
1075 call->reply = vnode;
1076 call->service_id = FS_SERVICE;
1077 call->port = htons(AFS_FS_PORT);
1078 call->mapping = vnode->vfs_inode.i_mapping;
1079 call->first = first;
1080 call->last = last;
1081 call->first_offset = offset;
1082 call->last_to = to;
1083 call->send_pages = true;
1084 call->store_version = vnode->status.data_version + 1;
1085
1086 /* marshall the parameters */
1087 bp = call->request;
1088 *bp++ = htonl(FSSTOREDATA);
1089 *bp++ = htonl(vnode->fid.vid);
1090 *bp++ = htonl(vnode->fid.vnode);
1091 *bp++ = htonl(vnode->fid.unique);
1092
1093 *bp++ = 0; /* mask */
1094 *bp++ = 0; /* mtime */
1095 *bp++ = 0; /* owner */
1096 *bp++ = 0; /* group */
1097 *bp++ = 0; /* unix mode */
1098 *bp++ = 0; /* segment size */
1099
1100 *bp++ = htonl(pos);
1101 *bp++ = htonl(size);
1102 *bp++ = htonl(i_size);
1103
1104 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
1105}
1106
1107/*
1108 * deliver reply data to an FS.StoreStatus
1109 */
1110static int afs_deliver_fs_store_status(struct afs_call *call,
1111 struct sk_buff *skb, bool last)
1112{
1113 afs_dataversion_t *store_version;
1114 struct afs_vnode *vnode = call->reply;
1115 const __be32 *bp;
1116
1117 _enter(",,%u", last);
1118
1119 afs_transfer_reply(call, skb);
1120 if (!last) {
1121 _leave(" = 0 [more]");
1122 return 0;
1123 }
1124
1125 if (call->reply_size != call->reply_max) {
1126 _leave(" = -EBADMSG [%u != %u]",
1127 call->reply_size, call->reply_max);
1128 return -EBADMSG;
1129 }
1130
1131 /* unmarshall the reply once we've received all of it */
1132 store_version = NULL;
1133 if (call->operation_ID == FSSTOREDATA)
1134 store_version = &call->store_version;
1135
1136 bp = call->buffer;
1137 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, store_version);
1138 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
1139
1140 _leave(" = 0 [done]");
1141 return 0;
1142}
1143
1144/*
1145 * FS.StoreStatus operation type
1146 */
1147static const struct afs_call_type afs_RXFSStoreStatus = {
1148 .name = "FS.StoreStatus",
1149 .deliver = afs_deliver_fs_store_status,
1150 .abort_to_error = afs_abort_to_error,
1151 .destructor = afs_flat_call_destructor,
1152};
1153
1154static const struct afs_call_type afs_RXFSStoreData_as_Status = {
1155 .name = "FS.StoreData",
1156 .deliver = afs_deliver_fs_store_status,
1157 .abort_to_error = afs_abort_to_error,
1158 .destructor = afs_flat_call_destructor,
1159};
1160
1161/*
1162 * set the attributes on a file, using FS.StoreData rather than FS.StoreStatus
1163 * so as to alter the file size also
1164 */
1165static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
1166 struct afs_vnode *vnode, struct iattr *attr,
1167 const struct afs_wait_mode *wait_mode)
1168{
1169 struct afs_call *call;
1170 __be32 *bp;
1171
1172 _enter(",%x,{%x:%u},,",
1173 key_serial(key), vnode->fid.vid, vnode->fid.vnode);
1174
1175 ASSERT(attr->ia_valid & ATTR_SIZE);
1176 ASSERTCMP(attr->ia_size, <=, 0xffffffff); // TODO: use 64-bit store
1177
1178 call = afs_alloc_flat_call(&afs_RXFSStoreData_as_Status,
1179 (4 + 6 + 3) * 4,
1180 (21 + 6) * 4);
1181 if (!call)
1182 return -ENOMEM;
1183
1184 call->key = key;
1185 call->reply = vnode;
1186 call->service_id = FS_SERVICE;
1187 call->port = htons(AFS_FS_PORT);
1188 call->store_version = vnode->status.data_version + 1;
1189 call->operation_ID = FSSTOREDATA;
1190
1191 /* marshall the parameters */
1192 bp = call->request;
1193 *bp++ = htonl(FSSTOREDATA);
1194 *bp++ = htonl(vnode->fid.vid);
1195 *bp++ = htonl(vnode->fid.vnode);
1196 *bp++ = htonl(vnode->fid.unique);
1197
1198 xdr_encode_AFS_StoreStatus(&bp, attr);
1199
1200 *bp++ = 0; /* position of start of write */
1201 *bp++ = 0; /* size of write */
1202 *bp++ = htonl(attr->ia_size); /* new file length */
1203
1204 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
1205}
1206
1207/*
1208 * set the attributes on a file, using FS.StoreData if there's a change in file
1209 * size, and FS.StoreStatus otherwise
1210 */
1211int afs_fs_setattr(struct afs_server *server, struct key *key,
1212 struct afs_vnode *vnode, struct iattr *attr,
1213 const struct afs_wait_mode *wait_mode)
1214{
1215 struct afs_call *call;
1216 __be32 *bp;
1217
1218 if (attr->ia_valid & ATTR_SIZE)
1219 return afs_fs_setattr_size(server, key, vnode, attr,
1220 wait_mode);
1221
1222 _enter(",%x,{%x:%u},,",
1223 key_serial(key), vnode->fid.vid, vnode->fid.vnode);
1224
1225 call = afs_alloc_flat_call(&afs_RXFSStoreStatus,
1226 (4 + 6) * 4,
1227 (21 + 6) * 4);
1228 if (!call)
1229 return -ENOMEM;
1230
1231 call->key = key;
1232 call->reply = vnode;
1233 call->service_id = FS_SERVICE;
1234 call->port = htons(AFS_FS_PORT);
1235 call->operation_ID = FSSTORESTATUS;
1236
1237 /* marshall the parameters */
1238 bp = call->request;
1239 *bp++ = htonl(FSSTORESTATUS);
1240 *bp++ = htonl(vnode->fid.vid);
1241 *bp++ = htonl(vnode->fid.vnode);
1242 *bp++ = htonl(vnode->fid.unique);
1243
1244 xdr_encode_AFS_StoreStatus(&bp, attr);
1245
1246 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
1247}
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index c184a4ee5995..515a5d12d8fb 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -125,7 +125,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
125 struct inode *inode; 125 struct inode *inode;
126 int ret; 126 int ret;
127 127
128 _enter(",{%u,%u,%u},,", fid->vid, fid->vnode, fid->unique); 128 _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique);
129 129
130 as = sb->s_fs_info; 130 as = sb->s_fs_info;
131 data.volume = as->volume; 131 data.volume = as->volume;
@@ -204,6 +204,19 @@ bad_inode:
204} 204}
205 205
206/* 206/*
207 * mark the data attached to an inode as obsolete due to a write on the server
208 * - might also want to ditch all the outstanding writes and dirty pages
209 */
210void afs_zap_data(struct afs_vnode *vnode)
211{
212 _enter("zap data {%x:%u}", vnode->fid.vid, vnode->fid.vnode);
213
214 /* nuke all the non-dirty pages that aren't locked, mapped or being
215 * written back */
216 invalidate_remote_inode(&vnode->vfs_inode);
217}
218
219/*
207 * validate a vnode/inode 220 * validate a vnode/inode
208 * - there are several things we need to check 221 * - there are several things we need to check
209 * - parent dir data changes (rm, rmdir, rename, mkdir, create, link, 222 * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
@@ -258,10 +271,8 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
258 271
259 /* if the vnode's data version number changed then its contents are 272 /* if the vnode's data version number changed then its contents are
260 * different */ 273 * different */
261 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { 274 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
262 _debug("zap data {%x:%d}", vnode->fid.vid, vnode->fid.vnode); 275 afs_zap_data(vnode);
263 invalidate_remote_inode(&vnode->vfs_inode);
264 }
265 276
266 clear_bit(AFS_VNODE_MODIFIED, &vnode->flags); 277 clear_bit(AFS_VNODE_MODIFIED, &vnode->flags);
267 mutex_unlock(&vnode->validate_lock); 278 mutex_unlock(&vnode->validate_lock);
@@ -278,7 +289,7 @@ error_unlock:
278/* 289/*
279 * read the attributes of an inode 290 * read the attributes of an inode
280 */ 291 */
281int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, 292int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
282 struct kstat *stat) 293 struct kstat *stat)
283{ 294{
284 struct inode *inode; 295 struct inode *inode;
@@ -301,7 +312,7 @@ void afs_clear_inode(struct inode *inode)
301 312
302 vnode = AFS_FS_I(inode); 313 vnode = AFS_FS_I(inode);
303 314
304 _enter("{%x:%d.%d} v=%u x=%u t=%u }", 315 _enter("{%x:%u.%d} v=%u x=%u t=%u }",
305 vnode->fid.vid, 316 vnode->fid.vid,
306 vnode->fid.vnode, 317 vnode->fid.vnode,
307 vnode->fid.unique, 318 vnode->fid.unique,
@@ -323,6 +334,7 @@ void afs_clear_inode(struct inode *inode)
323 vnode->server = NULL; 334 vnode->server = NULL;
324 } 335 }
325 336
337 ASSERT(list_empty(&vnode->writebacks));
326 ASSERT(!vnode->cb_promised); 338 ASSERT(!vnode->cb_promised);
327 339
328#ifdef AFS_CACHING_SUPPORT 340#ifdef AFS_CACHING_SUPPORT
@@ -339,3 +351,47 @@ void afs_clear_inode(struct inode *inode)
339 351
340 _leave(""); 352 _leave("");
341} 353}
354
355/*
356 * set the attributes of an inode
357 */
358int afs_setattr(struct dentry *dentry, struct iattr *attr)
359{
360 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
361 struct key *key;
362 int ret;
363
364 _enter("{%x:%u},{n=%s},%x",
365 vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
366 attr->ia_valid);
367
368 if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
369 ATTR_MTIME))) {
370 _leave(" = 0 [unsupported]");
371 return 0;
372 }
373
374 /* flush any dirty data outstanding on a regular file */
375 if (S_ISREG(vnode->vfs_inode.i_mode)) {
376 filemap_write_and_wait(vnode->vfs_inode.i_mapping);
377 afs_writeback_all(vnode);
378 }
379
380 if (attr->ia_valid & ATTR_FILE) {
381 key = attr->ia_file->private_data;
382 } else {
383 key = afs_request_key(vnode->volume->cell);
384 if (IS_ERR(key)) {
385 ret = PTR_ERR(key);
386 goto error;
387 }
388 }
389
390 ret = afs_vnode_setattr(vnode, key, attr);
391 if (!(attr->ia_valid & ATTR_FILE))
392 key_put(key);
393
394error:
395 _leave(" = %d", ret);
396 return ret;
397}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d90c158cd934..a30d4fa768e3 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -21,6 +21,7 @@
21 21
22#define AFS_CELL_MAX_ADDRS 15 22#define AFS_CELL_MAX_ADDRS 15
23 23
24struct pagevec;
24struct afs_call; 25struct afs_call;
25 26
26typedef enum { 27typedef enum {
@@ -75,12 +76,15 @@ struct afs_call {
75 struct key *key; /* security for this call */ 76 struct key *key; /* security for this call */
76 struct afs_server *server; /* server affected by incoming CM call */ 77 struct afs_server *server; /* server affected by incoming CM call */
77 void *request; /* request data (first part) */ 78 void *request; /* request data (first part) */
78 void *request2; /* request data (second part) */ 79 struct address_space *mapping; /* page set */
80 struct afs_writeback *wb; /* writeback being performed */
79 void *buffer; /* reply receive buffer */ 81 void *buffer; /* reply receive buffer */
80 void *reply; /* reply buffer (first part) */ 82 void *reply; /* reply buffer (first part) */
81 void *reply2; /* reply buffer (second part) */ 83 void *reply2; /* reply buffer (second part) */
82 void *reply3; /* reply buffer (third part) */ 84 void *reply3; /* reply buffer (third part) */
83 void *reply4; /* reply buffer (fourth part) */ 85 void *reply4; /* reply buffer (fourth part) */
86 pgoff_t first; /* first page in mapping to deal with */
87 pgoff_t last; /* last page in mapping to deal with */
84 enum { /* call state */ 88 enum { /* call state */
85 AFS_CALL_REQUESTING, /* request is being sent for outgoing call */ 89 AFS_CALL_REQUESTING, /* request is being sent for outgoing call */
86 AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */ 90 AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */
@@ -97,14 +101,18 @@ struct afs_call {
97 unsigned request_size; /* size of request data */ 101 unsigned request_size; /* size of request data */
98 unsigned reply_max; /* maximum size of reply */ 102 unsigned reply_max; /* maximum size of reply */
99 unsigned reply_size; /* current size of reply */ 103 unsigned reply_size; /* current size of reply */
104 unsigned first_offset; /* offset into mapping[first] */
105 unsigned last_to; /* amount of mapping[last] */
100 unsigned short offset; /* offset into received data store */ 106 unsigned short offset; /* offset into received data store */
101 unsigned char unmarshall; /* unmarshalling phase */ 107 unsigned char unmarshall; /* unmarshalling phase */
102 bool incoming; /* T if incoming call */ 108 bool incoming; /* T if incoming call */
109 bool send_pages; /* T if data from mapping should be sent */
103 u16 service_id; /* RxRPC service ID to call */ 110 u16 service_id; /* RxRPC service ID to call */
104 __be16 port; /* target UDP port */ 111 __be16 port; /* target UDP port */
105 __be32 operation_ID; /* operation ID for an incoming call */ 112 __be32 operation_ID; /* operation ID for an incoming call */
106 u32 count; /* count for use in unmarshalling */ 113 u32 count; /* count for use in unmarshalling */
107 __be32 tmp; /* place to extract temporary data */ 114 __be32 tmp; /* place to extract temporary data */
115 afs_dataversion_t store_version; /* updated version expected from store */
108}; 116};
109 117
110struct afs_call_type { 118struct afs_call_type {
@@ -124,6 +132,32 @@ struct afs_call_type {
124}; 132};
125 133
126/* 134/*
135 * record of an outstanding writeback on a vnode
136 */
137struct afs_writeback {
138 struct list_head link; /* link in vnode->writebacks */
139 struct work_struct writer; /* work item to perform the writeback */
140 struct afs_vnode *vnode; /* vnode to which this write applies */
141 struct key *key; /* owner of this write */
142 wait_queue_head_t waitq; /* completion and ready wait queue */
143 pgoff_t first; /* first page in batch */
144 pgoff_t point; /* last page in current store op */
145 pgoff_t last; /* last page in batch (inclusive) */
146 unsigned offset_first; /* offset into first page of start of write */
147 unsigned to_last; /* offset into last page of end of write */
148 int num_conflicts; /* count of conflicting writes in list */
149 int usage;
150 bool conflicts; /* T if has dependent conflicts */
151 enum {
152 AFS_WBACK_SYNCING, /* synchronisation being performed */
153 AFS_WBACK_PENDING, /* write pending */
154 AFS_WBACK_CONFLICTING, /* conflicting writes posted */
155 AFS_WBACK_WRITING, /* writing back */
156 AFS_WBACK_COMPLETE /* the writeback record has been unlinked */
157 } state __attribute__((packed));
158};
159
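
A reference on one of these records is only taken or dropped under the owning vnode's writeback_lock; the put side appears in write.c below, and the take side is done inline there. A sketch of the pairing (illustrative helper, not in this patch):

	static struct afs_writeback *afs_get_writeback(struct afs_writeback *wb)
	{
		spin_lock(&wb->vnode->writeback_lock);
		wb->usage++;			/* balanced by afs_put_writeback() */
		spin_unlock(&wb->vnode->writeback_lock);
		return wb;
	}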
160/*
127 * AFS superblock private data 161 * AFS superblock private data
128 * - there's one superblock per volume 162 * - there's one superblock per volume
129 */ 163 */
@@ -305,6 +339,7 @@ struct afs_vnode {
305 wait_queue_head_t update_waitq; /* status fetch waitqueue */ 339 wait_queue_head_t update_waitq; /* status fetch waitqueue */
306 int update_cnt; /* number of outstanding ops that will update the 340 int update_cnt; /* number of outstanding ops that will update the
307 * status */ 341 * status */
342 spinlock_t writeback_lock; /* lock for writebacks */
308 spinlock_t lock; /* waitqueue/flags lock */ 343 spinlock_t lock; /* waitqueue/flags lock */
309 unsigned long flags; 344 unsigned long flags;
310#define AFS_VNODE_CB_BROKEN 0 /* set if vnode's callback was broken */ 345#define AFS_VNODE_CB_BROKEN 0 /* set if vnode's callback was broken */
@@ -316,6 +351,8 @@ struct afs_vnode {
316 351
317 long acl_order; /* ACL check count (callback break count) */ 352 long acl_order; /* ACL check count (callback break count) */
318 353
354 struct list_head writebacks; /* alterations in pagecache that need writing */
355
319 /* outstanding callback notification on this file */ 356 /* outstanding callback notification on this file */
320 struct rb_node server_rb; /* link in server->fs_vnodes */ 357 struct rb_node server_rb; /* link in server->fs_vnodes */
321 struct rb_node cb_promise; /* link in server->cb_promises */ 358 struct rb_node cb_promise; /* link in server->cb_promises */
@@ -433,10 +470,6 @@ extern const struct file_operations afs_file_operations;
433extern int afs_open(struct inode *, struct file *); 470extern int afs_open(struct inode *, struct file *);
434extern int afs_release(struct inode *, struct file *); 471extern int afs_release(struct inode *, struct file *);
435 472
436#ifdef AFS_CACHING_SUPPORT
437extern int afs_cache_get_page_cookie(struct page *, struct cachefs_page **);
438#endif
439
440/* 473/*
441 * fsclient.c 474 * fsclient.c
442 */ 475 */
@@ -467,6 +500,12 @@ extern int afs_fs_rename(struct afs_server *, struct key *,
467 struct afs_vnode *, const char *, 500 struct afs_vnode *, const char *,
468 struct afs_vnode *, const char *, 501 struct afs_vnode *, const char *,
469 const struct afs_wait_mode *); 502 const struct afs_wait_mode *);
503extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *,
504 pgoff_t, pgoff_t, unsigned, unsigned,
505 const struct afs_wait_mode *);
506extern int afs_fs_setattr(struct afs_server *, struct key *,
507 struct afs_vnode *, struct iattr *,
508 const struct afs_wait_mode *);
470 509
471/* 510/*
472 * inode.c 511 * inode.c
@@ -474,10 +513,10 @@ extern int afs_fs_rename(struct afs_server *, struct key *,
474extern struct inode *afs_iget(struct super_block *, struct key *, 513extern struct inode *afs_iget(struct super_block *, struct key *,
475 struct afs_fid *, struct afs_file_status *, 514 struct afs_fid *, struct afs_file_status *,
476 struct afs_callback *); 515 struct afs_callback *);
516extern void afs_zap_data(struct afs_vnode *);
477extern int afs_validate(struct afs_vnode *, struct key *); 517extern int afs_validate(struct afs_vnode *, struct key *);
478extern int afs_inode_getattr(struct vfsmount *, struct dentry *, 518extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
479 struct kstat *); 519extern int afs_setattr(struct dentry *, struct iattr *);
480extern void afs_zap_permits(struct rcu_head *);
481extern void afs_clear_inode(struct inode *); 520extern void afs_clear_inode(struct inode *);
482 521
483/* 522/*
@@ -533,6 +572,7 @@ extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
533 */ 572 */
534extern void afs_clear_permits(struct afs_vnode *); 573extern void afs_clear_permits(struct afs_vnode *);
535extern void afs_cache_permit(struct afs_vnode *, struct key *, long); 574extern void afs_cache_permit(struct afs_vnode *, struct key *, long);
575extern void afs_zap_permits(struct rcu_head *);
536extern struct key *afs_request_key(struct afs_cell *); 576extern struct key *afs_request_key(struct afs_cell *);
537extern int afs_permission(struct inode *, int, struct nameidata *); 577extern int afs_permission(struct inode *, int, struct nameidata *);
538 578
@@ -629,6 +669,9 @@ extern int afs_vnode_symlink(struct afs_vnode *, struct key *, const char *,
629 struct afs_file_status *, struct afs_server **); 669 struct afs_file_status *, struct afs_server **);
630extern int afs_vnode_rename(struct afs_vnode *, struct afs_vnode *, 670extern int afs_vnode_rename(struct afs_vnode *, struct afs_vnode *,
631 struct key *, const char *, const char *); 671 struct key *, const char *, const char *);
672extern int afs_vnode_store_data(struct afs_writeback *, pgoff_t, pgoff_t,
673 unsigned, unsigned);
674extern int afs_vnode_setattr(struct afs_vnode *, struct key *, struct iattr *);
632 675
633/* 676/*
634 * volume.c 677 * volume.c
@@ -645,6 +688,23 @@ extern struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *);
645extern int afs_volume_release_fileserver(struct afs_vnode *, 688extern int afs_volume_release_fileserver(struct afs_vnode *,
646 struct afs_server *, int); 689 struct afs_server *, int);
647 690
691/*
692 * write.c
693 */
694extern int afs_set_page_dirty(struct page *);
695extern void afs_put_writeback(struct afs_writeback *);
696extern int afs_prepare_write(struct file *, struct page *, unsigned, unsigned);
697extern int afs_commit_write(struct file *, struct page *, unsigned, unsigned);
698extern int afs_writepage(struct page *, struct writeback_control *);
699extern int afs_writepages(struct address_space *, struct writeback_control *);
700extern int afs_write_inode(struct inode *, int);
701extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
702extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
703 unsigned long, loff_t);
704extern int afs_writeback_all(struct afs_vnode *);
705extern int afs_fsync(struct file *, struct dentry *, int);
706
707
648/*****************************************************************************/ 708/*****************************************************************************/
649/* 709/*
650 * debug tracing 710 * debug tracing
@@ -726,6 +786,21 @@ do { \
726 } \ 786 } \
727} while(0) 787} while(0)
728 788
789#define ASSERTRANGE(L, OP1, N, OP2, H) \
790do { \
791 if (unlikely(!((L) OP1 (N)) || !((N) OP2 (H)))) { \
792 printk(KERN_ERR "\n"); \
793 printk(KERN_ERR "AFS: Assertion failed\n"); \
794 printk(KERN_ERR "%lu "#OP1" %lu "#OP2" %lu is false\n", \
795 (unsigned long)(L), (unsigned long)(N), \
796 (unsigned long)(H)); \
797 printk(KERN_ERR "0x%lx "#OP1" 0x%lx "#OP2" 0x%lx is false\n", \
798 (unsigned long)(L), (unsigned long)(N), \
799 (unsigned long)(H)); \
800 BUG(); \
801 } \
802} while(0)
803
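
ASSERTRANGE(L, OP1, N, OP2, H) BUGs out unless both comparisons hold, so a bounds check reads naturally at the call site; write.c uses it later in this patch:

	ASSERTRANGE(wb->first, <=, index, <=, wb->last);	/* BUG unless first <= index <= last */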
729#define ASSERTIF(C, X) \ 804#define ASSERTIF(C, X) \
730do { \ 805do { \
731 if (unlikely((C) && !(X))) { \ 806 if (unlikely((C) && !(X))) { \
@@ -758,6 +833,10 @@ do { \
758do { \ 833do { \
759} while(0) 834} while(0)
760 835
836#define ASSERTRANGE(L, OP1, N, OP2, H) \
837do { \
838} while(0)
839
761#define ASSERTIF(C, X) \ 840#define ASSERTIF(C, X) \
762do { \ 841do { \
763} while(0) 842} while(0)
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 80ec6fd19a73..f1f71ff7d5c6 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -149,6 +149,7 @@ error_cache:
149 afs_vlocation_purge(); 149 afs_vlocation_purge();
150 afs_cell_purge(); 150 afs_cell_purge();
151 afs_proc_cleanup(); 151 afs_proc_cleanup();
152 rcu_barrier();
152 printk(KERN_ERR "kAFS: failed to register: %d\n", ret); 153 printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
153 return ret; 154 return ret;
154} 155}
@@ -176,6 +177,7 @@ static void __exit afs_exit(void)
176 cachefs_unregister_netfs(&afs_cache_netfs); 177 cachefs_unregister_netfs(&afs_cache_netfs);
177#endif 178#endif
178 afs_proc_cleanup(); 179 afs_proc_cleanup();
180 rcu_barrier();
179} 181}
180 182
181module_exit(afs_exit); 183module_exit(afs_exit);
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index cdb9792d8161..d1a889c40742 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -22,6 +22,7 @@ int afs_abort_to_error(u32 abort_code)
22{ 22{
23 switch (abort_code) { 23 switch (abort_code) {
24 case 13: return -EACCES; 24 case 13: return -EACCES;
25 case 27: return -EFBIG;
25 case 30: return -EROFS; 26 case 30: return -EROFS;
26 case VSALVAGE: return -EIO; 27 case VSALVAGE: return -EIO;
27 case VNOVNODE: return -ENOENT; 28 case VNOVNODE: return -ENOENT;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 034fcfd4e330..a3684dcc76e7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -36,7 +36,7 @@ const struct inode_operations afs_mntpt_inode_operations = {
36 .lookup = afs_mntpt_lookup, 36 .lookup = afs_mntpt_lookup,
37 .follow_link = afs_mntpt_follow_link, 37 .follow_link = afs_mntpt_follow_link,
38 .readlink = page_readlink, 38 .readlink = page_readlink,
39 .getattr = afs_inode_getattr, 39 .getattr = afs_getattr,
40}; 40};
41 41
42static LIST_HEAD(afs_vfsmounts); 42static LIST_HEAD(afs_vfsmounts);
@@ -58,7 +58,8 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
58 char *buf; 58 char *buf;
59 int ret; 59 int ret;
60 60
61 _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique); 61 _enter("{%x:%u,%u}",
62 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
62 63
63 /* read the contents of the symlink into the pagecache */ 64 /* read the contents of the symlink into the pagecache */
64 page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file); 65 page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 222c1a3abbb8..04189c47d6a0 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -237,6 +237,70 @@ void afs_flat_call_destructor(struct afs_call *call)
237} 237}
238 238
239/* 239/*
240 * attach the data from a bunch of pages on an inode to a call
241 */
242static int afs_send_pages(struct afs_call *call, struct msghdr *msg, struct kvec *iov)
243{
244 struct page *pages[8];
245 unsigned count, n, loop, offset, to;
246 pgoff_t first = call->first, last = call->last;
247 int ret;
248
249 _enter("");
250
251 offset = call->first_offset;
252 call->first_offset = 0;
253
254 do {
255 _debug("attach %lx-%lx", first, last);
256
257 count = last - first + 1;
258 if (count > ARRAY_SIZE(pages))
259 count = ARRAY_SIZE(pages);
260 n = find_get_pages_contig(call->mapping, first, count, pages);
261 ASSERTCMP(n, ==, count);
262
263 loop = 0;
264 do {
265 msg->msg_flags = 0;
266 to = PAGE_SIZE;
267 if (first + loop >= last)
268 to = call->last_to;
269 else
270 msg->msg_flags = MSG_MORE;
271 iov->iov_base = kmap(pages[loop]) + offset;
272 iov->iov_len = to - offset;
273
274			_debug("- range %u-%u%s",
275			       offset, to, msg->msg_flags ? " [more]" : "");
276			offset = 0;
277 msg->msg_iov = (struct iovec *) iov;
278 msg->msg_iovlen = 1;
279
280 /* have to change the state *before* sending the last
281 * packet as RxRPC might give us the reply before it
282 * returns from sending the request */
283 if (first + loop >= last)
284 call->state = AFS_CALL_AWAIT_REPLY;
285			ret = rxrpc_kernel_send_data(call->rxcall, msg,
286						     iov->iov_len);
287 kunmap(pages[loop]);
288 if (ret < 0)
289 break;
290 } while (++loop < count);
291 first += count;
292
293 for (loop = 0; loop < count; loop++)
294 put_page(pages[loop]);
295 if (ret < 0)
296 break;
297	} while (first <= last);
298
299 _leave(" = %d", ret);
300 return ret;
301}
302
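
A worked example of the batching arithmetic above, which walks an inclusive [first, last] page range:

	/*
	 * first = 0, last = 9, ARRAY_SIZE(pages) = 8:
	 *   pass 1: count = min(9 - 0 + 1, 8) = 8 -> pages 0..7, all sent with MSG_MORE
	 *   pass 2: count = min(9 - 8 + 1, 8) = 2 -> pages 8..9; page 9 is sent with
	 *           to = call->last_to, and the call state moves to AFS_CALL_AWAIT_REPLY
	 *           before that final send, as the reply may arrive before
	 *           rxrpc_kernel_send_data() returns
	 */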
303/*
240 * initiate a call 304 * initiate a call
241 */ 305 */
242int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, 306int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
@@ -253,8 +317,9 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
253 ASSERT(call->type != NULL); 317 ASSERT(call->type != NULL);
254 ASSERT(call->type->name != NULL); 318 ASSERT(call->type->name != NULL);
255 319
256 _debug("MAKE %p{%s} [%d]", 320 _debug("____MAKE %p{%s,%x} [%d]____",
257 call, call->type->name, atomic_read(&afs_outstanding_calls)); 321 call, call->type->name, key_serial(call->key),
322 atomic_read(&afs_outstanding_calls));
258 323
259 call->wait_mode = wait_mode; 324 call->wait_mode = wait_mode;
260 INIT_WORK(&call->async_work, afs_process_async_call); 325 INIT_WORK(&call->async_work, afs_process_async_call);
@@ -289,16 +354,23 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
289 msg.msg_iovlen = 1; 354 msg.msg_iovlen = 1;
290 msg.msg_control = NULL; 355 msg.msg_control = NULL;
291 msg.msg_controllen = 0; 356 msg.msg_controllen = 0;
292 msg.msg_flags = 0; 357 msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
293 358
294 /* have to change the state *before* sending the last packet as RxRPC 359 /* have to change the state *before* sending the last packet as RxRPC
295 * might give us the reply before it returns from sending the 360 * might give us the reply before it returns from sending the
296 * request */ 361 * request */
297 call->state = AFS_CALL_AWAIT_REPLY; 362 if (!call->send_pages)
363 call->state = AFS_CALL_AWAIT_REPLY;
298 ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size); 364 ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
299 if (ret < 0) 365 if (ret < 0)
300 goto error_do_abort; 366 goto error_do_abort;
301 367
368 if (call->send_pages) {
369 ret = afs_send_pages(call, &msg, iov);
370 if (ret < 0)
371 goto error_do_abort;
372 }
373
302 /* at this point, an async call may no longer exist as it may have 374 /* at this point, an async call may no longer exist as it may have
303 * already completed */ 375 * already completed */
304 return wait_mode->wait(call); 376 return wait_mode->wait(call);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index f9f424d80458..e0ea88b63ebf 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -109,7 +109,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
109{ 109{
110 struct afs_permits *permits; 110 struct afs_permits *permits;
111 111
112 _enter("{%x}", vnode->fid.vnode); 112 _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
113 113
114 mutex_lock(&vnode->permits_lock); 114 mutex_lock(&vnode->permits_lock);
115 permits = vnode->permits; 115 permits = vnode->permits;
@@ -132,7 +132,8 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order)
132 struct afs_vnode *auth_vnode; 132 struct afs_vnode *auth_vnode;
133 int count, loop; 133 int count, loop;
134 134
135 _enter("{%x},%x,%lx", vnode->fid.vnode, key_serial(key), acl_order); 135 _enter("{%x:%u},%x,%lx",
136 vnode->fid.vid, vnode->fid.vnode, key_serial(key), acl_order);
136 137
137 auth_vnode = afs_get_auth_inode(vnode, key); 138 auth_vnode = afs_get_auth_inode(vnode, key);
138 if (IS_ERR(auth_vnode)) { 139 if (IS_ERR(auth_vnode)) {
@@ -220,7 +221,8 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
220 bool valid; 221 bool valid;
221 int loop, ret; 222 int loop, ret;
222 223
223 _enter(""); 224 _enter("{%x:%u},%x",
225 vnode->fid.vid, vnode->fid.vnode, key_serial(key));
224 226
225 auth_vnode = afs_get_auth_inode(vnode, key); 227 auth_vnode = afs_get_auth_inode(vnode, key);
226 if (IS_ERR(auth_vnode)) { 228 if (IS_ERR(auth_vnode)) {
@@ -268,9 +270,9 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
268 _leave(" = %d", ret); 270 _leave(" = %d", ret);
269 return ret; 271 return ret;
270 } 272 }
273 *_access = vnode->status.caller_access;
271 } 274 }
272 275
273 *_access = vnode->status.caller_access;
274 iput(&auth_vnode->vfs_inode); 276 iput(&auth_vnode->vfs_inode);
275 _leave(" = 0 [access %x]", *_access); 277 _leave(" = 0 [access %x]", *_access);
276 return 0; 278 return 0;
@@ -288,7 +290,7 @@ int afs_permission(struct inode *inode, int mask, struct nameidata *nd)
288 struct key *key; 290 struct key *key;
289 int ret; 291 int ret;
290 292
291 _enter("{{%x:%x},%lx},%x,", 293 _enter("{{%x:%u},%lx},%x,",
292 vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask); 294 vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
293 295
294 key = afs_request_key(vnode->volume->cell); 296 key = afs_request_key(vnode->volume->cell);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 96bb23b476a2..231ae4150279 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -252,6 +252,9 @@ static void afs_destroy_server(struct afs_server *server)
252{ 252{
253 _enter("%p", server); 253 _enter("%p", server);
254 254
255 ASSERTIF(server->cb_break_head != server->cb_break_tail,
256 delayed_work_pending(&server->cb_break_work));
257
255 ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL); 258 ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL);
256 ASSERTCMP(server->cb_promises.rb_node, ==, NULL); 259 ASSERTCMP(server->cb_promises.rb_node, ==, NULL);
257 ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail); 260 ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 7030d76155fc..d24be334b608 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -50,6 +50,7 @@ static const struct super_operations afs_super_ops = {
50 .statfs = simple_statfs, 50 .statfs = simple_statfs,
51 .alloc_inode = afs_alloc_inode, 51 .alloc_inode = afs_alloc_inode,
52 .drop_inode = generic_delete_inode, 52 .drop_inode = generic_delete_inode,
53 .write_inode = afs_write_inode,
53 .destroy_inode = afs_destroy_inode, 54 .destroy_inode = afs_destroy_inode,
54 .clear_inode = afs_clear_inode, 55 .clear_inode = afs_clear_inode,
55 .umount_begin = afs_umount_begin, 56 .umount_begin = afs_umount_begin,
@@ -66,7 +67,7 @@ enum {
66 afs_opt_vol, 67 afs_opt_vol,
67}; 68};
68 69
69static const match_table_t afs_options_list = { 70static match_table_t afs_options_list = {
70 { afs_opt_cell, "cell=%s" }, 71 { afs_opt_cell, "cell=%s" },
71 { afs_opt_rwpath, "rwpath" }, 72 { afs_opt_rwpath, "rwpath" },
72 { afs_opt_vol, "vol=%s" }, 73 { afs_opt_vol, "vol=%s" },
@@ -459,7 +460,9 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
459 init_waitqueue_head(&vnode->update_waitq); 460 init_waitqueue_head(&vnode->update_waitq);
460 mutex_init(&vnode->permits_lock); 461 mutex_init(&vnode->permits_lock);
461 mutex_init(&vnode->validate_lock); 462 mutex_init(&vnode->validate_lock);
463 spin_lock_init(&vnode->writeback_lock);
462 spin_lock_init(&vnode->lock); 464 spin_lock_init(&vnode->lock);
465 INIT_LIST_HEAD(&vnode->writebacks);
463 INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); 466 INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
464 } 467 }
465} 468}
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
index a1904ab8426a..ec814660209f 100644
--- a/fs/afs/vnode.c
+++ b/fs/afs/vnode.c
@@ -261,7 +261,7 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode,
261 261
262 DECLARE_WAITQUEUE(myself, current); 262 DECLARE_WAITQUEUE(myself, current);
263 263
264 _enter("%s,{%u,%u,%u}", 264 _enter("%s,{%x:%u.%u}",
265 vnode->volume->vlocation->vldb.name, 265 vnode->volume->vlocation->vldb.name,
266 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 266 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
267 267
@@ -389,7 +389,7 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
389 struct afs_server *server; 389 struct afs_server *server;
390 int ret; 390 int ret;
391 391
392 _enter("%s{%u,%u,%u},%x,,,", 392 _enter("%s{%x:%u.%u},%x,,,",
393 vnode->volume->vlocation->vldb.name, 393 vnode->volume->vlocation->vldb.name,
394 vnode->fid.vid, 394 vnode->fid.vid,
395 vnode->fid.vnode, 395 vnode->fid.vnode,
@@ -446,7 +446,7 @@ int afs_vnode_create(struct afs_vnode *vnode, struct key *key,
446 struct afs_server *server; 446 struct afs_server *server;
447 int ret; 447 int ret;
448 448
449 _enter("%s{%u,%u,%u},%x,%s,,", 449 _enter("%s{%x:%u.%u},%x,%s,,",
450 vnode->volume->vlocation->vldb.name, 450 vnode->volume->vlocation->vldb.name,
451 vnode->fid.vid, 451 vnode->fid.vid,
452 vnode->fid.vnode, 452 vnode->fid.vnode,
@@ -502,7 +502,7 @@ int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name,
502 struct afs_server *server; 502 struct afs_server *server;
503 int ret; 503 int ret;
504 504
505 _enter("%s{%u,%u,%u},%x,%s", 505 _enter("%s{%x:%u.%u},%x,%s",
506 vnode->volume->vlocation->vldb.name, 506 vnode->volume->vlocation->vldb.name,
507 vnode->fid.vid, 507 vnode->fid.vid,
508 vnode->fid.vnode, 508 vnode->fid.vnode,
@@ -557,7 +557,7 @@ extern int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode,
557 struct afs_server *server; 557 struct afs_server *server;
558 int ret; 558 int ret;
559 559
560 _enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s", 560 _enter("%s{%x:%u.%u},%s{%x:%u.%u},%x,%s",
561 dvnode->volume->vlocation->vldb.name, 561 dvnode->volume->vlocation->vldb.name,
562 dvnode->fid.vid, 562 dvnode->fid.vid,
563 dvnode->fid.vnode, 563 dvnode->fid.vnode,
@@ -628,7 +628,7 @@ int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key,
628 struct afs_server *server; 628 struct afs_server *server;
629 int ret; 629 int ret;
630 630
631 _enter("%s{%u,%u,%u},%x,%s,%s,,,", 631 _enter("%s{%x:%u.%u},%x,%s,%s,,,",
632 vnode->volume->vlocation->vldb.name, 632 vnode->volume->vlocation->vldb.name,
633 vnode->fid.vid, 633 vnode->fid.vid,
634 vnode->fid.vnode, 634 vnode->fid.vnode,
@@ -687,7 +687,7 @@ int afs_vnode_rename(struct afs_vnode *orig_dvnode,
687 struct afs_server *server; 687 struct afs_server *server;
688 int ret; 688 int ret;
689 689
690	_enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s,%s",	690	_enter("%s{%x:%u.%u},%s{%x:%u.%u},%x,%s,%s",
691 orig_dvnode->volume->vlocation->vldb.name, 691 orig_dvnode->volume->vlocation->vldb.name,
692 orig_dvnode->fid.vid, 692 orig_dvnode->fid.vid,
693 orig_dvnode->fid.vnode, 693 orig_dvnode->fid.vnode,
@@ -753,3 +753,110 @@ no_server:
753 _leave(" = %ld [cnt %d]", PTR_ERR(server), orig_dvnode->update_cnt); 753 _leave(" = %ld [cnt %d]", PTR_ERR(server), orig_dvnode->update_cnt);
754 return PTR_ERR(server); 754 return PTR_ERR(server);
755} 755}
756
757/*
758 * write to a file
759 */
760int afs_vnode_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last,
761 unsigned offset, unsigned to)
762{
763 struct afs_server *server;
764 struct afs_vnode *vnode = wb->vnode;
765 int ret;
766
767 _enter("%s{%x:%u.%u},%x,%lx,%lx,%x,%x",
768 vnode->volume->vlocation->vldb.name,
769 vnode->fid.vid,
770 vnode->fid.vnode,
771 vnode->fid.unique,
772 key_serial(wb->key),
773 first, last, offset, to);
774
775 /* this op will fetch the status */
776 spin_lock(&vnode->lock);
777 vnode->update_cnt++;
778 spin_unlock(&vnode->lock);
779
780 do {
781 /* pick a server to query */
782 server = afs_volume_pick_fileserver(vnode);
783 if (IS_ERR(server))
784 goto no_server;
785
786 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
787
788 ret = afs_fs_store_data(server, wb, first, last, offset, to,
789 &afs_sync_call);
790
791 } while (!afs_volume_release_fileserver(vnode, server, ret));
792
793 /* adjust the flags */
794 if (ret == 0) {
795 afs_vnode_finalise_status_update(vnode, server);
796 afs_put_server(server);
797 } else {
798 afs_vnode_status_update_failed(vnode, ret);
799 }
800
801 _leave(" = %d", ret);
802 return ret;
803
804no_server:
805 spin_lock(&vnode->lock);
806 vnode->update_cnt--;
807 ASSERTCMP(vnode->update_cnt, >=, 0);
808 spin_unlock(&vnode->lock);
809 return PTR_ERR(server);
810}
811
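
A note on the bookkeeping above: vnode->update_cnt rises before the RPC and must fall by exactly one on every exit path. Summarised (inferred from the surrounding functions in this file):

	/*
	 * spin_lock(&vnode->lock); vnode->update_cnt++; spin_unlock(&vnode->lock);
	 * ... RPC ...
	 * success:   afs_vnode_finalise_status_update() - drops update_cnt
	 * failure:   afs_vnode_status_update_failed()   - drops update_cnt
	 * no server: decremented by hand, guarded by ASSERTCMP(update_cnt, >=, 0)
	 */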
812/*
813 * set the attributes on a file
814 */
815int afs_vnode_setattr(struct afs_vnode *vnode, struct key *key,
816 struct iattr *attr)
817{
818 struct afs_server *server;
819 int ret;
820
821 _enter("%s{%x:%u.%u},%x",
822 vnode->volume->vlocation->vldb.name,
823 vnode->fid.vid,
824 vnode->fid.vnode,
825 vnode->fid.unique,
826 key_serial(key));
827
828 /* this op will fetch the status */
829 spin_lock(&vnode->lock);
830 vnode->update_cnt++;
831 spin_unlock(&vnode->lock);
832
833 do {
834 /* pick a server to query */
835 server = afs_volume_pick_fileserver(vnode);
836 if (IS_ERR(server))
837 goto no_server;
838
839 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
840
841 ret = afs_fs_setattr(server, key, vnode, attr, &afs_sync_call);
842
843 } while (!afs_volume_release_fileserver(vnode, server, ret));
844
845 /* adjust the flags */
846 if (ret == 0) {
847 afs_vnode_finalise_status_update(vnode, server);
848 afs_put_server(server);
849 } else {
850 afs_vnode_status_update_failed(vnode, ret);
851 }
852
853 _leave(" = %d", ret);
854 return ret;
855
856no_server:
857 spin_lock(&vnode->lock);
858 vnode->update_cnt--;
859 ASSERTCMP(vnode->update_cnt, >=, 0);
860 spin_unlock(&vnode->lock);
861 return PTR_ERR(server);
862}
diff --git a/fs/afs/write.c b/fs/afs/write.c
new file mode 100644
index 000000000000..83ff29262816
--- /dev/null
+++ b/fs/afs/write.c
@@ -0,0 +1,835 @@
1/* handling of writes to regular files and writing back to the server
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/pagemap.h>
15#include <linux/writeback.h>
16#include <linux/pagevec.h>
17#include "internal.h"
18
19static int afs_write_back_from_locked_page(struct afs_writeback *wb,
20 struct page *page);
21
22/*
23 * mark a page as having been made dirty and thus needing writeback
24 */
25int afs_set_page_dirty(struct page *page)
26{
27 _enter("");
28 return __set_page_dirty_nobuffers(page);
29}
30
31/*
32 * unlink a writeback record because its usage has reached zero
33 * - must be called with the wb->vnode->writeback_lock held
34 */
35static void afs_unlink_writeback(struct afs_writeback *wb)
36{
37 struct afs_writeback *front;
38 struct afs_vnode *vnode = wb->vnode;
39
40 list_del_init(&wb->link);
41 if (!list_empty(&vnode->writebacks)) {
42 /* if an fsync rises to the front of the queue then wake it
43 * up */
44 front = list_entry(vnode->writebacks.next,
45 struct afs_writeback, link);
46 if (front->state == AFS_WBACK_SYNCING) {
47 _debug("wake up sync");
48 front->state = AFS_WBACK_COMPLETE;
49 wake_up(&front->waitq);
50 }
51 }
52}
53
54/*
55 * free a writeback record
56 */
57static void afs_free_writeback(struct afs_writeback *wb)
58{
59 _enter("");
60 key_put(wb->key);
61 kfree(wb);
62}
63
64/*
65 * dispose of a reference to a writeback record
66 */
67void afs_put_writeback(struct afs_writeback *wb)
68{
69 struct afs_vnode *vnode = wb->vnode;
70
71 _enter("{%d}", wb->usage);
72
73 spin_lock(&vnode->writeback_lock);
74 if (--wb->usage == 0)
75 afs_unlink_writeback(wb);
76 else
77 wb = NULL;
78 spin_unlock(&vnode->writeback_lock);
79 if (wb)
80 afs_free_writeback(wb);
81}
82
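
A typical call site drops the reference that a page held through its private pointer, as afs_prepare_write() does further down:

	afs_put_writeback(wb);		/* may free the record on the last ref */
	set_page_private(page, 0);
	ClearPagePrivate(page);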
83/*
84 * partly or wholly fill a page that's under preparation for writing
85 */
86static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
87 unsigned start, unsigned len, struct page *page)
88{
89 int ret;
90
91 _enter(",,%u,%u", start, len);
92
93 ASSERTCMP(start + len, <=, PAGE_SIZE);
94
95 ret = afs_vnode_fetch_data(vnode, key, start, len, page);
96 if (ret < 0) {
97 if (ret == -ENOENT) {
98 _debug("got NOENT from server"
99 " - marking file deleted and stale");
100 set_bit(AFS_VNODE_DELETED, &vnode->flags);
101 ret = -ESTALE;
102 }
103 }
104
105 _leave(" = %d", ret);
106 return ret;
107}
108
109/*
110 * prepare a page for being written to
111 */
112static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
113 struct key *key, unsigned offset, unsigned to)
114{
115 unsigned eof, tail, start, stop, len;
116 loff_t i_size, pos;
117 void *p;
118 int ret;
119
120 _enter("");
121
122 if (offset == 0 && to == PAGE_SIZE)
123 return 0;
124
125 p = kmap(page);
126
127 i_size = i_size_read(&vnode->vfs_inode);
128 pos = (loff_t) page->index << PAGE_SHIFT;
129 if (pos >= i_size) {
130 /* partial write, page beyond EOF */
131 _debug("beyond");
132 if (offset > 0)
133 memset(p, 0, offset);
134 if (to < PAGE_SIZE)
135 memset(p + to, 0, PAGE_SIZE - to);
136 kunmap(page);
137 return 0;
138 }
139
140 if (i_size - pos >= PAGE_SIZE) {
141 /* partial write, page entirely before EOF */
142 _debug("before");
143 tail = eof = PAGE_SIZE;
144 } else {
145 /* partial write, page overlaps EOF */
146 eof = i_size - pos;
147 _debug("overlap %u", eof);
148 tail = max(eof, to);
149 if (tail < PAGE_SIZE)
150 memset(p + tail, 0, PAGE_SIZE - tail);
151 if (offset > eof)
152 memset(p + eof, 0, PAGE_SIZE - eof);
153 }
154
155	kunmap(page);
156
157 ret = 0;
158 if (offset > 0 || eof > to) {
159 /* need to fill one or two bits that aren't going to be written
160 * (cover both fillers in one read if there are two) */
161 start = (offset > 0) ? 0 : to;
162 stop = (eof > to) ? eof : offset;
163 len = stop - start;
164 _debug("wr=%u-%u av=0-%u rd=%u@%u",
165 offset, to, eof, start, len);
166 ret = afs_fill_page(vnode, key, start, len, page);
167 }
168
169 _leave(" = %d", ret);
170 return ret;
171}
172
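
A worked example of the fill logic above, assuming PAGE_SIZE = 4096: a write of bytes 1000-2999 (offset = 1000, to = 3000) into a page that overlaps EOF at eof = 2000 gives:

	/*
	 * tail = max(eof, to) = 3000        -> bytes 3000..4095 are zeroed
	 * offset > 0, and eof > to is false -> start = 0, stop = offset = 1000
	 * afs_fill_page() reads bytes 0..999 from the server; 1000..2999 come
	 * from the caller, and the zeroed tail covers the remainder.
	 */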
173/*
174 * prepare to perform part of a write to a page
175 * - the caller holds the page locked, preventing it from being written out or
176 * modified by anyone else
177 */
178int afs_prepare_write(struct file *file, struct page *page,
179 unsigned offset, unsigned to)
180{
181 struct afs_writeback *candidate, *wb;
182 struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
183 struct key *key = file->private_data;
184 pgoff_t index;
185 int ret;
186
187 _enter("{%x:%u},{%lx},%u,%u",
188 vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
189
190 candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
191 if (!candidate)
192 return -ENOMEM;
193 candidate->vnode = vnode;
194 candidate->first = candidate->last = page->index;
195 candidate->offset_first = offset;
196 candidate->to_last = to;
197 candidate->usage = 1;
198 candidate->state = AFS_WBACK_PENDING;
199 init_waitqueue_head(&candidate->waitq);
200
201 if (!PageUptodate(page)) {
202 _debug("not up to date");
203 ret = afs_prepare_page(vnode, page, key, offset, to);
204 if (ret < 0) {
205 kfree(candidate);
206 _leave(" = %d [prep]", ret);
207 return ret;
208 }
209 SetPageUptodate(page);
210 }
211
212try_again:
213 index = page->index;
214 spin_lock(&vnode->writeback_lock);
215
216 /* see if this page is already pending a writeback under a suitable key
217 * - if so we can just join onto that one */
218 wb = (struct afs_writeback *) page_private(page);
219 if (wb) {
220 if (wb->key == key && wb->state == AFS_WBACK_PENDING)
221 goto subsume_in_current_wb;
222 goto flush_conflicting_wb;
223 }
224
225 if (index > 0) {
226 /* see if we can find an already pending writeback that we can
227 * append this page to */
228 list_for_each_entry(wb, &vnode->writebacks, link) {
229 if (wb->last == index - 1 && wb->key == key &&
230 wb->state == AFS_WBACK_PENDING)
231 goto append_to_previous_wb;
232 }
233 }
234
235 list_add_tail(&candidate->link, &vnode->writebacks);
236 candidate->key = key_get(key);
237 spin_unlock(&vnode->writeback_lock);
238 SetPagePrivate(page);
239 set_page_private(page, (unsigned long) candidate);
240 _leave(" = 0 [new]");
241 return 0;
242
243subsume_in_current_wb:
244 _debug("subsume");
245 ASSERTRANGE(wb->first, <=, index, <=, wb->last);
246 if (index == wb->first && offset < wb->offset_first)
247 wb->offset_first = offset;
248 if (index == wb->last && to > wb->to_last)
249 wb->to_last = to;
250 spin_unlock(&vnode->writeback_lock);
251 kfree(candidate);
252 _leave(" = 0 [sub]");
253 return 0;
254
255append_to_previous_wb:
256 _debug("append into %lx-%lx", wb->first, wb->last);
257 wb->usage++;
258 wb->last++;
259 wb->to_last = to;
260 spin_unlock(&vnode->writeback_lock);
261 SetPagePrivate(page);
262 set_page_private(page, (unsigned long) wb);
263 kfree(candidate);
264 _leave(" = 0 [app]");
265 return 0;
266
267 /* the page is currently bound to another context, so if it's dirty we
268 * need to flush it before we can use the new context */
269flush_conflicting_wb:
270 _debug("flush conflict");
271 if (wb->state == AFS_WBACK_PENDING)
272 wb->state = AFS_WBACK_CONFLICTING;
273 spin_unlock(&vnode->writeback_lock);
274 if (PageDirty(page)) {
275 ret = afs_write_back_from_locked_page(wb, page);
276 if (ret < 0) {
277 afs_put_writeback(candidate);
278 _leave(" = %d", ret);
279 return ret;
280 }
281 }
282
283 /* the page holds a ref on the writeback record */
284 afs_put_writeback(wb);
285 set_page_private(page, 0);
286 ClearPagePrivate(page);
287 goto try_again;
288}
289
290/*
291 * finalise part of a write to a page
292 */
293int afs_commit_write(struct file *file, struct page *page,
294 unsigned offset, unsigned to)
295{
296 struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
297 loff_t i_size, maybe_i_size;
298
299 _enter("{%x:%u},{%lx},%u,%u",
300 vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
301
302 maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
303 maybe_i_size += to;
304
305 i_size = i_size_read(&vnode->vfs_inode);
306 if (maybe_i_size > i_size) {
307 spin_lock(&vnode->writeback_lock);
308 i_size = i_size_read(&vnode->vfs_inode);
309 if (maybe_i_size > i_size)
310 i_size_write(&vnode->vfs_inode, maybe_i_size);
311 spin_unlock(&vnode->writeback_lock);
312 }
313
314 set_page_dirty(page);
315
316 if (PageDirty(page))
317 _debug("dirtied");
318
319 return 0;
320}
321
322/*
323 * kill all the pages in the given range
324 */
325static void afs_kill_pages(struct afs_vnode *vnode, bool error,
326 pgoff_t first, pgoff_t last)
327{
328 struct pagevec pv;
329 unsigned count, loop;
330
331 _enter("{%x:%u},%lx-%lx",
332 vnode->fid.vid, vnode->fid.vnode, first, last);
333
334 pagevec_init(&pv, 0);
335
336 do {
337 _debug("kill %lx-%lx", first, last);
338
339 count = last - first + 1;
340 if (count > PAGEVEC_SIZE)
341 count = PAGEVEC_SIZE;
342 pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
343 first, count, pv.pages);
344 ASSERTCMP(pv.nr, ==, count);
345
346 for (loop = 0; loop < count; loop++) {
347 ClearPageUptodate(pv.pages[loop]);
348 if (error)
349 SetPageError(pv.pages[loop]);
350 end_page_writeback(pv.pages[loop]);
351 }
352
353 __pagevec_release(&pv);
354 } while (first < last);
355
356 _leave("");
357}
358
359/*
360 * synchronously write back the locked page and any subsequent non-locked dirty
361 * pages also covered by the same writeback record
362 */
363static int afs_write_back_from_locked_page(struct afs_writeback *wb,
364 struct page *primary_page)
365{
366 struct page *pages[8], *page;
367 unsigned long count;
368 unsigned n, offset, to;
369 pgoff_t start, first, last;
370 int loop, ret;
371
372 _enter(",%lx", primary_page->index);
373
374 count = 1;
375 if (!clear_page_dirty_for_io(primary_page))
376 BUG();
377 if (test_set_page_writeback(primary_page))
378 BUG();
379
380 /* find all consecutive lockable dirty pages, stopping when we find a
381 * page that is not immediately lockable, is not dirty or is missing,
382 * or we reach the end of the range */
383 start = primary_page->index;
384 if (start >= wb->last)
385 goto no_more;
386 start++;
387 do {
388 _debug("more %lx [%lx]", start, count);
389 n = wb->last - start + 1;
390 if (n > ARRAY_SIZE(pages))
391 n = ARRAY_SIZE(pages);
392 n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
393 start, n, pages);
394 _debug("fgpc %u", n);
395 if (n == 0)
396 goto no_more;
397 if (pages[0]->index != start) {
398			while (n > 0)
399				put_page(pages[--n]);
400 goto no_more;
401 }
402
403 for (loop = 0; loop < n; loop++) {
404 page = pages[loop];
405 if (page->index > wb->last)
406 break;
407 if (TestSetPageLocked(page))
408 break;
409 if (!PageDirty(page) ||
410 page_private(page) != (unsigned long) wb) {
411 unlock_page(page);
412 break;
413 }
414 if (!clear_page_dirty_for_io(page))
415 BUG();
416 if (test_set_page_writeback(page))
417 BUG();
418 unlock_page(page);
419 put_page(page);
420 }
421 count += loop;
422 if (loop < n) {
423 for (; loop < n; loop++)
424 put_page(pages[loop]);
425 goto no_more;
426 }
427
428 start += loop;
429 } while (start <= wb->last && count < 65536);
430
431no_more:
432 /* we now have a contiguous set of dirty pages, each with writeback set
433 * and the dirty mark cleared; the first page is locked and must remain
434 * so, all the rest are unlocked */
435 first = primary_page->index;
436 last = first + count - 1;
437
438 offset = (first == wb->first) ? wb->offset_first : 0;
439 to = (last == wb->last) ? wb->to_last : PAGE_SIZE;
440
441 _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
442
443 ret = afs_vnode_store_data(wb, first, last, offset, to);
444 if (ret < 0) {
445 switch (ret) {
446 case -EDQUOT:
447 case -ENOSPC:
448 set_bit(AS_ENOSPC,
449 &wb->vnode->vfs_inode.i_mapping->flags);
450 break;
451 case -EROFS:
452 case -EIO:
453 case -EREMOTEIO:
454 case -EFBIG:
455 case -ENOENT:
456 case -ENOMEDIUM:
457 case -ENXIO:
458 afs_kill_pages(wb->vnode, true, first, last);
459 set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
460 break;
461 case -EACCES:
462 case -EPERM:
463 case -ENOKEY:
464 case -EKEYEXPIRED:
465 case -EKEYREJECTED:
466 case -EKEYREVOKED:
467 afs_kill_pages(wb->vnode, false, first, last);
468 break;
469 default:
470 break;
471 }
472 } else {
473 ret = count;
474 }
475
476 _leave(" = %d", ret);
477 return ret;
478}
479
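
The error switch above sorts store failures into three classes; in summary (as read off the cases listed):

	/*
	 * -ENOSPC / -EDQUOT     -> AS_ENOSPC flagged on the mapping
	 * hard I/O class errors -> afs_kill_pages(..., true, ...) plus AS_EIO
	 * permission/key errors -> afs_kill_pages(..., false, ...), no AS_EIO
	 * success               -> returns the page count, which the writepage
	 *                          callers subtract from wbc->nr_to_write
	 */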
480/*
481 * write a page back to the server
482 * - the caller locked the page for us
483 */
484int afs_writepage(struct page *page, struct writeback_control *wbc)
485{
486 struct backing_dev_info *bdi = page->mapping->backing_dev_info;
487 struct afs_writeback *wb;
488 int ret;
489
490 _enter("{%lx},", page->index);
491
492 if (wbc->sync_mode != WB_SYNC_NONE)
493 wait_on_page_writeback(page);
494
495 if (PageWriteback(page) || !PageDirty(page)) {
496 unlock_page(page);
497 return 0;
498 }
499
500 wb = (struct afs_writeback *) page_private(page);
501 ASSERT(wb != NULL);
502
503 ret = afs_write_back_from_locked_page(wb, page);
504 unlock_page(page);
505 if (ret < 0) {
506 _leave(" = %d", ret);
507 return 0;
508 }
509
510 wbc->nr_to_write -= ret;
511 if (wbc->nonblocking && bdi_write_congested(bdi))
512 wbc->encountered_congestion = 1;
513
514 _leave(" = 0");
515 return 0;
516}
517
518/*
519 * write a region of pages back to the server
520 */
521static int afs_writepages_region(struct address_space *mapping,
522 struct writeback_control *wbc,
523 pgoff_t index, pgoff_t end, pgoff_t *_next)
524{
525 struct backing_dev_info *bdi = mapping->backing_dev_info;
526 struct afs_writeback *wb;
527 struct page *page;
528 int ret, n;
529
530 _enter(",,%lx,%lx,", index, end);
531
532 do {
533 n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
534 1, &page);
535 if (!n)
536 break;
537
538 _debug("wback %lx", page->index);
539
540 if (page->index > end) {
541 *_next = index;
542 page_cache_release(page);
543 _leave(" = 0 [%lx]", *_next);
544 return 0;
545 }
546
547 /* at this point we hold neither mapping->tree_lock nor lock on
548 * the page itself: the page may be truncated or invalidated
549 * (changing page->mapping to NULL), or even swizzled back from
550 * swapper_space to tmpfs file mapping
551 */
552 lock_page(page);
553
554 if (page->mapping != mapping) {
555 unlock_page(page);
556 page_cache_release(page);
557 continue;
558 }
559
560 if (wbc->sync_mode != WB_SYNC_NONE)
561 wait_on_page_writeback(page);
562
563 if (PageWriteback(page) || !PageDirty(page)) {
564 unlock_page(page);
565 continue;
566 }
567
568 wb = (struct afs_writeback *) page_private(page);
569 ASSERT(wb != NULL);
570
571 spin_lock(&wb->vnode->writeback_lock);
572 wb->state = AFS_WBACK_WRITING;
573 spin_unlock(&wb->vnode->writeback_lock);
574
575 ret = afs_write_back_from_locked_page(wb, page);
576 unlock_page(page);
577 page_cache_release(page);
578 if (ret < 0) {
579 _leave(" = %d", ret);
580 return ret;
581 }
582
583 wbc->nr_to_write -= ret;
584
585 if (wbc->nonblocking && bdi_write_congested(bdi)) {
586 wbc->encountered_congestion = 1;
587 break;
588 }
589
590 cond_resched();
591 } while (index < end && wbc->nr_to_write > 0);
592
593 *_next = index;
594 _leave(" = 0 [%lx]", *_next);
595 return 0;
596}
597
598/*
599 * write some of the pending data back to the server
600 */
601int afs_writepages(struct address_space *mapping,
602 struct writeback_control *wbc)
603{
604 struct backing_dev_info *bdi = mapping->backing_dev_info;
605 pgoff_t start, end, next;
606 int ret;
607
608 _enter("");
609
610 if (wbc->nonblocking && bdi_write_congested(bdi)) {
611 wbc->encountered_congestion = 1;
612 _leave(" = 0 [congest]");
613 return 0;
614 }
615
616 if (wbc->range_cyclic) {
617 start = mapping->writeback_index;
618 end = -1;
619 ret = afs_writepages_region(mapping, wbc, start, end, &next);
620 if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
621 !(wbc->nonblocking && wbc->encountered_congestion))
622 ret = afs_writepages_region(mapping, wbc, 0, start,
623 &next);
624 mapping->writeback_index = next;
625 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
626 end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
627 ret = afs_writepages_region(mapping, wbc, 0, end, &next);
628 if (wbc->nr_to_write > 0)
629 mapping->writeback_index = next;
630 } else {
631 start = wbc->range_start >> PAGE_CACHE_SHIFT;
632 end = wbc->range_end >> PAGE_CACHE_SHIFT;
633 ret = afs_writepages_region(mapping, wbc, start, end, &next);
634 }
635
636 _leave(" = %d", ret);
637 return ret;
638}
639
640/*
641 * write an inode back
642 */
643int afs_write_inode(struct inode *inode, int sync)
644{
645 struct afs_vnode *vnode = AFS_FS_I(inode);
646 int ret;
647
648 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
649
650 ret = 0;
651 if (sync) {
652 ret = filemap_fdatawait(inode->i_mapping);
653 if (ret < 0)
654 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
655 }
656
657 _leave(" = %d", ret);
658 return ret;
659}
660
661/*
662 * completion of write to server
663 */
664void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
665{
666 struct afs_writeback *wb = call->wb;
667 struct pagevec pv;
668 unsigned count, loop;
669 pgoff_t first = call->first, last = call->last;
670 bool free_wb;
671
672 _enter("{%x:%u},{%lx-%lx}",
673 vnode->fid.vid, vnode->fid.vnode, first, last);
674
675 ASSERT(wb != NULL);
676
677 pagevec_init(&pv, 0);
678
679 do {
680 _debug("attach %lx-%lx", first, last);
681
682 count = last - first + 1;
683 if (count > PAGEVEC_SIZE)
684 count = PAGEVEC_SIZE;
685 pv.nr = find_get_pages_contig(call->mapping, first, count,
686 pv.pages);
687 ASSERTCMP(pv.nr, ==, count);
688
689 spin_lock(&vnode->writeback_lock);
690 for (loop = 0; loop < count; loop++) {
691 struct page *page = pv.pages[loop];
692 end_page_writeback(page);
693 if (page_private(page) == (unsigned long) wb) {
694 set_page_private(page, 0);
695 ClearPagePrivate(page);
696 wb->usage--;
697 }
698 }
699 free_wb = false;
700 if (wb->usage == 0) {
701 afs_unlink_writeback(wb);
702 free_wb = true;
703 }
704 spin_unlock(&vnode->writeback_lock);
705 first += count;
706 if (free_wb) {
707 afs_free_writeback(wb);
708 wb = NULL;
709 }
710
711 __pagevec_release(&pv);
712	} while (first <= last);
713
714 _leave("");
715}
716
717/*
718 * write to an AFS file
719 */
720ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
721 unsigned long nr_segs, loff_t pos)
722{
723 struct dentry *dentry = iocb->ki_filp->f_path.dentry;
724 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
725 ssize_t result;
726 size_t count = iov_length(iov, nr_segs);
727 int ret;
728
729	_enter("{%x:%u},{%zu},%lu,",
730 vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
731
732 if (IS_SWAPFILE(&vnode->vfs_inode)) {
733 printk(KERN_INFO
734 "AFS: Attempt to write to active swap file!\n");
735 return -EBUSY;
736 }
737
738 if (!count)
739 return 0;
740
741 result = generic_file_aio_write(iocb, iov, nr_segs, pos);
742 if (IS_ERR_VALUE(result)) {
743 _leave(" = %zd", result);
744 return result;
745 }
746
747 /* return error values for O_SYNC and IS_SYNC() */
748 if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) {
749 ret = afs_fsync(iocb->ki_filp, dentry, 1);
750 if (ret < 0)
751 result = ret;
752 }
753
754 _leave(" = %zd", result);
755 return result;
756}
757
758/*
759 * flush the vnode to the fileserver
760 */
761int afs_writeback_all(struct afs_vnode *vnode)
762{
763 struct address_space *mapping = vnode->vfs_inode.i_mapping;
764 struct writeback_control wbc = {
765 .bdi = mapping->backing_dev_info,
766 .sync_mode = WB_SYNC_ALL,
767 .nr_to_write = LONG_MAX,
768 .for_writepages = 1,
769 .range_cyclic = 1,
770 };
771 int ret;
772
773 _enter("");
774
775 ret = mapping->a_ops->writepages(mapping, &wbc);
776 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
777
778 _leave(" = %d", ret);
779 return ret;
780}
781
782/*
783 * flush any dirty pages for this process, and check for write errors.
784 * - the return status from this call provides a reliable indication of
785 * whether any write errors occurred for this process.
786 */
787int afs_fsync(struct file *file, struct dentry *dentry, int datasync)
788{
789 struct afs_writeback *wb, *xwb;
790 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
791 int ret;
792
793 _enter("{%x:%u},{n=%s},%d",
794 vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
795 datasync);
796
797 /* use a writeback record as a marker in the queue - when this reaches
798 * the front of the queue, all the outstanding writes are either
799 * completed or rejected */
800 wb = kzalloc(sizeof(*wb), GFP_KERNEL);
801 if (!wb)
802 return -ENOMEM;
803 wb->vnode = vnode;
804 wb->first = 0;
805 wb->last = -1;
806 wb->offset_first = 0;
807 wb->to_last = PAGE_SIZE;
808 wb->usage = 1;
809 wb->state = AFS_WBACK_SYNCING;
810 init_waitqueue_head(&wb->waitq);
811
812 spin_lock(&vnode->writeback_lock);
813 list_for_each_entry(xwb, &vnode->writebacks, link) {
814 if (xwb->state == AFS_WBACK_PENDING)
815 xwb->state = AFS_WBACK_CONFLICTING;
816 }
817 list_add_tail(&wb->link, &vnode->writebacks);
818 spin_unlock(&vnode->writeback_lock);
819
820 /* push all the outstanding writebacks to the server */
821 ret = afs_writeback_all(vnode);
822 if (ret < 0) {
823 afs_put_writeback(wb);
824 _leave(" = %d [wb]", ret);
825 return ret;
826 }
827
828 /* wait for the preceding writes to actually complete */
829 ret = wait_event_interruptible(wb->waitq,
830 wb->state == AFS_WBACK_COMPLETE ||
831 vnode->writebacks.next == &wb->link);
832 afs_put_writeback(wb);
833 _leave(" = %d", ret);
834 return ret;
835}
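
For context on the wait above: the marker record completes either when afs_unlink_writeback() finds it at the head of vnode->writebacks, or when the queue drains down to the marker itself (vnode->writebacks.next == &wb->link), hence the two-part condition. The wake-up side is in afs_unlink_writeback() near the top of this file:

	if (front->state == AFS_WBACK_SYNCING) {
		front->state = AFS_WBACK_COMPLETE;
		wake_up(&front->waitq);
	}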
diff --git a/fs/aio.c b/fs/aio.c
index b97ab8028b6d..ac1c1587aa02 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -346,10 +346,9 @@ void fastcall exit_aio(struct mm_struct *mm)
346 346
347 wait_for_all_aios(ctx); 347 wait_for_all_aios(ctx);
348 /* 348 /*
349 * this is an overkill, but ensures we don't leave 349 * Ensure we don't leave the ctx on the aio_wq
350 * the ctx on the aio_wq
351 */ 350 */
352 flush_workqueue(aio_wq); 351 cancel_work_sync(&ctx->wq.work);
353 352
354 if (1 != atomic_read(&ctx->users)) 353 if (1 != atomic_read(&ctx->users))
355 printk(KERN_DEBUG 354 printk(KERN_DEBUG
@@ -372,7 +371,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
372 BUG_ON(ctx->reqs_active); 371 BUG_ON(ctx->reqs_active);
373 372
374 cancel_delayed_work(&ctx->wq); 373 cancel_delayed_work(&ctx->wq);
375 flush_workqueue(aio_wq); 374 cancel_work_sync(&ctx->wq.work);
376 aio_free_ring(ctx); 375 aio_free_ring(ctx);
377 mmdrop(ctx->mm); 376 mmdrop(ctx->mm);
378 ctx->mm = NULL; 377 ctx->mm = NULL;
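
The replacement idiom above targets just the one work item instead of flushing the whole aio_wq: cancel_delayed_work() stops a timer-armed submission, and cancel_work_sync() removes a queued item and waits out one that is already running. In general form (a sketch of the generic pattern, not code from this patch):

	struct delayed_work *dwork = &ctx->wq;

	cancel_delayed_work(dwork);	/* disarm the timer, if pending */
	cancel_work_sync(&dwork->work);	/* dequeue and/or wait for this item only */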
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 18657f001b43..72d0b412c376 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -675,19 +675,8 @@ static ssize_t
675bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) 675bm_status_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
676{ 676{
677 char *s = enabled ? "enabled" : "disabled"; 677 char *s = enabled ? "enabled" : "disabled";
678 int len = strlen(s);
679 loff_t pos = *ppos;
680 678
681 if (pos < 0) 679 return simple_read_from_buffer(buf, nbytes, ppos, s, strlen(s));
682 return -EINVAL;
683 if (pos >= len)
684 return 0;
685 if (len < pos + nbytes)
686 nbytes = len - pos;
687 if (copy_to_user(buf, s + pos, nbytes))
688 return -EFAULT;
689 *ppos = pos + nbytes;
690 return nbytes;
691} 680}
692 681
693static ssize_t bm_status_write(struct file * file, const char __user * buffer, 682static ssize_t bm_status_write(struct file * file, const char __user * buffer,
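
For reference, the helper used above has this signature (from fs/libfs.c) and performs the bounds checks, copy_to_user() and *ppos update that the removed open-coded block did by hand:

	ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
					const void *from, size_t available);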
diff --git a/fs/buffer.c b/fs/buffer.c
index eb820b82a636..aecd057cd0e0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
1846 if (block_start >= to) 1846 if (block_start >= to)
1847 break; 1847 break;
1848 if (buffer_new(bh)) { 1848 if (buffer_new(bh)) {
1849 void *kaddr;
1850
1851 clear_buffer_new(bh); 1849 clear_buffer_new(bh);
1852 kaddr = kmap_atomic(page, KM_USER0); 1850 zero_user_page(page, block_start, bh->b_size, KM_USER0);
1853 memset(kaddr+block_start, 0, bh->b_size);
1854 flush_dcache_page(page);
1855 kunmap_atomic(kaddr, KM_USER0);
1856 set_buffer_uptodate(bh); 1851 set_buffer_uptodate(bh);
1857 mark_buffer_dirty(bh); 1852 mark_buffer_dirty(bh);
1858 } 1853 }
@@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
1940 SetPageError(page); 1935 SetPageError(page);
1941 } 1936 }
1942 if (!buffer_mapped(bh)) { 1937 if (!buffer_mapped(bh)) {
1943 void *kaddr = kmap_atomic(page, KM_USER0); 1938 zero_user_page(page, i * blocksize, blocksize,
1944 memset(kaddr + i * blocksize, 0, blocksize); 1939 KM_USER0);
1945 flush_dcache_page(page);
1946 kunmap_atomic(kaddr, KM_USER0);
1947 if (!err) 1940 if (!err)
1948 set_buffer_uptodate(bh); 1941 set_buffer_uptodate(bh);
1949 continue; 1942 continue;
@@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
2086 long status; 2079 long status;
2087 unsigned zerofrom; 2080 unsigned zerofrom;
2088 unsigned blocksize = 1 << inode->i_blkbits; 2081 unsigned blocksize = 1 << inode->i_blkbits;
2089 void *kaddr;
2090 2082
2091 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { 2083 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2092 status = -ENOMEM; 2084 status = -ENOMEM;
@@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
2108 PAGE_CACHE_SIZE, get_block); 2100 PAGE_CACHE_SIZE, get_block);
 		if (status)
 			goto out_unmap;
-		kaddr = kmap_atomic(new_page, KM_USER0);
-		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-		flush_dcache_page(new_page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
+			       KM_USER0);
 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
 		unlock_page(new_page);
 		page_cache_release(new_page);
@@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	if (status)
 		goto out1;
 	if (zerofrom < offset) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr+zerofrom, 0, offset-zerofrom);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
 		__block_commit_write(inode, page, zerofrom, offset);
 	}
 	return 0;
@@ -2340,10 +2327,7 @@ failed:
 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
 	 * so we'll later zero out any blocks which _were_ allocated.
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
 	SetPageUptodate(page);
 	set_page_dirty(page);
 	return ret;
@@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	void *kaddr;
 	int ret;
 
 	/* Is the page fully inside i_size? */
@@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned to;
 	struct page *page;
 	const struct address_space_operations *a_ops = mapping->a_ops;
-	char *kaddr;
 	int ret = 0;
 
 	if ((offset & (blocksize - 1)) == 0)
@@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
 	to = (offset + blocksize) & ~(blocksize - 1);
 	ret = a_ops->prepare_write(NULL, page, offset, to);
 	if (ret == 0) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+			       KM_USER0);
 		/*
 		 * It would be more correct to call aops->commit_write()
 		 * here, but this is more efficient.
@@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct page *page;
 	struct buffer_head *bh;
-	void *kaddr;
 	int err;
 
 	blocksize = 1 << inode->i_blkbits;
@@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping,
 		goto unlock;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
-
+	zero_user_page(page, offset, length, KM_USER0);
 	mark_buffer_dirty(bh);
 	err = 0;
 
@@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	void *kaddr;
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
@@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 	return __block_write_full_page(inode, page, get_block, wbc);
 }
 
@@ -2978,7 +2946,7 @@ static void buffer_exit_cpu(int cpu)
 static int buffer_cpu_notify(struct notifier_block *self,
 			      unsigned long action, void *hcpu)
 {
-	if (action == CPU_DEAD)
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
 		buffer_exit_cpu((unsigned long)hcpu);
 	return NOTIFY_OK;
 }
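
Every fs/buffer.c hunk above collapses the same four-step open-coded sequence into one call to the new zero_user_page() helper. As a sketch of what that helper presumably looks like (a static inline added to include/linux/highmem.h by this series; treat the exact form as an assumption, not a quote):

	static inline void zero_user_page(struct page *page, unsigned int offset,
					  unsigned int size, enum km_type km_type)
	{
		/* map the page into a temporary atomic kmap slot */
		void *kaddr = kmap_atomic(page, km_type);

		/* zero the requested byte range */
		memset(kaddr + offset, 0, size);
		/* keep the data cache coherent on architectures that need it */
		flush_dcache_page(page);
		kunmap_atomic(kaddr, km_type);
	}

Callers shrink from four lines to one and can no longer forget the flush_dcache_page() step; the same substitution repeats in the direct-io, ext3, mpage and reiserfs hunks below.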
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index d98be5e01328..3527c7c6def8 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -77,36 +77,6 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
 	return ret;
 }
 
-
-/**
- *	flush_read_buffer - push buffer to userspace.
- *	@buffer:	data buffer for file.
- *	@userbuf:	user-passed buffer.
- *	@count:		number of bytes requested.
- *	@ppos:		file position.
- *
- *	Copy the buffer we filled in fill_read_buffer() to userspace.
- *	This is done at the reader's leisure, copying and advancing
- *	the amount they specify each time.
- *	This may be called continuously until the buffer is empty.
- */
-static int flush_read_buffer(struct configfs_buffer * buffer, char __user * buf,
-			     size_t count, loff_t * ppos)
-{
-	int error;
-
-	if (*ppos > buffer->count)
-		return 0;
-
-	if (count > (buffer->count - *ppos))
-		count = buffer->count - *ppos;
-
-	error = copy_to_user(buf,buffer->page + *ppos,count);
-	if (!error)
-		*ppos += count;
-	return error ? -EFAULT : count;
-}
-
 /**
  * configfs_read_file - read an attribute.
  * @file:	file pointer.
@@ -139,7 +109,8 @@ configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *pp
 	}
 	pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
 		 __FUNCTION__, count, *ppos, buffer->page);
-	retval = flush_read_buffer(buffer,buf,count,ppos);
+	retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
+					 buffer->count);
 out:
 	up(&buffer->sem);
 	return retval;
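
The configfs hunks replace a private copy-out loop with the generic simple_read_from_buffer() from fs/libfs.c. Assuming the generic helper's signature of this era, a minimal usage sketch for a ->read() method whose backing buffer holds some valid bytes (names here are illustrative, not from configfs):

	static ssize_t my_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
	{
		/* clamps count to what remains past *ppos, copies to
		 * userspace, advances *ppos, and returns the number of
		 * bytes copied or -EFAULT */
		return simple_read_from_buffer(buf, count, ppos,
					       my_kernel_buf, my_kernel_len);
	}

This matches the semantics of the deleted flush_read_buffer() while sharing one audited implementation.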
diff --git a/fs/direct-io.c b/fs/direct-io.c
index d9d0833444f5..8593f3dfd299 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -439,7 +439,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
  * Wait on and process all in-flight BIOs.  This must only be called once
  * all bios have been issued so that the refcount can only decrease.
  * This just waits for all bios to make it through dio_bio_complete.  IO
- * errors are propogated through dio->io_error and should be propogated via
+ * errors are propagated through dio->io_error and should be propagated via
  * dio_complete().
  */
 static void dio_await_completion(struct dio *dio)
@@ -867,7 +867,6 @@ static int do_direct_IO(struct dio *dio)
 do_holes:
 		/* Handle holes */
 		if (!buffer_mapped(map_bh)) {
-			char *kaddr;
 			loff_t i_size_aligned;
 
 			/* AKPM: eargh, -ENOTBLK is a hack */
@@ -888,11 +887,8 @@ do_holes:
 				page_cache_release(page);
 				goto out;
 			}
-			kaddr = kmap_atomic(page, KM_USER0);
-			memset(kaddr + (block_in_page << blkbits),
-					0, 1 << blkbits);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_page(page, block_in_page << blkbits,
+					1 << blkbits, KM_USER0);
 			dio->block_in_file++;
 			block_in_page++;
 			goto next_block;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index e1bb03171986..a6cb6171c3af 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1767,7 +1767,6 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 	struct inode *inode = mapping->host;
 	struct buffer_head *bh;
 	int err = 0;
-	void *kaddr;
 
 	blocksize = inode->i_sb->s_blocksize;
 	length = blocksize - (offset & (blocksize - 1));
@@ -1779,10 +1778,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 	 */
 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
 	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, length);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, length, KM_USER0);
 		set_page_dirty(page);
 		goto unlock;
 	}
@@ -1835,11 +1831,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 		goto unlock;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
-
+	zero_user_page(page, offset, length, KM_USER0);
 	BUFFER_TRACE(bh, "zeroed end of block");
 
 	err = 0;
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 0208cc7ac5d0..47552d4a6324 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/checkpoint.c
+ * linux/fs/jbd/checkpoint.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 11563fe2a52b..2a5f4b833e35 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/recovery.c
+ * linux/fs/jbd/recovery.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index a68cbb605022..824e3b7d4ec1 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/revoke.c
+ * linux/fs/jbd/revoke.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
  *
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index f9822fc07851..772b6531a2a2 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/transaction.c
+ * linux/fs/jbd/transaction.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 68039fa9a566..3fccde7ba008 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/checkpoint.c
+ * linux/fs/jbd2/checkpoint.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 9f10acafaf70..395c92a04ac9 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/recovery.c
+ * linux/fs/jbd2/recovery.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 1e864dcc49ea..9246e763da78 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/revoke.c
+ * linux/fs/jbd2/revoke.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
  *
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e347d8c078bc..7946ff43fc40 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1,5 +1,5 @@
 /*
- * linux/fs/transaction.c
+ * linux/fs/jbd2/transaction.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h
deleted file mode 100644
index fa3dac19a109..000000000000
--- a/fs/jffs2/histo_mips.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define BIT_DIVIDER_MIPS 1043
-static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 6aff38930b50..4884d5edfe65 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -219,9 +219,9 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 			     struct jffs2_tmp_dnode_info *tn)
 {
 	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
-	struct jffs2_tmp_dnode_info *insert_point = NULL, *this;
+	struct jffs2_tmp_dnode_info *this;
 
-	dbg_readinode("insert fragment %#04x-%#04x, ver %u\n", tn->fn->ofs, fn_end, tn->version);
+	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
 
 	/* If a node has zero dsize, we only have to keep if it if it might be the
 	   node with highest version -- i.e. the one which will end up as f->metadata.
@@ -240,23 +240,16 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 
 	/* Find the earliest node which _may_ be relevant to this one */
 	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
-	if (!this) {
-		/* First addition to empty tree. $DEITY how I love the easy cases */
-		rb_link_node(&tn->rb, NULL, &rii->tn_root.rb_node);
-		rb_insert_color(&tn->rb, &rii->tn_root);
-		dbg_readinode("keep new frag\n");
-		return 0;
-	}
-
-	/* If we add a new node it'll be somewhere under here. */
-	insert_point = this;
-
-	/* If the node is coincident with another at a lower address,
-	   back up until the other node is found. It may be relevant */
-	while (tn->overlapped)
-		tn = tn_prev(tn);
+	if (this) {
+		/* If the node is coincident with another at a lower address,
+		   back up until the other node is found. It may be relevant */
+		while (this->overlapped)
+			this = tn_prev(this);
 
-	dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
+		/* First node should never be marked overlapped */
+		BUG_ON(!this);
+		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
+	}
 
 	while (this) {
 		if (this->fn->ofs > fn_end)
@@ -274,11 +267,10 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 				return 0;
 			} else {
 				/* Who cares if the new one is good; keep it for now anyway. */
+				dbg_readinode("Like new node. Throw away old\n");
 				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
-				/* Same overlapping from in front and behind */
-				tn->overlapped = this->overlapped;
 				jffs2_kill_tn(c, this);
-				dbg_readinode("Like new node. Throw away old\n");
+				/* Same overlapping from in front and behind */
 				return 0;
 			}
 		}
@@ -291,13 +283,8 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 				jffs2_kill_tn(c, tn);
 				return 0;
 			}
-			/* ... and is good. Kill 'this'... */
-			rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
-			tn->overlapped = this->overlapped;
-			jffs2_kill_tn(c, this);
-			/* ... and any subsequent nodes which are also overlapped */
-			this = tn_next(tn);
-			while (this && this->fn->ofs + this->fn->size < fn_end) {
+			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
+			while (this && this->fn->ofs + this->fn->size <= fn_end) {
 				struct jffs2_tmp_dnode_info *next = tn_next(this);
 				if (this->version < tn->version) {
 					tn_erase(this, &rii->tn_root);
@@ -308,8 +295,8 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 				}
 				this = next;
 			}
-			dbg_readinode("Done inserting new\n");
-			return 0;
+			dbg_readinode("Done killing overlapped nodes\n");
+			continue;
 		}
 		if (this->version > tn->version &&
 		    this->fn->ofs <= tn->fn->ofs &&
@@ -321,29 +308,21 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 				return 0;
 			}
 			/* ... but 'this' was bad. Replace it... */
-			rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
 			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
+			tn_erase(this, &rii->tn_root);
 			jffs2_kill_tn(c, this);
-			return 0;
+			break;
 		}
-		/* We want to be inserted under the last node which is
-		   either at a lower offset _or_ has a smaller range */
-		if (this->fn->ofs < tn->fn->ofs ||
-		    (this->fn->ofs == tn->fn->ofs &&
-		     this->fn->size <= tn->fn->size))
-			insert_point = this;
 
 		this = tn_next(this);
 	}
-	dbg_readinode("insert_point %p, ver %d, 0x%x-0x%x, ov %d\n",
-		      insert_point, insert_point->version, insert_point->fn->ofs,
-		      insert_point->fn->ofs+insert_point->fn->size,
-		      insert_point->overlapped);
+
 	/* We neither completely obsoleted nor were completely
-	   obsoleted by an earlier node. Insert under insert_point */
+	   obsoleted by an earlier node. Insert into the tree */
 	{
-		struct rb_node *parent = &insert_point->rb;
-		struct rb_node **link = &parent;
+		struct rb_node *parent;
+		struct rb_node **link = &rii->tn_root.rb_node;
+		struct jffs2_tmp_dnode_info *insert_point = NULL;
 
 		while (*link) {
 			parent = *link;
@@ -359,6 +338,7 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 		rb_link_node(&tn->rb, &insert_point->rb, link);
 		rb_insert_color(&tn->rb, &rii->tn_root);
 	}
+
 	/* If there's anything behind that overlaps us, note it */
 	this = tn_prev(tn);
 	if (this) {
@@ -457,7 +437,7 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
 	this = tn_last(&rii->tn_root);
 	while (this) {
 		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
-				this->fn->ofs+this->fn->size, this->overlapped);
+			      this->fn->ofs+this->fn->size, this->overlapped);
 		this = tn_prev(this);
 	}
 #endif
@@ -483,7 +463,7 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
 		vers_next = tn_prev(this);
 		eat_last(&ver_root, &this->rb);
 		if (check_tn_node(c, this)) {
-			dbg_readinode("node ver %x, 0x%x-0x%x failed CRC\n",
+			dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
 				      this->version, this->fn->ofs,
 				      this->fn->ofs+this->fn->size);
 			jffs2_kill_tn(c, this);
@@ -496,7 +476,7 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
 			high_ver = this->version;
 			rii->latest_ref = this->fn->raw;
 		}
-		dbg_readinode("Add %p (v %x, 0x%x-0x%x, ov %d) to fragtree\n",
+		dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
 			      this, this->version, this->fn->ofs,
 			      this->fn->ofs+this->fn->size, this->overlapped);
 
@@ -850,7 +830,7 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		return ret;
 	}
 #ifdef JFFS2_DBG_READINODE_MESSAGES
-	dbg_readinode("After adding ver %d:\n", tn->version);
+	dbg_readinode("After adding ver %d:\n", je32_to_cpu(rd->version));
 	tn = tn_first(&rii->tn_root);
 	while (tn) {
 		dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index c556e85a565c..91d1d0f1c66c 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -637,7 +637,10 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 
 	memset(c->wbuf,0xff,c->wbuf_pagesize);
 	/* adjust write buffer offset, else we get a non contiguous write bug */
-	c->wbuf_ofs += c->wbuf_pagesize;
+	if (SECTOR_ADDR(c->wbuf_ofs) == SECTOR_ADDR(c->wbuf_ofs+c->wbuf_pagesize))
+		c->wbuf_ofs += c->wbuf_pagesize;
+	else
+		c->wbuf_ofs = 0xffffffff;
 	c->wbuf_len = 0;
 	return 0;
 }
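
The wbuf change only advances c->wbuf_ofs when the padded page stays within the current erase block; crossing a block boundary instead poisons the offset so the next write must pick a fresh block. Roughly, with SECTOR_ADDR() rounding an offset down to the start of its erase block (the macro below is a hypothetical illustration, not the jffs2 definition):

	/* hypothetical: round a flash offset down to its erase-block start */
	#define SECTOR_ADDR(c, x)	(((x) / (c)->sector_size) * (c)->sector_size)

	if (SECTOR_ADDR(c, ofs) == SECTOR_ADDR(c, ofs + pagesize))
		ofs += pagesize;	/* same block: contiguous write is safe */
	else
		ofs = 0xffffffff;	/* boundary crossed: force a re-target */

so the "non contiguous write bug" noted in the comment cannot recur across erase-block boundaries.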
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 82b0544bd76d..f3b1ebb22280 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1507,7 +1507,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
 	if (l2nb < budmin) {
 
 		/* search the lower level dmap control pages to get
-		 * the starting block number of the the dmap that
+		 * the starting block number of the dmap that
 		 * contains or starts off the free space.
 		 */
 		if ((rc =
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index c465607be991..c6530227cda6 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -386,7 +386,7 @@ int diRead(struct inode *ip)
 		return -EIO;
 	}
 
-	/* locate the the disk inode requested */
+	/* locate the disk inode requested */
 	dp = (struct dinode *) mp->data;
 	dp += rel_inode;
 
@@ -1407,7 +1407,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
 	inum = pip->i_ino + 1;
 	ino = inum & (INOSPERIAG - 1);
 
-	/* back off the the hint if it is outside of the iag */
+	/* back off the hint if it is outside of the iag */
 	if (ino == 0)
 		inum = pip->i_ino;
 
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 6a3f00dc8c83..44a2f33cb98d 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1960,7 +1960,7 @@ static void lbmfree(struct lbuf * bp)
 /*
  * NAME:	lbmRedrive
  *
- * FUNCTION:	add a log buffer to the the log redrive list
+ * FUNCTION:	add a log buffer to the log redrive list
  *
  * PARAMETER:
  *	bp	- log buffer
diff --git a/fs/libfs.c b/fs/libfs.c
index 1247ee90253a..5294de1f40c4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -159,7 +159,10 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
 				continue;
 
 			spin_unlock(&dcache_lock);
-			if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, next->d_inode->i_ino, dt_type(next->d_inode)) < 0)
+			if (filldir(dirent, next->d_name.name,
+				    next->d_name.len, filp->f_pos,
+				    next->d_inode->i_ino,
+				    dt_type(next->d_inode)) < 0)
 				return 0;
 			spin_lock(&dcache_lock);
 			/* next is still alive */
diff --git a/fs/mpage.c b/fs/mpage.c
index fa2441f57b41..0fb914fc2ee0 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -284,11 +284,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	}
 
 	if (first_hole != blocks_per_page) {
-		char *kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + (first_hole << blkbits), 0,
-				PAGE_CACHE_SIZE - (first_hole << blkbits));
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, first_hole << blkbits,
+				PAGE_CACHE_SIZE - (first_hole << blkbits),
+				KM_USER0);
 		if (first_hole == 0) {
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -576,14 +574,11 @@ page_is_mapped:
 		 * written out to the file."
 		 */
 		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
-		char *kaddr;
 
 		if (page->index > end_index || !offset)
 			goto confused;
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+			       KM_USER0);
 	}
 
 	/*
diff --git a/fs/namei.c b/fs/namei.c
index 856b2f5da51d..b3780e3fc88e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1152,14 +1152,12 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 
 		fput_light(file, fput_needed);
 	}
-	current->total_link_count = 0;
-	retval = link_path_walk(name, nd);
+
+	retval = path_walk(name, nd);
 out:
-	if (likely(retval == 0)) {
-		if (unlikely(!audit_dummy_context() && nd && nd->dentry &&
+	if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
 			nd->dentry->d_inode))
 		audit_inode(name, nd->dentry->d_inode);
-	}
 out_fail:
 	return retval;
 
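
The do_path_lookup() hunk folds the manual reset of total_link_count into a call to path_walk(), and flattens the audit check. For reference, path_walk() in the same file is presumably just the thin wrapper it has long been (a sketch, not a quote):

	static int fastcall path_walk(const char *name, struct nameidata *nd)
	{
		/* reset the symlink-recursion counter before walking */
		current->total_link_count = 0;
		return link_path_walk(name, nd);
	}

so the replacement is behaviour-preserving while removing one line of duplicated bookkeeping from the caller.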
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 625d8e5fb39d..3df428816559 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -38,7 +38,6 @@
38#include "delegation.h" 38#include "delegation.h"
39#include "iostat.h" 39#include "iostat.h"
40 40
41#define NFS_PARANOIA 1
42/* #define NFS_DEBUG_VERBOSE 1 */ 41/* #define NFS_DEBUG_VERBOSE 1 */
43 42
44static int nfs_opendir(struct inode *, struct file *); 43static int nfs_opendir(struct inode *, struct file *);
@@ -650,12 +649,15 @@ int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
650 */ 649 */
651static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) 650static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
652{ 651{
652 unsigned long verf;
653
653 if (IS_ROOT(dentry)) 654 if (IS_ROOT(dentry))
654 return 1; 655 return 1;
655 if ((NFS_I(dir)->cache_validity & NFS_INO_INVALID_ATTR) != 0 656 verf = (unsigned long)dentry->d_fsdata;
656 || nfs_attribute_timeout(dir)) 657 if (nfs_caches_unstable(dir)
658 || verf != NFS_I(dir)->cache_change_attribute)
657 return 0; 659 return 0;
658 return nfs_verify_change_attribute(dir, (unsigned long)dentry->d_fsdata); 660 return 1;
659} 661}
660 662
661static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) 663static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
@@ -665,8 +667,7 @@ static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
665 667
666static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf) 668static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf)
667{ 669{
668 if (time_after(verf, (unsigned long)dentry->d_fsdata)) 670 nfs_set_verifier(dentry, verf);
669 nfs_set_verifier(dentry, verf);
670} 671}
671 672
672/* 673/*
@@ -765,6 +766,10 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
765 nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); 766 nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
766 inode = dentry->d_inode; 767 inode = dentry->d_inode;
767 768
769 /* Revalidate parent directory attribute cache */
770 if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
771 goto out_zap_parent;
772
768 if (!inode) { 773 if (!inode) {
769 if (nfs_neg_need_reval(dir, dentry, nd)) 774 if (nfs_neg_need_reval(dir, dentry, nd))
770 goto out_bad; 775 goto out_bad;
@@ -778,10 +783,6 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
778 goto out_bad; 783 goto out_bad;
779 } 784 }
780 785
781 /* Revalidate parent directory attribute cache */
782 if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
783 goto out_zap_parent;
784
785 /* Force a full look up iff the parent directory has changed */ 786 /* Force a full look up iff the parent directory has changed */
786 if (nfs_check_verifier(dir, dentry)) { 787 if (nfs_check_verifier(dir, dentry)) {
787 if (nfs_lookup_verify_inode(inode, nd)) 788 if (nfs_lookup_verify_inode(inode, nd))
@@ -1360,11 +1361,6 @@ static int nfs_sillyrename(struct inode *dir, struct dentry *dentry)
1360 atomic_read(&dentry->d_count)); 1361 atomic_read(&dentry->d_count));
1361 nfs_inc_stats(dir, NFSIOS_SILLYRENAME); 1362 nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
1362 1363
1363#ifdef NFS_PARANOIA
1364if (!dentry->d_inode)
1365printk("NFS: silly-renaming %s/%s, negative dentry??\n",
1366dentry->d_parent->d_name.name, dentry->d_name.name);
1367#endif
1368 /* 1364 /*
1369 * We don't allow a dentry to be silly-renamed twice. 1365 * We don't allow a dentry to be silly-renamed twice.
1370 */ 1366 */
@@ -1681,16 +1677,9 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1681 new_inode = NULL; 1677 new_inode = NULL;
1682 /* instantiate the replacement target */ 1678 /* instantiate the replacement target */
1683 d_instantiate(new_dentry, NULL); 1679 d_instantiate(new_dentry, NULL);
1684 } else if (atomic_read(&new_dentry->d_count) > 1) { 1680 } else if (atomic_read(&new_dentry->d_count) > 1)
1685 /* dentry still busy? */ 1681 /* dentry still busy? */
1686#ifdef NFS_PARANOIA
1687 printk("nfs_rename: target %s/%s busy, d_count=%d\n",
1688 new_dentry->d_parent->d_name.name,
1689 new_dentry->d_name.name,
1690 atomic_read(&new_dentry->d_count));
1691#endif
1692 goto out; 1682 goto out;
1693 }
1694 } else 1683 } else
1695 drop_nlink(new_inode); 1684 drop_nlink(new_inode);
1696 1685
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 234778576f09..d1cbf0a0fbb2 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -41,7 +41,6 @@
41#include "internal.h" 41#include "internal.h"
42 42
43#define NFSDBG_FACILITY NFSDBG_CLIENT 43#define NFSDBG_FACILITY NFSDBG_CLIENT
44#define NFS_PARANOIA 1
45 44
46/* 45/*
47 * get an NFS2/NFS3 root dentry from the root filehandle 46 * get an NFS2/NFS3 root dentry from the root filehandle
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 9d4a6b2d1996..d11eb055265c 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -272,7 +272,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
272 set_current_state(TASK_UNINTERRUPTIBLE); 272 set_current_state(TASK_UNINTERRUPTIBLE);
273 mutex_unlock(&idmap->idmap_im_lock); 273 mutex_unlock(&idmap->idmap_im_lock);
274 schedule(); 274 schedule();
275 current->state = TASK_RUNNING; 275 __set_current_state(TASK_RUNNING);
276 remove_wait_queue(&idmap->idmap_wq, &wq); 276 remove_wait_queue(&idmap->idmap_wq, &wq);
277 mutex_lock(&idmap->idmap_im_lock); 277 mutex_lock(&idmap->idmap_im_lock);
278 278
@@ -333,7 +333,7 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	mutex_unlock(&idmap->idmap_im_lock);
 	schedule();
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&idmap->idmap_wq, &wq);
 	mutex_lock(&idmap->idmap_im_lock);
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1e9a915d1fea..2a3fd9573207 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -48,7 +48,6 @@
48#include "internal.h" 48#include "internal.h"
49 49
50#define NFSDBG_FACILITY NFSDBG_VFS 50#define NFSDBG_FACILITY NFSDBG_VFS
51#define NFS_PARANOIA 1
52 51
53static void nfs_invalidate_inode(struct inode *); 52static void nfs_invalidate_inode(struct inode *);
54static int nfs_update_inode(struct inode *, struct nfs_fattr *); 53static int nfs_update_inode(struct inode *, struct nfs_fattr *);
@@ -1075,10 +1074,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1075 /* 1074 /*
1076 * Big trouble! The inode has become a different object. 1075 * Big trouble! The inode has become a different object.
1077 */ 1076 */
1078#ifdef NFS_PARANOIA
1079 printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n", 1077 printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n",
1080 __FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode); 1078 __FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode);
1081#endif
1082 out_err: 1079 out_err:
1083 /* 1080 /*
1084 * No need to worry about unhashing the dentry, as the 1081 * No need to worry about unhashing the dentry, as the
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index abd9f8b48943..cd3ca7b5d3db 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -26,7 +26,6 @@
26#include "internal.h" 26#include "internal.h"
27 27
28#define NFSDBG_FACILITY NFSDBG_XDR 28#define NFSDBG_FACILITY NFSDBG_XDR
29/* #define NFS_PARANOIA 1 */
30 29
31/* Mapping from NFS error code to "errno" error code. */ 30/* Mapping from NFS error code to "errno" error code. */
32#define errno_NFSERR_IO EIO 31#define errno_NFSERR_IO EIO
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b8c28f2380a5..938f37166788 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -224,7 +224,8 @@ static int nfs4_stat_to_errno(int);
 				encode_getattr_maxsz)
 #define NFS4_dec_setattr_sz     (compound_decode_hdr_maxsz + \
 				decode_putfh_maxsz + \
-				op_decode_hdr_maxsz + 3)
+				op_decode_hdr_maxsz + 3 + \
+				nfs4_fattr_maxsz)
 #define NFS4_enc_fsinfo_sz	(compound_encode_hdr_maxsz + \
 				encode_putfh_maxsz + \
 				encode_fsinfo_maxsz)
@@ -2079,9 +2080,11 @@ out:
 
 #define READ_BUF(nbytes)  do { \
 	p = xdr_inline_decode(xdr, nbytes); \
-	if (!p) { \
-		printk(KERN_WARNING "%s: reply buffer overflowed in line %d.", \
-				__FUNCTION__, __LINE__); \
+	if (unlikely(!p)) { \
+		printk(KERN_INFO "%s: prematurely hit end of receive" \
+				" buffer\n", __FUNCTION__); \
+		printk(KERN_INFO "%s: xdr->p=%p, bytes=%u, xdr->end=%p\n", \
+				__FUNCTION__, xdr->p, nbytes, xdr->end); \
 		return -EIO; \
 	} \
 } while (0)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 388950118f59..e12054c86d0d 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -20,8 +20,6 @@
 
 #include "internal.h"
 
-#define NFS_PARANOIA 1
-
 static struct kmem_cache *nfs_page_cachep;
 
 static inline struct nfs_page *
@@ -167,11 +165,6 @@ nfs_release_request(struct nfs_page *req)
 	if (!atomic_dec_and_test(&req->wb_count))
 		return;
 
-#ifdef NFS_PARANOIA
-	BUG_ON (!list_empty(&req->wb_list));
-	BUG_ON (NFS_WBACK_BUSY(req));
-#endif
-
 	/* Release struct file or cached credential */
 	nfs_clear_request(req);
 	put_nfs_open_context(req->wb_context);
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index ce341dc76d5e..9b118ee20193 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -11,4 +11,3 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
 nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
 nfsd-$(CONFIG_NFSD_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
 			   nfs4acl.o nfs4callback.o nfs4recover.o
-nfsd-objs		:= $(nfsd-y)
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 6f24768272a1..79bd03b8bbf8 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -469,6 +469,13 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 	nd.dentry = NULL;
 	exp.ex_path = NULL;
 
+	/* fs locations */
+	exp.ex_fslocs.locations = NULL;
+	exp.ex_fslocs.locations_count = 0;
+	exp.ex_fslocs.migrated = 0;
+
+	exp.ex_uuid = NULL;
+
 	if (mesg[mlen-1] != '\n')
 		return -EINVAL;
 	mesg[mlen-1] = 0;
@@ -509,13 +516,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 	if (exp.h.expiry_time == 0)
 		goto out;
 
-	/* fs locations */
-	exp.ex_fslocs.locations = NULL;
-	exp.ex_fslocs.locations_count = 0;
-	exp.ex_fslocs.migrated = 0;
-
-	exp.ex_uuid = NULL;
-
 	/* flags */
 	err = get_int(&mesg, &an_int);
 	if (err == -ENOENT)
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 7f5bad0393b1..eac82830bfd7 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -177,7 +177,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
 	if (max_blocksize < resp->count)
 		resp->count = max_blocksize;
 
-	svc_reserve(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
+	svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
 
 	fh_copy(&resp->fh, &argp->fh);
 	nfserr = nfsd_read(rqstp, &resp->fh, NULL,
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7e4bb0af24d7..10f6e7dcf633 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -239,7 +239,7 @@ static __be32 *
 encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 {
 	struct dentry *dentry = fhp->fh_dentry;
-	if (dentry && dentry->d_inode != NULL) {
+	if (dentry && dentry->d_inode) {
 		int err;
 		struct kstat stat;
 
@@ -300,9 +300,9 @@ int
 nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd3_sattrargs *args)
 {
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = decode_sattr3(p, &args->attrs)))
+	if (!(p = decode_fh(p, &args->fh)))
 		return 0;
+	p = decode_sattr3(p, &args->attrs);
 
 	if ((args->check_guard = ntohl(*p++)) != 0) {
 		struct timespec time;
@@ -343,9 +343,9 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
 	int v,pn;
 	u32 max_blocksize = svc_max_payload(rqstp);
 
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = xdr_decode_hyper(p, &args->offset)))
+	if (!(p = decode_fh(p, &args->fh)))
 		return 0;
+	p = xdr_decode_hyper(p, &args->offset);
 
 	len = args->count = ntohl(*p++);
 
@@ -369,28 +369,44 @@ int
 nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd3_writeargs *args)
 {
-	unsigned int len, v, hdr;
+	unsigned int len, v, hdr, dlen;
 	u32 max_blocksize = svc_max_payload(rqstp);
 
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = xdr_decode_hyper(p, &args->offset)))
+	if (!(p = decode_fh(p, &args->fh)))
 		return 0;
+	p = xdr_decode_hyper(p, &args->offset);
 
 	args->count = ntohl(*p++);
 	args->stable = ntohl(*p++);
 	len = args->len = ntohl(*p++);
+	/*
+	 * The count must equal the amount of data passed.
+	 */
+	if (args->count != args->len)
+		return 0;
 
+	/*
+	 * Check to make sure that we got the right number of
+	 * bytes.
+	 */
 	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-	if (rqstp->rq_arg.len < hdr ||
-	    rqstp->rq_arg.len - hdr < len)
+	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+		- hdr;
+	/*
+	 * Round the length of the data which was specified up to
+	 * the next multiple of XDR units and then compare that
+	 * against the length which was actually received.
+	 */
+	if (dlen != XDR_QUADLEN(len)*4)
 		return 0;
 
+	if (args->count > max_blocksize) {
+		args->count = max_blocksize;
+		len = args->len = max_blocksize;
+	}
 	rqstp->rq_vec[0].iov_base = (void*)p;
 	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
-
-	if (len > max_blocksize)
-		len = max_blocksize;
-	v= 0;
+	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
 		v++;
@@ -398,9 +414,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 		rqstp->rq_vec[v].iov_len = PAGE_SIZE;
 	}
 	rqstp->rq_vec[v].iov_len = len;
-	args->vlen = v+1;
-
-	return args->count == args->len && rqstp->rq_vec[0].iov_len > 0;
+	args->vlen = v + 1;
+	return 1;
 }
 
 int
@@ -414,8 +429,7 @@ nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
 	switch (args->createmode = ntohl(*p++)) {
 	case NFS3_CREATE_UNCHECKED:
 	case NFS3_CREATE_GUARDED:
-		if (!(p = decode_sattr3(p, &args->attrs)))
-			return 0;
+		p = decode_sattr3(p, &args->attrs);
 		break;
 	case NFS3_CREATE_EXCLUSIVE:
 		args->verf = p;
@@ -431,10 +445,10 @@ int
 nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd3_createargs *args)
 {
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = decode_filename(p, &args->name, &args->len))
-	 || !(p = decode_sattr3(p, &args->attrs)))
+	if (!(p = decode_fh(p, &args->fh)) ||
+	    !(p = decode_filename(p, &args->name, &args->len)))
 		return 0;
+	p = decode_sattr3(p, &args->attrs);
 
 	return xdr_argsize_check(rqstp, p);
 }
@@ -448,11 +462,12 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
 	char *old, *new;
 	struct kvec *vec;
 
-	if (!(p = decode_fh(p, &args->ffh))
-	 || !(p = decode_filename(p, &args->fname, &args->flen))
-	 || !(p = decode_sattr3(p, &args->attrs))
+	if (!(p = decode_fh(p, &args->ffh)) ||
+	    !(p = decode_filename(p, &args->fname, &args->flen))
 		)
 		return 0;
+	p = decode_sattr3(p, &args->attrs);
+
 	/* now decode the pathname, which might be larger than the first page.
 	 * As we have to check for nul's anyway, we copy it into a new page
 	 * This page appears in the rq_res.pages list, but as pages_len is always
@@ -502,10 +517,8 @@ nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p,
 	args->ftype = ntohl(*p++);
 
 	if (args->ftype == NF3BLK  || args->ftype == NF3CHR
-	 || args->ftype == NF3SOCK || args->ftype == NF3FIFO) {
-		if (!(p = decode_sattr3(p, &args->attrs)))
-			return 0;
-	}
+	 || args->ftype == NF3SOCK || args->ftype == NF3FIFO)
+		p = decode_sattr3(p, &args->attrs);
 
 	if (args->ftype == NF3BLK || args->ftype == NF3CHR) {
 		args->major = ntohl(*p++);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 673a53c014a3..cc3b7badd486 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -137,7 +137,6 @@ struct ace_container {
 static short ace2type(struct nfs4_ace *);
 static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *,
 				unsigned int);
-void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
 
 struct nfs4_acl *
 nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl,
@@ -785,21 +784,6 @@ nfs4_acl_new(int n)
 	return acl;
 }
 
-void
-nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask,
-		int whotype, uid_t who)
-{
-	struct nfs4_ace *ace = acl->aces + acl->naces;
-
-	ace->type = type;
-	ace->flag = flag;
-	ace->access_mask = access_mask;
-	ace->whotype = whotype;
-	ace->who = who;
-
-	acl->naces++;
-}
-
 static struct {
 	char *string;
 	int   stringlen;
@@ -851,6 +835,5 @@ nfs4_acl_write_who(int who, char *p)
 }
 
 EXPORT_SYMBOL(nfs4_acl_new);
-EXPORT_SYMBOL(nfs4_acl_add_ace);
 EXPORT_SYMBOL(nfs4_acl_get_whotype);
 EXPORT_SYMBOL(nfs4_acl_write_who);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 678f3be88ac0..3cc8ce422ab1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1326,8 +1326,6 @@ do_recall(void *__dp)
 {
 	struct nfs4_delegation *dp = __dp;
 
-	daemonize("nfsv4-recall");
-
 	nfsd4_cb_recall(dp);
 	return 0;
 }
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 739dd3c5c3b2..6ca2d24fc216 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -323,7 +323,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
  *
  */
 
-	u8 version = 1;
+	u8 version;
 	u8 fsid_type = 0;
 	struct inode * inode = dentry->d_inode;
 	struct dentry *parent = dentry->d_parent;
@@ -341,15 +341,59 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
 	 * the reference filehandle (if it is in the same export)
 	 * or the export options.
 	 */
+ retry:
+	version = 1;
 	if (ref_fh && ref_fh->fh_export == exp) {
 		version = ref_fh->fh_handle.fh_version;
-		if (version == 0xca)
+		fsid_type = ref_fh->fh_handle.fh_fsid_type;
+
+		if (ref_fh == fhp)
+			fh_put(ref_fh);
+		ref_fh = NULL;
+
+		switch (version) {
+		case 0xca:
 			fsid_type = FSID_DEV;
-		else
-			fsid_type = ref_fh->fh_handle.fh_fsid_type;
-		/* We know this version/type works for this export
-		 * so there is no need for further checks.
+			break;
+		case 1:
+			break;
+		default:
+			goto retry;
+		}
+
+		/* Need to check that this type works for this
+		 * export point.  As the fsid -> filesystem mapping
+		 * was guided by user-space, there is no guarantee
+		 * that the filesystem actually supports that fsid
+		 * type. If it doesn't we loop around again without
+		 * ref_fh set.
 		 */
+		switch(fsid_type) {
+		case FSID_DEV:
+			if (!old_valid_dev(ex_dev))
+				goto retry;
+			/* FALL THROUGH */
+		case FSID_MAJOR_MINOR:
+		case FSID_ENCODE_DEV:
+			if (!(exp->ex_dentry->d_inode->i_sb->s_type->fs_flags
+			      & FS_REQUIRES_DEV))
+				goto retry;
+			break;
+		case FSID_NUM:
+			if (! (exp->ex_flags & NFSEXP_FSID))
+				goto retry;
+			break;
+		case FSID_UUID8:
+		case FSID_UUID16:
+			if (!root_export)
+				goto retry;
+			/* fall through */
+		case FSID_UUID4_INUM:
+		case FSID_UUID16_INUM:
+			if (exp->ex_uuid == NULL)
+				goto retry;
+			break;
+		}
 	} else if (exp->ex_uuid) {
 		if (fhp->fh_maxsize >= 64) {
 			if (root_export)
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 5cc2eec981b8..b2c7147aa921 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -155,7 +155,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
 					argp->count);
 		argp->count = NFSSVC_MAXBLKSIZE_V2;
 	}
-	svc_reserve(rqstp, (19<<2) + argp->count + 4);
+	svc_reserve_auth(rqstp, (19<<2) + argp->count + 4);
 
 	resp->count = argp->count;
 	nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh), NULL,
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 0c24b9e24fe8..cb3e7fadb772 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -231,9 +231,10 @@ int
 nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd_sattrargs *args)
 {
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = decode_sattr(p, &args->attrs)))
+	p = decode_fh(p, &args->fh);
+	if (!p)
 		return 0;
+	p = decode_sattr(p, &args->attrs);
 
 	return xdr_argsize_check(rqstp, p);
 }
@@ -284,8 +285,9 @@ int
 nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd_writeargs *args)
 {
-	unsigned int len;
+	unsigned int len, hdr, dlen;
 	int v;
+
 	if (!(p = decode_fh(p, &args->fh)))
 		return 0;
 
@@ -293,11 +295,30 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 	args->offset    = ntohl(*p++);	/* offset */
 	p++;				/* totalcount */
 	len = args->len = ntohl(*p++);
-	rqstp->rq_vec[0].iov_base = (void*)p;
-	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
-				(((void*)p) - rqstp->rq_arg.head[0].iov_base);
+	/*
+	 * The protocol specifies a maximum of 8192 bytes.
+	 */
 	if (len > NFSSVC_MAXBLKSIZE_V2)
-		len = NFSSVC_MAXBLKSIZE_V2;
+		return 0;
+
+	/*
+	 * Check to make sure that we got the right number of
+	 * bytes.
+	 */
+	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+		- hdr;
+
+	/*
+	 * Round the length of the data which was specified up to
+	 * the next multiple of XDR units and then compare that
+	 * against the length which was actually received.
+	 */
+	if (dlen != XDR_QUADLEN(len)*4)
+		return 0;
+
+	rqstp->rq_vec[0].iov_base = (void*)p;
+	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
 	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
@@ -306,18 +327,18 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 		rqstp->rq_vec[v].iov_len = PAGE_SIZE;
 	}
 	rqstp->rq_vec[v].iov_len = len;
-	args->vlen = v+1;
-	return rqstp->rq_vec[0].iov_len > 0;
+	args->vlen = v + 1;
+	return 1;
 }
 
 int
 nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd_createargs *args)
 {
-	if (!(p = decode_fh(p, &args->fh))
-	 || !(p = decode_filename(p, &args->name, &args->len))
-	 || !(p = decode_sattr(p, &args->attrs)))
+	if (	!(p = decode_fh(p, &args->fh))
+	     || !(p = decode_filename(p, &args->name, &args->len)))
 		return 0;
+	p = decode_sattr(p, &args->attrs);
 
 	return xdr_argsize_check(rqstp, p);
 }
@@ -361,11 +382,11 @@ int
 nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd_symlinkargs *args)
 {
-	if (!(p = decode_fh(p, &args->ffh))
-	 || !(p = decode_filename(p, &args->fname, &args->flen))
-	 || !(p = decode_pathname(p, &args->tname, &args->tlen))
-	 || !(p = decode_sattr(p, &args->attrs)))
+	if (	!(p = decode_fh(p, &args->ffh))
+	     || !(p = decode_filename(p, &args->fname, &args->flen))
+	     || !(p = decode_pathname(p, &args->tname, &args->tlen)))
 		return 0;
+	p = decode_sattr(p, &args->attrs);
 
 	return xdr_argsize_check(rqstp, p);
 }
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ab45db529c80..9e451a68580f 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -1059,20 +1059,12 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1059 maping blocks, since there is none, so we just zero out remaining 1059 maping blocks, since there is none, so we just zero out remaining
1060 parts of first and last pages in write area (if needed) */ 1060 parts of first and last pages in write area (if needed) */
1061 if ((pos & ~((loff_t) PAGE_CACHE_SIZE - 1)) > inode->i_size) { 1061 if ((pos & ~((loff_t) PAGE_CACHE_SIZE - 1)) > inode->i_size) {
1062 if (from != 0) { /* First page needs to be partially zeroed */ 1062 if (from != 0) /* First page needs to be partially zeroed */
1063 char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0); 1063 zero_user_page(prepared_pages[0], 0, from, KM_USER0);
1064 memset(kaddr, 0, from); 1064
1065 kunmap_atomic(kaddr, KM_USER0); 1065 if (to != PAGE_CACHE_SIZE) /* Last page needs to be partially zeroed */
1066 flush_dcache_page(prepared_pages[0]); 1066 zero_user_page(prepared_pages[num_pages-1], to,
1067 } 1067 PAGE_CACHE_SIZE - to, KM_USER0);
1068 if (to != PAGE_CACHE_SIZE) { /* Last page needs to be partially zeroed */
1069 char *kaddr =
1070 kmap_atomic(prepared_pages[num_pages - 1],
1071 KM_USER0);
1072 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1073 kunmap_atomic(kaddr, KM_USER0);
1074 flush_dcache_page(prepared_pages[num_pages - 1]);
1075 }
1076 1068
1077 /* Since all blocks are new - use already calculated value */ 1069 /* Since all blocks are new - use already calculated value */
1078 return blocks; 1070 return blocks;
@@ -1199,13 +1191,9 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1199 ll_rw_block(READ, 1, &bh); 1191 ll_rw_block(READ, 1, &bh);
1200 *wait_bh++ = bh; 1192 *wait_bh++ = bh;
1201 } else { /* Not mapped, zero it */ 1193 } else { /* Not mapped, zero it */
1202 char *kaddr = 1194 zero_user_page(prepared_pages[0],
1203 kmap_atomic(prepared_pages[0], 1195 block_start,
1204 KM_USER0); 1196 from - block_start, KM_USER0);
1205 memset(kaddr + block_start, 0,
1206 from - block_start);
1207 kunmap_atomic(kaddr, KM_USER0);
1208 flush_dcache_page(prepared_pages[0]);
1209 set_buffer_uptodate(bh); 1197 set_buffer_uptodate(bh);
1210 } 1198 }
1211 } 1199 }
@@ -1237,13 +1225,8 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1237 ll_rw_block(READ, 1, &bh); 1225 ll_rw_block(READ, 1, &bh);
1238 *wait_bh++ = bh; 1226 *wait_bh++ = bh;
1239 } else { /* Not mapped, zero it */ 1227 } else { /* Not mapped, zero it */
1240 char *kaddr = 1228 zero_user_page(prepared_pages[num_pages-1],
1241 kmap_atomic(prepared_pages 1229 to, block_end - to, KM_USER0);
1242 [num_pages - 1],
1243 KM_USER0);
1244 memset(kaddr + to, 0, block_end - to);
1245 kunmap_atomic(kaddr, KM_USER0);
1246 flush_dcache_page(prepared_pages[num_pages - 1]);
1247 set_buffer_uptodate(bh); 1230 set_buffer_uptodate(bh);
1248 } 1231 }
1249 } 1232 }
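All three hunks in this file collapse the same open-coded sequence, kmap_atomic(), memset(), flush_dcache_page(), kunmap_atomic(), into the then-new zero_user_page() helper. Judging purely from the code being removed, the helper is presumably equivalent to:

    /* Hedged reconstruction of zero_user_page() from the removed code;
     * the real definition lives in include/linux/highmem.h. */
    static inline void zero_user_page(struct page *page, unsigned int offset,
                                      unsigned int size, int km_type)
    {
            void *kaddr = kmap_atomic(page, km_type);

            memset(kaddr + offset, 0, size);        /* zero only the sub-range */
            flush_dcache_page(page);                /* keep D-cache coherent */
            kunmap_atomic(kaddr, km_type);
    }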
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9fcbfe316977..1272d11399fb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2148,13 +2148,8 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
2148 length = offset & (blocksize - 1); 2148 length = offset & (blocksize - 1);
2149 /* if we are not on a block boundary */ 2149 /* if we are not on a block boundary */
2150 if (length) { 2150 if (length) {
2151 char *kaddr;
2152
2153 length = blocksize - length; 2151 length = blocksize - length;
2154 kaddr = kmap_atomic(page, KM_USER0); 2152 zero_user_page(page, offset, length, KM_USER0);
2155 memset(kaddr + offset, 0, length);
2156 flush_dcache_page(page);
2157 kunmap_atomic(kaddr, KM_USER0);
2158 if (buffer_mapped(bh) && bh->b_blocknr != 0) { 2153 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2159 mark_buffer_dirty(bh); 2154 mark_buffer_dirty(bh);
2160 } 2155 }
@@ -2370,7 +2365,6 @@ static int reiserfs_write_full_page(struct page *page,
2370 ** last byte in the file 2365 ** last byte in the file
2371 */ 2366 */
2372 if (page->index >= end_index) { 2367 if (page->index >= end_index) {
2373 char *kaddr;
2374 unsigned last_offset; 2368 unsigned last_offset;
2375 2369
2376 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 2370 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
@@ -2379,10 +2373,7 @@ static int reiserfs_write_full_page(struct page *page,
2379 unlock_page(page); 2373 unlock_page(page);
2380 return 0; 2374 return 0;
2381 } 2375 }
2382 kaddr = kmap_atomic(page, KM_USER0); 2376 zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
2383 memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE - last_offset);
2384 flush_dcache_page(page);
2385 kunmap_atomic(kaddr, KM_USER0);
2386 } 2377 }
2387 bh = head; 2378 bh = head;
2388 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); 2379 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
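The truncate hunk zeroes only the stale tail of the last, partially used block: offset & (blocksize - 1) gives the byte offset inside the block (blocksize being a power of two), and blocksize - length is how many bytes past the new end must be cleared. A quick arithmetic check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int blocksize = 4096;                  /* power of two */
            unsigned long long offset = 10000;              /* new i_size */
            unsigned int length = offset & (blocksize - 1); /* 1808 */

            if (length)                             /* i_size not block-aligned */
                    length = blocksize - length;    /* 2288 bytes to zero */
            printf("zero %u bytes at offset %llu\n", length, offset);
            return 0;
    }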
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e073fd86cf60..f25086aeef5f 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1110,7 +1110,7 @@ static int flush_commit_list(struct super_block *s,
1110 if (!barrier) { 1110 if (!barrier) {
1111 /* If there was a write error in the journal - we can't commit 1111 /* If there was a write error in the journal - we can't commit
1112 * this transaction - it will be invalid and, if successful, 1112 * this transaction - it will be invalid and, if successful,
1113 * will just end up propogating the write error out to 1113 * will just end up propagating the write error out to
1114 * the file system. */ 1114 * the file system. */
1115 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) { 1115 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
1116 if (buffer_dirty(jl->j_commit_bh)) 1116 if (buffer_dirty(jl->j_commit_bh))
@@ -1125,7 +1125,7 @@ static int flush_commit_list(struct super_block *s,
1125 1125
1126 /* If there was a write error in the journal - we can't commit this 1126 /* If there was a write error in the journal - we can't commit this
1127 * transaction - it will be invalid and, if successful, will just end 1127 * transaction - it will be invalid and, if successful, will just end
1128 * up propogating the write error out to the filesystem. */ 1128 * up propagating the write error out to the filesystem. */
1129 if (unlikely(!buffer_uptodate(jl->j_commit_bh))) { 1129 if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
1130#ifdef CONFIG_REISERFS_CHECK 1130#ifdef CONFIG_REISERFS_CHECK
1131 reiserfs_warning(s, "journal-615: buffer write failed"); 1131 reiserfs_warning(s, "journal-615: buffer write failed");
diff --git a/fs/select.c b/fs/select.c
index d86224154dec..a974082b0824 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(poll_initwait);
64 64
65static void free_poll_entry(struct poll_table_entry *entry) 65static void free_poll_entry(struct poll_table_entry *entry)
66{ 66{
67 remove_wait_queue(entry->wait_address,&entry->wait); 67 remove_wait_queue(entry->wait_address, &entry->wait);
68 fput(entry->filp); 68 fput(entry->filp);
69} 69}
70 70
@@ -128,7 +128,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
128 entry->filp = filp; 128 entry->filp = filp;
129 entry->wait_address = wait_address; 129 entry->wait_address = wait_address;
130 init_waitqueue_entry(&entry->wait, current); 130 init_waitqueue_entry(&entry->wait, current);
131 add_wait_queue(wait_address,&entry->wait); 131 add_wait_queue(wait_address, &entry->wait);
132} 132}
133 133
134#define FDS_IN(fds, n) (fds->in + n) 134#define FDS_IN(fds, n) (fds->in + n)
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0e637adc2b87..b502c7197ec0 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -111,36 +111,6 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
111 return ret; 111 return ret;
112} 112}
113 113
114
115/**
116 * flush_read_buffer - push buffer to userspace.
117 * @buffer: data buffer for file.
118 * @buf: user-passed buffer.
119 * @count: number of bytes requested.
120 * @ppos: file position.
121 *
122 * Copy the buffer we filled in fill_read_buffer() to userspace.
123 * This is done at the reader's leisure, copying and advancing
124 * the amount they specify each time.
125 * This may be called continuously until the buffer is empty.
126 */
127static int flush_read_buffer(struct sysfs_buffer * buffer, char __user * buf,
128 size_t count, loff_t * ppos)
129{
130 int error;
131
132 if (*ppos > buffer->count)
133 return 0;
134
135 if (count > (buffer->count - *ppos))
136 count = buffer->count - *ppos;
137
138 error = copy_to_user(buf,buffer->page + *ppos,count);
139 if (!error)
140 *ppos += count;
141 return error ? -EFAULT : count;
142}
143
144/** 114/**
145 * sysfs_read_file - read an attribute. 115 * sysfs_read_file - read an attribute.
146 * @file: file pointer. 116 * @file: file pointer.
@@ -177,7 +147,8 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
177 } 147 }
178 pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n", 148 pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
179 __FUNCTION__, count, *ppos, buffer->page); 149 __FUNCTION__, count, *ppos, buffer->page);
180 retval = flush_read_buffer(buffer,buf,count,ppos); 150 retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
151 buffer->count);
181out: 152out:
182 up(&buffer->sem); 153 up(&buffer->sem);
183 return retval; 154 return retval;
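The deleted flush_read_buffer() duplicated logic that fs/libfs.c already exports as simple_read_from_buffer(): clamp the request to what is left in the buffer, copy_to_user(), advance *ppos. A sketch close to its 2.6-era form (see fs/libfs.c for the authoritative version):

    ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
                                    const void *from, size_t available)
    {
            loff_t pos = *ppos;

            if (pos < 0)
                    return -EINVAL;
            if (pos >= available)
                    return 0;                       /* nothing left to read */
            if (count > available - pos)
                    count = available - pos;        /* clamp to buffer end */
            if (copy_to_user(to, from + pos, count))
                    return -EFAULT;
            *ppos = pos + count;                    /* reader advances at leisure */
            return count;
    }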
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 7775ddc0b3c6..e725ddd3de5f 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -809,7 +809,7 @@ xfs_inumbers(
809 xfs_buf_relse(agbp); 809 xfs_buf_relse(agbp);
810 agbp = NULL; 810 agbp = NULL;
811 /* 811 /*
812 * Move up the the last inode in the current 812 * Move up the last inode in the current
813 * chunk. The lookup_ge will always get 813 * chunk. The lookup_ge will always get
814 * us the first inode in the next chunk. 814 * us the first inode in the next chunk.
815 */ 815 */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index f5aa3ef855fb..a96bde6df96d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1734,11 +1734,13 @@ xfs_icsb_cpu_notify(
1734 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); 1734 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1735 switch (action) { 1735 switch (action) {
1736 case CPU_UP_PREPARE: 1736 case CPU_UP_PREPARE:
1737 case CPU_UP_PREPARE_FROZEN:
1737 /* Easy Case - initialize the area and locks, and 1738 /* Easy Case - initialize the area and locks, and
1738 * then rebalance when online does everything else for us. */ 1739 * then rebalance when online does everything else for us. */
1739 memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); 1740 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1740 break; 1741 break;
1741 case CPU_ONLINE: 1742 case CPU_ONLINE:
1743 case CPU_ONLINE_FROZEN:
1742 xfs_icsb_lock(mp); 1744 xfs_icsb_lock(mp);
1743 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); 1745 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
1744 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); 1746 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
@@ -1746,6 +1748,7 @@ xfs_icsb_cpu_notify(
1746 xfs_icsb_unlock(mp); 1748 xfs_icsb_unlock(mp);
1747 break; 1749 break;
1748 case CPU_DEAD: 1750 case CPU_DEAD:
1751 case CPU_DEAD_FROZEN:
1749 /* Disable all the counters, then fold the dead cpu's 1752 /* Disable all the counters, then fold the dead cpu's
1750 * count into the total on the global superblock and 1753 * count into the total on the global superblock and
1751 * re-enable the counters. */ 1754 * re-enable the counters. */
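The new *_FROZEN cases make this per-cpu counter notifier handle hotplug events generated by suspend/resume exactly like ordinary ones. Assuming the convention that each frozen variant is the base action with a CPU_TASKS_FROZEN flag ORed in, masking gives the same dispatch:

    /* Equivalent switch, assuming CPU_TASKS_FROZEN is the flag bit that
     * turns CPU_UP_PREPARE into CPU_UP_PREPARE_FROZEN, etc. */
    switch (action & ~CPU_TASKS_FROZEN) {
    case CPU_UP_PREPARE:
            memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
            break;
    case CPU_ONLINE:
            /* rebalance the icount/ifree/fdblocks counters, as above */
            break;
    case CPU_DEAD:
            /* fold the dead cpu's counts back into the superblock */
            break;
    }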
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index a1a1eca6be45..286e1d844f63 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -51,6 +51,7 @@ int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, in
51 51
52#else /* CONFIG_SMP */ 52#else /* CONFIG_SMP */
53 53
54#define hard_smp_processor_id() 0
54#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; }) 55#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
55 56
56#endif /* CONFIG_SMP */ 57#endif /* CONFIG_SMP */
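The new UP stub lets code shared with SMP configurations keep compiling unchanged; every caller simply sees physical CPU 0:

    /* Hypothetical caller, now valid on both UP and SMP alpha builds: */
    printk(KERN_INFO "running on physical CPU %d\n",
           hard_smp_processor_id());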
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index eeb3bef91e11..f4defc2bd3fb 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
97 1 << TIF_UAC_SIGBUS) 97 1 << TIF_UAC_SIGBUS)
98 98
99#define SET_UNALIGN_CTL(task,value) ({ \ 99#define SET_UNALIGN_CTL(task,value) ({ \
100 (task)->thread_info->flags = (((task)->thread_info->flags & \ 100 task_thread_info(task)->flags = ((task_thread_info(task)->flags & \
101 ~ALPHA_UAC_MASK) \ 101 ~ALPHA_UAC_MASK) \
102 | (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\ 102 | (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\
103 | (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \ 103 | (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \
@@ -105,11 +105,11 @@ register struct thread_info *__current_thread_info __asm__("$8");
105 0; }) 105 0; })
106 106
107#define GET_UNALIGN_CTL(task,value) ({ \ 107#define GET_UNALIGN_CTL(task,value) ({ \
108 put_user(((task)->thread_info->flags & (1 << TIF_UAC_NOPRINT)) \ 108 put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\
109 >> ALPHA_UAC_SHIFT \ 109 >> ALPHA_UAC_SHIFT \
110 | ((task)->thread_info->flags & (1 << TIF_UAC_SIGBUS)) \ 110 | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\
111 >> (ALPHA_UAC_SHIFT + 1) \ 111 >> (ALPHA_UAC_SHIFT + 1) \
112 | ((task)->thread_info->flags & (1 << TIF_UAC_NOFIX)) \ 112 | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\
113 >> (ALPHA_UAC_SHIFT - 1), \ 113 >> (ALPHA_UAC_SHIFT - 1), \
114 (int __user *)(value)); \ 114 (int __user *)(value)); \
115 }) 115 })
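Both macros now reach the flags through task_thread_info() rather than dereferencing task->thread_info by hand, which keeps them correct should the thread_info storage ever move. In this era the accessor presumably reduces to the raw field:

    /* Assumed era-equivalent definition; see include/linux/sched.h for
     * the real one. */
    #define task_thread_info(task)  ((task)->thread_info)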
diff --git a/include/asm-arm/arch-at91/at91rm9200.h b/include/asm-arm/arch-at91/at91rm9200.h
index a12ac8ab2ad0..802891a9cd81 100644
--- a/include/asm-arm/arch-at91/at91rm9200.h
+++ b/include/asm-arm/arch-at91/at91rm9200.h
@@ -107,185 +107,4 @@
107#define AT91RM9200_UHP_BASE 0x00300000 /* USB Host controller */ 107#define AT91RM9200_UHP_BASE 0x00300000 /* USB Host controller */
108 108
109 109
110#if 0
111/*
112 * PIO pin definitions (peripheral A/B multiplexing).
113 */
114#define AT91_PA0_MISO (1 << 0) /* A: SPI Master-In Slave-Out */
115#define AT91_PA0_PCK3 (1 << 0) /* B: PMC Programmable Clock Output 3 */
116#define AT91_PA1_MOSI (1 << 1) /* A: SPI Master-Out Slave-In */
117#define AT91_PA1_PCK0 (1 << 1) /* B: PMC Programmable Clock Output 0 */
118#define AT91_PA2_SPCK (1 << 2) /* A: SPI Serial Clock */
119#define AT91_PA2_IRQ4 (1 << 2) /* B: External Interrupt 4 */
120#define AT91_PA3_NPCS0 (1 << 3) /* A: SPI Peripheral Chip Select 0 */
121#define AT91_PA3_IRQ5 (1 << 3) /* B: External Interrupt 5 */
122#define AT91_PA4_NPCS1 (1 << 4) /* A: SPI Peripheral Chip Select 1 */
123#define AT91_PA4_PCK1 (1 << 4) /* B: PMC Programmable Clock Output 1 */
124#define AT91_PA5_NPCS2 (1 << 5) /* A: SPI Peripheral Chip Select 2 */
125#define AT91_PA5_TXD3 (1 << 5) /* B: USART Transmit Data 3 */
126#define AT91_PA6_NPCS3 (1 << 6) /* A: SPI Peripheral Chip Select 3 */
127#define AT91_PA6_RXD3 (1 << 6) /* B: USART Receive Data 3 */
128#define AT91_PA7_ETXCK_EREFCK (1 << 7) /* A: Ethernet Reference Clock / Transmit Clock */
129#define AT91_PA7_PCK2 (1 << 7) /* B: PMC Programmable Clock Output 2 */
130#define AT91_PA8_ETXEN (1 << 8) /* A: Ethernet Transmit Enable */
131#define AT91_PA8_MCCDB (1 << 8) /* B: MMC Multimedia Card B Command */
132#define AT91_PA9_ETX0 (1 << 9) /* A: Ethernet Transmit Data 0 */
133#define AT91_PA9_MCDB0 (1 << 9) /* B: MMC Multimedia Card B Data 0 */
134#define AT91_PA10_ETX1 (1 << 10) /* A: Ethernet Transmit Data 1 */
135#define AT91_PA10_MCDB1 (1 << 10) /* B: MMC Multimedia Card B Data 1 */
136#define AT91_PA11_ECRS_ECRSDV (1 << 11) /* A: Ethernet Carrier Sense / Data Valid */
137#define AT91_PA11_MCDB2 (1 << 11) /* B: MMC Multimedia Card B Data 2 */
138#define AT91_PA12_ERX0 (1 << 12) /* A: Ethernet Receive Data 0 */
139#define AT91_PA12_MCDB3 (1 << 12) /* B: MMC Multimedia Card B Data 3 */
140#define AT91_PA13_ERX1 (1 << 13) /* A: Ethernet Receive Data 1 */
141#define AT91_PA13_TCLK0 (1 << 13) /* B: TC External Clock Input 0 */
142#define AT91_PA14_ERXER (1 << 14) /* A: Ethernet Receive Error */
143#define AT91_PA14_TCLK1 (1 << 14) /* B: TC External Clock Input 1 */
144#define AT91_PA15_EMDC (1 << 15) /* A: Ethernet Management Data Clock */
145#define AT91_PA15_TCLK2 (1 << 15) /* B: TC External Clock Input 2 */
146#define AT91_PA16_EMDIO (1 << 16) /* A: Ethernet Management Data I/O */
147#define AT91_PA16_IRQ6 (1 << 16) /* B: External Interrupt 6 */
148#define AT91_PA17_TXD0 (1 << 17) /* A: USART Transmit Data 0 */
149#define AT91_PA17_TIOA0 (1 << 17) /* B: TC I/O Line A 0 */
150#define AT91_PA18_RXD0 (1 << 18) /* A: USART Receive Data 0 */
151#define AT91_PA18_TIOB0 (1 << 18) /* B: TC I/O Line B 0 */
152#define AT91_PA19_SCK0 (1 << 19) /* A: USART Serial Clock 0 */
153#define AT91_PA19_TIOA1 (1 << 19) /* B: TC I/O Line A 1 */
154#define AT91_PA20_CTS0 (1 << 20) /* A: USART Clear To Send 0 */
155#define AT91_PA20_TIOB1 (1 << 20) /* B: TC I/O Line B 1 */
156#define AT91_PA21_RTS0 (1 << 21) /* A: USART Ready To Send 0 */
157#define AT91_PA21_TIOA2 (1 << 21) /* B: TC I/O Line A 2 */
158#define AT91_PA22_RXD2 (1 << 22) /* A: USART Receive Data 2 */
159#define AT91_PA22_TIOB2 (1 << 22) /* B: TC I/O Line B 2 */
160#define AT91_PA23_TXD2 (1 << 23) /* A: USART Transmit Data 2 */
161#define AT91_PA23_IRQ3 (1 << 23) /* B: External Interrupt 3 */
162#define AT91_PA24_SCK2 (1 << 24) /* A: USART Serial Clock 2 */
163#define AT91_PA24_PCK1 (1 << 24) /* B: PMC Programmable Clock Output 1 */
164#define AT91_PA25_TWD (1 << 25) /* A: TWI Two-wire Serial Data */
165#define AT91_PA25_IRQ2 (1 << 25) /* B: External Interrupt 2 */
166#define AT91_PA26_TWCK (1 << 26) /* A: TWI Two-wire Serial Clock */
167#define AT91_PA26_IRQ1 (1 << 26) /* B: External Interrupt 1 */
168#define AT91_PA27_MCCK (1 << 27) /* A: MMC Multimedia Card Clock */
169#define AT91_PA27_TCLK3 (1 << 27) /* B: TC External Clock Input 3 */
170#define AT91_PA28_MCCDA (1 << 28) /* A: MMC Multimedia Card A Command */
171#define AT91_PA28_TCLK4 (1 << 28) /* B: TC External Clock Input 4 */
172#define AT91_PA29_MCDA0 (1 << 29) /* A: MMC Multimedia Card A Data 0 */
173#define AT91_PA29_TCLK5 (1 << 29) /* B: TC External Clock Input 5 */
174#define AT91_PA30_DRXD (1 << 30) /* A: DBGU Receive Data */
175#define AT91_PA30_CTS2 (1 << 30) /* B: USART Clear To Send 2 */
176#define AT91_PA31_DTXD (1 << 31) /* A: DBGU Transmit Data */
177#define AT91_PA31_RTS2 (1 << 31) /* B: USART Ready To Send 2 */
178
179#define AT91_PB0_TF0 (1 << 0) /* A: SSC Transmit Frame Sync 0 */
180#define AT91_PB0_RTS3 (1 << 0) /* B: USART Ready To Send 3 */
181#define AT91_PB1_TK0 (1 << 1) /* A: SSC Transmit Clock 0 */
182#define AT91_PB1_CTS3 (1 << 1) /* B: USART Clear To Send 3 */
183#define AT91_PB2_TD0 (1 << 2) /* A: SSC Transmit Data 0 */
184#define AT91_PB2_SCK3 (1 << 2) /* B: USART Serial Clock 3 */
185#define AT91_PB3_RD0 (1 << 3) /* A: SSC Receive Data 0 */
186#define AT91_PB3_MCDA1 (1 << 3) /* B: MMC Multimedia Card A Data 1 */
187#define AT91_PB4_RK0 (1 << 4) /* A: SSC Receive Clock 0 */
188#define AT91_PB4_MCDA2 (1 << 4) /* B: MMC Multimedia Card A Data 2 */
189#define AT91_PB5_RF0 (1 << 5) /* A: SSC Receive Frame Sync 0 */
190#define AT91_PB5_MCDA3 (1 << 5) /* B: MMC Multimedia Card A Data 3 */
191#define AT91_PB6_TF1 (1 << 6) /* A: SSC Transmit Frame Sync 1 */
192#define AT91_PB6_TIOA3 (1 << 6) /* B: TC I/O Line A 3 */
193#define AT91_PB7_TK1 (1 << 7) /* A: SSC Transmit Clock 1 */
194#define AT91_PB7_TIOB3 (1 << 7) /* B: TC I/O Line B 3 */
195#define AT91_PB8_TD1 (1 << 8) /* A: SSC Transmit Data 1 */
196#define AT91_PB8_TIOA4 (1 << 8) /* B: TC I/O Line A 4 */
197#define AT91_PB9_RD1 (1 << 9) /* A: SSC Receive Data 1 */
198#define AT91_PB9_TIOB4 (1 << 9) /* B: TC I/O Line B 4 */
199#define AT91_PB10_RK1 (1 << 10) /* A: SSC Receive Clock 1 */
200#define AT91_PB10_TIOA5 (1 << 10) /* B: TC I/O Line A 5 */
201#define AT91_PB11_RF1 (1 << 11) /* A: SSC Receive Frame Sync 1 */
202#define AT91_PB11_TIOB5 (1 << 11) /* B: TC I/O Line B 5 */
203#define AT91_PB12_TF2 (1 << 12) /* A: SSC Transmit Frame Sync 2 */
204#define AT91_PB12_ETX2 (1 << 12) /* B: Ethernet Transmit Data 2 */
205#define AT91_PB13_TK2 (1 << 13) /* A: SSC Transmit Clock 3 */
206#define AT91_PB13_ETX3 (1 << 13) /* B: Ethernet Transmit Data 3 */
207#define AT91_PB14_TD2 (1 << 14) /* A: SSC Transmit Data 2 */
208#define AT91_PB14_ETXER (1 << 14) /* B: Ethernet Transmit Coding Error */
209#define AT91_PB15_RD2 (1 << 15) /* A: SSC Receive Data 2 */
210#define AT91_PB15_ERX2 (1 << 15) /* B: Ethernet Receive Data 2 */
211#define AT91_PB16_RK2 (1 << 16) /* A: SSC Receive Clock 2 */
212#define AT91_PB16_ERX3 (1 << 16) /* B: Ethernet Receive Data 3 */
213#define AT91_PB17_RF2 (1 << 17) /* A: SSC Receive Frame Sync 2 */
214#define AT91_PB17_ERXDV (1 << 17) /* B: Ethernet Receive Data Valid */
215#define AT91_PB18_RI1 (1 << 18) /* A: USART Ring Indicator 1 */
216#define AT91_PB18_ECOL (1 << 18) /* B: Ethernet Collision Detected */
217#define AT91_PB19_DTR1 (1 << 19) /* A: USART Data Terminal Ready 1 */
218#define AT91_PB19_ERXCK (1 << 19) /* B: Ethernet Receive Clock */
219#define AT91_PB20_TXD1 (1 << 20) /* A: USART Transmit Data 1 */
220#define AT91_PB21_RXD1 (1 << 21) /* A: USART Receive Data 1 */
221#define AT91_PB22_SCK1 (1 << 22) /* A: USART Serial Clock 1 */
222#define AT91_PB23_DCD1 (1 << 23) /* A: USART Data Carrier Detect 1 */
223#define AT91_PB24_CTS1 (1 << 24) /* A: USART Clear To Send 1 */
224#define AT91_PB25_DSR1 (1 << 25) /* A: USART Data Set Ready 1 */
225#define AT91_PB25_EF100 (1 << 25) /* B: Ethernet Force 100 Mbit */
226#define AT91_PB26_RTS1 (1 << 26) /* A: USART Ready To Send 1 */
227#define AT91_PB27_PCK0 (1 << 27) /* B: PMC Programmable Clock Output 0 */
228#define AT91_PB28_FIQ (1 << 28) /* A: Fast Interrupt */
229#define AT91_PB29_IRQ0 (1 << 29) /* A: External Interrupt 0 */
230
231#define AT91_PC0_BFCK (1 << 0) /* A: Burst Flash Clock */
232#define AT91_PC1_BFRDY_SMOE (1 << 1) /* A: Burst Flash Ready / SmartMedia Output Enable */
233#define AT91_PC2_BFAVD (1 << 2) /* A: Burst Flash Address Valid */
234#define AT91_PC3_BFBAA_SMWE (1 << 3) /* A: Burst Flash Address Advance / SmartMedia Write Enable */
235#define AT91_PC4_BFOE (1 << 4) /* A: Burst Flash Output Enable */
236#define AT91_PC5_BFWE (1 << 5) /* A: Burst Flash Write Enable */
237#define AT91_PC6_NWAIT (1 << 6) /* A: SMC Wait Signal */
238#define AT91_PC7_A23 (1 << 7) /* A: Address Bus 23 */
239#define AT91_PC8_A24 (1 << 8) /* A: Address Bus 24 */
240#define AT91_PC9_A25_CFRNW (1 << 9) /* A: Address Bus 25 / Compact Flash Read Not Write */
241#define AT91_PC10_NCS4_CFCS (1 << 10) /* A: SMC Chip Select 4 / Compact Flash Chip Select */
242#define AT91_PC11_NCS5_CFCE1 (1 << 11) /* A: SMC Chip Select 5 / Compact Flash Chip Enable 1 */
243#define AT91_PC12_NCS6_CFCE2 (1 << 12) /* A: SMC Chip Select 6 / Compact Flash Chip Enable 2 */
244#define AT91_PC13_NCS7 (1 << 13) /* A: Chip Select 7 */
245
246#define AT91_PD0_ETX0 (1 << 0) /* A: Ethernet Transmit Data 0 */
247#define AT91_PD1_ETX1 (1 << 1) /* A: Ethernet Transmit Data 1 */
248#define AT91_PD2_ETX2 (1 << 2) /* A: Ethernet Transmit Data 2 */
249#define AT91_PD3_ETX3 (1 << 3) /* A: Ethernet Transmit Data 3 */
250#define AT91_PD4_ETXEN (1 << 4) /* A: Ethernet Transmit Enable */
251#define AT91_PD5_ETXER (1 << 5) /* A: Ethernet Transmit Coding Error */
252#define AT91_PD6_DTXD (1 << 6) /* A: DBGU Transmit Data */
253#define AT91_PD7_PCK0 (1 << 7) /* A: PMC Programmable Clock Output 0 */
254#define AT91_PD7_TSYNC (1 << 7) /* B: ETM Trace Synchronization Signal */
255#define AT91_PD8_PCK1 (1 << 8) /* A: PMC Programmable Clock Output 1 */
256#define AT91_PD8_TCLK (1 << 8) /* B: ETM Trace Clock */
257#define AT91_PD9_PCK2 (1 << 9) /* A: PMC Programmable Clock Output 2 */
258#define AT91_PD9_TPS0 (1 << 9) /* B: ETM Trace ARM Pipeline Status 0 */
259#define AT91_PD10_PCK3 (1 << 10) /* A: PMC Programmable Clock Output 3 */
260#define AT91_PD10_TPS1 (1 << 10) /* B: ETM Trace ARM Pipeline Status 1 */
261#define AT91_PD11_TPS2 (1 << 11) /* B: ETM Trace ARM Pipeline Status 2 */
262#define AT91_PD12_TPK0 (1 << 12) /* B: ETM Trace Packet Port 0 */
263#define AT91_PD13_TPK1 (1 << 13) /* B: ETM Trace Packet Port 1 */
264#define AT91_PD14_TPK2 (1 << 14) /* B: ETM Trace Packet Port 2 */
265#define AT91_PD15_TD0 (1 << 15) /* A: SSC Transmit Data 0 */
266#define AT91_PD15_TPK3 (1 << 15) /* B: ETM Trace Packet Port 3 */
267#define AT91_PD16_TD1 (1 << 16) /* A: SSC Transmit Data 1 */
268#define AT91_PD16_TPK4 (1 << 16) /* B: ETM Trace Packet Port 4 */
269#define AT91_PD17_TD2 (1 << 17) /* A: SSC Transmit Data 2 */
270#define AT91_PD17_TPK5 (1 << 17) /* B: ETM Trace Packet Port 5 */
271#define AT91_PD18_NPCS1 (1 << 18) /* A: SPI Peripheral Chip Select 1 */
272#define AT91_PD18_TPK6 (1 << 18) /* B: ETM Trace Packet Port 6 */
273#define AT91_PD19_NPCS2 (1 << 19) /* A: SPI Peripheral Chip Select 2 */
274#define AT91_PD19_TPK7 (1 << 19) /* B: ETM Trace Packet Port 7 */
275#define AT91_PD20_NPCS3 (1 << 20) /* A: SPI Peripheral Chip Select 3 */
276#define AT91_PD20_TPK8 (1 << 20) /* B: ETM Trace Packet Port 8 */
277#define AT91_PD21_RTS0 (1 << 21) /* A: USART Ready To Send 0 */
278#define AT91_PD21_TPK9 (1 << 21) /* B: ETM Trace Packet Port 9 */
279#define AT91_PD22_RTS1 (1 << 22) /* A: USART Ready To Send 1 */
280#define AT91_PD22_TPK10 (1 << 22) /* B: ETM Trace Packet Port 10 */
281#define AT91_PD23_RTS2 (1 << 23) /* A: USART Ready To Send 2 */
282#define AT91_PD23_TPK11 (1 << 23) /* B: ETM Trace Packet Port 11 */
283#define AT91_PD24_RTS3 (1 << 24) /* A: USART Ready To Send 3 */
284#define AT91_PD24_TPK12 (1 << 24) /* B: ETM Trace Packet Port 12 */
285#define AT91_PD25_DTR1 (1 << 25) /* A: USART Data Terminal Ready 1 */
286#define AT91_PD25_TPK13 (1 << 25) /* B: ETM Trace Packet Port 13 */
287#define AT91_PD26_TPK14 (1 << 26) /* B: ETM Trace Packet Port 14 */
288#define AT91_PD27_TPK15 (1 << 27) /* B: ETM Trace Packet Port 15 */
289#endif
290
291#endif 110#endif
diff --git a/include/asm-arm/arch-at91/at91sam9260.h b/include/asm-arm/arch-at91/at91sam9260.h
index 2cadebc36af7..0427f8698c07 100644
--- a/include/asm-arm/arch-at91/at91sam9260.h
+++ b/include/asm-arm/arch-at91/at91sam9260.h
@@ -117,13 +117,4 @@
117#define AT91SAM9XE_SRAM_BASE 0x00300000 /* Internal SRAM base address */ 117#define AT91SAM9XE_SRAM_BASE 0x00300000 /* Internal SRAM base address */
118 118
119 119
120#if 0
121/*
122 * PIO pin definitions (peripheral A/B multiplexing).
123 */
124
125// TODO: Add
126
127#endif
128
129#endif 120#endif
diff --git a/include/asm-arm/arch-at91/at91sam9261.h b/include/asm-arm/arch-at91/at91sam9261.h
index 01b58ffe2e27..9eb459570330 100644
--- a/include/asm-arm/arch-at91/at91sam9261.h
+++ b/include/asm-arm/arch-at91/at91sam9261.h
@@ -98,195 +98,4 @@
98#define AT91SAM9261_LCDC_BASE 0x00600000 /* LCD controller */ 98#define AT91SAM9261_LCDC_BASE 0x00600000 /* LCD controller */
99 99
100 100
101#if 0
102/*
103 * PIO pin definitions (peripheral A/B multiplexing).
104 */
105#define AT91_PA0_SPI0_MISO (1 << 0) /* A: SPI0 Master In Slave */
106#define AT91_PA0_MCDA0 (1 << 0) /* B: Multimedia Card A Data 0 */
107#define AT91_PA1_SPI0_MOSI (1 << 1) /* A: SPI0 Master Out Slave */
108#define AT91_PA1_MCCDA (1 << 1) /* B: Multimedia Card A Command */
109#define AT91_PA2_SPI0_SPCK (1 << 2) /* A: SPI0 Serial Clock */
110#define AT91_PA2_MCCK (1 << 2) /* B: Multimedia Card Clock */
111#define AT91_PA3_SPI0_NPCS0 (1 << 3) /* A: SPI0 Peripheral Chip Select 0 */
112#define AT91_PA4_SPI0_NPCS1 (1 << 4) /* A: SPI0 Peripheral Chip Select 1 */
113#define AT91_PA4_MCDA1 (1 << 4) /* B: Multimedia Card A Data 1 */
114#define AT91_PA5_SPI0_NPCS2 (1 << 5) /* A: SPI0 Peripheral Chip Select 2 */
115#define AT91_PA5_MCDA2 (1 << 5) /* B: Multimedia Card A Data 2 */
116#define AT91_PA6_SPI0_NPCS3 (1 << 6) /* A: SPI0 Peripheral Chip Select 3 */
117#define AT91_PA6_MCDA3 (1 << 6) /* B: Multimedia Card A Data 3 */
118#define AT91_PA7_TWD (1 << 7) /* A: TWI Two-wire Serial Data */
119#define AT91_PA7_PCK0 (1 << 7) /* B: PMC Programmable clock Output 0 */
120#define AT91_PA8_TWCK (1 << 8) /* A: TWI Two-wire Serial Clock */
121#define AT91_PA8_PCK1 (1 << 8) /* B: PMC Programmable clock Output 1 */
122#define AT91_PA9_DRXD (1 << 9) /* A: DBGU Debug Receive Data */
123#define AT91_PA9_PCK2 (1 << 9) /* B: PMC Programmable clock Output 2 */
124#define AT91_PA10_DTXD (1 << 10) /* A: DBGU Debug Transmit Data */
125#define AT91_PA10_PCK3 (1 << 10) /* B: PMC Programmable clock Output 3 */
126#define AT91_PA11_TSYNC (1 << 11) /* A: Trace Synchronization Signal */
127#define AT91_PA11_SCK1 (1 << 11) /* B: USART1 Serial Clock */
128#define AT91_PA12_TCLK (1 << 12) /* A: Trace Clock */
129#define AT91_PA12_RTS1 (1 << 12) /* B: USART1 Ready To Send */
130#define AT91_PA13_TPS0 (1 << 13) /* A: Trace ARM Pipeline Status 0 */
131#define AT91_PA13_CTS1 (1 << 13) /* B: USART1 Clear To Send */
132#define AT91_PA14_TPS1 (1 << 14) /* A: Trace ARM Pipeline Status 1 */
133#define AT91_PA14_SCK2 (1 << 14) /* B: USART2 Serial Clock */
134#define AT91_PA15_TPS2 (1 << 15) /* A: Trace ARM Pipeline Status 2 */
135#define AT91_PA15_RTS2 (1 << 15) /* B: USART2 Ready To Send */
136#define AT91_PA16_TPK0 (1 << 16) /* A: Trace Packet Port 0 */
137#define AT91_PA16_CTS2 (1 << 16) /* B: USART2 Clear To Send */
138#define AT91_PA17_TPK1 (1 << 17) /* A: Trace Packet Port 1 */
139#define AT91_PA17_TF1 (1 << 17) /* B: SSC1 Transmit Frame Sync */
140#define AT91_PA18_TPK2 (1 << 18) /* A: Trace Packet Port 2 */
141#define AT91_PA18_TK1 (1 << 18) /* B: SSC1 Transmit Clock */
142#define AT91_PA19_TPK3 (1 << 19) /* A: Trace Packet Port 3 */
143#define AT91_PA19_TD1 (1 << 19) /* B: SSC1 Transmit Data */
144#define AT91_PA20_TPK4 (1 << 20) /* A: Trace Packet Port 4 */
145#define AT91_PA20_RD1 (1 << 20) /* B: SSC1 Receive Data */
146#define AT91_PA21_TPK5 (1 << 21) /* A: Trace Packet Port 5 */
147#define AT91_PA21_RK1 (1 << 21) /* B: SSC1 Receive Clock */
148#define AT91_PA22_TPK6 (1 << 22) /* A: Trace Packet Port 6 */
149#define AT91_PA22_RF1 (1 << 22) /* B: SSC1 Receive Frame Sync */
150#define AT91_PA23_TPK7 (1 << 23) /* A: Trace Packet Port 7 */
151#define AT91_PA23_RTS0 (1 << 23) /* B: USART0 Ready To Send */
152#define AT91_PA24_TPK8 (1 << 24) /* A: Trace Packet Port 8 */
153#define AT91_PA24_SPI1_NPCS1 (1 << 24) /* B: SPI1 Peripheral Chip Select 1 */
154#define AT91_PA25_TPK9 (1 << 25) /* A: Trace Packet Port 9 */
155#define AT91_PA25_SPI1_NPCS2 (1 << 25) /* B: SPI1 Peripheral Chip Select 2 */
156#define AT91_PA26_TPK10 (1 << 26) /* A: Trace Packet Port 10 */
157#define AT91_PA26_SPI1_NPCS3 (1 << 26) /* B: SPI1 Peripheral Chip Select 3 */
158#define AT91_PA27_TPK11 (1 << 27) /* A: Trace Packet Port 11 */
159#define AT91_PA27_SPI0_NPCS1 (1 << 27) /* B: SPI0 Peripheral Chip Select 1 */
160#define AT91_PA28_TPK12 (1 << 28) /* A: Trace Packet Port 12 */
161#define AT91_PA28_SPI0_NPCS2 (1 << 28) /* B: SPI0 Peripheral Chip Select 2 */
162#define AT91_PA29_TPK13 (1 << 29) /* A: Trace Packet Port 13 */
163#define AT91_PA29_SPI0_NPCS3 (1 << 29) /* B: SPI0 Peripheral Chip Select 3 */
164#define AT91_PA30_TPK14 (1 << 30) /* A: Trace Packet Port 14 */
165#define AT91_PA30_A23 (1 << 30) /* B: Address Bus bit 23 */
166#define AT91_PA31_TPK15 (1 << 31) /* A: Trace Packet Port 15 */
167#define AT91_PA31_A24 (1 << 31) /* B: Address Bus bit 24 */
168
169#define AT91_PB0_LCDVSYNC (1 << 0) /* A: LCD Vertical Synchronization */
170#define AT91_PB1_LCDHSYNC (1 << 1) /* A: LCD Horizontal Synchronization */
171#define AT91_PB2_LCDDOTCK (1 << 2) /* A: LCD Dot Clock */
172#define AT91_PB2_PCK0 (1 << 2) /* B: PMC Programmable clock Output 0 */
173#define AT91_PB3_LCDDEN (1 << 3) /* A: LCD Data Enable */
174#define AT91_PB4_LCDCC (1 << 4) /* A: LCD Contrast Control */
175#define AT91_PB4_LCDD2 (1 << 4) /* B: LCD Data Bus Bit 2 */
176#define AT91_PB5_LCDD0 (1 << 5) /* A: LCD Data Bus Bit 0 */
177#define AT91_PB5_LCDD3 (1 << 5) /* B: LCD Data Bus Bit 3 */
178#define AT91_PB6_LCDD1 (1 << 6) /* A: LCD Data Bus Bit 1 */
179#define AT91_PB6_LCDD4 (1 << 6) /* B: LCD Data Bus Bit 4 */
180#define AT91_PB7_LCDD2 (1 << 7) /* A: LCD Data Bus Bit 2 */
181#define AT91_PB7_LCDD5 (1 << 7) /* B: LCD Data Bus Bit 5 */
182#define AT91_PB8_LCDD3 (1 << 8) /* A: LCD Data Bus Bit 3 */
183#define AT91_PB8_LCDD6 (1 << 8) /* B: LCD Data Bus Bit 6 */
184#define AT91_PB9_LCDD4 (1 << 9) /* A: LCD Data Bus Bit 4 */
185#define AT91_PB9_LCDD7 (1 << 9) /* B: LCD Data Bus Bit 7 */
186#define AT91_PB10_LCDD5 (1 << 10) /* A: LCD Data Bus Bit 5 */
187#define AT91_PB10_LCDD10 (1 << 10) /* B: LCD Data Bus Bit 10 */
188#define AT91_PB11_LCDD6 (1 << 11) /* A: LCD Data Bus Bit 6 */
189#define AT91_PB11_LCDD11 (1 << 11) /* B: LCD Data Bus Bit 11 */
190#define AT91_PB12_LCDD7 (1 << 12) /* A: LCD Data Bus Bit 7 */
191#define AT91_PB12_LCDD12 (1 << 12) /* B: LCD Data Bus Bit 12 */
192#define AT91_PB13_LCDD8 (1 << 13) /* A: LCD Data Bus Bit 8 */
193#define AT91_PB13_LCDD13 (1 << 13) /* B: LCD Data Bus Bit 13 */
194#define AT91_PB14_LCDD9 (1 << 14) /* A: LCD Data Bus Bit 9 */
195#define AT91_PB14_LCDD14 (1 << 14) /* B: LCD Data Bus Bit 14 */
196#define AT91_PB15_LCDD10 (1 << 15) /* A: LCD Data Bus Bit 10 */
197#define AT91_PB15_LCDD15 (1 << 15) /* B: LCD Data Bus Bit 15 */
198#define AT91_PB16_LCDD11 (1 << 16) /* A: LCD Data Bus Bit 11 */
199#define AT91_PB16_LCDD19 (1 << 16) /* B: LCD Data Bus Bit 19 */
200#define AT91_PB17_LCDD12 (1 << 17) /* A: LCD Data Bus Bit 12 */
201#define AT91_PB17_LCDD20 (1 << 17) /* B: LCD Data Bus Bit 20 */
202#define AT91_PB18_LCDD13 (1 << 18) /* A: LCD Data Bus Bit 13 */
203#define AT91_PB18_LCDD21 (1 << 18) /* B: LCD Data Bus Bit 21 */
204#define AT91_PB19_LCDD14 (1 << 19) /* A: LCD Data Bus Bit 14 */
205#define AT91_PB19_LCDD22 (1 << 19) /* B: LCD Data Bus Bit 22 */
206#define AT91_PB20_LCDD15 (1 << 20) /* A: LCD Data Bus Bit 15 */
207#define AT91_PB20_LCDD23 (1 << 20) /* B: LCD Data Bus Bit 23 */
208#define AT91_PB21_TF0 (1 << 21) /* A: SSC0 Transmit Frame Sync */
209#define AT91_PB21_LCDD16 (1 << 21) /* B: LCD Data Bus Bit 16 */
210#define AT91_PB22_TK0 (1 << 22) /* A: SSC0 Transmit Clock */
211#define AT91_PB22_LCDD17 (1 << 22) /* B: LCD Data Bus Bit 17 */
212#define AT91_PB23_TD0 (1 << 23) /* A: SSC0 Transmit Data */
213#define AT91_PB23_LCDD18 (1 << 23) /* B: LCD Data Bus Bit 18 */
214#define AT91_PB24_RD0 (1 << 24) /* A: SSC0 Receive Data */
215#define AT91_PB24_LCDD19 (1 << 24) /* B: LCD Data Bus Bit 19 */
216#define AT91_PB25_RK0 (1 << 25) /* A: SSC0 Receive Clock */
217#define AT91_PB25_LCDD20 (1 << 25) /* B: LCD Data Bus Bit 20 */
218#define AT91_PB26_RF0 (1 << 26) /* A: SSC0 Receive Frame Sync */
219#define AT91_PB26_LCDD21 (1 << 26) /* B: LCD Data Bus Bit 21 */
220#define AT91_PB27_SPI1_NPCS1 (1 << 27) /* A: SPI1 Peripheral Chip Select 1 */
221#define AT91_PB27_LCDD22 (1 << 27) /* B: LCD Data Bus Bit 22 */
222#define AT91_PB28_SPI1_NPCS0 (1 << 28) /* A: SPI1 Peripheral Chip Select 0 */
223#define AT91_PB28_LCDD23 (1 << 28) /* B: LCD Data Bus Bit 23 */
224#define AT91_PB29_SPI1_SPCK (1 << 29) /* A: SPI1 Serial Clock */
225#define AT91_PB29_IRQ2 (1 << 29) /* B: Interrupt input 2 */
226#define AT91_PB30_SPI1_MISO (1 << 30) /* A: SPI1 Master In Slave */
227#define AT91_PB30_IRQ1 (1 << 30) /* B: Interrupt input 1 */
228#define AT91_PB31_SPI1_MOSI (1 << 31) /* A: SPI1 Master Out Slave */
229#define AT91_PB31_PCK2 (1 << 31) /* B: PMC Programmable clock Output 2 */
230
231#define AT91_PC0_SMOE (1 << 0) /* A: SmartMedia Output Enable */
232#define AT91_PC0_NCS6 (1 << 0) /* B: Chip Select 6 */
233#define AT91_PC1_SMWE (1 << 1) /* A: SmartMedia Write Enable */
234#define AT91_PC1_NCS7 (1 << 1) /* B: Chip Select 7 */
235#define AT91_PC2_NWAIT (1 << 2) /* A: NWAIT */
236#define AT91_PC2_IRQ0 (1 << 2) /* B: Interrupt input 0 */
237#define AT91_PC3_A25_CFRNW (1 << 3) /* A: Address Bus[25] / Compact Flash Read Not Write */
238#define AT91_PC4_NCS4_CFCS0 (1 << 4) /* A: Chip Select 4 / CompactFlash Chip Select 0 */
239#define AT91_PC5_NCS5_CFCS1 (1 << 5) /* A: Chip Select 5 / CompactFlash Chip Select 1 */
240#define AT91_PC6_CFCE1 (1 << 6) /* A: CompactFlash Chip Enable 1 */
241#define AT91_PC7_CFCE2 (1 << 7) /* A: CompactFlash Chip Enable 2 */
242#define AT91_PC8_TXD0 (1 << 8) /* A: USART0 Transmit Data */
243#define AT91_PC8_PCK2 (1 << 8) /* B: PMC Programmable clock Output 2 */
244#define AT91_PC9_RXD0 (1 << 9) /* A: USART0 Receive Data */
245#define AT91_PC9_PCK3 (1 << 9) /* B: PMC Programmable clock Output 3 */
246#define AT91_PC10_RTS0 (1 << 10) /* A: USART0 Ready To Send */
247#define AT91_PC10_SCK0 (1 << 10) /* B: USART0 Serial Clock */
248#define AT91_PC11_CTS0 (1 << 11) /* A: USART0 Clear To Send */
249#define AT91_PC11_FIQ (1 << 11) /* B: AIC Fast Interrupt Input */
250#define AT91_PC12_TXD1 (1 << 12) /* A: USART1 Transmit Data */
251#define AT91_PC12_NCS6 (1 << 12) /* B: Chip Select 6 */
252#define AT91_PC13_RXD1 (1 << 13) /* A: USART1 Receive Data */
253#define AT91_PC13_NCS7 (1 << 13) /* B: Chip Select 7 */
254#define AT91_PC14_TXD2 (1 << 14) /* A: USART2 Transmit Data */
255#define AT91_PC14_SPI1_NPCS2 (1 << 14) /* B: SPI1 Peripheral Chip Select 2 */
256#define AT91_PC15_RXD2 (1 << 15) /* A: USART2 Receive Data */
257#define AT91_PC15_SPI1_NPCS3 (1 << 15) /* B: SPI1 Peripheral Chip Select 3 */
258#define AT91_PC16_D16 (1 << 16) /* A: Data Bus [16] */
259#define AT91_PC16_TCLK0 (1 << 16) /* B: Timer Counter 0 external clock input */
260#define AT91_PC17_D17 (1 << 17) /* A: Data Bus [17] */
261#define AT91_PC17_TCLK1 (1 << 17) /* B: Timer Counter 1 external clock input */
262#define AT91_PC18_D18 (1 << 18) /* A: Data Bus [18] */
263#define AT91_PC18_TCLK2 (1 << 18) /* B: Timer Counter 2 external clock input */
264#define AT91_PC19_D19 (1 << 19) /* A: Data Bus [19] */
265#define AT91_PC19_TIOA0 (1 << 19) /* B: Timer Counter 0 Multipurpose Timer I/O Pin A */
266#define AT91_PC20_D20 (1 << 20) /* A: Data Bus [20] */
267#define AT91_PC20_TIOB0 (1 << 20) /* B: Timer Counter 0 Multipurpose Timer I/O Pin B */
268#define AT91_PC21_D21 (1 << 21) /* A: Data Bus [21] */
269#define AT91_PC21_TIOA1 (1 << 21) /* B: Timer Counter 1 Multipurpose Timer I/O Pin A */
270#define AT91_PC22_D22 (1 << 22) /* A: Data Bus [22] */
271#define AT91_PC22_TIOB1 (1 << 22) /* B: Timer Counter 1 Multipurpose Timer I/O Pin B */
272#define AT91_PC23_D23 (1 << 23) /* A: Data Bus [23] */
273#define AT91_PC23_TIOA2 (1 << 23) /* B: Timer Counter 2 Multipurpose Timer I/O Pin A */
274#define AT91_PC24_D24 (1 << 24) /* A: Data Bus [24] */
275#define AT91_PC24_TIOB2 (1 << 24) /* B: Timer Counter 2 Multipurpose Timer I/O Pin B */
276#define AT91_PC25_D25 (1 << 25) /* A: Data Bus [25] */
277#define AT91_PC25_TF2 (1 << 25) /* B: SSC2 Transmit Frame Sync */
278#define AT91_PC26_D26 (1 << 26) /* A: Data Bus [26] */
279#define AT91_PC26_TK2 (1 << 26) /* B: SSC2 Transmit Clock */
280#define AT91_PC27_D27 (1 << 27) /* A: Data Bus [27] */
281#define AT91_PC27_TD2 (1 << 27) /* B: SSC2 Transmit Data */
282#define AT91_PC28_D28 (1 << 28) /* A: Data Bus [28] */
283#define AT91_PC28_RD2 (1 << 28) /* B: SSC2 Receive Data */
284#define AT91_PC29_D29 (1 << 29) /* A: Data Bus [29] */
285#define AT91_PC29_RK2 (1 << 29) /* B: SSC2 Receive Clock */
286#define AT91_PC30_D30 (1 << 30) /* A: Data Bus [30] */
287#define AT91_PC30_RF2 (1 << 30) /* B: SSC2 Receive Frame Sync */
288#define AT91_PC31_D31 (1 << 31) /* A: Data Bus [31] */
289#define AT91_PC31_PCK1 (1 << 31) /* B: PMC Programmable clock Output 1 */
290#endif
291
292#endif 101#endif
diff --git a/include/asm-arm/arch-at91/at91sam9263.h b/include/asm-arm/arch-at91/at91sam9263.h
index f4af68ae0ea9..115c47ac7ebb 100644
--- a/include/asm-arm/arch-at91/at91sam9263.h
+++ b/include/asm-arm/arch-at91/at91sam9263.h
@@ -119,13 +119,5 @@
119#define AT91SAM9263_DMAC_BASE 0x00800000 /* DMA Controller */ 119#define AT91SAM9263_DMAC_BASE 0x00800000 /* DMA Controller */
120#define AT91SAM9263_UHP_BASE 0x00a00000 /* USB Host controller */ 120#define AT91SAM9263_UHP_BASE 0x00a00000 /* USB Host controller */
121 121
122#if 0
123/*
124 * PIO pin definitions (peripheral A/B multiplexing).
125 */
126
127// TODO: Add
128
129#endif
130 122
131#endif 123#endif
diff --git a/include/asm-arm/arch-at91/board.h b/include/asm-arm/arch-at91/board.h
index 7a34a5b1fed0..0ce6ee98ed0b 100644
--- a/include/asm-arm/arch-at91/board.h
+++ b/include/asm-arm/arch-at91/board.h
@@ -121,7 +121,7 @@ extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data);
121 /* AC97 */ 121 /* AC97 */
122struct atmel_ac97_data { 122struct atmel_ac97_data {
123 u8 reset_pin; /* reset */ 123 u8 reset_pin; /* reset */
124} 124};
125extern void __init at91_add_device_ac97(struct atmel_ac97_data *data); 125extern void __init at91_add_device_ac97(struct atmel_ac97_data *data);
126 126
127 /* LEDs */ 127 /* LEDs */
diff --git a/include/asm-arm/arch-at91/cpu.h b/include/asm-arm/arch-at91/cpu.h
index d464ca58cdbc..7ef4eebe9f8e 100644
--- a/include/asm-arm/arch-at91/cpu.h
+++ b/include/asm-arm/arch-at91/cpu.h
@@ -68,4 +68,10 @@ static inline unsigned long at91_arch_identify(void)
68#define cpu_is_at91sam9263() (0) 68#define cpu_is_at91sam9263() (0)
69#endif 69#endif
70 70
71/*
72 * Since this is ARM, we will never run on any AVR32 CPU. But these
73 * definitions may reduce clutter in common drivers.
74 */
75#define cpu_is_at32ap7000() (0)
76
71#endif 77#endif
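The stub exists so drivers shared between AT91 (ARM) and AT32AP (AVR32) parts can test for the AVR32 chip without #ifdef: on ARM the test constant-folds to zero and the compiler discards the branch. A hypothetical shared-driver use (register names are illustrative only):

    if (cpu_is_at32ap7000())
            writel(val, regs + AP7000_QUIRK_REG);   /* hypothetical */
    else
            writel(val, regs + AT91_PLAIN_REG);     /* hypothetical */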
diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h
index de6494a4dc6b..30de404c61f5 100644
--- a/include/asm-arm/arch-imx/imx-regs.h
+++ b/include/asm-arm/arch-imx/imx-regs.h
@@ -297,7 +297,7 @@
297#define SAR(x) __REG2( IMX_DMAC_BASE + 0x80, (x) << 6) /* Source Address Registers */ 297#define SAR(x) __REG2( IMX_DMAC_BASE + 0x80, (x) << 6) /* Source Address Registers */
298#define DAR(x) __REG2( IMX_DMAC_BASE + 0x84, (x) << 6) /* Destination Address Registers */ 298#define DAR(x) __REG2( IMX_DMAC_BASE + 0x84, (x) << 6) /* Destination Address Registers */
299#define CNTR(x) __REG2( IMX_DMAC_BASE + 0x88, (x) << 6) /* Count Registers */ 299#define CNTR(x) __REG2( IMX_DMAC_BASE + 0x88, (x) << 6) /* Count Registers */
300#define CCR(x) __REG2( IMX_DMAC_BASE + 0x8c, (x) << 6) /* Control Registers */ 300#define CCR(x) __REG2( IMX_DMAC_BASE + 0x8c, (x) << 6) /* Control Registers */
301#define RSSR(x) __REG2( IMX_DMAC_BASE + 0x90, (x) << 6) /* Request source select Registers */ 301#define RSSR(x) __REG2( IMX_DMAC_BASE + 0x90, (x) << 6) /* Request source select Registers */
302#define BLR(x) __REG2( IMX_DMAC_BASE + 0x94, (x) << 6) /* Burst length Registers */ 302#define BLR(x) __REG2( IMX_DMAC_BASE + 0x94, (x) << 6) /* Burst length Registers */
303#define RTOR(x) __REG2( IMX_DMAC_BASE + 0x98, (x) << 6) /* Request timeout Registers */ 303#define RTOR(x) __REG2( IMX_DMAC_BASE + 0x98, (x) << 6) /* Request timeout Registers */
diff --git a/include/asm-arm/arch-integrator/platform.h b/include/asm-arm/arch-integrator/platform.h
index 96ad3d2a66d1..83c4c1ceb411 100644
--- a/include/asm-arm/arch-integrator/platform.h
+++ b/include/asm-arm/arch-integrator/platform.h
@@ -17,7 +17,7 @@
17 * from .s file by awk -f s2h.awk 17 * from .s file by awk -f s2h.awk
18 */ 18 */
19/************************************************************************** 19/**************************************************************************
 20 * * Copyright © ARM Limited 1998. All rights reserved. 20 * * Copyright © ARM Limited 1998. All rights reserved.
21 * ***********************************************************************/ 21 * ***********************************************************************/
22/* ************************************************************************ 22/* ************************************************************************
23 * 23 *
diff --git a/include/asm-arm/arch-iop32x/glantank.h b/include/asm-arm/arch-iop32x/glantank.h
index 3b065618dd00..bf0665acc1c1 100644
--- a/include/asm-arm/arch-iop32x/glantank.h
+++ b/include/asm-arm/arch-iop32x/glantank.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm/arch-iop32x/glantank.h 2 * include/asm-arm/arch-iop32x/glantank.h
3 * 3 *
4 * IO-Data GLAN Tank board registers 4 * IO-Data GLAN Tank board registers
5 */ 5 */
diff --git a/include/asm-arm/arch-iop32x/n2100.h b/include/asm-arm/arch-iop32x/n2100.h
index fed31a648425..77a8af476629 100644
--- a/include/asm-arm/arch-iop32x/n2100.h
+++ b/include/asm-arm/arch-iop32x/n2100.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm/arch-iop32x/n2100.h 2 * include/asm-arm/arch-iop32x/n2100.h
3 * 3 *
4 * Thecus N2100 board registers 4 * Thecus N2100 board registers
5 */ 5 */
diff --git a/include/asm-arm/arch-omap/aic23.h b/include/asm-arm/arch-omap/aic23.h
index 6513065941d0..aec2d6563622 100644
--- a/include/asm-arm/arch-omap/aic23.h
+++ b/include/asm-arm/arch-omap/aic23.h
@@ -110,7 +110,7 @@
110#define TLV320AIC23ID1 (0x1a) // cs low 110#define TLV320AIC23ID1 (0x1a) // cs low
111#define TLV320AIC23ID2 (0x1b) // cs high 111#define TLV320AIC23ID2 (0x1b) // cs high
112 112
113void tlv320aic23_power_up(void); 113void aic23_power_up(void);
114void tlv320aic23_power_down(void); 114void aic23_power_down(void);
115 115
116#endif /* __ASM_ARCH_AIC23_H */ 116#endif /* __ASM_ARCH_AIC23_H */
diff --git a/include/asm-arm/arch-omap/board-apollon.h b/include/asm-arm/arch-omap/board-apollon.h
index de0c5b792c58..dcb587b311f1 100644
--- a/include/asm-arm/arch-omap/board-apollon.h
+++ b/include/asm-arm/arch-omap/board-apollon.h
@@ -30,16 +30,7 @@
30#define __ASM_ARCH_OMAP_APOLLON_H 30#define __ASM_ARCH_OMAP_APOLLON_H
31 31
32/* Placeholder for APOLLON specific defines */ 32/* Placeholder for APOLLON specific defines */
33/* GPMC CS0 */
34#define APOLLON_CS0_BASE 0x00000000
35/* GPMC CS1 */
36#define APOLLON_CS1_BASE 0x08000000
37#define APOLLON_ETHR_START (APOLLON_CS1_BASE + 0x300)
38#define APOLLON_ETHR_GPIO_IRQ 74 33#define APOLLON_ETHR_GPIO_IRQ 74
39/* GPMC CS2 - reserved for OneNAND */
40#define APOLLON_CS2_BASE 0x10000000
41/* GPMC CS3 - reserved for NOR or NAND */
42#define APOLLON_CS3_BASE 0x18000000
43 34
44#endif /* __ASM_ARCH_OMAP_APOLLON_H */ 35#endif /* __ASM_ARCH_OMAP_APOLLON_H */
45 36
diff --git a/include/asm-arm/arch-omap/board-h4.h b/include/asm-arm/arch-omap/board-h4.h
index 7ef664bc9e33..7e0efef4bb65 100644
--- a/include/asm-arm/arch-omap/board-h4.h
+++ b/include/asm-arm/arch-omap/board-h4.h
@@ -30,9 +30,6 @@
30#define __ASM_ARCH_OMAP_H4_H 30#define __ASM_ARCH_OMAP_H4_H
31 31
32/* Placeholder for H4 specific defines */ 32/* Placeholder for H4 specific defines */
33/* GPMC CS1 */
34#define OMAP24XX_ETHR_START 0x08000300
35#define OMAP24XX_ETHR_GPIO_IRQ 92 33#define OMAP24XX_ETHR_GPIO_IRQ 92
36#define H4_CS0_BASE 0x04000000
37#endif /* __ASM_ARCH_OMAP_H4_H */ 34#endif /* __ASM_ARCH_OMAP_H4_H */
38 35
diff --git a/include/asm-arm/arch-omap/board.h b/include/asm-arm/arch-omap/board.h
index edf1dc6ad919..031672c56377 100644
--- a/include/asm-arm/arch-omap/board.h
+++ b/include/asm-arm/arch-omap/board.h
@@ -4,7 +4,7 @@
4 * Information structures for board-specific data 4 * Information structures for board-specific data
5 * 5 *
6 * Copyright (C) 2004 Nokia Corporation 6 * Copyright (C) 2004 Nokia Corporation
 7 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 7 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
8 */ 8 */
9 9
10#ifndef _OMAP_BOARD_H 10#ifndef _OMAP_BOARD_H
@@ -12,6 +12,8 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14 14
15#include <asm/arch/gpio-switch.h>
16
15/* Different peripheral ids */ 17/* Different peripheral ids */
16#define OMAP_TAG_CLOCK 0x4f01 18#define OMAP_TAG_CLOCK 0x4f01
17#define OMAP_TAG_MMC 0x4f02 19#define OMAP_TAG_MMC 0x4f02
@@ -99,26 +101,31 @@ struct omap_usb_config {
99struct omap_lcd_config { 101struct omap_lcd_config {
100 char panel_name[16]; 102 char panel_name[16];
101 char ctrl_name[16]; 103 char ctrl_name[16];
104 s16 nreset_gpio;
105 u8 data_lines;
106};
107
108struct device;
109struct fb_info;
110struct omap_backlight_config {
111 int default_intensity;
112 int (*set_power)(struct device *dev, int state);
113 int (*check_fb)(struct fb_info *fb);
102}; 114};
103 115
104struct omap_fbmem_config { 116struct omap_fbmem_config {
105 u32 fb_sram_start; 117 u32 start;
106 u32 fb_sram_size; 118 u32 size;
107 u32 fb_sdram_start; 119};
108 u32 fb_sdram_size; 120
109}; 121struct omap_pwm_led_platform_data {
110 122 const char *name;
111/* Cover: 123 int intensity_timer;
112 * high -> closed 124 int blink_timer;
113 * low -> open 125 void (*set_power)(struct omap_pwm_led_platform_data *self, int on_off);
114 * Connection: 126};
115 * high -> connected 127
116 * low -> disconnected 128/* See include/asm-arm/arch-omap/gpio-switch.h for definitions */
117 */
118#define OMAP_GPIO_SWITCH_TYPE_COVER 0x0000
119#define OMAP_GPIO_SWITCH_TYPE_CONNECTION 0x0001
120#define OMAP_GPIO_SWITCH_FLAG_INVERTED 0x0001
121#define OMAP_GPIO_SWITCH_FLAG_OUTPUT 0x0002
122struct omap_gpio_switch_config { 129struct omap_gpio_switch_config {
123 char name[12]; 130 char name[12];
124 u16 gpio; 131 u16 gpio;
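Beyond trimming omap_fbmem_config to a single start/size region, the hunk above adds backlight and PWM LED platform data. A hypothetical board file would wire up the LED structure along these lines (the name and timer numbers are illustrative, not taken from the patch):

    static void board_led_set_power(struct omap_pwm_led_platform_data *self,
                                    int on_off)
    {
            /* board-specific: switch the LED supply on or off */
    }

    static struct omap_pwm_led_platform_data board_keypad_led = {
            .name            = "keypad",            /* illustrative */
            .intensity_timer = 9,                   /* illustrative GP timer */
            .blink_timer     = 10,                  /* illustrative GP timer */
            .set_power       = board_led_set_power,
    };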
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index d591d0585bba..f7774192a41e 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -2,7 +2,7 @@
2 * linux/include/asm-arm/arch-omap/dma.h 2 * linux/include/asm-arm/arch-omap/dma.h
3 * 3 *
4 * Copyright (C) 2003 Nokia Corporation 4 * Copyright (C) 2003 Nokia Corporation
 5 * Author: Juha Yrjölä <juha.yrjola@nokia.com> 5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
diff --git a/include/asm-arm/arch-omap/dsp.h b/include/asm-arm/arch-omap/dsp.h
deleted file mode 100644
index 06dad83dd41f..000000000000
--- a/include/asm-arm/arch-omap/dsp.h
+++ /dev/null
@@ -1,250 +0,0 @@
1/*
2 * linux/include/asm-arm/arch-omap/dsp.h
3 *
4 * Header for OMAP DSP driver
5 *
6 * Copyright (C) 2002-2005 Nokia Corporation
7 *
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * 2005/06/01: DSP Gateway version 3.3
25 */
26
27#ifndef ASM_ARCH_DSP_H
28#define ASM_ARCH_DSP_H
29
30
31/*
32 * for /dev/dspctl/ctl
33 */
34#define OMAP_DSP_IOCTL_RESET 1
35#define OMAP_DSP_IOCTL_RUN 2
36#define OMAP_DSP_IOCTL_SETRSTVECT 3
37#define OMAP_DSP_IOCTL_CPU_IDLE 4
38#define OMAP_DSP_IOCTL_MPUI_WORDSWAP_ON 5
39#define OMAP_DSP_IOCTL_MPUI_WORDSWAP_OFF 6
40#define OMAP_DSP_IOCTL_MPUI_BYTESWAP_ON 7
41#define OMAP_DSP_IOCTL_MPUI_BYTESWAP_OFF 8
42#define OMAP_DSP_IOCTL_GBL_IDLE 9
43#define OMAP_DSP_IOCTL_DSPCFG 10
44#define OMAP_DSP_IOCTL_DSPUNCFG 11
45#define OMAP_DSP_IOCTL_TASKCNT 12
46#define OMAP_DSP_IOCTL_POLL 13
47#define OMAP_DSP_IOCTL_REGMEMR 40
48#define OMAP_DSP_IOCTL_REGMEMW 41
49#define OMAP_DSP_IOCTL_REGIOR 42
50#define OMAP_DSP_IOCTL_REGIOW 43
51#define OMAP_DSP_IOCTL_GETVAR 44
52#define OMAP_DSP_IOCTL_SETVAR 45
53#define OMAP_DSP_IOCTL_RUNLEVEL 50
54#define OMAP_DSP_IOCTL_SUSPEND 51
55#define OMAP_DSP_IOCTL_RESUME 52
56#define OMAP_DSP_IOCTL_FBEN 53
57#define OMAP_DSP_IOCTL_FBDIS 54
58#define OMAP_DSP_IOCTL_MBSEND 99
59
60/*
61 * for taskdev
62 * (ioctls below should be >= 0x10000)
63 */
64#define OMAP_DSP_TASK_IOCTL_BFLSH 0x10000
65#define OMAP_DSP_TASK_IOCTL_SETBSZ 0x10001
66#define OMAP_DSP_TASK_IOCTL_LOCK 0x10002
67#define OMAP_DSP_TASK_IOCTL_UNLOCK 0x10003
68#define OMAP_DSP_TASK_IOCTL_GETNAME 0x10004
69
70/*
71 * for /dev/dspctl/mem
72 */
73#define OMAP_DSP_MEM_IOCTL_EXMAP 1
74#define OMAP_DSP_MEM_IOCTL_EXUNMAP 2
75#define OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH 3
76#define OMAP_DSP_MEM_IOCTL_FBEXPORT 5
77#define OMAP_DSP_MEM_IOCTL_MMUITACK 7
78#define OMAP_DSP_MEM_IOCTL_MMUINIT 9
79#define OMAP_DSP_MEM_IOCTL_KMEM_RESERVE 11
80#define OMAP_DSP_MEM_IOCTL_KMEM_RELEASE 12
81
82struct omap_dsp_mapinfo {
83 unsigned long dspadr;
84 unsigned long size;
85};
86
87/*
88 * for /dev/dspctl/twch
89 */
90#define OMAP_DSP_TWCH_IOCTL_MKDEV 1
91#define OMAP_DSP_TWCH_IOCTL_RMDEV 2
92#define OMAP_DSP_TWCH_IOCTL_TADD 11
93#define OMAP_DSP_TWCH_IOCTL_TDEL 12
94#define OMAP_DSP_TWCH_IOCTL_TKILL 13
95
96#define OMAP_DSP_DEVSTATE_NOTASK 0x00000001
97#define OMAP_DSP_DEVSTATE_ATTACHED 0x00000002
98#define OMAP_DSP_DEVSTATE_GARBAGE 0x00000004
99#define OMAP_DSP_DEVSTATE_INVALID 0x00000008
100#define OMAP_DSP_DEVSTATE_ADDREQ 0x00000100
101#define OMAP_DSP_DEVSTATE_DELREQ 0x00000200
102#define OMAP_DSP_DEVSTATE_ADDFAIL 0x00001000
103#define OMAP_DSP_DEVSTATE_ADDING 0x00010000
104#define OMAP_DSP_DEVSTATE_DELING 0x00020000
105#define OMAP_DSP_DEVSTATE_KILLING 0x00040000
106#define OMAP_DSP_DEVSTATE_STATE_MASK 0x7fffffff
107#define OMAP_DSP_DEVSTATE_STALE 0x80000000
108
109struct omap_dsp_taddinfo {
110 unsigned char minor;
111 unsigned long taskadr;
112};
113#define OMAP_DSP_TADD_ABORTADR 0xffffffff
114
115
116/*
117 * error cause definition (for error detection device)
118 */
119#define OMAP_DSP_ERRDT_WDT 0x00000001
120#define OMAP_DSP_ERRDT_MMU 0x00000002
121
122
123/*
124 * mailbox protocol definitions
125 */
126
127struct omap_dsp_mailbox_cmd {
128 unsigned short cmd;
129 unsigned short data;
130};
131
132struct omap_dsp_reginfo {
133 unsigned short adr;
134 unsigned short val;
135};
136
137struct omap_dsp_varinfo {
138 unsigned char varid;
139 unsigned short val[0];
140};
141
142#define OMAP_DSP_MBPROT_REVISION 0x0019
143
144#define OMAP_DSP_MBCMD_WDSND 0x10
145#define OMAP_DSP_MBCMD_WDREQ 0x11
146#define OMAP_DSP_MBCMD_BKSND 0x20
147#define OMAP_DSP_MBCMD_BKREQ 0x21
148#define OMAP_DSP_MBCMD_BKYLD 0x23
149#define OMAP_DSP_MBCMD_BKSNDP 0x24
150#define OMAP_DSP_MBCMD_BKREQP 0x25
151#define OMAP_DSP_MBCMD_TCTL 0x30
152#define OMAP_DSP_MBCMD_TCTLDATA 0x31
153#define OMAP_DSP_MBCMD_POLL 0x32
154#define OMAP_DSP_MBCMD_WDT 0x50 /* v3.3: obsolete */
155#define OMAP_DSP_MBCMD_RUNLEVEL 0x51
156#define OMAP_DSP_MBCMD_PM 0x52
157#define OMAP_DSP_MBCMD_SUSPEND 0x53
158#define OMAP_DSP_MBCMD_KFUNC 0x54
159#define OMAP_DSP_MBCMD_TCFG 0x60
160#define OMAP_DSP_MBCMD_TADD 0x62
161#define OMAP_DSP_MBCMD_TDEL 0x63
162#define OMAP_DSP_MBCMD_TSTOP 0x65
163#define OMAP_DSP_MBCMD_DSPCFG 0x70
164#define OMAP_DSP_MBCMD_REGRW 0x72
165#define OMAP_DSP_MBCMD_GETVAR 0x74
166#define OMAP_DSP_MBCMD_SETVAR 0x75
167#define OMAP_DSP_MBCMD_ERR 0x78
168#define OMAP_DSP_MBCMD_DBG 0x79
169
170#define OMAP_DSP_MBCMD_TCTL_TINIT 0x0000
171#define OMAP_DSP_MBCMD_TCTL_TEN 0x0001
172#define OMAP_DSP_MBCMD_TCTL_TDIS 0x0002
173#define OMAP_DSP_MBCMD_TCTL_TCLR 0x0003
174#define OMAP_DSP_MBCMD_TCTL_TCLR_FORCE 0x0004
175
176#define OMAP_DSP_MBCMD_RUNLEVEL_USER 0x01
177#define OMAP_DSP_MBCMD_RUNLEVEL_SUPER 0x0e
178#define OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY 0x10
179
180#define OMAP_DSP_MBCMD_PM_DISABLE 0x00
181#define OMAP_DSP_MBCMD_PM_ENABLE 0x01
182
183#define OMAP_DSP_MBCMD_KFUNC_FBCTL 0x00
184#define OMAP_DSP_MBCMD_KFUNC_AUDIO_PWR 0x01
185
186#define OMAP_DSP_MBCMD_FBCTL_UPD 0x0000
187#define OMAP_DSP_MBCMD_FBCTL_ENABLE 0x0002
188#define OMAP_DSP_MBCMD_FBCTL_DISABLE 0x0003
189
190#define OMAP_DSP_MBCMD_AUDIO_PWR_UP 0x0000
191#define OMAP_DSP_MBCMD_AUDIO_PWR_DOWN1 0x0001
192#define OMAP_DSP_MBCMD_AUDIO_PWR_DOWN2 0x0002
193
194#define OMAP_DSP_MBCMD_TDEL_SAFE 0x0000
195#define OMAP_DSP_MBCMD_TDEL_KILL 0x0001
196
197#define OMAP_DSP_MBCMD_DSPCFG_REQ 0x00
198#define OMAP_DSP_MBCMD_DSPCFG_SYSADRH 0x28
199#define OMAP_DSP_MBCMD_DSPCFG_SYSADRL 0x29
200#define OMAP_DSP_MBCMD_DSPCFG_PROTREV 0x70
201#define OMAP_DSP_MBCMD_DSPCFG_ABORT 0x78
202#define OMAP_DSP_MBCMD_DSPCFG_LAST 0x80
203
204#define OMAP_DSP_MBCMD_REGRW_MEMR 0x00
205#define OMAP_DSP_MBCMD_REGRW_MEMW 0x01
206#define OMAP_DSP_MBCMD_REGRW_IOR 0x02
207#define OMAP_DSP_MBCMD_REGRW_IOW 0x03
208#define OMAP_DSP_MBCMD_REGRW_DATA 0x04
209
210#define OMAP_DSP_MBCMD_VARID_ICRMASK 0x00
211#define OMAP_DSP_MBCMD_VARID_LOADINFO 0x01
212
213#define OMAP_DSP_TTYP_ARCV 0x0001
214#define OMAP_DSP_TTYP_ASND 0x0002
215#define OMAP_DSP_TTYP_BKMD 0x0004
216#define OMAP_DSP_TTYP_BKDM 0x0008
217#define OMAP_DSP_TTYP_PVMD 0x0010
218#define OMAP_DSP_TTYP_PVDM 0x0020
219
220#define OMAP_DSP_EID_BADTID 0x10
221#define OMAP_DSP_EID_BADTCN 0x11
222#define OMAP_DSP_EID_BADBID 0x20
223#define OMAP_DSP_EID_BADCNT 0x21
224#define OMAP_DSP_EID_NOTLOCKED 0x22
225#define OMAP_DSP_EID_STVBUF 0x23
226#define OMAP_DSP_EID_BADADR 0x24
227#define OMAP_DSP_EID_BADTCTL 0x30
228#define OMAP_DSP_EID_BADPARAM 0x50
229#define OMAP_DSP_EID_FATAL 0x58
230#define OMAP_DSP_EID_NOMEM 0xc0
231#define OMAP_DSP_EID_NORES 0xc1
232#define OMAP_DSP_EID_IPBFULL 0xc2
233#define OMAP_DSP_EID_WDT 0xd0
234#define OMAP_DSP_EID_TASKNOTRDY 0xe0
235#define OMAP_DSP_EID_TASKBSY 0xe1
236#define OMAP_DSP_EID_TASKERR 0xef
237#define OMAP_DSP_EID_BADCFGTYP 0xf0
238#define OMAP_DSP_EID_DEBUG 0xf8
239#define OMAP_DSP_EID_BADSEQ 0xfe
240#define OMAP_DSP_EID_BADCMD 0xff
241
242#define OMAP_DSP_TNM_LEN 16
243
244#define OMAP_DSP_TID_FREE 0xff
245#define OMAP_DSP_TID_ANON 0xfe
246
247#define OMAP_DSP_BID_NULL 0xffff
248#define OMAP_DSP_BID_PVT 0xfffe
249
250#endif /* ASM_ARCH_DSP_H */
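
The mailbox protocol above pairs a 16-bit command word with a 16-bit data word (and omap_dsp_varinfo ends in a C89-style zero-length array for variable payloads). A minimal sketch of filling a command for the word-send opcode; the struct and constants come from this header, while the actual send path is driver-internal and not shown:

    struct omap_dsp_mailbox_cmd cmd = {
            .cmd  = OMAP_DSP_MBCMD_WDSND,   /* "word send", from the table above */
            .data = 0x1234,                 /* illustrative payload word */
    };
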
diff --git a/include/asm-arm/arch-omap/dsp_common.h b/include/asm-arm/arch-omap/dsp_common.h
index 16a459dfa714..c61f868f24ee 100644
--- a/include/asm-arm/arch-omap/dsp_common.h
+++ b/include/asm-arm/arch-omap/dsp_common.h
@@ -1,38 +1,34 @@
1/* 1/*
2 * linux/include/asm-arm/arch-omap/dsp_common.h 2 * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
3 * 3 *
4 * Header for OMAP DSP subsystem control 4 * Copyright (C) 2004-2006 Nokia Corporation. All rights reserved.
5 * 5 *
6 * Copyright (C) 2004,2005 Nokia Corporation 6 * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
7 * 7 *
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com> 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
9 * 11 *
10 * This program is free software; you can redistribute it and/or modify 12 * This program is distributed in the hope that it will be useful, but
11 * it under the terms of the GNU General Public License as published by 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * the Free Software Foundation; either version 2 of the License, or 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * (at your option) any later version. 15 * General Public License for more details.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 * 16 *
20 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
23 * 21 *
24 * 2005/06/03: DSP Gateway version 3.3
25 */ 22 */
26 23
27#ifndef ASM_ARCH_DSP_COMMON_H 24#ifndef ASM_ARCH_DSP_COMMON_H
28#define ASM_ARCH_DSP_COMMON_H 25#define ASM_ARCH_DSP_COMMON_H
29 26
27#ifdef CONFIG_ARCH_OMAP1
30extern void omap_dsp_request_mpui(void); 28extern void omap_dsp_request_mpui(void);
31extern void omap_dsp_release_mpui(void); 29extern void omap_dsp_release_mpui(void);
32extern int omap_dsp_request_mem(void); 30extern int omap_dsp_request_mem(void);
33extern int omap_dsp_release_mem(void); 31extern int omap_dsp_release_mem(void);
34 32#endif
35extern void (*omap_dsp_audio_pwr_up_request)(int stage);
36extern void (*omap_dsp_audio_pwr_down_request)(int stage);
37 33
38#endif /* ASM_ARCH_DSP_COMMON_H */ 34#endif /* ASM_ARCH_DSP_COMMON_H */
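
With the new CONFIG_ARCH_OMAP1 guard, the request/release helpers are only declared on OMAP1. A hedged sketch of error-checked use, assuming nothing beyond the prototypes above:

    #ifdef CONFIG_ARCH_OMAP1
    static int example_use_dsp_mem(void)
    {
            int ret;

            ret = omap_dsp_request_mem();
            if (ret < 0)
                    return ret;     /* DSP memory unavailable */
            /* ... access DSP memory ... */
            omap_dsp_release_mem();
            return 0;
    }
    #endif
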
diff --git a/include/asm-arm/arch-omap/gpio-switch.h b/include/asm-arm/arch-omap/gpio-switch.h
new file mode 100644
index 000000000000..10da0e07c0cf
--- /dev/null
+++ b/include/asm-arm/arch-omap/gpio-switch.h
@@ -0,0 +1,54 @@
1/*
2 * GPIO switch definitions
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_OMAP_GPIO_SWITCH_H
12#define __ASM_ARCH_OMAP_GPIO_SWITCH_H
13
14#include <linux/types.h>
15
16/* Cover:
17 * high -> closed
18 * low -> open
19 * Connection:
20 * high -> connected
21 * low -> disconnected
22 * Activity:
23 * high -> active
24 * low -> inactive
25 *
26 */
27#define OMAP_GPIO_SWITCH_TYPE_COVER 0x0000
28#define OMAP_GPIO_SWITCH_TYPE_CONNECTION 0x0001
29#define OMAP_GPIO_SWITCH_TYPE_ACTIVITY 0x0002
30#define OMAP_GPIO_SWITCH_FLAG_INVERTED 0x0001
31#define OMAP_GPIO_SWITCH_FLAG_OUTPUT 0x0002
32
33struct omap_gpio_switch {
34 const char *name;
35 s16 gpio;
36 unsigned flags:4;
37 unsigned type:4;
38
39 /* Time in ms to debounce when transitioning from
40 * inactive state to active state. */
41 u16 debounce_rising;
42 /* Same for transition from active to inactive state. */
43 u16 debounce_falling;
44
45 /* notify board-specific code about state changes */
46 void (* notify)(void *data, int state);
47 void *notify_data;
48};
49
50/* Call at init time only */
51extern void omap_register_gpio_switches(const struct omap_gpio_switch *tbl,
52 int count);
53
54#endif
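
A board-file sketch for the new interface; the switch name, GPIO number, debounce times and callback are illustrative only, and per the comment above registration must happen at init time:

    static void board_cover_notify(void *data, int state)
    {
            /* state follows the polarity table above: 1 = cover closed */
    }

    static const struct omap_gpio_switch board_gpio_switches[] = {
            {
                    .name             = "cover",    /* hypothetical */
                    .gpio             = 42,         /* hypothetical */
                    .type             = OMAP_GPIO_SWITCH_TYPE_COVER,
                    .debounce_rising  = 100,        /* ms */
                    .debounce_falling = 100,        /* ms */
                    .notify           = board_cover_notify,
            },
    };

    static void __init board_init_switches(void)
    {
            omap_register_gpio_switches(board_gpio_switches,
                                        ARRAY_SIZE(board_gpio_switches));
    }
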
diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h
index 590917efc94a..97b397dd7e87 100644
--- a/include/asm-arm/arch-omap/gpio.h
+++ b/include/asm-arm/arch-omap/gpio.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2003-2005 Nokia Corporation 6 * Copyright (C) 2003-2005 Nokia Corporation
7 * 7 *
8 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 8 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
diff --git a/include/asm-arm/arch-omap/gpmc.h b/include/asm-arm/arch-omap/gpmc.h
index 7c03ef6c14c4..995cc83482eb 100644
--- a/include/asm-arm/arch-omap/gpmc.h
+++ b/include/asm-arm/arch-omap/gpmc.h
@@ -87,5 +87,7 @@ extern int gpmc_cs_calc_divider(int cs, unsigned int sync_clk);
87extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t); 87extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
88extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); 88extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
89extern void gpmc_cs_free(int cs); 89extern void gpmc_cs_free(int cs);
90extern int gpmc_cs_set_reserved(int cs, int reserved);
91extern int gpmc_cs_reserved(int cs);
90 92
91#endif 93#endif
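
The two new helpers expose the chip-select reservation state. A sketch under the assumption that a caller wants to claim a CS and flag it reserved; whether gpmc_cs_request() already sets the flag internally is not visible from this header, and the CS number and size are illustrative:

    static int example_claim_cs(void)
    {
            unsigned long base;
            int ret;

            ret = gpmc_cs_request(1, SZ_16M, &base);
            if (ret < 0)
                    return ret;
            if (!gpmc_cs_reserved(1))       /* query the new flag */
                    gpmc_cs_set_reserved(1, 1);
            return 0;
    }
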
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h
index 481048d65214..e225f4f39b34 100644
--- a/include/asm-arm/arch-omap/hardware.h
+++ b/include/asm-arm/arch-omap/hardware.h
@@ -267,6 +267,15 @@
267#define OMAP_LPG2_PMR (OMAP_LPG2_BASE + 0x04) 267#define OMAP_LPG2_PMR (OMAP_LPG2_BASE + 0x04)
268 268
269/* 269/*
270 * ----------------------------------------------------------------------------
271 * Pulse-Width Light
272 * ----------------------------------------------------------------------------
273 */
274#define OMAP_PWL_BASE 0xfffb5800
275#define OMAP_PWL_ENABLE (OMAP_PWL_BASE + 0x00)
276#define OMAP_PWL_CLK_ENABLE (OMAP_PWL_BASE + 0x04)
277
278/*
270 * --------------------------------------------------------------------------- 279 * ---------------------------------------------------------------------------
271 * Processor specific defines 280 * Processor specific defines
272 * --------------------------------------------------------------------------- 281 * ---------------------------------------------------------------------------
diff --git a/include/asm-arm/arch-omap/hwa742.h b/include/asm-arm/arch-omap/hwa742.h
new file mode 100644
index 000000000000..577f492f2d3c
--- /dev/null
+++ b/include/asm-arm/arch-omap/hwa742.h
@@ -0,0 +1,12 @@
1#ifndef _HWA742_H
2#define _HWA742_H
3
4struct hwa742_platform_data {
5 void (*power_up)(struct device *dev);
6 void (*power_down)(struct device *dev);
7 unsigned long (*get_clock_rate)(struct device *dev);
8
9 unsigned te_connected:1;
10};
11
12#endif
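
Board-side sketch of filling the new platform data; the hook bodies and clock rate are board-specific placeholders, and a board file would also need <linux/device.h> for struct device, which this header leaves to its includer:

    static void board_hwa742_power_up(struct device *dev)   { /* enable supply */ }
    static void board_hwa742_power_down(struct device *dev) { /* cut supply */ }

    static unsigned long board_hwa742_clk_rate(struct device *dev)
    {
            return 72000000;        /* illustrative, in Hz */
    }

    static struct hwa742_platform_data board_hwa742_data = {
            .power_up       = board_hwa742_power_up,
            .power_down     = board_hwa742_power_down,
            .get_clock_rate = board_hwa742_clk_rate,
            .te_connected   = 1,    /* tearing-effect line wired up */
    };
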
diff --git a/include/asm-arm/arch-omap/io.h b/include/asm-arm/arch-omap/io.h
index 78f68e6a4f0c..4aca7e3d7566 100644
--- a/include/asm-arm/arch-omap/io.h
+++ b/include/asm-arm/arch-omap/io.h
@@ -77,6 +77,17 @@
77#define io_p2v(pa) ((pa) + IO_OFFSET) /* Works for L3 and L4 */ 77#define io_p2v(pa) ((pa) + IO_OFFSET) /* Works for L3 and L4 */
78#define io_v2p(va) ((va) - IO_OFFSET) /* Works for L3 and L4 */ 78#define io_v2p(va) ((va) - IO_OFFSET) /* Works for L3 and L4 */
79 79
80/* DSP */
81#define DSP_MEM_24XX_PHYS OMAP24XX_DSP_MEM_BASE /* 0x58000000 */
82#define DSP_MEM_24XX_VIRT 0xe0000000
83#define DSP_MEM_24XX_SIZE 0x28000
84#define DSP_IPI_24XX_PHYS OMAP24XX_DSP_IPI_BASE /* 0x59000000 */
85#define DSP_IPI_24XX_VIRT 0xe1000000
86#define DSP_IPI_24XX_SIZE SZ_4K
87#define DSP_MMU_24XX_PHYS OMAP24XX_DSP_MMU_BASE /* 0x5a000000 */
88#define DSP_MMU_24XX_VIRT 0xe2000000
89#define DSP_MMU_24XX_SIZE SZ_4K
90
80#endif 91#endif
81 92
82#ifndef __ASSEMBLER__ 93#ifndef __ASSEMBLER__
diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index c5bb05a69b81..3ede58b51db2 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -37,8 +37,6 @@
37#define INT_DSP_MMU_ABORT 7 37#define INT_DSP_MMU_ABORT 7
38#define INT_HOST 8 38#define INT_HOST 8
39#define INT_ABORT 9 39#define INT_ABORT 9
40#define INT_DSP_MAILBOX1 10
41#define INT_DSP_MAILBOX2 11
42#define INT_BRIDGE_PRIV 13 40#define INT_BRIDGE_PRIV 13
43#define INT_GPIO_BANK1 14 41#define INT_GPIO_BANK1 14
44#define INT_UART3 15 42#define INT_UART3 15
@@ -63,6 +61,8 @@
63#define INT_1510_RES2 2 61#define INT_1510_RES2 2
64#define INT_1510_SPI_TX 4 62#define INT_1510_SPI_TX 4
65#define INT_1510_SPI_RX 5 63#define INT_1510_SPI_RX 5
64#define INT_1510_DSP_MAILBOX1 10
65#define INT_1510_DSP_MAILBOX2 11
66#define INT_1510_RES12 12 66#define INT_1510_RES12 12
67#define INT_1510_LB_MMU 17 67#define INT_1510_LB_MMU 17
68#define INT_1510_RES18 18 68#define INT_1510_RES18 18
@@ -75,6 +75,8 @@
75#define INT_1610_IH2_FIQ 2 75#define INT_1610_IH2_FIQ 2
76#define INT_1610_McBSP2_TX 4 76#define INT_1610_McBSP2_TX 4
77#define INT_1610_McBSP2_RX 5 77#define INT_1610_McBSP2_RX 5
78#define INT_1610_DSP_MAILBOX1 10
79#define INT_1610_DSP_MAILBOX2 11
78#define INT_1610_LCD_LINE 12 80#define INT_1610_LCD_LINE 12
79#define INT_1610_GPTIMER1 17 81#define INT_1610_GPTIMER1 17
80#define INT_1610_GPTIMER2 18 82#define INT_1610_GPTIMER2 18
@@ -131,11 +133,11 @@
131#define INT_RTC_TIMER (25 + IH2_BASE) 133#define INT_RTC_TIMER (25 + IH2_BASE)
132#define INT_RTC_ALARM (26 + IH2_BASE) 134#define INT_RTC_ALARM (26 + IH2_BASE)
133#define INT_MEM_STICK (27 + IH2_BASE) 135#define INT_MEM_STICK (27 + IH2_BASE)
134#define INT_DSP_MMU (28 + IH2_BASE)
135 136
136/* 137/*
137 * OMAP-1510 specific IRQ numbers for interrupt handler 2 138 * OMAP-1510 specific IRQ numbers for interrupt handler 2
138 */ 139 */
140#define INT_1510_DSP_MMU (28 + IH2_BASE)
139#define INT_1510_COM_SPI_RO (31 + IH2_BASE) 141#define INT_1510_COM_SPI_RO (31 + IH2_BASE)
140 142
141/* 143/*
@@ -146,6 +148,7 @@
146#define INT_1610_USB_OTG (8 + IH2_BASE) 148#define INT_1610_USB_OTG (8 + IH2_BASE)
147#define INT_1610_SoSSI (9 + IH2_BASE) 149#define INT_1610_SoSSI (9 + IH2_BASE)
148#define INT_1610_SoSSI_MATCH (19 + IH2_BASE) 150#define INT_1610_SoSSI_MATCH (19 + IH2_BASE)
151#define INT_1610_DSP_MMU (28 + IH2_BASE)
149#define INT_1610_McBSP2RX_OF (31 + IH2_BASE) 152#define INT_1610_McBSP2RX_OF (31 + IH2_BASE)
150#define INT_1610_STI (32 + IH2_BASE) 153#define INT_1610_STI (32 + IH2_BASE)
151#define INT_1610_STI_WAKEUP (33 + IH2_BASE) 154#define INT_1610_STI_WAKEUP (33 + IH2_BASE)
@@ -239,10 +242,15 @@
239#define INT_24XX_SDMA_IRQ3 15 242#define INT_24XX_SDMA_IRQ3 15
240#define INT_24XX_CAM_IRQ 24 243#define INT_24XX_CAM_IRQ 24
241#define INT_24XX_DSS_IRQ 25 244#define INT_24XX_DSS_IRQ 25
245#define INT_24XX_MAIL_U0_MPU 26
246#define INT_24XX_DSP_UMA 27
247#define INT_24XX_DSP_MMU 28
242#define INT_24XX_GPIO_BANK1 29 248#define INT_24XX_GPIO_BANK1 29
243#define INT_24XX_GPIO_BANK2 30 249#define INT_24XX_GPIO_BANK2 30
244#define INT_24XX_GPIO_BANK3 31 250#define INT_24XX_GPIO_BANK3 31
245#define INT_24XX_GPIO_BANK4 32 251#define INT_24XX_GPIO_BANK4 32
252#define INT_24XX_GPIO_BANK5 33
253#define INT_24XX_MAIL_U3_MPU 34
246#define INT_24XX_GPTIMER1 37 254#define INT_24XX_GPTIMER1 37
247#define INT_24XX_GPTIMER2 38 255#define INT_24XX_GPTIMER2 38
248#define INT_24XX_GPTIMER3 39 256#define INT_24XX_GPTIMER3 39
@@ -262,6 +270,12 @@
262#define INT_24XX_UART1_IRQ 72 270#define INT_24XX_UART1_IRQ 72
263#define INT_24XX_UART2_IRQ 73 271#define INT_24XX_UART2_IRQ 73
264#define INT_24XX_UART3_IRQ 74 272#define INT_24XX_UART3_IRQ 74
273#define INT_24XX_USB_IRQ_GEN 75
274#define INT_24XX_USB_IRQ_NISO 76
275#define INT_24XX_USB_IRQ_ISO 77
276#define INT_24XX_USB_IRQ_HGEN 78
277#define INT_24XX_USB_IRQ_HSOF 79
278#define INT_24XX_USB_IRQ_OTG 80
265#define INT_24XX_MMC_IRQ 83 279#define INT_24XX_MMC_IRQ 83
266 280
267/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and 281/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and
diff --git a/include/asm-arm/arch-omap/lcd_lph8923.h b/include/asm-arm/arch-omap/lcd_lph8923.h
deleted file mode 100644
index 004e67e22ca7..000000000000
--- a/include/asm-arm/arch-omap/lcd_lph8923.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __LCD_LPH8923_H
2#define __LCD_LPH8923_H
3
4enum lcd_lph8923_test_num {
5 LCD_LPH8923_TEST_RGB_LINES,
6};
7
8enum lcd_lph8923_test_result {
9 LCD_LPH8923_TEST_SUCCESS,
10 LCD_LPH8923_TEST_INVALID,
11 LCD_LPH8923_TEST_FAILED,
12};
13
14#endif
diff --git a/include/asm-arm/arch-omap/lcd_mipid.h b/include/asm-arm/arch-omap/lcd_mipid.h
new file mode 100644
index 000000000000..f8fbc4801e52
--- /dev/null
+++ b/include/asm-arm/arch-omap/lcd_mipid.h
@@ -0,0 +1,24 @@
1#ifndef __LCD_MIPID_H
2#define __LCD_MIPID_H
3
4enum mipid_test_num {
5 MIPID_TEST_RGB_LINES,
6};
7
8enum mipid_test_result {
9 MIPID_TEST_SUCCESS,
10 MIPID_TEST_INVALID,
11 MIPID_TEST_FAILED,
12};
13
14#ifdef __KERNEL__
15
16struct mipid_platform_data {
17 int nreset_gpio;
18 int data_lines;
19 void (*shutdown)(struct mipid_platform_data *pdata);
20};
21
22#endif
23
24#endif
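
This generically named mipid header replaces the lph8923 one deleted above. A board-data sketch with illustrative GPIO number and bus width:

    static void board_mipid_shutdown(struct mipid_platform_data *pdata)
    {
            /* e.g. drive pdata->nreset_gpio low and cut panel power */
    }

    static struct mipid_platform_data board_mipid_data = {
            .nreset_gpio = 90,      /* hypothetical reset GPIO */
            .data_lines  = 16,      /* illustrative bus width */
            .shutdown    = board_mipid_shutdown,
    };
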
diff --git a/include/asm-arm/arch-omap/led.h b/include/asm-arm/arch-omap/led.h
new file mode 100644
index 000000000000..f3acae28e2da
--- /dev/null
+++ b/include/asm-arm/arch-omap/led.h
@@ -0,0 +1,24 @@
1/*
2 * linux/include/asm-arm/arch-omap/led.h
3 *
4 * Copyright (C) 2006 Samsung Electronics
5 * Kyungmin Park <kyungmin.park@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef ASMARM_ARCH_LED_H
12#define ASMARM_ARCH_LED_H
13
14struct omap_led_config {
15 struct led_classdev cdev;
16 s16 gpio;
17};
18
19struct omap_led_platform_data {
20 s16 nr_leds;
21 struct omap_led_config *leds;
22};
23
24#endif
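
omap_led_config embeds a struct led_classdev by value, so a board file using this needs <linux/leds.h> in scope. A minimal sketch with illustrative names and GPIO:

    static struct omap_led_config board_leds[] = {
            {
                    .cdev = { .name = "board:red:status" }, /* hypothetical */
                    .gpio = 16,                             /* hypothetical */
            },
    };

    static struct omap_led_platform_data board_led_data = {
            .nr_leds = ARRAY_SIZE(board_leds),
            .leds    = board_leds,
    };
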
diff --git a/include/asm-arm/arch-omap/mailbox.h b/include/asm-arm/arch-omap/mailbox.h
new file mode 100644
index 000000000000..4bf0909461f2
--- /dev/null
+++ b/include/asm-arm/arch-omap/mailbox.h
@@ -0,0 +1,73 @@
1/* mailbox.h */
2
3#ifndef MAILBOX_H
4#define MAILBOX_H
5
6#include <linux/wait.h>
7#include <linux/workqueue.h>
8#include <linux/blkdev.h>
9
10typedef u32 mbox_msg_t;
11typedef void (mbox_receiver_t)(mbox_msg_t msg);
12struct omap_mbox;
13
14typedef int __bitwise omap_mbox_irq_t;
15#define IRQ_TX ((__force omap_mbox_irq_t) 1)
16#define IRQ_RX ((__force omap_mbox_irq_t) 2)
17
18typedef int __bitwise omap_mbox_type_t;
19#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1)
20#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2)
21
22struct omap_mbox_ops {
23 omap_mbox_type_t type;
24 int (*startup)(struct omap_mbox *mbox);
25 void (*shutdown)(struct omap_mbox *mbox);
26 /* fifo */
27 mbox_msg_t (*fifo_read)(struct omap_mbox *mbox);
28 void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg);
29 int (*fifo_empty)(struct omap_mbox *mbox);
30 int (*fifo_full)(struct omap_mbox *mbox);
31 /* irq */
32 void (*enable_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
33 void (*disable_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
34 void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
35 int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
36};
37
38struct omap_mbox_queue {
39 spinlock_t lock;
40 request_queue_t *queue;
41 struct work_struct work;
42 int (*callback)(void *);
43 struct omap_mbox *mbox;
44};
45
46struct omap_mbox {
47 char *name;
48 unsigned int irq;
49
50 struct omap_mbox_queue *txq, *rxq;
51
52 struct omap_mbox_ops *ops;
53
54 mbox_msg_t seq_snd, seq_rcv;
55
56 struct device dev;
57
58 struct omap_mbox *next;
59 void *priv;
60
61 void (*err_notify)(void);
62};
63
64int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg, void *);
65void omap_mbox_init_seq(struct omap_mbox *);
66
67struct omap_mbox *omap_mbox_get(const char *);
68void omap_mbox_put(struct omap_mbox *);
69
70int omap_mbox_register(struct omap_mbox *);
71int omap_mbox_unregister(struct omap_mbox *);
72
73#endif /* MAILBOX_H */
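
Client-side sketch of the new API: look a mailbox up by name, send one word, drop the reference. The instance name and the ERR_PTR error convention for omap_mbox_get() are assumptions, since only the prototypes are visible here:

    static int example_mbox_ping(void)
    {
            struct omap_mbox *mbox;
            int ret;

            mbox = omap_mbox_get("dsp");    /* instance name is a guess */
            if (IS_ERR(mbox))
                    return PTR_ERR(mbox);   /* error style assumed; <linux/err.h> */

            ret = omap_mbox_msg_send(mbox, 0xdeadbeef, NULL); /* word + opaque arg */
            omap_mbox_put(mbox);
            return ret;
    }
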
diff --git a/include/asm-arm/arch-omap/mcspi.h b/include/asm-arm/arch-omap/mcspi.h
index 9e7f40a88e1b..1254e4945b6f 100644
--- a/include/asm-arm/arch-omap/mcspi.h
+++ b/include/asm-arm/arch-omap/mcspi.h
@@ -2,7 +2,6 @@
2#define _OMAP2_MCSPI_H 2#define _OMAP2_MCSPI_H
3 3
4struct omap2_mcspi_platform_config { 4struct omap2_mcspi_platform_config {
5 unsigned long base;
6 unsigned short num_cs; 5 unsigned short num_cs;
7}; 6};
8 7
diff --git a/include/asm-arm/arch-omap/memory.h b/include/asm-arm/arch-omap/memory.h
index 48fabc493163..14cba97c18ad 100644
--- a/include/asm-arm/arch-omap/memory.h
+++ b/include/asm-arm/arch-omap/memory.h
@@ -86,5 +86,18 @@
86 86
87#endif /* CONFIG_ARCH_OMAP15XX */ 87#endif /* CONFIG_ARCH_OMAP15XX */
88 88
89/* Override the ARM default */
90#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
91
92#if (CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE == 0)
93#undef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
94#define CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE 2
95#endif
96
97#define CONSISTENT_DMA_SIZE \
98 (((CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE + 1) & ~1) * 1024 * 1024)
99
100#endif
101
89#endif 102#endif
90 103
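
The rounding above bumps a zero config to 2 and then rounds the configured megabyte count up to an even value, since ((n + 1) & ~1) clears the low bit after adding one. Worked examples:

    /*
     *  CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE = 0 -> redefined to 2 -> 2 MB
     *  CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE = 3 -> (3 + 1) & ~1 = 4 -> 4 MB
     *  CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE = 4 -> (4 + 1) & ~1 = 4 -> 4 MB
     */
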
diff --git a/include/asm-arm/arch-omap/menelaus.h b/include/asm-arm/arch-omap/menelaus.h
index 88cd4c87f0de..82d276a6bd95 100644
--- a/include/asm-arm/arch-omap/menelaus.h
+++ b/include/asm-arm/arch-omap/menelaus.h
@@ -7,10 +7,19 @@
7#ifndef __ASM_ARCH_MENELAUS_H 7#ifndef __ASM_ARCH_MENELAUS_H
8#define __ASM_ARCH_MENELAUS_H 8#define __ASM_ARCH_MENELAUS_H
9 9
10extern void menelaus_mmc_register(void (*callback)(unsigned long data, u8 card_mask), 10extern int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
11 unsigned long data); 11 void *data);
12extern void menelaus_mmc_remove(void); 12extern void menelaus_unregister_mmc_callback(void);
13extern void menelaus_mmc_opendrain(int enable); 13extern int menelaus_set_mmc_opendrain(int slot, int enable);
14extern int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_on);
15
16extern int menelaus_set_vmem(unsigned int mV);
17extern int menelaus_set_vio(unsigned int mV);
18extern int menelaus_set_vmmc(unsigned int mV);
19extern int menelaus_set_vaux(unsigned int mV);
20extern int menelaus_set_vdcdc(int dcdc, unsigned int mV);
21extern int menelaus_set_slot_sel(int enable);
22extern int menelaus_get_slot_pin_states(void);
14 23
15#if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_MENELAUS) 24#if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_MENELAUS)
16#define omap_has_menelaus() 1 25#define omap_has_menelaus() 1
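
A sketch of the reworked Menelaus calls from a board file; the voltage, slot number and flag values are illustrative, with the set_mmc_slot arguments read off the prototype as (slot, enable, power, cd_on), whose exact semantics are not spelled out in this header:

    static void board_mmc_callback(void *data, u8 card_mask)
    {
            /* card-detect state changed for the slots in card_mask */
    }

    static int board_mmc_init(void)
    {
            int ret;

            ret = menelaus_set_vmmc(3000);  /* millivolts, illustrative */
            if (ret < 0)
                    return ret;
            ret = menelaus_set_mmc_slot(1, 1, 0, 1); /* slot, enable, power, cd_on */
            if (ret < 0)
                    return ret;
            return menelaus_register_mmc_callback(board_mmc_callback, NULL);
    }
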
diff --git a/include/asm-arm/arch-omap/omap16xx.h b/include/asm-arm/arch-omap/omap16xx.h
index f0c7f0fb4dc0..f7f5cdfdccce 100644
--- a/include/asm-arm/arch-omap/omap16xx.h
+++ b/include/asm-arm/arch-omap/omap16xx.h
@@ -159,15 +159,6 @@
159#define UART3_MVR (OMAP_UART3_BASE + 0x50) 159#define UART3_MVR (OMAP_UART3_BASE + 0x50)
160 160
161/* 161/*
162 * ----------------------------------------------------------------------------
163 * Pulse-Width Light
164 * ----------------------------------------------------------------------------
165 */
166#define OMAP16XX_PWL_BASE (0xfffb5800)
167#define OMAP16XX_PWL_ENABLE (OMAP16XX_PWL_BASE + 0x00)
168#define OMAP16XX_PWL_CLK_ENABLE (OMAP16XX_PWL_BASE + 0x04)
169
170/*
171 * --------------------------------------------------------------------------- 162 * ---------------------------------------------------------------------------
172 * Watchdog timer 163 * Watchdog timer
173 * --------------------------------------------------------------------------- 164 * ---------------------------------------------------------------------------
@@ -199,5 +190,8 @@
199#define WSPR_DISABLE_0 (0x0000aaaa) 190#define WSPR_DISABLE_0 (0x0000aaaa)
200#define WSPR_DISABLE_1 (0x00005555) 191#define WSPR_DISABLE_1 (0x00005555)
201 192
193/* Mailbox */
194#define OMAP16XX_MAILBOX_BASE (0xfffcf000)
195
202#endif /* __ASM_ARCH_OMAP16XX_H */ 196#endif /* __ASM_ARCH_OMAP16XX_H */
203 197
diff --git a/include/asm-arm/arch-omap/omap24xx.h b/include/asm-arm/arch-omap/omap24xx.h
index 6e59805fa654..708b2fac77f2 100644
--- a/include/asm-arm/arch-omap/omap24xx.h
+++ b/include/asm-arm/arch-omap/omap24xx.h
@@ -20,5 +20,14 @@
20#define OMAP24XX_PRCM_BASE (L4_24XX_BASE + 0x8000) 20#define OMAP24XX_PRCM_BASE (L4_24XX_BASE + 0x8000)
21#define OMAP24XX_SDRC_BASE (L3_24XX_BASE + 0x9000) 21#define OMAP24XX_SDRC_BASE (L3_24XX_BASE + 0x9000)
22 22
23/* DSP SS */
24#define OMAP24XX_DSP_BASE 0x58000000
25#define OMAP24XX_DSP_MEM_BASE (OMAP24XX_DSP_BASE + 0x0)
26#define OMAP24XX_DSP_IPI_BASE (OMAP24XX_DSP_BASE + 0x1000000)
27#define OMAP24XX_DSP_MMU_BASE (OMAP24XX_DSP_BASE + 0x2000000)
28
29/* Mailbox */
30#define OMAP24XX_MAILBOX_BASE (L4_24XX_BASE + 0x94000)
31
23#endif /* __ASM_ARCH_OMAP24XX_H */ 32#endif /* __ASM_ARCH_OMAP24XX_H */
24 33
diff --git a/include/asm-arm/arch-omap/omapfb.h b/include/asm-arm/arch-omap/omapfb.h
index fccdb3db025f..46d7a4f60854 100644
--- a/include/asm-arm/arch-omap/omapfb.h
+++ b/include/asm-arm/arch-omap/omapfb.h
@@ -24,6 +24,9 @@
24#ifndef __OMAPFB_H 24#ifndef __OMAPFB_H
25#define __OMAPFB_H 25#define __OMAPFB_H
26 26
27#include <asm/ioctl.h>
28#include <asm/types.h>
29
27/* IOCTL commands. */ 30/* IOCTL commands. */
28 31
29#define OMAP_IOW(num, dtype) _IOW('O', num, dtype) 32#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
@@ -35,26 +38,46 @@
35#define OMAPFB_SYNC_GFX OMAP_IO(37) 38#define OMAPFB_SYNC_GFX OMAP_IO(37)
36#define OMAPFB_VSYNC OMAP_IO(38) 39#define OMAPFB_VSYNC OMAP_IO(38)
37#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int) 40#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
38#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(41, struct omapfb_update_window_old) 41#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps)
39#define OMAPFB_GET_CAPS OMAP_IOR(42, unsigned long)
40#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int) 42#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
41#define OMAPFB_LCD_TEST OMAP_IOW(45, int) 43#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
42#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) 44#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
43#define OMAPFB_UPDATE_WINDOW OMAP_IOW(47, struct omapfb_update_window) 45#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
44#define OMAPFB_SETUP_PLANE OMAP_IOW(48, struct omapfb_setup_plane)
45#define OMAPFB_ENABLE_PLANE OMAP_IOW(49, struct omapfb_enable_plane)
46#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key) 46#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
47#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key)
48#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info)
49#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info)
50#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window)
51#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info)
52#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info)
47 53
48#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff 54#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
49#define OMAPFB_CAPS_LCDC_MASK 0x00fff000 55#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
50#define OMAPFB_CAPS_PANEL_MASK 0xff000000 56#define OMAPFB_CAPS_PANEL_MASK 0xff000000
51 57
52#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000 58#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
59#define OMAPFB_CAPS_TEARSYNC 0x00002000
60#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000
61#define OMAPFB_CAPS_PLANE_SCALE 0x00008000
62#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000
63#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000
64#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000
53#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000 65#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
54 66
55/* Values from DSP must map to lower 16-bits */ 67/* Values from DSP must map to lower 16-bits */
56#define OMAPFB_FORMAT_MASK 0x00ff 68#define OMAPFB_FORMAT_MASK 0x00ff
57#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100 69#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
70#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200
71#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400
72#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800
73#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000
74
75#define OMAPFB_EVENT_READY 1
76#define OMAPFB_EVENT_DISABLED 2
77
78#define OMAPFB_MEMTYPE_SDRAM 0
79#define OMAPFB_MEMTYPE_SRAM 1
80#define OMAPFB_MEMTYPE_MAX 1
58 81
59enum omapfb_color_format { 82enum omapfb_color_format {
60 OMAPFB_COLOR_RGB565 = 0, 83 OMAPFB_COLOR_RGB565 = 0,
@@ -64,17 +87,23 @@ enum omapfb_color_format {
64 OMAPFB_COLOR_CLUT_4BPP, 87 OMAPFB_COLOR_CLUT_4BPP,
65 OMAPFB_COLOR_CLUT_2BPP, 88 OMAPFB_COLOR_CLUT_2BPP,
66 OMAPFB_COLOR_CLUT_1BPP, 89 OMAPFB_COLOR_CLUT_1BPP,
90 OMAPFB_COLOR_RGB444,
91 OMAPFB_COLOR_YUY422,
67}; 92};
68 93
69struct omapfb_update_window { 94struct omapfb_update_window {
70 __u32 x, y; 95 __u32 x, y;
71 __u32 width, height; 96 __u32 width, height;
72 __u32 format; 97 __u32 format;
98 __u32 out_x, out_y;
99 __u32 out_width, out_height;
100 __u32 reserved[8];
73}; 101};
74 102
75struct omapfb_update_window_old { 103struct omapfb_update_window_old {
76 __u32 x, y; 104 __u32 x, y;
77 __u32 width, height; 105 __u32 width, height;
106 __u32 format;
78}; 107};
79 108
80enum omapfb_plane { 109enum omapfb_plane {
@@ -88,18 +117,28 @@ enum omapfb_channel_out {
88 OMAPFB_CHANNEL_OUT_DIGIT, 117 OMAPFB_CHANNEL_OUT_DIGIT,
89}; 118};
90 119
91struct omapfb_setup_plane { 120struct omapfb_plane_info {
92 __u8 plane; 121 __u32 pos_x;
122 __u32 pos_y;
123 __u8 enabled;
93 __u8 channel_out; 124 __u8 channel_out;
94 __u32 offset; 125 __u8 mirror;
95 __u32 pos_x, pos_y; 126 __u8 reserved1;
96 __u32 width, height; 127 __u32 out_width;
97 __u32 color_mode; 128 __u32 out_height;
129 __u32 reserved2[12];
98}; 130};
99 131
100struct omapfb_enable_plane { 132struct omapfb_mem_info {
101 __u8 plane; 133 __u32 size;
102 __u8 enable; 134 __u8 type;
135 __u8 reserved[3];
136};
137
138struct omapfb_caps {
139 __u32 ctrl;
140 __u32 plane_color;
141 __u32 wnd_color;
103}; 142};
104 143
105enum omapfb_color_key_type { 144enum omapfb_color_key_type {
@@ -141,6 +180,9 @@ enum omapfb_update_mode {
141 180
142#define OMAP_LCDC_PANEL_TFT 0x0100 181#define OMAP_LCDC_PANEL_TFT 0x0100
143 182
183#define OMAPFB_PLANE_XRES_MIN 8
184#define OMAPFB_PLANE_YRES_MIN 8
185
144#ifdef CONFIG_ARCH_OMAP1 186#ifdef CONFIG_ARCH_OMAP1
145#define OMAPFB_PLANE_NUM 1 187#define OMAPFB_PLANE_NUM 1
146#else 188#else
@@ -169,19 +211,19 @@ struct lcd_panel {
169 int pcd; /* pixel clock divider. 211 int pcd; /* pixel clock divider.
170 Obsolete use pixel_clock instead */ 212 Obsolete use pixel_clock instead */
171 213
172 int (*init) (struct omapfb_device *fbdev); 214 int (*init) (struct lcd_panel *panel,
173 void (*cleanup) (void); 215 struct omapfb_device *fbdev);
174 int (*enable) (void); 216 void (*cleanup) (struct lcd_panel *panel);
175 void (*disable) (void); 217 int (*enable) (struct lcd_panel *panel);
176 unsigned long (*get_caps) (void); 218 void (*disable) (struct lcd_panel *panel);
177 int (*set_bklight_level)(unsigned int level); 219 unsigned long (*get_caps) (struct lcd_panel *panel);
178 unsigned int (*get_bklight_level)(void); 220 int (*set_bklight_level)(struct lcd_panel *panel,
179 unsigned int (*get_bklight_max) (void); 221 unsigned int level);
180 int (*run_test) (int test_num); 222 unsigned int (*get_bklight_level)(struct lcd_panel *panel);
223 unsigned int (*get_bklight_max) (struct lcd_panel *panel);
224 int (*run_test) (struct lcd_panel *panel, int test_num);
181}; 225};
182 226
183struct omapfb_device;
184
185struct extif_timings { 227struct extif_timings {
186 int cs_on_time; 228 int cs_on_time;
187 int cs_off_time; 229 int cs_off_time;
@@ -202,9 +244,10 @@ struct extif_timings {
202}; 244};
203 245
204struct lcd_ctrl_extif { 246struct lcd_ctrl_extif {
205 int (*init) (void); 247 int (*init) (struct omapfb_device *fbdev);
206 void (*cleanup) (void); 248 void (*cleanup) (void);
207 void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div); 249 void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div);
250 unsigned long (*get_max_tx_rate)(void);
208 int (*convert_timings) (struct extif_timings *timings); 251 int (*convert_timings) (struct extif_timings *timings);
209 void (*set_timings) (const struct extif_timings *timings); 252 void (*set_timings) (const struct extif_timings *timings);
210 void (*set_bits_per_cycle)(int bpc); 253 void (*set_bits_per_cycle)(int bpc);
@@ -213,31 +256,48 @@ struct lcd_ctrl_extif {
213 void (*write_data) (const void *buf, unsigned int len); 256 void (*write_data) (const void *buf, unsigned int len);
214 void (*transfer_area) (int width, int height, 257 void (*transfer_area) (int width, int height,
215 void (callback)(void * data), void *data); 258 void (callback)(void * data), void *data);
259 int (*setup_tearsync) (unsigned pin_cnt,
260 unsigned hs_pulse_time, unsigned vs_pulse_time,
261 int hs_pol_inv, int vs_pol_inv, int div);
262 int (*enable_tearsync) (int enable, unsigned line);
263
216 unsigned long max_transmit_size; 264 unsigned long max_transmit_size;
217}; 265};
218 266
219struct omapfb_notifier_block { 267struct omapfb_notifier_block {
220 struct notifier_block nb; 268 struct notifier_block nb;
221 void *data; 269 void *data;
270 int plane_idx;
222}; 271};
223 272
224typedef int (*omapfb_notifier_callback_t)(struct omapfb_notifier_block *, 273typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
225 unsigned long event, 274 unsigned long event,
226 struct omapfb_device *fbdev); 275 void *fbi);
276
277struct omapfb_mem_region {
278 dma_addr_t paddr;
279 void *vaddr;
280 unsigned long size;
281 u8 type; /* OMAPFB_PLANE_MEM_* */
282 unsigned alloc:1; /* allocated by the driver */
283 unsigned map:1; /* kernel mapped by the driver */
284};
285
286struct omapfb_mem_desc {
287 int region_cnt;
288 struct omapfb_mem_region region[OMAPFB_PLANE_NUM];
289};
227 290
228struct lcd_ctrl { 291struct lcd_ctrl {
229 const char *name; 292 const char *name;
230 void *data; 293 void *data;
231 294
232 int (*init) (struct omapfb_device *fbdev, 295 int (*init) (struct omapfb_device *fbdev,
233 int ext_mode, int req_vram_size); 296 int ext_mode,
297 struct omapfb_mem_desc *req_md);
234 void (*cleanup) (void); 298 void (*cleanup) (void);
235 void (*bind_client) (struct omapfb_notifier_block *nb); 299 void (*bind_client) (struct omapfb_notifier_block *nb);
236 void (*get_vram_layout)(unsigned long *size, 300 void (*get_caps) (int plane, struct omapfb_caps *caps);
237 void **virt_base,
238 dma_addr_t *phys_base);
239 int (*mmap) (struct vm_area_struct *vma);
240 unsigned long (*get_caps) (void);
241 int (*set_update_mode)(enum omapfb_update_mode mode); 301 int (*set_update_mode)(enum omapfb_update_mode mode);
242 enum omapfb_update_mode (*get_update_mode)(void); 302 enum omapfb_update_mode (*get_update_mode)(void);
243 int (*setup_plane) (int plane, int channel_out, 303 int (*setup_plane) (int plane, int channel_out,
@@ -245,8 +305,16 @@ struct lcd_ctrl {
245 int screen_width, 305 int screen_width,
246 int pos_x, int pos_y, int width, 306 int pos_x, int pos_y, int width,
247 int height, int color_mode); 307 int height, int color_mode);
308 int (*setup_mem) (int plane, size_t size,
309 int mem_type, unsigned long *paddr);
310 int (*mmap) (struct fb_info *info,
311 struct vm_area_struct *vma);
312 int (*set_scale) (int plane,
313 int orig_width, int orig_height,
314 int out_width, int out_height);
248 int (*enable_plane) (int plane, int enable); 315 int (*enable_plane) (int plane, int enable);
249 int (*update_window) (struct omapfb_update_window *win, 316 int (*update_window) (struct fb_info *fbi,
317 struct omapfb_update_window *win,
250 void (*callback)(void *), 318 void (*callback)(void *),
251 void *callback_data); 319 void *callback_data);
252 void (*sync) (void); 320 void (*sync) (void);
@@ -257,7 +325,7 @@ struct lcd_ctrl {
257 u16 blue, u16 transp, 325 u16 blue, u16 transp,
258 int update_hw_mem); 326 int update_hw_mem);
259 int (*set_color_key) (struct omapfb_color_key *ck); 327 int (*set_color_key) (struct omapfb_color_key *ck);
260 328 int (*get_color_key) (struct omapfb_color_key *ck);
261}; 329};
262 330
263enum omapfb_state { 331enum omapfb_state {
@@ -266,19 +334,20 @@ enum omapfb_state {
266 OMAPFB_ACTIVE = 100 334 OMAPFB_ACTIVE = 100
267}; 335};
268 336
337struct omapfb_plane_struct {
338 int idx;
339 struct omapfb_plane_info info;
340 enum omapfb_color_format color_mode;
341 struct omapfb_device *fbdev;
342};
343
269struct omapfb_device { 344struct omapfb_device {
270 int state; 345 int state;
271 int ext_lcdc; /* Using external 346 int ext_lcdc; /* Using external
272 LCD controller */ 347 LCD controller */
273 struct mutex rqueue_mutex; 348 struct mutex rqueue_mutex;
274 349
275 void *vram_virt_base;
276 dma_addr_t vram_phys_base;
277 unsigned long vram_size;
278
279 int color_mode;
280 int palette_size; 350 int palette_size;
281 int mirror;
282 u32 pseudo_palette[17]; 351 u32 pseudo_palette[17];
283 352
284 struct lcd_panel *panel; /* LCD panel */ 353 struct lcd_panel *panel; /* LCD panel */
@@ -286,19 +355,19 @@ struct omapfb_device {
286 struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */ 355 struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
287 struct lcd_ctrl_extif *ext_if; /* LCD ctrl external 356 struct lcd_ctrl_extif *ext_if; /* LCD ctrl external
288 interface */ 357 interface */
289 struct fb_info *fb_info;
290
291 struct device *dev; 358 struct device *dev;
359 struct fb_var_screeninfo new_var; /* for mode changes */
360
361 struct omapfb_mem_desc mem_desc;
362 struct fb_info *fb_info[OMAPFB_PLANE_NUM];
292}; 363};
293 364
294struct omapfb_platform_data { 365struct omapfb_platform_data {
295 struct omap_lcd_config lcd; 366 struct omap_lcd_config lcd;
296 struct omap_fbmem_config fbmem; 367 struct omapfb_mem_desc mem_desc;
368 void *ctrl_platform_data;
297}; 369};
298 370
299#define OMAPFB_EVENT_READY 1
300#define OMAPFB_EVENT_DISABLED 2
301
302#ifdef CONFIG_ARCH_OMAP1 371#ifdef CONFIG_ARCH_OMAP1
303extern struct lcd_ctrl omap1_lcd_ctrl; 372extern struct lcd_ctrl omap1_lcd_ctrl;
304#else 373#else
@@ -310,15 +379,16 @@ extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
310extern void omapfb_notify_clients(struct omapfb_device *fbdev, 379extern void omapfb_notify_clients(struct omapfb_device *fbdev,
311 unsigned long event); 380 unsigned long event);
312extern int omapfb_register_client(struct omapfb_notifier_block *nb, 381extern int omapfb_register_client(struct omapfb_notifier_block *nb,
313 omapfb_notifier_callback_t callback, 382 omapfb_notifier_callback_t callback,
314 void *callback_data); 383 void *callback_data);
315extern int omapfb_unregister_client(struct omapfb_notifier_block *nb); 384extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
316extern int omapfb_update_window_async(struct omapfb_update_window *win, 385extern int omapfb_update_window_async(struct fb_info *fbi,
317 void (*callback)(void *), 386 struct omapfb_update_window *win,
318 void *callback_data); 387 void (*callback)(void *),
388 void *callback_data);
319 389
320/* in arch/arm/plat-omap/devices.c */ 390/* in arch/arm/plat-omap/fb.c */
321extern void omapfb_reserve_mem(void); 391extern void omapfb_set_ctrl_platform_data(void *pdata);
322 392
323#endif /* __KERNEL__ */ 393#endif /* __KERNEL__ */
324 394
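
Userspace sketch of the renumbered plane ioctls: read the current plane setup with OMAPFB_QUERY_PLANE, adjust it, and write it back with OMAPFB_SETUP_PLANE. The device node and field choices are illustrative, and the query-then-setup pattern is an assumption from the struct layout:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    /* plus this header for the OMAPFB_* ioctls and structs */

    int main(void)
    {
            struct omapfb_plane_info pi;
            int fd = open("/dev/fb0", O_RDWR);      /* node name illustrative */

            if (fd < 0 || ioctl(fd, OMAPFB_QUERY_PLANE, &pi) < 0)
                    return 1;
            pi.pos_x   = 0;
            pi.pos_y   = 0;
            pi.enabled = 1;
            return ioctl(fd, OMAPFB_SETUP_PLANE, &pi) < 0;
    }
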
diff --git a/include/asm-arm/arch-omap/sram.h b/include/asm-arm/arch-omap/sram.h
index 6fc0dd57b7c3..bb9bb3fd532f 100644
--- a/include/asm-arm/arch-omap/sram.h
+++ b/include/asm-arm/arch-omap/sram.h
@@ -20,9 +20,6 @@ extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
20 u32 mem_type); 20 u32 mem_type);
21extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); 21extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
22 22
23extern unsigned long omap_fb_sram_start;
24extern unsigned long omap_fb_sram_size;
25
26/* Do not use these */ 23/* Do not use these */
27extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl); 24extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
28extern unsigned long sram_reprogram_clock_sz; 25extern unsigned long sram_reprogram_clock_sz;
diff --git a/include/asm-arm/arch-omap/usb.h b/include/asm-arm/arch-omap/usb.h
index 054fb9a8e0c6..99ae9eabaf71 100644
--- a/include/asm-arm/arch-omap/usb.h
+++ b/include/asm-arm/arch-omap/usb.h
@@ -7,9 +7,27 @@
7 7
8/*-------------------------------------------------------------------------*/ 8/*-------------------------------------------------------------------------*/
9 9
10#define OTG_BASE 0xfffb0400 10#define OMAP1_OTG_BASE 0xfffb0400
11#define UDC_BASE 0xfffb4000 11#define OMAP1_UDC_BASE 0xfffb4000
12#define OMAP_OHCI_BASE 0xfffba000 12#define OMAP1_OHCI_BASE 0xfffba000
13
14#define OMAP2_OHCI_BASE 0x4805e000
15#define OMAP2_UDC_BASE 0x4805e200
16#define OMAP2_OTG_BASE 0x4805e300
17
18#ifdef CONFIG_ARCH_OMAP1
19
20#define OTG_BASE OMAP1_OTG_BASE
21#define UDC_BASE OMAP1_UDC_BASE
22#define OMAP_OHCI_BASE OMAP1_OHCI_BASE
23
24#else
25
26#define OTG_BASE OMAP2_OTG_BASE
27#define UDC_BASE OMAP2_UDC_BASE
28#define OMAP_OHCI_BASE OMAP2_OHCI_BASE
29
30#endif
13 31
14/*-------------------------------------------------------------------------*/ 32/*-------------------------------------------------------------------------*/
15 33
@@ -28,6 +46,7 @@
28# define HST_IDLE_EN (1 << 14) 46# define HST_IDLE_EN (1 << 14)
29# define DEV_IDLE_EN (1 << 13) 47# define DEV_IDLE_EN (1 << 13)
30# define OTG_RESET_DONE (1 << 2) 48# define OTG_RESET_DONE (1 << 2)
49# define OTG_SOFT_RESET (1 << 1)
31#define OTG_SYSCON_2_REG OTG_REG32(0x08) 50#define OTG_SYSCON_2_REG OTG_REG32(0x08)
32# define OTG_EN (1 << 31) 51# define OTG_EN (1 << 31)
33# define USBX_SYNCHRO (1 << 30) 52# define USBX_SYNCHRO (1 << 30)
@@ -103,6 +122,7 @@
103 122
104/*-------------------------------------------------------------------------*/ 123/*-------------------------------------------------------------------------*/
105 124
125/* OMAP1 */
106#define USB_TRANSCEIVER_CTRL_REG __REG32(0xfffe1000 + 0x0064) 126#define USB_TRANSCEIVER_CTRL_REG __REG32(0xfffe1000 + 0x0064)
107# define CONF_USB2_UNI_R (1 << 8) 127# define CONF_USB2_UNI_R (1 << 8)
108# define CONF_USB1_UNI_R (1 << 7) 128# define CONF_USB1_UNI_R (1 << 7)
@@ -111,7 +131,17 @@
111# define CONF_USB_PWRDN_DM_R (1 << 2) 131# define CONF_USB_PWRDN_DM_R (1 << 2)
112# define CONF_USB_PWRDN_DP_R (1 << 1) 132# define CONF_USB_PWRDN_DP_R (1 << 1)
113 133
114 134/* OMAP2 */
115 135#define CONTROL_DEVCONF_REG __REG32(L4_24XX_BASE + 0x0274)
136# define USB_UNIDIR 0x0
137# define USB_UNIDIR_TLL 0x1
138# define USB_BIDIR 0x2
139# define USB_BIDIR_TLL 0x3
140# define USBT0WRMODEI(x) ((x) << 22)
141# define USBT1WRMODEI(x) ((x) << 20)
142# define USBT2WRMODEI(x) ((x) << 18)
143# define USBT2TLL5PI (1 << 17)
144# define USB0PUENACTLOI (1 << 16)
145# define USBSTANDBYCTRL (1 << 15)
116 146
117#endif /* __ASM_ARCH_OMAP_USB_H */ 147#endif /* __ASM_ARCH_OMAP_USB_H */
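
A sketch of composing the new OMAP2 transceiver field; CONTROL_DEVCONF_REG is an lvalue-style __REG32 accessor in this tree, and the chosen port modes are examples only:

    static void __init example_setup_usb_ports(void)
    {
            u32 devconf = CONTROL_DEVCONF_REG;

            devconf |= USBT0WRMODEI(USB_BIDIR);      /* port 0: bidirectional */
            devconf |= USBT1WRMODEI(USB_UNIDIR_TLL); /* port 1: unidir via TLL */
            devconf |= USBSTANDBYCTRL;
            CONTROL_DEVCONF_REG = devconf;
    }
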
diff --git a/include/asm-arm/arch-s3c2410/regs-power.h b/include/asm-arm/arch-s3c2410/regs-power.h
index 6c319ea2afac..94ff96505b6a 100644
--- a/include/asm-arm/arch-s3c2410/regs-power.h
+++ b/include/asm-arm/arch-s3c2410/regs-power.h
@@ -1,4 +1,4 @@
1/* linux/include/asm/arch-s3c2410/regs-power.h 1/* linux/include/asm-arm/arch-s3c2410/regs-power.h
2 * 2 *
3 * Copyright (c) 2003,2004,2005,2006 Simtec Electronics <linux@simtec.co.uk> 3 * Copyright (c) 2003,2004,2005,2006 Simtec Electronics <linux@simtec.co.uk>
4 * http://armlinux.simtec.co.uk/ 4 * http://armlinux.simtec.co.uk/
diff --git a/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h b/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h
index ff0536d2de42..cd9e26568f85 100644
--- a/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h
+++ b/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h
@@ -1,4 +1,4 @@
1/* linux/include/asm-arm/arch-s3c2410/regs-clock.h 1/* linux/include/asm-arm/arch-s3c2410/regs-s3c2443-clock.h
2 * 2 *
3 * Copyright (c) 2007 Simtec Electronics 3 * Copyright (c) 2007 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
diff --git a/include/asm-arm/arch-s3c2410/regs-watchdog.h b/include/asm-arm/arch-s3c2410/regs-watchdog.h
index f4fff448c7bd..a9c5d491bdb6 100644
--- a/include/asm-arm/arch-s3c2410/regs-watchdog.h
+++ b/include/asm-arm/arch-s3c2410/regs-watchdog.h
@@ -1,4 +1,4 @@
1/* linux/include/asm/arch-s3c2410/regs-watchdog.h 1/* linux/include/asm-arm/arch-s3c2410/regs-watchdog.h
2 * 2 *
3 * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk> 3 * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
4 * http://www.simtec.co.uk/products/SWLINUX/ 4 * http://www.simtec.co.uk/products/SWLINUX/
diff --git a/include/asm-arm/arch-s3c2410/udc.h b/include/asm-arm/arch-s3c2410/udc.h
index e59ec339d614..b8aa6cb69b58 100644
--- a/include/asm-arm/arch-s3c2410/udc.h
+++ b/include/asm-arm/arch-s3c2410/udc.h
@@ -1,4 +1,4 @@
1/* linux/include/asm/arch-s3c2410/udc.h 1/* linux/include/asm-arm/arch-s3c2410/udc.h
2 * 2 *
3 * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org> 3 * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
4 * 4 *
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index afad32c76e6c..d1294a46c70c 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -102,6 +102,14 @@
102//# endif 102//# endif
103#endif 103#endif
104 104
105#if defined(CONFIG_CPU_V7)
106//# ifdef _CACHE
107# define MULTI_CACHE 1
108//# else
109//# define _CACHE v7
110//# endif
111#endif
112
105#if !defined(_CACHE) && !defined(MULTI_CACHE) 113#if !defined(_CACHE) && !defined(MULTI_CACHE)
106#error Unknown cache maintainence model 114#error Unknown cache maintainence model
107#endif 115#endif
@@ -418,11 +426,19 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
418 */ 426 */
419#define flush_icache_page(vma,page) do { } while (0) 427#define flush_icache_page(vma,page) do { } while (0)
420 428
421#define __cacheid_present(val) (val != read_cpuid(CPUID_ID)) 429#define __cacheid_present(val) (val != read_cpuid(CPUID_ID))
422#define __cacheid_vivt(val) ((val & (15 << 25)) != (14 << 25)) 430#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29))
423#define __cacheid_vipt(val) ((val & (15 << 25)) == (14 << 25)) 431
424#define __cacheid_vipt_nonaliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) 432#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25))
425#define __cacheid_vipt_aliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) 433#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25))
434#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
435#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
436
437#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
438#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
439#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
440#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
441#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
426 442
427#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) 443#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
428 444
@@ -430,6 +446,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
430#define cache_is_vipt() 0 446#define cache_is_vipt() 0
431#define cache_is_vipt_nonaliasing() 0 447#define cache_is_vipt_nonaliasing() 0
432#define cache_is_vipt_aliasing() 0 448#define cache_is_vipt_aliasing() 0
449#define icache_is_vivt_asid_tagged() 0
433 450
434#elif defined(CONFIG_CPU_CACHE_VIPT) 451#elif defined(CONFIG_CPU_CACHE_VIPT)
435 452
@@ -447,6 +464,12 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
447 __cacheid_vipt_aliasing(__val); \ 464 __cacheid_vipt_aliasing(__val); \
448 }) 465 })
449 466
467#define icache_is_vivt_asid_tagged() \
468 ({ \
469 unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
470 __cacheid_vivt_asid_tagged_instr(__val); \
471 })
472
450#else 473#else
451 474
452#define cache_is_vivt() \ 475#define cache_is_vivt() \
@@ -475,6 +498,13 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
475 __cacheid_vipt_aliasing(__val); \ 498 __cacheid_vipt_aliasing(__val); \
476 }) 499 })
477 500
501#define icache_is_vivt_asid_tagged() \
502 ({ \
503 unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
504 __cacheid_present(__val) && \
505 __cacheid_vivt_asid_tagged_instr(__val); \
506 })
507
478#endif 508#endif
479 509
480#endif 510#endif
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index abfb75b654c7..c8b5d0db0cf0 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -445,7 +445,7 @@ extern void dmabounce_unregister_dev(struct device *);
445 * 445 *
446 * The dmabounce routines call this function whenever a dma-mapping 446 * The dmabounce routines call this function whenever a dma-mapping
447 * is requested to determine whether a given buffer needs to be bounced 447 * is requested to determine whether a given buffer needs to be bounced
448 * or not. The function must return 0 if the the buffer is OK for 448 * or not. The function must return 0 if the buffer is OK for
449 * DMA access and 1 if the buffer needs to be bounced. 449 * DMA access and 1 if the buffer needs to be bounced.
450 * 450 *
451 */ 451 */
diff --git a/include/asm-arm/glue.h b/include/asm-arm/glue.h
index 0cc5d3b10ce2..22274ce81375 100644
--- a/include/asm-arm/glue.h
+++ b/include/asm-arm/glue.h
@@ -38,6 +38,7 @@
38 * v5tej_early - ARMv5 with Thumb and Java early abort handler 38 * v5tej_early - ARMv5 with Thumb and Java early abort handler
39 * xscale - ARMv5 with Thumb with Xscale extensions 39 * xscale - ARMv5 with Thumb with Xscale extensions
40 * v6_early - ARMv6 generic early abort handler 40 * v6_early - ARMv6 generic early abort handler
41 * v7_early - ARMv7 generic early abort handler
41 */ 42 */
42#undef CPU_ABORT_HANDLER 43#undef CPU_ABORT_HANDLER
43#undef MULTI_ABORT 44#undef MULTI_ABORT
@@ -106,6 +107,14 @@
106# endif 107# endif
107#endif 108#endif
108 109
110#ifdef CONFIG_CPU_ABRT_EV7
111# ifdef CPU_ABORT_HANDLER
112# define MULTI_ABORT 1
113# else
114# define CPU_ABORT_HANDLER v7_early_abort
115# endif
116#endif
117
109#ifndef CPU_ABORT_HANDLER 118#ifndef CPU_ABORT_HANDLER
110#error Unknown data abort handler type 119#error Unknown data abort handler type
111#endif 120#endif
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index f8755c818b54..4981ad419198 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -36,8 +36,9 @@ void __check_kvm_seq(struct mm_struct *mm);
36 * The context ID is used by debuggers and trace logic, and 36 * The context ID is used by debuggers and trace logic, and
37 * should be unique within all running processes. 37 * should be unique within all running processes.
38 */ 38 */
39#define ASID_BITS 8 39#define ASID_BITS 8
40#define ASID_MASK ((~0) << ASID_BITS) 40#define ASID_MASK ((~0) << ASID_BITS)
41#define ASID_FIRST_VERSION (1 << ASID_BITS)
41 42
42extern unsigned int cpu_last_asid; 43extern unsigned int cpu_last_asid;
43 44
@@ -96,8 +97,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
96#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
97 unsigned int cpu = smp_processor_id(); 98 unsigned int cpu = smp_processor_id();
98 99
99 if (prev != next) { 100 if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
100 cpu_set(cpu, next->cpu_vm_mask);
101 check_context(next); 101 check_context(next);
102 cpu_switch_mm(next->pgd, next); 102 cpu_switch_mm(next->pgd, next);
103 if (cache_is_vivt()) 103 if (cache_is_vivt())
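
For reference, with ASID_BITS = 8 the new constant makes the context ID split into an 8-bit ASID plus a generation counter that advances in ASID_FIRST_VERSION steps; a decoding sketch, with context_id standing for a task's mm context value:

    /*
     *  ASID_MASK          = (~0) << 8 = 0xffffff00   (generation bits)
     *  ASID_FIRST_VERSION = 1 << 8    = 0x00000100
     */
    unsigned int asid    = context_id & ~ASID_MASK;  /* low 8 bits */
    unsigned int version = context_id &  ASID_MASK;  /* upper 24 bits */
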
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index ea7e54c319be..5599d4e5e708 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -193,6 +193,14 @@
193# define CPU_NAME cpu_v6 193# define CPU_NAME cpu_v6
194# endif 194# endif
195# endif 195# endif
196# ifdef CONFIG_CPU_V7
197# ifdef CPU_NAME
198# undef MULTI_CPU
199# define MULTI_CPU
200# else
201# define CPU_NAME cpu_v7
202# endif
203# endif
196#endif 204#endif
197 205
198#ifndef __ASSEMBLY__ 206#ifndef __ASSEMBLY__
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index f2da3b6e3a83..6f8e6a69dc5f 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -14,6 +14,7 @@
14#define CPU_ARCH_ARMv5TE 6 14#define CPU_ARCH_ARMv5TE 6
15#define CPU_ARCH_ARMv5TEJ 7 15#define CPU_ARCH_ARMv5TEJ 7
16#define CPU_ARCH_ARMv6 8 16#define CPU_ARCH_ARMv6 8
17#define CPU_ARCH_ARMv7 9
17 18
18/* 19/*
19 * CR1 bits (CP#15 CR1) 20 * CR1 bits (CP#15 CR1)
@@ -155,7 +156,11 @@ extern unsigned int user_debug;
155#define vectors_high() (0) 156#define vectors_high() (0)
156#endif 157#endif
157 158
158#if defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ >= 6 159#if __LINUX_ARM_ARCH__ >= 7
160#define isb() __asm__ __volatile__ ("isb" : : : "memory")
161#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
162#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
163#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
159#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ 164#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
160 : : "r" (0) : "memory") 165 : : "r" (0) : "memory")
161#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ 166#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
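
Usage of the barriers is unchanged by the new ARMv7 branch; it only swaps the CP15 c7 encodings for the native isb/dsb/dmb instructions. A minimal producer-side sketch with a hypothetical shared structure:

    struct shared_buf { u32 data; u32 ready; };  /* hypothetical */

    static void publish(struct shared_buf *shared, u32 value)
    {
            shared->data = value;   /* payload first */
            dmb();                  /* order the payload before the flag */
            shared->ready = 1;      /* the other observer polls this */
    }
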
diff --git a/include/asm-arm26/io.h b/include/asm-arm26/io.h
index 2aa033bd0678..a5a7a4d5e09c 100644
--- a/include/asm-arm26/io.h
+++ b/include/asm-arm26/io.h
@@ -321,7 +321,7 @@ DECLARE_IO(int,l,"")
321 321
322#define mmiowb() 322#define mmiowb()
323 323
324/* the following macro is depreciated */ 324/* the following macro is deprecated */
325#define ioaddr(port) __ioaddr((port)) 325#define ioaddr(port) __ioaddr((port))
326 326
327/* 327/*
diff --git a/include/asm-arm26/memory.h b/include/asm-arm26/memory.h
index a65f10b80dfb..7c1e5be39060 100644
--- a/include/asm-arm26/memory.h
+++ b/include/asm-arm26/memory.h
@@ -60,7 +60,7 @@ static inline void *phys_to_virt(unsigned long x)
60/* 60/*
61 * Virtual <-> DMA view memory address translations 61 * Virtual <-> DMA view memory address translations
62 * Again, these are *only* valid on the kernel direct mapped RAM 62 * Again, these are *only* valid on the kernel direct mapped RAM
63 * memory. Use of these is *depreciated*. 63 * memory. Use of these is *deprecated*.
64 */ 64 */
65#define virt_to_bus(x) ((unsigned long)(x)) 65#define virt_to_bus(x) ((unsigned long)(x))
66#define bus_to_virt(x) ((void *)((unsigned long)(x))) 66#define bus_to_virt(x) ((void *)((unsigned long)(x)))
@@ -93,7 +93,7 @@ static inline void *phys_to_virt(unsigned long x)
93#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 93#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
94 94
95/* 95/*
96 * We should really eliminate virt_to_bus() here - it's depreciated. 96 * We should really eliminate virt_to_bus() here - it's deprecated.
97 */ 97 */
98#define page_to_bus(page) (page_address(page)) 98#define page_to_bus(page) (page_address(page))
99 99
diff --git a/include/asm-arm26/setup.h b/include/asm-arm26/setup.h
index 1a867b4e8d53..10fd07c76662 100644
--- a/include/asm-arm26/setup.h
+++ b/include/asm-arm26/setup.h
@@ -70,7 +70,7 @@ struct tag_ramdisk {
70/* describes where the compressed ramdisk image lives */ 70/* describes where the compressed ramdisk image lives */
71/* 71/*
72 * this one accidentally used virtual addresses - as such, 72 * this one accidentally used virtual addresses - as such,
73 * its depreciated. 73 * it's deprecated.
74 */ 74 */
75#define ATAG_INITRD 0x54410005 75#define ATAG_INITRD 0x54410005
76 76
diff --git a/include/asm-avr32/arch-at32ap/cpu.h b/include/asm-avr32/arch-at32ap/cpu.h
new file mode 100644
index 000000000000..2bdc5bd6f793
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/cpu.h
@@ -0,0 +1,33 @@
1/*
2 * AVR32 and (fake) AT91 CPU identification
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARCH_CPU_H
11#define __ASM_ARCH_CPU_H
12
13/*
14 * Only AT32AP7000 is defined for now. We can identify the specific
15 * chip at runtime, but I'm not sure if it's really worth it.
16 */
17#ifdef CONFIG_CPU_AT32AP7000
18# define cpu_is_at32ap7000() (1)
19#else
20# define cpu_is_at32ap7000() (0)
21#endif
22
23/*
24 * Since this is AVR32, we will never run on any AT91 CPU. But these
25 * definitions may reduce clutter in common drivers.
26 */
27#define cpu_is_at91rm9200() (0)
28#define cpu_is_at91sam9xe() (0)
29#define cpu_is_at91sam9260() (0)
30#define cpu_is_at91sam9261() (0)
31#define cpu_is_at91sam9263() (0)
32
33#endif /* __ASM_ARCH_CPU_H */
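
A sketch of the intended effect in a driver shared between AT91 and AVR32 boards; both helper functions here are hypothetical, and on AVR32 the AT91 branch is constant-folded away without any #ifdef:

    static void __init board_quirks(void)
    {
            if (cpu_is_at91rm9200())
                    apply_rm9200_errata();  /* hypothetical; dead code on AVR32 */
            else if (cpu_is_at32ap7000())
                    setup_ap7000_clocks();  /* hypothetical */
    }
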
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 1ff1a217015d..b0828d43e110 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -110,7 +110,7 @@ struct tagtable {
110 int (*parse)(struct tag *); 110 int (*parse)(struct tag *);
111}; 111};
112 112
113#define __tag __attribute_used__ __attribute__((__section__(".taglist"))) 113#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
114#define __tagtable(tag, fn) \ 114#define __tagtable(tag, fn) \
115 static struct tagtable __tagtable_##fn __tag = { tag, fn } 115 static struct tagtable __tagtable_##fn __tag = { tag, fn }
116 116
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index 8f5120471819..2418cce624cc 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -295,8 +295,10 @@
295#define __NR_shmdt 276 295#define __NR_shmdt 276
296#define __NR_shmctl 277 296#define __NR_shmctl 277
297 297
298#define __NR_utimensat 278
299
298#ifdef __KERNEL__ 300#ifdef __KERNEL__
299#define NR_syscalls 278 301#define NR_syscalls 279
300 302
301 303
302#define __ARCH_WANT_IPC_PARSE_VERSION 304#define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h
index 997465c93e82..0336ff132c16 100644
--- a/include/asm-blackfin/processor.h
+++ b/include/asm-blackfin/processor.h
@@ -58,10 +58,10 @@ do { \
58 (_regs)->pc = (_pc); \ 58 (_regs)->pc = (_pc); \
59 if (current->mm) \ 59 if (current->mm) \
60 (_regs)->p5 = current->mm->start_data; \ 60 (_regs)->p5 = current->mm->start_data; \
61 current->thread_info->l1_task_info.stack_start \ 61 task_thread_info(current)->l1_task_info.stack_start \
62 = (void *)current->mm->context.stack_start; \ 62 = (void *)current->mm->context.stack_start; \
63 current->thread_info->l1_task_info.lowest_sp = (void *)(_usp); \ 63 task_thread_info(current)->l1_task_info.lowest_sp = (void *)(_usp); \
64 memcpy(L1_SCRATCH_TASK_INFO, &current->thread_info->l1_task_info, \ 64 memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info, \
65 sizeof(*L1_SCRATCH_TASK_INFO)); \ 65 sizeof(*L1_SCRATCH_TASK_INFO)); \
66 wrusp(_usp); \ 66 wrusp(_usp); \
67} while(0) 67} while(0)
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index b5bf6e7cb5e8..5e5f1a0566c0 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -239,9 +239,9 @@ asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_stru
239 239
240#define switch_to(prev,next,last) \ 240#define switch_to(prev,next,last) \
241do { \ 241do { \
242 memcpy (&prev->thread_info->l1_task_info, L1_SCRATCH_TASK_INFO, \ 242 memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
243 sizeof *L1_SCRATCH_TASK_INFO); \ 243 sizeof *L1_SCRATCH_TASK_INFO); \
244 memcpy (L1_SCRATCH_TASK_INFO, &next->thread_info->l1_task_info, \ 244 memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
245 sizeof *L1_SCRATCH_TASK_INFO); \ 245 sizeof *L1_SCRATCH_TASK_INFO); \
246 (last) = resume (prev, next); \ 246 (last) = resume (prev, next); \
247} while (0) 247} while (0)
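[Note: both Blackfin hunks are the mechanical half of the tree-wide switch from poking current->thread_info directly to the task_thread_info() accessor, which hides where the thread_info actually lives. After the struct member rename elsewhere in this series, the generic accessor is essentially the following — a sketch, not the verbatim header:

    /* Generic case; architectures defining __HAVE_THREAD_FUNCTIONS
     * (such as m68k below) provide their own version. */
    #define task_thread_info(tsk)   ((struct thread_info *)(tsk)->stack)
]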
diff --git a/include/asm-frv/tlb.h b/include/asm-frv/tlb.h
index f94fe5cb9b3a..cd458eb6d75e 100644
--- a/include/asm-frv/tlb.h
+++ b/include/asm-frv/tlb.h
@@ -3,7 +3,11 @@
3 3
4#include <asm/tlbflush.h> 4#include <asm/tlbflush.h>
5 5
6#ifdef CONFIG_MMU
7extern void check_pgt_cache(void);
8#else
6#define check_pgt_cache() do {} while(0) 9#define check_pgt_cache() do {} while(0)
10#endif
7 11
8/* 12/*
9 * we don't need any special per-pte or per-vma handling... 13 * we don't need any special per-pte or per-vma handling...
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 78339319ba02..cd8a9641bd66 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -58,7 +58,7 @@ extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
58 * if you do not require the atomic guarantees. 58 * if you do not require the atomic guarantees.
59 * 59 *
60 * Note: there are no guarantees that this function will not be reordered 60 * Note: there are no guarantees that this function will not be reordered
61 * on non x86 architectures, so if you are writting portable code, 61 * on non x86 architectures, so if you are writing portable code,
62 * make sure not to rely on its reordering guarantees. 62 * make sure not to rely on its reordering guarantees.
63 * 63 *
64 * Note that @nr may be almost arbitrarily large; this function is not 64 * Note that @nr may be almost arbitrarily large; this function is not
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 273b50629357..a20fe9822f60 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -27,7 +27,7 @@
27 * if you do not require the atomic guarantees. 27 * if you do not require the atomic guarantees.
28 * 28 *
29 * Note: there are no guarantees that this function will not be reordered 29 * Note: there are no guarantees that this function will not be reordered
30 * on non x86 architectures, so if you are writting portable code, 30 * on non x86 architectures, so if you are writing portable code,
31 * make sure not to rely on its reordering guarantees. 31 * make sure not to rely on its reordering guarantees.
32 * 32 *
33 * Note that @nr may be almost arbitrarily large; this function is not 33 * Note that @nr may be almost arbitrarily large; this function is not
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index e7686d0a8413..bd024ab4fe53 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,7 +12,7 @@
12#define EXTENDED_VGA 0xfffe /* 80x50 mode */ 12#define EXTENDED_VGA 0xfffe /* 80x50 mode */
13#define ASK_VGA 0xfffd /* ask for it at bootup */ 13#define ASK_VGA 0xfffd /* ask for it at bootup */
14 14
15/* Physical address where kenrel should be loaded. */ 15/* Physical address where kernel should be loaded. */
16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
17 + (CONFIG_PHYSICAL_ALIGN - 1)) \ 17 + (CONFIG_PHYSICAL_ALIGN - 1)) \
18 & ~(CONFIG_PHYSICAL_ALIGN - 1)) 18 & ~(CONFIG_PHYSICAL_ALIGN - 1))
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 3503ad66945e..118e9812778f 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -122,21 +122,21 @@ static inline int pfn_valid(int pfn)
122 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) 122 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
123#define alloc_bootmem_node(pgdat, x) \ 123#define alloc_bootmem_node(pgdat, x) \
124({ \ 124({ \
125 struct pglist_data __attribute__ ((unused)) \ 125 struct pglist_data __maybe_unused \
126 *__alloc_bootmem_node__pgdat = (pgdat); \ 126 *__alloc_bootmem_node__pgdat = (pgdat); \
127 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ 127 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
128 __pa(MAX_DMA_ADDRESS)); \ 128 __pa(MAX_DMA_ADDRESS)); \
129}) 129})
130#define alloc_bootmem_pages_node(pgdat, x) \ 130#define alloc_bootmem_pages_node(pgdat, x) \
131({ \ 131({ \
132 struct pglist_data __attribute__ ((unused)) \ 132 struct pglist_data __maybe_unused \
133 *__alloc_bootmem_node__pgdat = (pgdat); \ 133 *__alloc_bootmem_node__pgdat = (pgdat); \
134 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ 134 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
135 __pa(MAX_DMA_ADDRESS)) \ 135 __pa(MAX_DMA_ADDRESS)) \
136}) 136})
137#define alloc_bootmem_low_pages_node(pgdat, x) \ 137#define alloc_bootmem_low_pages_node(pgdat, x) \
138({ \ 138({ \
139 struct pglist_data __attribute__ ((unused)) \ 139 struct pglist_data __maybe_unused \
140 *__alloc_bootmem_node__pgdat = (pgdat); \ 140 *__alloc_bootmem_node__pgdat = (pgdat); \
141 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \ 141 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
142}) 142})
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 26861df52cc4..df21ea049369 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -86,62 +86,50 @@ static inline unsigned long long native_read_pmc(void)
86 86
87#define rdmsr(msr,val1,val2) \ 87#define rdmsr(msr,val1,val2) \
88 do { \ 88 do { \
89 unsigned long long __val = native_read_msr(msr); \ 89 u64 __val = native_read_msr(msr); \
90 val1 = __val; \ 90 (val1) = (u32)__val; \
91 val2 = __val >> 32; \ 91 (val2) = (u32)(__val >> 32); \
92 } while(0) 92 } while(0)
93 93
94#define wrmsr(msr,val1,val2) \ 94static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
95 native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
96
97#define rdmsrl(msr,val) \
98 do { \
99 (val) = native_read_msr(msr); \
100 } while(0)
101
102static inline void wrmsrl (unsigned long msr, unsigned long long val)
103{ 95{
104 unsigned long lo, hi; 96 native_write_msr(__msr, ((u64)__high << 32) | __low);
105 lo = (unsigned long) val;
106 hi = val >> 32;
107 wrmsr (msr, lo, hi);
108} 97}
109 98
99#define rdmsrl(msr,val) \
100 ((val) = native_read_msr(msr))
101
102#define wrmsrl(msr,val) native_write_msr(msr, val)
103
110/* wrmsr with exception handling */ 104/* wrmsr with exception handling */
111#define wrmsr_safe(msr,val1,val2) \ 105static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
112 (native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1)) 106{
107 return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
108}
113 109
114/* rdmsr with exception handling */ 110/* rdmsr with exception handling */
115#define rdmsr_safe(msr,p1,p2) \ 111#define rdmsr_safe(msr,p1,p2) \
116 ({ \ 112 ({ \
117 int __err; \ 113 int __err; \
118 unsigned long long __val = native_read_msr_safe(msr, &__err);\ 114 u64 __val = native_read_msr_safe(msr, &__err); \
119 (*p1) = __val; \ 115 (*p1) = (u32)__val; \
120 (*p2) = __val >> 32; \ 116 (*p2) = (u32)(__val >> 32); \
121 __err; \ 117 __err; \
122 }) 118 })
123 119
124#define rdtsc(low,high) \
125 do { \
126 u64 _l = native_read_tsc(); \
127 (low) = (u32)_l; \
128 (high) = _l >> 32; \
129 } while(0)
130
131#define rdtscl(low) \ 120#define rdtscl(low) \
132 do { \ 121 ((low) = (u32)native_read_tsc())
133 (low) = native_read_tsc(); \
134 } while(0)
135 122
136#define rdtscll(val) ((val) = native_read_tsc()) 123#define rdtscll(val) \
124 ((val) = native_read_tsc())
137 125
138#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 126#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
139 127
140#define rdpmc(counter,low,high) \ 128#define rdpmc(counter,low,high) \
141 do { \ 129 do { \
142 u64 _l = native_read_pmc(); \ 130 u64 _l = native_read_pmc(); \
143 low = (u32)_l; \ 131 (low) = (u32)_l; \
144 high = _l >> 32; \ 132 (high) = (u32)(_l >> 32); \
145 } while(0) 133 } while(0)
146#endif /* !CONFIG_PARAVIRT */ 134#endif /* !CONFIG_PARAVIRT */
147 135
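[Note: recasting wrmsr()/wrmsr_safe() as inline functions gives the arguments real u32 types, so mismatched operands are diagnosed instead of silently shifted, while the remaining macros gain parentheses and explicit (u32) truncations. Usage is unchanged; a sketch — 0x1b, the IA32_APICBASE MSR, is used purely as an example, and this must run in kernel context:

    static void msr_example(void)
    {
            u32 lo, hi;
            u64 whole;

            rdmsr(0x1b, lo, hi);    /* 64-bit MSR split into two u32 halves */
            wrmsr(0x1b, lo, hi);    /* type-checked: both halves must be u32 */

            rdmsrl(0x1b, whole);    /* full 64-bit read  */
            wrmsrl(0x1b, whole);    /* full 64-bit write */
    }
]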
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e2e7f98723c5..bc5c12c13581 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -560,11 +560,6 @@ static inline u64 paravirt_read_tsc(void)
560{ 560{
561 return PVOP_CALL0(u64, read_tsc); 561 return PVOP_CALL0(u64, read_tsc);
562} 562}
563#define rdtsc(low,high) do { \
564 u64 _l = paravirt_read_tsc(); \
565 low = (u32)_l; \
566 high = _l >> 32; \
567} while(0)
568 563
569#define rdtscl(low) do { \ 564#define rdtscl(low) do { \
570 u64 _l = paravirt_read_tsc(); \ 565 u64 _l = paravirt_read_tsc(); \
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 090abc1da32a..0c7132787062 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -124,20 +124,6 @@ static inline int num_booting_cpus(void)
124 return cpus_weight(cpu_callout_map); 124 return cpus_weight(cpu_callout_map);
125} 125}
126 126
127#ifdef CONFIG_X86_LOCAL_APIC
128
129#ifdef APIC_DEFINITION
130extern int hard_smp_processor_id(void);
131#else
132#include <mach_apicdef.h>
133static inline int hard_smp_processor_id(void)
134{
135 /* we don't want to mark this access volatile - bad code generation */
136 return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
137}
138#endif
139#endif
140
141extern int safe_smp_processor_id(void); 127extern int safe_smp_processor_id(void);
142extern int __cpu_disable(void); 128extern int __cpu_disable(void);
143extern void __cpu_die(unsigned int cpu); 129extern void __cpu_die(unsigned int cpu);
@@ -152,10 +138,31 @@ extern unsigned int num_processors;
152 138
153#define NO_PROC_ID 0xFF /* No processor magic marker */ 139#define NO_PROC_ID 0xFF /* No processor magic marker */
154 140
155#endif 141#endif /* CONFIG_SMP */
156 142
157#ifndef __ASSEMBLY__ 143#ifndef __ASSEMBLY__
158 144
145#ifdef CONFIG_X86_LOCAL_APIC
146
147#ifdef APIC_DEFINITION
148extern int hard_smp_processor_id(void);
149#else
150#include <mach_apicdef.h>
151static inline int hard_smp_processor_id(void)
152{
153 /* we don't want to mark this access volatile - bad code generation */
154 return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
155}
156#endif /* APIC_DEFINITION */
157
158#else /* CONFIG_X86_LOCAL_APIC */
159
160#ifndef CONFIG_SMP
161#define hard_smp_processor_id() 0
162#endif
163
164#endif /* CONFIG_X86_LOCAL_APIC */
165
159extern u8 apicid_2_node[]; 166extern u8 apicid_2_node[];
160 167
161#ifdef CONFIG_X86_LOCAL_APIC 168#ifdef CONFIG_X86_LOCAL_APIC
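[Note: moving hard_smp_processor_id() out of the CONFIG_SMP block and adding a constant-0 stub for UP kernels without a local APIC lets callers drop their #ifdefs. Sketch of the pattern this enables; the message string is invented:

    /* Builds the same on SMP and UP; on UP this folds to CPU 0. */
    static void report_boot_cpu(void)
    {
            printk(KERN_INFO "booting on physical CPU %d\n",
                   hard_smp_processor_id());
    }
]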
diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h
index 7d72351bea75..cbce08a2d135 100644
--- a/include/asm-i386/sync_bitops.h
+++ b/include/asm-i386/sync_bitops.h
@@ -24,7 +24,7 @@
24 * if you do not require the atomic guarantees. 24 * if you do not require the atomic guarantees.
25 * 25 *
26 * Note: there are no guarantees that this function will not be reordered 26 * Note: there are no guarantees that this function will not be reordered
27 * on non x86 architectures, so if you are writting portable code, 27 * on non-x86 architectures, so if you are writing portable code,
28 * make sure not to rely on its reordering guarantees. 28 * make sure not to rely on its reordering guarantees.
29 * 29 *
30 * Note that @nr may be almost arbitrarily large; this function is not 30 * Note that @nr may be almost arbitrarily large; this function is not
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index bf01d4b342bd..4cb0f91ae64f 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -172,7 +172,7 @@ static inline struct thread_info *current_thread_info(void)
172#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 172#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
173#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */ 173#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
174 174
175#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) 175#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
176 176
177#endif /* __KERNEL__ */ 177#endif /* __KERNEL__ */
178 178
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 27f9df6b9145..c054d7a9aaa7 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -66,6 +66,7 @@ extern int ia64_last_device_vector;
66#define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */ 66#define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */
67#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ 67#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
68#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ 68#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
69#define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */
69#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */ 70#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
70#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */ 71#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
71 72
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 20f98f1751a1..421cb6b62a7c 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -83,7 +83,7 @@ extern int gsi_to_irq (unsigned int gsi);
83extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, 83extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
84 unsigned long trigger); 84 unsigned long trigger);
85extern void iosapic_unregister_intr (unsigned int irq); 85extern void iosapic_unregister_intr (unsigned int irq);
86extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, 86extern void __devinit iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
87 unsigned long polarity, 87 unsigned long polarity,
88 unsigned long trigger); 88 unsigned long trigger);
89extern int __init iosapic_register_platform_intr (u32 int_type, 89extern int __init iosapic_register_platform_intr (u32 int_type,
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 60fd4ae014f6..c60024989ebd 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,8 @@ ia64_get_lid (void)
38 return lid.f.id << 8 | lid.f.eid; 38 return lid.f.id << 8 | lid.f.eid;
39} 39}
40 40
41#define hard_smp_processor_id() ia64_get_lid()
42
41#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
42 44
43#define XTP_OFFSET 0x1e0008 45#define XTP_OFFSET 0x1e0008
@@ -110,8 +112,6 @@ max_xtp (void)
110 writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */ 112 writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
111} 113}
112 114
113#define hard_smp_processor_id() ia64_get_lid()
114
115/* Upping and downing of CPUs */ 115/* Upping and downing of CPUs */
116extern int __cpu_disable (void); 116extern int __cpu_disable (void);
117extern void __cpu_die (unsigned int cpu); 117extern void __cpu_die (unsigned int cpu);
@@ -128,7 +128,7 @@ extern void unlock_ipi_calllock(void);
128extern void identify_siblings (struct cpuinfo_ia64 *); 128extern void identify_siblings (struct cpuinfo_ia64 *);
129extern int is_multithreading_enabled(void); 129extern int is_multithreading_enabled(void);
130 130
131#else 131#else /* CONFIG_SMP */
132 132
133#define cpu_logical_id(i) 0 133#define cpu_logical_id(i) 0
134#define cpu_physical_id(i) ia64_get_lid() 134#define cpu_physical_id(i) ia64_get_lid()
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 2c4004eb5a68..291e8ceed6e6 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -106,6 +106,7 @@
106/* interrupt handling */ 106/* interrupt handling */
107#define SAL_INTR_ALLOC 1 107#define SAL_INTR_ALLOC 1
108#define SAL_INTR_FREE 2 108#define SAL_INTR_FREE 2
109#define SAL_INTR_REDIRECT 3
109 110
110/* 111/*
111 * operations available on the generic SN_SAL_SYSCTL_OP 112 * operations available on the generic SN_SAL_SYSCTL_OP
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 91698599f918..7d0241db622b 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -85,6 +85,7 @@ struct thread_info {
85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ 85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
87#define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */ 87#define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */
88#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
88#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 89#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
89#define TIF_MEMDIE 17 90#define TIF_MEMDIE 17
90#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 91#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
@@ -96,6 +97,7 @@ struct thread_info {
96#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 97#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
97#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP) 98#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
98#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 99#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
100#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 101#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 102#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
101#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 103#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
@@ -104,12 +106,12 @@ struct thread_info {
104#define _TIF_FREEZE (1 << TIF_FREEZE) 106#define _TIF_FREEZE (1 << TIF_FREEZE)
105 107
106/* "work to do on user-return" bits */ 108/* "work to do on user-return" bits */
107#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 109#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_RESTORE_SIGMASK)
108/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ 110/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
109#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) 111#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
110 112
111#define TS_POLLING 1 /* true if in idle loop and not sleeping */ 113#define TS_POLLING 1 /* true if in idle loop and not sleeping */
112 114
113#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) 115#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
114 116
115#endif /* _ASM_IA64_THREAD_INFO_H */ 117#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index cf9acb9bb1fb..e37f9fbf33af 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -27,9 +27,11 @@ extern void local_flush_tlb_all (void);
27#ifdef CONFIG_SMP 27#ifdef CONFIG_SMP
28 extern void smp_flush_tlb_all (void); 28 extern void smp_flush_tlb_all (void);
29 extern void smp_flush_tlb_mm (struct mm_struct *mm); 29 extern void smp_flush_tlb_mm (struct mm_struct *mm);
30 extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
30# define flush_tlb_all() smp_flush_tlb_all() 31# define flush_tlb_all() smp_flush_tlb_all()
31#else 32#else
32# define flush_tlb_all() local_flush_tlb_all() 33# define flush_tlb_all() local_flush_tlb_all()
34# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
33#endif 35#endif
34 36
35static inline void 37static inline void
@@ -94,6 +96,15 @@ flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end
94 */ 96 */
95} 97}
96 98
99/*
100 * Flush the local TLB. Invoked from another cpu using an IPI.
101 */
102#ifdef CONFIG_SMP
103void smp_local_flush_tlb(void);
104#else
105#define smp_local_flush_tlb()
106#endif
107
97#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ 108#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */
98 109
99#endif /* _ASM_IA64_TLBFLUSH_H */ 110#endif /* _ASM_IA64_TLBFLUSH_H */
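[Note: the two new entry points pair up — smp_flush_tlb_cpumask() IPIs a set of CPUs, and each target runs smp_local_flush_tlb() against its own TLB; on UP both collapse to a local flush. A hedged sketch of a caller, using the mm field name as of this kernel:

    /* Flush the TLBs of every CPU the mm is currently active on. */
    static void flush_mm_everywhere(struct mm_struct *mm)
    {
            smp_flush_tlb_cpumask(mm->cpu_vm_mask);
    }
]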
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index a9e1fa4cac4d..861c8ec87b09 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -283,7 +283,8 @@
283#define __NR_readlinkat 1291 283#define __NR_readlinkat 1291
284#define __NR_fchmodat 1292 284#define __NR_fchmodat 1292
285#define __NR_faccessat 1293 285#define __NR_faccessat 1293
286/* 1294, 1295 reserved for pselect/ppoll */ 286#define __NR_pselect6 1294
287#define __NR_ppoll 1295
287#define __NR_unshare 1296 288#define __NR_unshare 1296
288#define __NR_splice 1297 289#define __NR_splice 1297
289#define __NR_set_robust_list 1298 290#define __NR_set_robust_list 1298
@@ -300,6 +301,7 @@
300#define NR_syscalls 281 /* length of syscall table */ 301#define NR_syscalls 281 /* length of syscall table */
301 302
302#define __ARCH_WANT_SYS_RT_SIGACTION 303#define __ARCH_WANT_SYS_RT_SIGACTION
304#define __ARCH_WANT_SYS_RT_SIGSUSPEND
303 305
304#ifdef CONFIG_IA32_SUPPORT 306#ifdef CONFIG_IA32_SUPPORT
305# define __ARCH_WANT_SYS_FADVISE64 307# define __ARCH_WANT_SYS_FADVISE64
@@ -310,6 +312,7 @@
310# define __ARCH_WANT_SYS_OLDUMOUNT 312# define __ARCH_WANT_SYS_OLDUMOUNT
311# define __ARCH_WANT_SYS_SIGPENDING 313# define __ARCH_WANT_SYS_SIGPENDING
312# define __ARCH_WANT_SYS_SIGPROCMASK 314# define __ARCH_WANT_SYS_SIGPROCMASK
315# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
313# define __ARCH_WANT_COMPAT_SYS_TIME 316# define __ARCH_WANT_COMPAT_SYS_TIME
314#endif 317#endif
315 318
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index abd937ac5239..078e1a51a042 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -108,6 +108,10 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
108#define IPI_SHIFT (0) 108#define IPI_SHIFT (0)
109#define NR_IPIS (8) 109#define NR_IPIS (8)
110 110
111#endif /* CONFIG_SMP */ 111#else /* CONFIG_SMP */
112
113#define hard_smp_processor_id() 0
114
115#endif /* CONFIG_SMP */
112 116
113#endif /* _ASM_M32R_SMP_H */ 117#endif /* _ASM_M32R_SMP_H */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 06cdece35865..f62f5c9abba6 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -136,7 +136,7 @@ extern void __xchg_called_with_bad_pointer(void);
136 "add3 "reg0", "addr", #0x2000; \n\t" \ 136 "add3 "reg0", "addr", #0x2000; \n\t" \
137 "ld "reg0", @"reg0"; \n\t" \ 137 "ld "reg0", @"reg0"; \n\t" \
138 "unlock "reg0", @"reg1"; \n\t" 138 "unlock "reg0", @"reg1"; \n\t"
139 /* FIXME: This workaround code cannot handle kenrel modules 139 /* FIXME: This workaround code cannot handle kernel modules
140 * correctly under SMP environment. 140 * correctly under SMP environment.
141 */ 141 */
142#else /* CONFIG_CHIP_M32700_TS1 */ 142#else /* CONFIG_CHIP_M32700_TS1 */
diff --git a/include/asm-m68k/atarihw.h b/include/asm-m68k/atarihw.h
index f28acd0fd689..6211363a345f 100644
--- a/include/asm-m68k/atarihw.h
+++ b/include/asm-m68k/atarihw.h
@@ -2,7 +2,7 @@
2** linux/atarihw.h -- This header defines some macros and pointers for 2** linux/atarihw.h -- This header defines some macros and pointers for
3** the various Atari custom hardware registers. 3** the various Atari custom hardware registers.
4** 4**
5** Copyright 1994 by Bj”rn Brauel 5** Copyright 1994 by Björn Brauel
6** 6**
7** 5/1/94 Roman Hodek: 7** 5/1/94 Roman Hodek:
8** Added definitions for TT specific chips. 8** Added definitions for TT specific chips.
diff --git a/include/asm-m68k/atariints.h b/include/asm-m68k/atariints.h
index 0ed454fc24bb..ce6c445805bd 100644
--- a/include/asm-m68k/atariints.h
+++ b/include/asm-m68k/atariints.h
@@ -1,7 +1,7 @@
1/* 1/*
2** atariints.h -- Atari Linux interrupt handling structs and prototypes 2** atariints.h -- Atari Linux interrupt handling structs and prototypes
3** 3**
4** Copyright 1994 by Bj”rn Brauel 4** Copyright 1994 by Björn Brauel
5** 5**
6** 5/2/94 Roman Hodek: 6** 5/2/94 Roman Hodek:
7** TT interrupt definitions added. 7** TT interrupt definitions added.
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index c4d622a57dfb..d635a3752488 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,17 +37,17 @@ struct thread_info {
37#define init_stack (init_thread_union.stack) 37#define init_stack (init_thread_union.stack)
38 38
39#define task_thread_info(tsk) (&(tsk)->thread.info) 39#define task_thread_info(tsk) (&(tsk)->thread.info)
40#define task_stack_page(tsk) ((void *)(tsk)->thread_info) 40#define task_stack_page(tsk) ((tsk)->stack)
41#define current_thread_info() task_thread_info(current) 41#define current_thread_info() task_thread_info(current)
42 42
43#define __HAVE_THREAD_FUNCTIONS 43#define __HAVE_THREAD_FUNCTIONS
44 44
45#define setup_thread_stack(p, org) ({ \ 45#define setup_thread_stack(p, org) ({ \
46 *(struct task_struct **)(p)->thread_info = (p); \ 46 *(struct task_struct **)(p)->stack = (p); \
47 task_thread_info(p)->task = (p); \ 47 task_thread_info(p)->task = (p); \
48}) 48})
49 49
50#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1) 50#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
51 51
52/* entry.S relies on these definitions! 52/* entry.S relies on these definitions!
53 * bits 0-7 are tested at every exception exit 53 * bits 0-7 are tested at every exception exit
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index c7c945baf1ee..dbf834f4dac4 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -254,7 +254,7 @@ extern void free_init_pages(const char *what,
254extern char arcs_cmdline[CL_SIZE]; 254extern char arcs_cmdline[CL_SIZE];
255 255
256/* 256/*
257 * Registers a0, a1, a3 and a4 as passed to the kenrel entry by firmware 257 * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
258 */ 258 */
259extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; 259extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
260 260
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 30f23a2b46ca..3713d256d369 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -55,7 +55,7 @@ do { \
55 if (cpu_has_dsp) \ 55 if (cpu_has_dsp) \
56 __save_dsp(prev); \ 56 __save_dsp(prev); \
57 next->thread.emulated_fp = 0; \ 57 next->thread.emulated_fp = 0; \
58 (last) = resume(prev, next, next->thread_info); \ 58 (last) = resume(prev, next, task_thread_info(next)); \
59 if (cpu_has_dsp) \ 59 if (cpu_has_dsp) \
60 __restore_dsp(current); \ 60 __restore_dsp(current); \
61} while(0) 61} while(0)
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index fe8579023531..11f4222597a0 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
152 152
153static inline int __is_compat_task(struct task_struct *t) 153static inline int __is_compat_task(struct task_struct *t)
154{ 154{
155 return test_ti_thread_flag(t->thread_info, TIF_32BIT); 155 return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
156} 156}
157 157
158static inline int is_compat_task(void) 158static inline int is_compat_task(void)
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 6739457d8bc0..e2ca55bcfe0b 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -350,10 +350,13 @@ typedef unsigned long mm_context_id_t;
350 350
351typedef struct { 351typedef struct {
352 mm_context_id_t id; 352 mm_context_id_t id;
353 u16 user_psize; /* page size index */ 353 u16 user_psize; /* page size index */
354 u16 sllp; /* SLB entry page size encoding */ 354
355#ifdef CONFIG_HUGETLB_PAGE 355#ifdef CONFIG_PPC_MM_SLICES
356 u16 low_htlb_areas, high_htlb_areas; 356 u64 low_slices_psize; /* SLB page size encodings */
357 u64 high_slices_psize; /* 4 bits per slice for now */
358#else
359 u16 sllp; /* SLB page size encoding */
357#endif 360#endif
358 unsigned long vdso_base; 361 unsigned long vdso_base;
359} mm_context_t; 362} mm_context_t;
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index cf95274f735e..c6a5b1735666 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -83,8 +83,8 @@ struct paca_struct {
83 83
84 mm_context_t context; 84 mm_context_t context;
85 u16 vmalloc_sllp; 85 u16 vmalloc_sllp;
86 u16 slb_cache[SLB_CACHE_ENTRIES];
87 u16 slb_cache_ptr; 86 u16 slb_cache_ptr;
87 u16 slb_cache[SLB_CACHE_ENTRIES];
88 88
89 /* 89 /*
90 * then miscellaneous read-write fields 90 * then miscellaneous read-write fields
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index eab779c21995..3448a3d4bc64 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -88,57 +88,55 @@ extern unsigned int HPAGE_SHIFT;
88 88
89#endif /* __ASSEMBLY__ */ 89#endif /* __ASSEMBLY__ */
90 90
91#ifdef CONFIG_HUGETLB_PAGE 91#ifdef CONFIG_PPC_MM_SLICES
92 92
93#define HTLB_AREA_SHIFT 40 93#define SLICE_LOW_SHIFT 28
94#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) 94#define SLICE_HIGH_SHIFT 40
95#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
96 95
97#define LOW_ESID_MASK(addr, len) \ 96#define SLICE_LOW_TOP (0x100000000ul)
98 (((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \ 97#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
99 - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff) 98#define SLICE_NUM_HIGH (PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
100#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
101 - (1U << GET_HTLB_AREA(addr))) & 0xffff)
102 99
103#define ARCH_HAS_HUGEPAGE_ONLY_RANGE 100#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
104#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE 101#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
105#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
106#define ARCH_HAS_SETCLEAR_HUGE_PTE
107 102
108#define touches_hugepage_low_range(mm, addr, len) \ 103#ifndef __ASSEMBLY__
109 (((addr) < 0x100000000UL) \ 104
110 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)) 105struct slice_mask {
111#define touches_hugepage_high_range(mm, addr, len) \ 106 u16 low_slices;
112 ((((addr) + (len)) > 0x100000000UL) \ 107 u16 high_slices;
113 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)) 108};
114 109
115#define __within_hugepage_low_range(addr, len, segmask) \ 110struct mm_struct;
116 ( (((addr)+(len)) <= 0x100000000UL) \
117 && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
118#define within_hugepage_low_range(addr, len) \
119 __within_hugepage_low_range((addr), (len), \
120 current->mm->context.low_htlb_areas)
121#define __within_hugepage_high_range(addr, len, zonemask) \
122 ( ((addr) >= 0x100000000UL) \
123 && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
124#define within_hugepage_high_range(addr, len) \
125 __within_hugepage_high_range((addr), (len), \
126 current->mm->context.high_htlb_areas)
127
128#define is_hugepage_only_range(mm, addr, len) \
129 (touches_hugepage_high_range((mm), (addr), (len)) || \
130 touches_hugepage_low_range((mm), (addr), (len)))
131#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
132 111
133#define in_hugepage_area(context, addr) \ 112extern unsigned long slice_get_unmapped_area(unsigned long addr,
134 (cpu_has_feature(CPU_FTR_16M_PAGE) && \ 113 unsigned long len,
135 ( ( (addr) >= 0x100000000UL) \ 114 unsigned long flags,
136 ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \ 115 unsigned int psize,
137 : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) 116 int topdown,
117 int use_cache);
138 118
139#else /* !CONFIG_HUGETLB_PAGE */ 119extern unsigned int get_slice_psize(struct mm_struct *mm,
120 unsigned long addr);
140 121
141#define in_hugepage_area(mm, addr) 0 122extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
123extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
124
125#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
126extern int is_hugepage_only_range(struct mm_struct *m,
127 unsigned long addr,
128 unsigned long len);
129
130#endif /* __ASSEMBLY__ */
131#else
132#define slice_init()
133#endif /* CONFIG_PPC_MM_SLICES */
134
135#ifdef CONFIG_HUGETLB_PAGE
136
137#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
138#define ARCH_HAS_SETCLEAR_HUGE_PTE
139#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
142 140
143#endif /* !CONFIG_HUGETLB_PAGE */ 141#endif /* !CONFIG_HUGETLB_PAGE */
144 142
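[Note: under the new layout the address space below 4GB is carved into sixteen 256MB low slices and everything above into 1TB high slices, one bit each in struct slice_mask. Worked numbers (not from the patch) showing how an address maps to a slice index:

    /* Assuming the definitions above:
     *   GET_LOW_SLICE_INDEX(0x30000000UL)      == 0x30000000 >> 28 == 3
     *   GET_HIGH_SLICE_INDEX(0x12300000000UL)  == addr >> 40       == 1
     * so a mapping spanning 0x30000000-0x50000000 would set bits 3 and 4
     * in slice_mask.low_slices.
     */
]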
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 30b50cf56e2c..d9a3a8ca58a1 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@
14 14
15extern struct kmem_cache *pgtable_cache[]; 15extern struct kmem_cache *pgtable_cache[];
16 16
17#ifdef CONFIG_PPC_64K_PAGES 17#define PGD_CACHE_NUM 0
18#define PTE_CACHE_NUM 0 18#define PUD_CACHE_NUM 1
19#define PMD_CACHE_NUM 1 19#define PMD_CACHE_NUM 1
20#define PGD_CACHE_NUM 2 20#define HUGEPTE_CACHE_NUM 2
21#define HUGEPTE_CACHE_NUM 3 21#define PTE_NONCACHE_NUM 3 /* from GFP rather than kmem_cache */
22#else
23#define PTE_CACHE_NUM 0
24#define PMD_CACHE_NUM 1
25#define PUD_CACHE_NUM 1
26#define PGD_CACHE_NUM 0
27#define HUGEPTE_CACHE_NUM 2
28#endif
29 22
30static inline pgd_t *pgd_alloc(struct mm_struct *mm) 23static inline pgd_t *pgd_alloc(struct mm_struct *mm)
31{ 24{
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
91static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 84static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
92 unsigned long address) 85 unsigned long address)
93{ 86{
94 return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], 87 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
95 GFP_KERNEL|__GFP_REPEAT);
96} 88}
97 89
98static inline struct page *pte_alloc_one(struct mm_struct *mm, 90static inline struct page *pte_alloc_one(struct mm_struct *mm,
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
103 95
104static inline void pte_free_kernel(pte_t *pte) 96static inline void pte_free_kernel(pte_t *pte)
105{ 97{
106 kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte); 98 free_page((unsigned long)pte);
107} 99}
108 100
109static inline void pte_free(struct page *ptepage) 101static inline void pte_free(struct page *ptepage)
110{ 102{
111 pte_free_kernel(page_address(ptepage)); 103 __free_page(ptepage);
112} 104}
113 105
114#define PGF_CACHENUM_MASK 0x3 106#define PGF_CACHENUM_MASK 0x3
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf)
130 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); 122 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
131 int cachenum = pgf.val & PGF_CACHENUM_MASK; 123 int cachenum = pgf.val & PGF_CACHENUM_MASK;
132 124
133 kmem_cache_free(pgtable_cache[cachenum], p); 125 if (cachenum == PTE_NONCACHE_NUM)
126 free_page((unsigned long)p);
127 else
128 kmem_cache_free(pgtable_cache[cachenum], p);
134} 129}
135 130
136extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); 131extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
137 132
138#define __pte_free_tlb(tlb, ptepage) \ 133#define __pte_free_tlb(tlb, ptepage) \
139 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ 134 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
140 PTE_CACHE_NUM, PTE_TABLE_SIZE-1)) 135 PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
141#define __pmd_free_tlb(tlb, pmd) \ 136#define __pmd_free_tlb(tlb, pmd) \
142 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 137 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
143 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 138 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
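[Note: pgtable_free() can dispatch because the cache number rides in the two low bits of the pointer (page-table pages are far more than 4-byte aligned), with PTE_NONCACHE_NUM marking pages that came from the page allocator rather than a kmem_cache. A sketch of the encode side that pairs with the decode shown above — the real helper is pgtable_free_cache():

    /* Sketch: pack a pointer and a 2-bit cache number together. */
    static inline pgtable_free_t pack_pgtable(void *p, int cachenum)
    {
            pgtable_free_t pgf;

            pgf.val = (unsigned long)p | (cachenum & PGF_CACHENUM_MASK);
            return pgf;
    }
]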
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 1744d6ac12a2..add5481fd7c7 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -80,7 +80,11 @@
80 80
81#define pte_iterate_hashed_end() } while(0) 81#define pte_iterate_hashed_end() } while(0)
82 82
83#define pte_pagesize_index(pte) MMU_PAGE_4K 83#ifdef CONFIG_PPC_HAS_HASH_64K
84#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
85#else
86#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
87#endif
84 88
85/* 89/*
86 * 4-level page tables related bits 90 * 4-level page tables related bits
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 16ef4978520d..31cbd3d7fce8 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -35,6 +35,11 @@
35#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ 35#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
36#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ 36#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
37#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ 37#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
38
39/* Note the full page bits must be in the same location as for normal
40 * 4k pages as the same assembly will be used to insert 64K pages
41 * whether the kernel has CONFIG_PPC_64K_PAGES or not

42 */
38#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ 43#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
39#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ 44#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
40 45
@@ -88,7 +93,7 @@
88 93
89#define pte_iterate_hashed_end() } while(0); } } while(0) 94#define pte_iterate_hashed_end() } while(0); } } while(0)
90 95
91#define pte_pagesize_index(pte) \ 96#define pte_pagesize_index(mm, addr, pte) \
92 (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) 97 (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
93 98
94#define remap_4k_pfn(vma, addr, pfn, prot) \ 99#define remap_4k_pfn(vma, addr, pfn, prot) \
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index d74b2965bb82..6dcd7a811fe1 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -64,7 +64,7 @@ struct pci_dev *pci_get_device_by_addr(unsigned long addr);
64 * eeh_slot_error_detail -- record and EEH error condition to the log 64 * eeh_slot_error_detail -- record and EEH error condition to the log
65 * @severity: 1 if temporary, 2 if permanent failure. 65 * @severity: 1 if temporary, 2 if permanent failure.
66 * 66 *
67 * Obtains the the EEH error details from the RTAS subsystem, 67 * Obtains the EEH error details from the RTAS subsystem,
68 * and then logs these details with the RTAS error log system. 68 * and then logs these details with the RTAS error log system.
69 */ 69 */
70void eeh_slot_error_detail (struct pci_dn *pdn, int severity); 70void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 01717f266dc9..d037f50580e2 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -83,6 +83,7 @@ extern void __cpu_die(unsigned int cpu);
83 83
84#else 84#else
85/* for UP */ 85/* for UP */
86#define hard_smp_processor_id() 0
86#define smp_setup_cpu_maps() 87#define smp_setup_cpu_maps()
87 88
88#endif /* CONFIG_SMP */ 89#endif /* CONFIG_SMP */
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 02e56a6685a2..c48ae185c874 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -235,6 +235,12 @@ struct spu_priv2_collapsed {
235 */ 235 */
236struct spu_state { 236struct spu_state {
237 struct spu_lscsa *lscsa; 237 struct spu_lscsa *lscsa;
238#ifdef CONFIG_SPU_FS_64K_LS
239 int use_big_pages;
240 /* One struct page per 64k page */
241#define SPU_LSCSA_NUM_BIG_PAGES (sizeof(struct spu_lscsa) / 0x10000)
242 struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
243#endif
238 struct spu_problem_collapsed prob; 244 struct spu_problem_collapsed prob;
239 struct spu_priv1_collapsed priv1; 245 struct spu_priv1_collapsed priv1;
240 struct spu_priv2_collapsed priv2; 246 struct spu_priv2_collapsed priv2;
@@ -247,12 +253,14 @@ struct spu_state {
247 spinlock_t register_lock; 253 spinlock_t register_lock;
248}; 254};
249 255
250extern void spu_init_csa(struct spu_state *csa); 256extern int spu_init_csa(struct spu_state *csa);
251extern void spu_fini_csa(struct spu_state *csa); 257extern void spu_fini_csa(struct spu_state *csa);
252extern int spu_save(struct spu_state *prev, struct spu *spu); 258extern int spu_save(struct spu_state *prev, struct spu *spu);
253extern int spu_restore(struct spu_state *new, struct spu *spu); 259extern int spu_restore(struct spu_state *new, struct spu *spu);
254extern int spu_switch(struct spu_state *prev, struct spu_state *new, 260extern int spu_switch(struct spu_state *prev, struct spu_state *new,
255 struct spu *spu); 261 struct spu *spu);
262extern int spu_alloc_lscsa(struct spu_state *csa);
263extern void spu_free_lscsa(struct spu_state *csa);
256 264
257#endif /* !__SPU__ */ 265#endif /* !__SPU__ */
258#endif /* __KERNEL__ */ 266#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/hydra.h b/include/asm-ppc/hydra.h
index 833a8aff2a80..1ad4eed07fbe 100644
--- a/include/asm-ppc/hydra.h
+++ b/include/asm-ppc/hydra.h
@@ -8,7 +8,7 @@
8 * Macintosh Technology in the Common Hardware Reference Platform 8 * Macintosh Technology in the Common Hardware Reference Platform
9 * Apple Computer, Inc. 9 * Apple Computer, Inc.
10 * 10 *
11 * © Copyright 1995 Apple Computer, Inc. All rights reserved. 11 * © Copyright 1995 Apple Computer, Inc. All rights reserved.
12 * 12 *
13 * It's available online from http://chrp.apple.com/MacTech.pdf. 13 * It's available online from http://chrp.apple.com/MacTech.pdf.
14 * You can obtain paper copies of this book from computer bookstores or by 14 * You can obtain paper copies of this book from computer bookstores or by
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 0a28e6d6ef40..76e424f718c6 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -110,6 +110,7 @@ static inline void smp_send_stop(void)
110 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 110 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
111} 111}
112 112
113#define hard_smp_processor_id() 0
113#define smp_cpu_not_running(cpu) 1 114#define smp_cpu_not_running(cpu) 1
114#define smp_setup_cpu_possible_map() do { } while (0) 115#define smp_setup_cpu_possible_map() do { } while (0)
115#endif 116#endif
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 794c36daf06d..46f925c815ac 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,12 +1,12 @@
1#ifndef __ASM_SH_BUG_H 1#ifndef __ASM_SH_BUG_H
2#define __ASM_SH_BUG_H 2#define __ASM_SH_BUG_H
3 3
4#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
5
4#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG 7#define HAVE_ARCH_BUG
6#define HAVE_ARCH_WARN_ON 8#define HAVE_ARCH_WARN_ON
7 9
8#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
9
10/** 10/**
11 * _EMIT_BUG_ENTRY 11 * _EMIT_BUG_ENTRY
12 * %1 - __FILE__ 12 * %1 - __FILE__
diff --git a/include/asm-sh/cpu-features.h b/include/asm-sh/cpu-features.h
index 4bccd7c032f9..86308aa39731 100644
--- a/include/asm-sh/cpu-features.h
+++ b/include/asm-sh/cpu-features.h
@@ -20,5 +20,6 @@
20#define CPU_HAS_PTEA 0x0020 /* PTEA register */ 20#define CPU_HAS_PTEA 0x0020 /* PTEA register */
21#define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */ 21#define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */
22#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */ 22#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
23#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */
23 24
24#endif /* __ASM_SH_CPU_FEATURES_H */ 25#endif /* __ASM_SH_CPU_FEATURES_H */
diff --git a/include/asm-sh/cpu-sh3/dma.h b/include/asm-sh/cpu-sh3/dma.h
index 954801b46022..3a66dc458023 100644
--- a/include/asm-sh/cpu-sh3/dma.h
+++ b/include/asm-sh/cpu-sh3/dma.h
@@ -26,7 +26,7 @@ enum {
26 XMIT_SZ_128BIT, 26 XMIT_SZ_128BIT,
27}; 27};
28 28
29static unsigned int ts_shift[] __attribute__ ((used)) = { 29static unsigned int ts_shift[] __maybe_unused = {
30 [XMIT_SZ_8BIT] = 0, 30 [XMIT_SZ_8BIT] = 0,
31 [XMIT_SZ_16BIT] = 1, 31 [XMIT_SZ_16BIT] = 1,
32 [XMIT_SZ_32BIT] = 2, 32 [XMIT_SZ_32BIT] = 2,
diff --git a/include/asm-sh/cpu-sh4/dma-sh7780.h b/include/asm-sh/cpu-sh4/dma-sh7780.h
index 6c90d28331b2..71b426a6e482 100644
--- a/include/asm-sh/cpu-sh4/dma-sh7780.h
+++ b/include/asm-sh/cpu-sh4/dma-sh7780.h
@@ -28,7 +28,7 @@ enum {
28/* 28/*
29 * The DMA count is defined as the number of bytes to transfer. 29 * The DMA count is defined as the number of bytes to transfer.
30 */ 30 */
31static unsigned int __attribute__ ((used)) ts_shift[] = { 31static unsigned int ts_shift[] __maybe_unused = {
32 [XMIT_SZ_8BIT] = 0, 32 [XMIT_SZ_8BIT] = 0,
33 [XMIT_SZ_16BIT] = 1, 33 [XMIT_SZ_16BIT] = 1,
34 [XMIT_SZ_32BIT] = 2, 34 [XMIT_SZ_32BIT] = 2,
diff --git a/include/asm-sh/cpu-sh4/dma.h b/include/asm-sh/cpu-sh4/dma.h
index c135e9cebd9c..36e26a964765 100644
--- a/include/asm-sh/cpu-sh4/dma.h
+++ b/include/asm-sh/cpu-sh4/dma.h
@@ -53,7 +53,7 @@ enum {
53/* 53/*
54 * The DMA count is defined as the number of bytes to transfer. 54 * The DMA count is defined as the number of bytes to transfer.
55 */ 55 */
56static unsigned int ts_shift[] __attribute__ ((used)) = { 56static unsigned int ts_shift[] __maybe_unused = {
57 [XMIT_SZ_64BIT] = 3, 57 [XMIT_SZ_64BIT] = 3,
58 [XMIT_SZ_8BIT] = 0, 58 [XMIT_SZ_8BIT] = 0,
59 [XMIT_SZ_16BIT] = 1, 59 [XMIT_SZ_16BIT] = 1,
diff --git a/include/asm-sh/dmabrg.h b/include/asm-sh/dmabrg.h
new file mode 100644
index 000000000000..c5edba216cf1
--- /dev/null
+++ b/include/asm-sh/dmabrg.h
@@ -0,0 +1,23 @@
1/*
2 * SH7760 DMABRG (USB/Audio) support
3 */
4
5#ifndef _DMABRG_H_
6#define _DMABRG_H_
7
8/* IRQ sources */
9#define DMABRGIRQ_USBDMA 0
10#define DMABRGIRQ_USBDMAERR 1
11#define DMABRGIRQ_A0TXF 2
12#define DMABRGIRQ_A0TXH 3
13#define DMABRGIRQ_A0RXF 4
14#define DMABRGIRQ_A0RXH 5
15#define DMABRGIRQ_A1TXF 6
16#define DMABRGIRQ_A1TXH 7
17#define DMABRGIRQ_A1RXF 8
18#define DMABRGIRQ_A1RXH 9
19
20extern int dmabrg_request_irq(unsigned int, void(*)(void *), void *);
21extern void dmabrg_free_irq(unsigned int);
22
23#endif
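[Note: the two externs above are the whole interface — a driver claims one of the ten event sources with a callback and an opaque argument, then releases it on teardown. A hedged usage sketch; the driver shape is invented:

    static void usbdma_done(void *arg)
    {
            /* called by the DMABRG when the USB DMA transfer completes */
    }

    static int example_init(void)
    {
            int ret;

            ret = dmabrg_request_irq(DMABRGIRQ_USBDMA, usbdma_done, NULL);
            if (ret)
                    return ret;
            /* ... use the hardware ... */
            dmabrg_free_irq(DMABRGIRQ_USBDMA);
            return 0;
    }
]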
diff --git a/include/asm-sh/edosk7705.h b/include/asm-sh/edosk7705.h
index a1089a65bc36..5bdc9d9be3de 100644
--- a/include/asm-sh/edosk7705.h
+++ b/include/asm-sh/edosk7705.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm-sh/edosk7705/io.h 2 * include/asm-sh/edosk7705.h
3 * 3 *
4 * Modified version of io_se.h for the EDOSK7705 specific functions. 4 * Modified version of io_se.h for the EDOSK7705 specific functions.
5 * 5 *
diff --git a/include/asm-sh/kdebug.h b/include/asm-sh/kdebug.h
index 493c20629747..16578b7c9da1 100644
--- a/include/asm-sh/kdebug.h
+++ b/include/asm-sh/kdebug.h
@@ -2,20 +2,6 @@
2#define __ASM_SH_KDEBUG_H 2#define __ASM_SH_KDEBUG_H
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5#include <asm-generic/kdebug.h>
6
7struct pt_regs;
8
9struct die_args {
10 struct pt_regs *regs;
11 int trapnr;
12};
13
14int register_die_notifier(struct notifier_block *nb);
15int unregister_die_notifier(struct notifier_block *nb);
16int register_page_fault_notifier(struct notifier_block *nb);
17int unregister_page_fault_notifier(struct notifier_block *nb);
18extern struct atomic_notifier_head shdie_chain;
19 5
20/* Grossly misnamed. */ 6/* Grossly misnamed. */
21enum die_val { 7enum die_val {
@@ -23,14 +9,7 @@ enum die_val {
23 DIE_PAGE_FAULT, 9 DIE_PAGE_FAULT,
24}; 10};
25 11
26static inline int notify_die(enum die_val val, struct pt_regs *regs, 12int register_page_fault_notifier(struct notifier_block *nb);
27 int trap, int sig) 13int unregister_page_fault_notifier(struct notifier_block *nb);
28{
29 struct die_args args = {
30 .regs = regs,
31 .trapnr = trap,
32 };
33 14
34 return atomic_notifier_call_chain(&shdie_chain, val, &args);
35}
36#endif /* __ASM_SH_KDEBUG_H */ 15#endif /* __ASM_SH_KDEBUG_H */
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index 888e4529e6fe..18b613c57cf5 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,6 +1,12 @@
1#ifndef __ASM_SH_PGALLOC_H 1#ifndef __ASM_SH_PGALLOC_H
2#define __ASM_SH_PGALLOC_H 2#define __ASM_SH_PGALLOC_H
3 3
4#include <linux/quicklist.h>
5#include <asm/page.h>
6
7#define QUICK_PGD 0 /* We preserve special mappings over free */
8#define QUICK_PT 1 /* Other page table pages that are zero on free */
9
4static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 10static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
5 pte_t *pte) 11 pte_t *pte)
6{ 12{
@@ -13,48 +19,49 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
13 set_pmd(pmd, __pmd((unsigned long)page_address(pte))); 19 set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
14} 20}
15 21
22static inline void pgd_ctor(void *x)
23{
24 pgd_t *pgd = x;
25
26 memcpy(pgd + USER_PTRS_PER_PGD,
27 swapper_pg_dir + USER_PTRS_PER_PGD,
28 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
29}
30
16/* 31/*
17 * Allocate and free page tables. 32 * Allocate and free page tables.
18 */ 33 */
19static inline pgd_t *pgd_alloc(struct mm_struct *mm) 34static inline pgd_t *pgd_alloc(struct mm_struct *mm)
20{ 35{
21 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); 36 return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
22
23 if (pgd) {
24 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
25 memcpy(pgd + USER_PTRS_PER_PGD,
26 swapper_pg_dir + USER_PTRS_PER_PGD,
27 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
28 }
29
30 return pgd;
31} 37}
32 38
33static inline void pgd_free(pgd_t *pgd) 39static inline void pgd_free(pgd_t *pgd)
34{ 40{
35 free_page((unsigned long)pgd); 41 quicklist_free(QUICK_PGD, NULL, pgd);
36} 42}
37 43
38static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 44static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
39 unsigned long address) 45 unsigned long address)
40{ 46{
41 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 47 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
42} 48}
43 49
44static inline struct page *pte_alloc_one(struct mm_struct *mm, 50static inline struct page *pte_alloc_one(struct mm_struct *mm,
45 unsigned long address) 51 unsigned long address)
46{ 52{
47 return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 53 void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
54 return pg ? virt_to_page(pg) : NULL;
48} 55}
49 56
50static inline void pte_free_kernel(pte_t *pte) 57static inline void pte_free_kernel(pte_t *pte)
51{ 58{
52 free_page((unsigned long)pte); 59 quicklist_free(QUICK_PT, NULL, pte);
53} 60}
54 61
55static inline void pte_free(struct page *pte) 62static inline void pte_free(struct page *pte)
56{ 63{
57 __free_page(pte); 64 quicklist_free_page(QUICK_PT, NULL, pte);
58} 65}
59 66
60#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 67#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
@@ -66,6 +73,11 @@ static inline void pte_free(struct page *pte)
66 73
67#define pmd_free(x) do { } while (0) 74#define pmd_free(x) do { } while (0)
68#define __pmd_free_tlb(tlb,x) do { } while (0) 75#define __pmd_free_tlb(tlb,x) do { } while (0)
69#define check_pgt_cache() do { } while (0) 76
77static inline void check_pgt_cache(void)
78{
79 quicklist_trim(QUICK_PGD, NULL, 25, 16);
80 quicklist_trim(QUICK_PT, NULL, 25, 16);
81}
70 82
71#endif /* __ASM_SH_PGALLOC_H */ 83#endif /* __ASM_SH_PGALLOC_H */
diff --git a/include/asm-sh/snapgear.h b/include/asm-sh/snapgear.h
index 6b5e4ddc073a..2d712e72c9e5 100644
--- a/include/asm-sh/snapgear.h
+++ b/include/asm-sh/snapgear.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm-sh/snapgear/io.h 2 * include/asm-sh/snapgear.h
3 * 3 *
4 * Modified version of io_se.h for the snapgear-specific functions. 4 * Modified version of io_se.h for the snapgear-specific functions.
5 * 5 *
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index e7e96ee0c8a5..82f3e229e621 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -255,6 +255,15 @@ static inline void *set_exception_table_evt(unsigned int evt, void *handler)
255 return set_exception_table_vec(evt >> 5, handler); 255 return set_exception_table_vec(evt >> 5, handler);
256} 256}
257 257
258/*
259 * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
260 */
261#ifdef CONFIG_CPU_SH2A
262extern unsigned int instruction_size(unsigned int insn);
263#else
264#define instruction_size(insn) (2)
265#endif
266
258/* XXX 267/* XXX
259 * disable hlt during certain critical i/o operations 268 * disable hlt during certain critical i/o operations
260 */ 269 */
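[Note: a typical consumer of instruction_size() is trap or emulation code that must advance the PC past the current opcode — fixed 2 bytes on classic SH, 2 or 4 on SH-2A. Sketch, with the instruction fetch simplified:

    /* Skip over the instruction that raised the current trap. */
    static void skip_current_insn(struct pt_regs *regs)
    {
            unsigned short insn = *(unsigned short *)regs->pc;

            regs->pc += instruction_size(insn);
    }
]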
diff --git a/include/asm-sh/timer.h b/include/asm-sh/timer.h
index 17b5e76a4c31..701ba84c7049 100644
--- a/include/asm-sh/timer.h
+++ b/include/asm-sh/timer.h
@@ -2,12 +2,14 @@
2#define __ASM_SH_TIMER_H 2#define __ASM_SH_TIMER_H
3 3
4#include <linux/sysdev.h> 4#include <linux/sysdev.h>
5#include <linux/clocksource.h>
5#include <asm/cpu/timer.h> 6#include <asm/cpu/timer.h>
6 7
7struct sys_timer_ops { 8struct sys_timer_ops {
8 int (*init)(void); 9 int (*init)(void);
9 int (*start)(void); 10 int (*start)(void);
10 int (*stop)(void); 11 int (*stop)(void);
12 cycle_t (*read)(void);
11#ifndef CONFIG_GENERIC_TIME 13#ifndef CONFIG_GENERIC_TIME
12 unsigned long (*get_offset)(void); 14 unsigned long (*get_offset)(void);
13#endif 15#endif
@@ -18,29 +20,8 @@ struct sys_timer {
18 20
19 struct sys_device dev; 21 struct sys_device dev;
20 struct sys_timer_ops *ops; 22 struct sys_timer_ops *ops;
21
22#ifdef CONFIG_NO_IDLE_HZ
23 struct dyn_tick_timer *dyn_tick;
24#endif
25}; 23};
26 24
27#ifdef CONFIG_NO_IDLE_HZ
28#define DYN_TICK_ENABLED (1 << 1)
29
30struct dyn_tick_timer {
31 spinlock_t lock;
32 unsigned int state; /* Current state */
33 int (*enable)(void); /* Enables dynamic tick */
34 int (*disable)(void); /* Disables dynamic tick */
35 void (*reprogram)(unsigned long); /* Reprograms the timer */
36 int (*handler)(int, void *);
37};
38
39void timer_dyn_reprogram(void);
40#else
41#define timer_dyn_reprogram() do { } while (0)
42#endif
43
44#define TICK_SIZE (tick_nsec / 1000) 25#define TICK_SIZE (tick_nsec / 1000)
45 26
46extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer; 27extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer;
@@ -58,5 +39,7 @@ struct sys_timer *get_sys_timer(void);
58 39
59/* arch/sh/kernel/time.c */ 40/* arch/sh/kernel/time.c */
60void handle_timer_tick(void); 41void handle_timer_tick(void);
42extern unsigned long sh_hpt_frequency;
43extern struct clocksource clocksource_sh;
61 44
62#endif /* __ASM_SH_TIMER_H */ 45#endif /* __ASM_SH_TIMER_H */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 49be50a36b77..af71e379a5ee 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -85,7 +85,7 @@
85#define __NR_sigpending 73 85#define __NR_sigpending 73
86#define __NR_sethostname 74 86#define __NR_sethostname 74
87#define __NR_setrlimit 75 87#define __NR_setrlimit 75
88#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ 88#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
89#define __NR_getrusage 77 89#define __NR_getrusage 77
90#define __NR_gettimeofday 78 90#define __NR_gettimeofday 78
91#define __NR_settimeofday 79 91#define __NR_settimeofday 79
@@ -328,8 +328,9 @@
328#define __NR_move_pages 317 328#define __NR_move_pages 317
329#define __NR_getcpu 318 329#define __NR_getcpu 318
330#define __NR_epoll_pwait 319 330#define __NR_epoll_pwait 319
331#define __NR_utimensat 320
331 332
332#define NR_syscalls 320 333#define NR_syscalls 321
333 334
334#ifdef __KERNEL__ 335#ifdef __KERNEL__
335 336
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index b9da9a600e35..b3f492208fd2 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -165,6 +165,7 @@ void smp_setup_cpu_possible_map(void);
165 165
166#else /* SMP */ 166#else /* SMP */
167 167
168#define hard_smp_processor_id() 0
168#define smp_setup_cpu_possible_map() do { } while (0) 169#define smp_setup_cpu_possible_map() do { } while (0)
169 170
170#endif /* !(SMP) */ 171#endif /* !(SMP) */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index cca54804b722..869d16fb907b 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -48,6 +48,7 @@ extern unsigned char boot_cpu_id;
48 48
49#else 49#else
50 50
51#define hard_smp_processor_id() 0
51#define smp_setup_cpu_possible_map() do { } while (0) 52#define smp_setup_cpu_possible_map() do { } while (0)
52#define boot_cpu_id (0) 53#define boot_cpu_id (0)
53 54
diff --git a/include/asm-um/required-features.h b/include/asm-um/required-features.h
new file mode 100644
index 000000000000..dfb967b2d2f3
--- /dev/null
+++ b/include/asm-um/required-features.h
@@ -0,0 +1,9 @@
1#ifndef __UM_REQUIRED_FEATURES_H
2#define __UM_REQUIRED_FEATURES_H
3
4/*
5 * Nothing to see, just need something for the i386 and x86_64 asm
6 * headers to include.
7 */
8
9#endif
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index ca552261ed1f..84f8cf29324e 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -24,6 +24,10 @@ extern inline void smp_cpus_done(unsigned int maxcpus)
24 24
25extern struct task_struct *idle_threads[NR_CPUS]; 25extern struct task_struct *idle_threads[NR_CPUS];
26 26
27#else
28
29#define hard_smp_processor_id() 0
30
27#endif 31#endif
28 32
29#endif 33#endif
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d5704421456b..3f303d2365ed 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -57,12 +57,6 @@ static inline int num_booting_cpus(void)
57 57
58#define raw_smp_processor_id() read_pda(cpunumber) 58#define raw_smp_processor_id() read_pda(cpunumber)
59 59
60static inline int hard_smp_processor_id(void)
61{
62 /* we don't want to mark this access volatile - bad code generation */
63 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
64}
65
66extern int __cpu_disable(void); 60extern int __cpu_disable(void);
67extern void __cpu_die(unsigned int cpu); 61extern void __cpu_die(unsigned int cpu);
68extern void prefill_possible_map(void); 62extern void prefill_possible_map(void);
@@ -71,7 +65,13 @@ extern unsigned __cpuinitdata disabled_cpus;
71 65
72#define NO_PROC_ID 0xFF /* No processor magic marker */ 66#define NO_PROC_ID 0xFF /* No processor magic marker */
73 67
74#endif 68#endif /* CONFIG_SMP */
69
70static inline int hard_smp_processor_id(void)
71{
72 /* we don't want to mark this access volatile - bad code generation */
73 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
74}
75 75
76/* 76/*
77 * Some lowlevel functions might want to know about 77 * Some lowlevel functions might want to know about
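
Together with the sparc, sparc64, and um hunks above (and the linux/smp.h removal further down), this lets callers use hard_smp_processor_id() unconditionally; a sketch, with report_boot_cpu() invented for illustration:

    #include <linux/kernel.h>
    #include <linux/smp.h>

    static void report_boot_cpu(void)
    {
        /* APIC id on SMP x86-64; compile-time constant 0 on UP builds */
        printk(KERN_INFO "running on hardware CPU %d\n",
               hard_smp_processor_id());
    }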
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7b8021e8c43..ead9f9a56234 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -39,7 +39,7 @@
39 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ 39 [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
40 [ti_flags] "i" (offsetof(struct thread_info, flags)),\ 40 [ti_flags] "i" (offsetof(struct thread_info, flags)),\
41 [tif_fork] "i" (TIF_FORK), \ 41 [tif_fork] "i" (TIF_FORK), \
42 [thread_info] "i" (offsetof(struct task_struct, thread_info)), \ 42 [thread_info] "i" (offsetof(struct task_struct, stack)), \
43 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ 43 [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
44 : "memory", "cc" __EXTRA_CLOBBER) 44 : "memory", "cc" __EXTRA_CLOBBER)
45 45
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 74a6c74397f7..10bb5a8ed688 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
162#define TS_COMPAT 0x0002 /* 32bit syscall active */ 162#define TS_COMPAT 0x0002 /* 32bit syscall active */
163#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ 163#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
164 164
165#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING) 165#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
166 166
167#endif /* __KERNEL__ */ 167#endif /* __KERNEL__ */
168 168
diff --git a/include/asm-xtensa/platform-iss/simcall.h b/include/asm-xtensa/platform-iss/simcall.h
index 6acb572759a6..b7952c06a2b7 100644
--- a/include/asm-xtensa/platform-iss/simcall.h
+++ b/include/asm-xtensa/platform-iss/simcall.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/asm-xtensa/platform-iss/hardware.h 2 * include/asm-xtensa/platform-iss/simcall.h
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
diff --git a/include/linux/aio.h b/include/linux/aio.h
index a30ef13c9e62..43dc2ebfaa0e 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -226,7 +226,8 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
226 __put_ioctx(kioctx); \ 226 __put_ioctx(kioctx); \
227} while (0) 227} while (0)
228 228
229#define in_aio() !is_sync_wait(current->io_wait) 229#define in_aio() (unlikely(!is_sync_wait(current->io_wait)))
230
230/* may be used for debugging */ 231/* may be used for debugging */
231#define warn_if_async() \ 232#define warn_if_async() \
232do { \ 233do { \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a686eabe22d6..db5b00a792f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -854,7 +854,7 @@ static inline void put_dev_sector(Sector p)
854 854
855struct work_struct; 855struct work_struct;
856int kblockd_schedule_work(struct work_struct *work); 856int kblockd_schedule_work(struct work_struct *work);
857void kblockd_flush(void); 857void kblockd_flush_work(struct work_struct *work);
858 858
859#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 859#define MODULE_ALIAS_BLOCKDEV(major,minor) \
860 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 860 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 2665ca04cf8f..bf297b03a4e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct clocksource;
49 * @shift: cycle to nanosecond divisor (power of two) 49 * @shift: cycle to nanosecond divisor (power of two)
50 * @flags: flags describing special properties 50 * @flags: flags describing special properties
51 * @vread: vsyscall based read 51 * @vread: vsyscall based read
52 * @resume: resume function for the clocksource, if necessary
52 * @cycle_interval: Used internally by timekeeping core, please ignore. 53 * @cycle_interval: Used internally by timekeeping core, please ignore.
53 * @xtime_interval: Used internally by timekeeping core, please ignore. 54 * @xtime_interval: Used internally by timekeeping core, please ignore.
54 */ 55 */
@@ -65,6 +66,7 @@ struct clocksource {
65 u32 shift; 66 u32 shift;
66 unsigned long flags; 67 unsigned long flags;
67 cycle_t (*vread)(void); 68 cycle_t (*vread)(void);
69 void (*resume)(void);
68 70
69 /* timekeeping specific data, ignore */ 71 /* timekeeping specific data, ignore */
70 cycle_t cycle_interval; 72 cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
209extern int clocksource_register(struct clocksource*); 211extern int clocksource_register(struct clocksource*);
210extern struct clocksource* clocksource_get_next(void); 212extern struct clocksource* clocksource_get_next(void);
211extern void clocksource_change_rating(struct clocksource *cs, int rating); 213extern void clocksource_change_rating(struct clocksource *cs, int rating);
214extern void clocksource_resume(void);
212 215
213#ifdef CONFIG_GENERIC_TIME_VSYSCALL 216#ifdef CONFIG_GENERIC_TIME_VSYSCALL
214extern void update_vsyscall(struct timespec *ts, struct clocksource *c); 217extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
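
A sketch of a driver adopting the new hook (the example_* names are invented): after a suspend/resume cycle the core calls clocksource_resume(), which gives every registered clocksource a chance to reprogram hardware state lost across the sleep:

    #include <linux/clocksource.h>

    static cycle_t example_cs_read(void)
    {
        return (cycle_t)0;  /* would read a free-running counter */
    }

    static void example_cs_resume(void)
    {
        /* reinitialise counter registers lost across suspend */
    }

    static struct clocksource example_cs = {
        .name   = "example",
        .rating = 200,
        .read   = example_cs_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .shift  = 20,
        .resume = example_cs_resume,
    };

    /* registered as usual: clocksource_register(&example_cs); */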
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ccd863dd77fa..70a157a130bb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -253,5 +253,8 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
253 const compat_sigset_t __user *sigmask, 253 const compat_sigset_t __user *sigmask,
254 compat_size_t sigsetsize); 254 compat_size_t sigsetsize);
255 255
256asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
257 struct compat_timespec __user *t, int flags);
258
256#endif /* CONFIG_COMPAT */ 259#endif /* CONFIG_COMPAT */
257#endif /* _LINUX_COMPAT_H */ 260#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a9f794716a81..03ec2311fb29 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -40,3 +40,4 @@
40#define noinline __attribute__((noinline)) 40#define noinline __attribute__((noinline))
41#define __attribute_pure__ __attribute__((pure)) 41#define __attribute_pure__ __attribute__((pure))
42#define __attribute_const__ __attribute__((__const__)) 42#define __attribute_const__ __attribute__((__const__))
43#define __maybe_unused __attribute__((unused))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index ecd621fd27d2..a9e2863c2dbf 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -4,9 +4,11 @@
4#include <linux/compiler-gcc.h> 4#include <linux/compiler-gcc.h>
5 5
6#if __GNUC_MINOR__ >= 3 6#if __GNUC_MINOR__ >= 3
7# define __attribute_used__ __attribute__((__used__)) 7# define __used __attribute__((__used__))
8# define __attribute_used__ __used /* deprecated */
8#else 9#else
9# define __attribute_used__ __attribute__((__unused__)) 10# define __used __attribute__((__unused__))
11# define __attribute_used__ __used /* deprecated */
10#endif 12#endif
11 13
12#if __GNUC_MINOR__ >= 4 14#if __GNUC_MINOR__ >= 4
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index fd0cc7c4a636..a03e9398a6c2 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -12,7 +12,8 @@
12# define __inline __inline __attribute__((always_inline)) 12# define __inline __inline __attribute__((always_inline))
13#endif 13#endif
14 14
15#define __attribute_used__ __attribute__((__used__)) 15#define __used __attribute__((__used__))
16#define __attribute_used__ __used /* deprecated */
16#define __must_check __attribute__((warn_unused_result)) 17#define __must_check __attribute__((warn_unused_result))
17#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) 18#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
18#define __always_inline inline __attribute__((always_inline)) 19#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3b6949b41745..498c35920762 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -108,15 +108,30 @@ extern void __chk_io_ptr(const void __iomem *);
108 * Allow us to avoid 'defined but not used' warnings on functions and data, 108 * Allow us to avoid 'defined but not used' warnings on functions and data,
109 * as well as force them to be emitted to the assembly file. 109 * as well as force them to be emitted to the assembly file.
110 * 110 *
111 * As of gcc 3.3, static functions that are not marked with attribute((used)) 111 * As of gcc 3.4, static functions that are not marked with attribute((used))
112 * may be elided from the assembly file. As of gcc 3.3, static data not so 112 * may be elided from the assembly file. As of gcc 3.4, static data not so
113 * marked will not be elided, but this may change in a future gcc version. 113 * marked will not be elided, but this may change in a future gcc version.
114 * 114 *
115 * NOTE: Because distributions shipped with a backported unit-at-a-time
116 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
117 * for gcc >=3.3 instead of 3.4.
118 *
115 * In prior versions of gcc, such functions and data would be emitted, but 119 * In prior versions of gcc, such functions and data would be emitted, but
116 * would be warned about except with attribute((unused)). 120 * would be warned about except with attribute((unused)).
121 *
122 * Mark functions that are referenced only in inline assembly as __used so
123 * the code is emitted even though it appears to be unreferenced.
117 */ 124 */
118#ifndef __attribute_used__ 125#ifndef __attribute_used__
119# define __attribute_used__ /* unimplemented */ 126# define __attribute_used__ /* deprecated */
127#endif
128
129#ifndef __used
130# define __used /* unimplemented */
131#endif
132
133#ifndef __maybe_unused
134# define __maybe_unused /* unimplemented */
120#endif 135#endif
121 136
122/* 137/*
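
A sketch of the intended usage (the helper name and the x86 asm are illustrative): __used keeps a function that gcc sees no C caller for, while __maybe_unused suppresses the warning for conditionally referenced data without forcing emission:

    #include <linux/compiler.h>

    static int __maybe_unused debug_level;  /* only read under #ifdef DEBUG */

    static void __used asm_only_helper(void)
    {
        /* reached only via the asm below; no visible C caller */
    }

    void trigger(void)
    {
        asm volatile("call asm_only_helper" ::: "memory");
    }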
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 4395e5206746..7894dd0f3b77 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -54,7 +54,7 @@ struct ext3_block_alloc_info {
54 /* 54 /*
55 * Was i_next_alloc_goal in ext3_inode_info 55 * Was i_next_alloc_goal in ext3_inode_info
56 * is the *physical* companion to i_next_alloc_block. 56 * is the *physical* companion to i_next_alloc_block.
57 * it the the physical block number of the block which was most-recentl 57 * it the physical block number of the block which was most-recentl
58 * allocated to this file. This give us the goal (target) for the next 58 * allocated to this file. This give us the goal (target) for the next
59 * allocation when we detect linearly ascending requests. 59 * allocation when we detect linearly ascending requests.
60 */ 60 */
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index bb42379cb7fd..d5b177e5b395 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -52,7 +52,7 @@ struct ext4_block_alloc_info {
52 /* 52 /*
53 * Was i_next_alloc_goal in ext4_inode_info 53 * Was i_next_alloc_goal in ext4_inode_info
54 * is the *physical* companion to i_next_alloc_block. 54 * is the *physical* companion to i_next_alloc_block.
55 * it the the physical block number of the block which was most-recentl 55 * it the physical block number of the block which was most-recentl
56 * allocated to this file. This give us the goal (target) for the next 56 * allocated to this file. This give us the goal (target) for the next
57 * allocation when we detect linearly ascending requests. 57 * allocation when we detect linearly ascending requests.
58 */ 58 */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dff7a728948c..c654d0e9ce33 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -868,7 +868,7 @@ struct fb_info {
868#define fb_writeq sbus_writeq 868#define fb_writeq sbus_writeq
869#define fb_memset sbus_memset_io 869#define fb_memset sbus_memset_io
870 870
871#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) 871#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__)
872 872
873#define fb_readb __raw_readb 873#define fb_readb __raw_readb
874#define fb_readw __raw_readw 874#define fb_readw __raw_readw
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 820125c628c1..899fc7f20edd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5 5
6union ktime;
7
6/* Second argument to futex syscall */ 8/* Second argument to futex syscall */
7 9
8 10
@@ -15,6 +17,19 @@
15#define FUTEX_LOCK_PI 6 17#define FUTEX_LOCK_PI 6
16#define FUTEX_UNLOCK_PI 7 18#define FUTEX_UNLOCK_PI 7
17#define FUTEX_TRYLOCK_PI 8 19#define FUTEX_TRYLOCK_PI 8
20#define FUTEX_CMP_REQUEUE_PI 9
21
22#define FUTEX_PRIVATE_FLAG 128
23#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG
24
25#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
26#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
27#define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
28#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
29#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
30#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
31#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
32#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
18 33
19/* 34/*
20 * Support for robust futexes: the kernel cleans up held futexes at 35 * Support for robust futexes: the kernel cleans up held futexes at
@@ -83,9 +98,14 @@ struct robust_list_head {
83#define FUTEX_OWNER_DIED 0x40000000 98#define FUTEX_OWNER_DIED 0x40000000
84 99
85/* 100/*
101 * Some processes have been requeued on this PI-futex
102 */
103#define FUTEX_WAITER_REQUEUED 0x20000000
104
105/*
86 * The rest of the robust-futex field is for the TID: 106 * The rest of the robust-futex field is for the TID:
87 */ 107 */
88#define FUTEX_TID_MASK 0x3fffffff 108#define FUTEX_TID_MASK 0x0fffffff
89 109
90/* 110/*
91 * This limit protects against a deliberately circular list. 111 * This limit protects against a deliberately circular list.
@@ -94,7 +114,7 @@ struct robust_list_head {
94#define ROBUST_LIST_LIMIT 2048 114#define ROBUST_LIST_LIMIT 2048
95 115
96#ifdef __KERNEL__ 116#ifdef __KERNEL__
97long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 117long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
98 u32 __user *uaddr2, u32 val2, u32 val3); 118 u32 __user *uaddr2, u32 val2, u32 val3);
99 119
100extern int 120extern int
@@ -106,9 +126,20 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
106 * Don't rearrange members without looking at hash_futex(). 126 * Don't rearrange members without looking at hash_futex().
107 * 127 *
108 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. 128 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
109 * We set bit 0 to indicate if it's an inode-based key. 129 * We use the two low order bits of offset to tell what is the kind of key :
110 */ 130 * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
131 * (no reference on an inode or mm)
132 * 01 : Shared futex (PTHREAD_PROCESS_SHARED)
133 * mapped on a file (reference on the underlying inode)
134 * 10 : Shared futex (PTHREAD_PROCESS_SHARED)
135 * (but private mapping on an mm, and reference taken on it)
136*/
137
138#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */
139#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
140
111union futex_key { 141union futex_key {
142 u32 __user *uaddr;
112 struct { 143 struct {
113 unsigned long pgoff; 144 unsigned long pgoff;
114 struct inode *inode; 145 struct inode *inode;
@@ -125,7 +156,8 @@ union futex_key {
125 int offset; 156 int offset;
126 } both; 157 } both;
127}; 158};
128int get_futex_key(u32 __user *uaddr, union futex_key *key); 159int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
160 union futex_key *key);
129void get_futex_key_refs(union futex_key *key); 161void get_futex_key_refs(union futex_key *key);
130void drop_futex_key_refs(union futex_key *key); 162void drop_futex_key_refs(union futex_key *key);
131 163
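
A userspace sketch of the new private opcodes, which let the kernel hash purely on (mm, address) and skip the mmap_sem/inode work for process-local futexes; the fallback defines mirror the values added above:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    #ifndef FUTEX_PRIVATE_FLAG
    #define FUTEX_PRIVATE_FLAG  128
    #define FUTEX_WAIT_PRIVATE  (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
    #define FUTEX_WAKE_PRIVATE  (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
    #endif

    static int futex_word;

    static long futex_op(int *uaddr, int op, int val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void wait_on_word(void)
    {
        futex_op(&futex_word, FUTEX_WAIT_PRIVATE, 0); /* sleep while 0 */
    }

    static void wake_one(void)
    {
        futex_op(&futex_word, FUTEX_WAKE_PRIVATE, 1); /* wake one waiter */
    }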
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 80764f40be75..886f5faa08cb 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * fs/generic_acl.c 2 * include/linux/generic_acl.h
3 * 3 *
4 * (C) 2005 Andreas Gruenbacher <agruen@suse.de> 4 * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
5 * 5 *
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 2c65da7cabb2..f589559cf070 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -413,6 +413,7 @@ char *disk_name (struct gendisk *hd, int part, char *buf);
413extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 413extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
414extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); 414extern void add_partition(struct gendisk *, int, sector_t, sector_t, int);
415extern void delete_partition(struct gendisk *, int); 415extern void delete_partition(struct gendisk *, int);
416extern void printk_all_partitions(void);
416 417
417extern struct gendisk *alloc_disk_node(int minors, int node_id); 418extern struct gendisk *alloc_disk_node(int minors, int node_id);
418extern struct gendisk *alloc_disk(int minors); 419extern struct gendisk *alloc_disk(int minors);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 97a36c3d96e2..0d2ef0b082a6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -176,10 +176,6 @@ extern void FASTCALL(free_cold_page(struct page *page));
176#define free_page(addr) free_pages((addr),0) 176#define free_page(addr) free_pages((addr),0)
177 177
178void page_alloc_init(void); 178void page_alloc_init(void);
179#ifdef CONFIG_NUMA 179void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
180void drain_node_pages(int node);
181#else
182static inline void drain_node_pages(int node) { };
183#endif
184 180
185#endif /* __LINUX_GFP_H */ 181#endif /* __LINUX_GFP_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index a515eb0afdfb..98e2cce996a4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -94,17 +94,26 @@ static inline void clear_highpage(struct page *page)
94 94
95/* 95/*
96 * Same but also flushes aliased cache contents to RAM. 96 * Same but also flushes aliased cache contents to RAM.
97 *
98 * This must be a macro because KM_USER0 and friends aren't defined if
99 * !CONFIG_HIGHMEM
97 */ 100 */
98static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) 101#define zero_user_page(page, offset, size, km_type) \
102 do { \
103 void *kaddr; \
104 \
105 BUG_ON((offset) + (size) > PAGE_SIZE); \
106 \
107 kaddr = kmap_atomic(page, km_type); \
108 memset((char *)kaddr + (offset), 0, (size)); \
109 flush_dcache_page(page); \
110 kunmap_atomic(kaddr, (km_type)); \
111 } while (0)
112
113static inline void __deprecated memclear_highpage_flush(struct page *page,
114 unsigned int offset, unsigned int size)
99{ 115{
100 void *kaddr; 116 zero_user_page(page, offset, size, KM_USER0);
101
102 BUG_ON(offset + size > PAGE_SIZE);
103
104 kaddr = kmap_atomic(page, KM_USER0);
105 memset((char *)kaddr + offset, 0, size);
106 flush_dcache_page(page);
107 kunmap_atomic(kaddr, KM_USER0);
108} 117}
109 118
110#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE 119#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
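
A short sketch of a caller migrating off the deprecated helper; zero_partial_tail() is invented for the example:

    #include <linux/highmem.h>

    /* zero the unwritten tail of a page, e.g. after a short write */
    static void zero_partial_tail(struct page *page, unsigned int from)
    {
        zero_user_page(page, from, PAGE_SIZE - from, KM_USER0);
    }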
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index 9ee0f800592f..111334f5b922 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -18,7 +18,7 @@
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ 18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
19/* ------------------------------------------------------------------------- */ 19/* ------------------------------------------------------------------------- */
20 20
21/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even 21/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
22 Frodo Looijaard <frodol@dds.nl> */ 22 Frodo Looijaard <frodol@dds.nl> */
23 23
24#ifndef _LINUX_I2C_ALGO_BIT_H 24#ifndef _LINUX_I2C_ALGO_BIT_H
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 994eb86f882c..77afbb60fd11 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -19,7 +19,7 @@
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ 19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
20/* ------------------------------------------------------------------------- */ 20/* ------------------------------------------------------------------------- */
21 21
22/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even 22/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
23 Frodo Looijaard <frodol@dds.nl> */ 23 Frodo Looijaard <frodol@dds.nl> */
24 24
25#ifndef _LINUX_I2C_ALGO_PCF_H 25#ifndef _LINUX_I2C_ALGO_PCF_H
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 795102309bf1..45170b2fa253 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -95,7 +95,7 @@ extern struct group_info init_groups;
95#define INIT_TASK(tsk) \ 95#define INIT_TASK(tsk) \
96{ \ 96{ \
97 .state = 0, \ 97 .state = 0, \
98 .thread_info = &init_thread_info, \ 98 .stack = &init_thread_info, \
99 .usage = ATOMIC_INIT(2), \ 99 .usage = ATOMIC_INIT(2), \
100 .flags = 0, \ 100 .flags = 0, \
101 .lock_depth = -1, \ 101 .lock_depth = -1, \
diff --git a/include/linux/irda.h b/include/linux/irda.h
index 09d8f105a5a8..945ba3110874 100644
--- a/include/linux/irda.h
+++ b/include/linux/irda.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 1c65e7a9f186..00dd957e245b 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -30,4 +30,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu);
30int kthread_stop(struct task_struct *k); 30int kthread_stop(struct task_struct *k);
31int kthread_should_stop(void); 31int kthread_should_stop(void);
32 32
33int kthreadd(void *unused);
34extern struct task_struct *kthreadd_task;
35
33#endif /* _LINUX_KTHREAD_H */ 36#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 81bb9c7a4eb3..c762954bda14 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -43,7 +43,7 @@
43 * plain scalar nanosecond based representation can be selected by the 43 * plain scalar nanosecond based representation can be selected by the
44 * config switch CONFIG_KTIME_SCALAR. 44 * config switch CONFIG_KTIME_SCALAR.
45 */ 45 */
46typedef union { 46union ktime {
47 s64 tv64; 47 s64 tv64;
48#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR) 48#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
49 struct { 49 struct {
@@ -54,7 +54,9 @@ typedef union {
54# endif 54# endif
55 } tv; 55 } tv;
56#endif 56#endif
57} ktime_t; 57};
58
59typedef union ktime ktime_t; /* Kill this */
58 60
59#define KTIME_MAX ((s64)~((u64)1 << 63)) 61#define KTIME_MAX ((s64)~((u64)1 << 63))
60#if (BITS_PER_LONG == 64) 62#if (BITS_PER_LONG == 64)
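
The point of moving from an anonymous typedef to a named union is that other headers can now forward-declare the type instead of including <linux/ktime.h>, exactly as the futex.h hunk earlier in this patch does; a sketch:

    union ktime;    /* forward declaration, no #include needed */

    extern long example_wait(union ktime *timeout); /* illustrative */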
diff --git a/include/linux/mca.h b/include/linux/mca.h
index 5cff2923092b..37972704617f 100644
--- a/include/linux/mca.h
+++ b/include/linux/mca.h
@@ -94,6 +94,7 @@ struct mca_bus {
94struct mca_driver { 94struct mca_driver {
95 const short *id_table; 95 const short *id_table;
96 void *driver_data; 96 void *driver_data;
97 int integrated_id;
97 struct device_driver driver; 98 struct device_driver driver;
98}; 99};
99#define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver) 100#define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver)
@@ -125,6 +126,7 @@ extern enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev);
125extern struct bus_type mca_bus_type; 126extern struct bus_type mca_bus_type;
126 127
127extern int mca_register_driver(struct mca_driver *drv); 128extern int mca_register_driver(struct mca_driver *drv);
129extern int mca_register_driver_integrated(struct mca_driver *, int);
128extern void mca_unregister_driver(struct mca_driver *drv); 130extern void mca_unregister_driver(struct mca_driver *drv);
129 131
130/* WARNING: only called by the boot time device setup */ 132/* WARNING: only called by the boot time device setup */
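
A hedged sketch of the new registration path for onboard (integrated) MCA devices that have no plug-in POS id; the driver name and the 0x77 pseudo-id are invented for the example:

    #include <linux/init.h>
    #include <linux/mca.h>

    static struct mca_driver example_driver = {
        .id_table = NULL,            /* no plug-in adapter ids */
        .driver   = {
            .name = "example-onboard",
        },
    };

    static int __init example_init(void)
    {
        return mca_register_driver_integrated(&example_driver, 0x77);
    }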
diff --git a/include/linux/meye.h b/include/linux/meye.h
index 11ec45e9a132..39fd9c8ddd4b 100644
--- a/include/linux/meye.h
+++ b/include/linux/meye.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2003 Stelian Pop <stelian@popies.net> 4 * Copyright (C) 2001-2003 Stelian Pop <stelian@popies.net>
5 * 5 *
6 * Copyright (C) 2001-2002 Alcôve <www.alcove.com> 6 * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
7 * 7 *
8 * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> 8 * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
9 * 9 *
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f1544e83042..d09b1345a3a1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,6 +83,9 @@ struct per_cpu_pages {
83 83
84struct per_cpu_pageset { 84struct per_cpu_pageset {
85 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ 85 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
86#ifdef CONFIG_NUMA
87 s8 expire;
88#endif
86#ifdef CONFIG_SMP 89#ifdef CONFIG_SMP
87 s8 stat_threshold; 90 s8 stat_threshold;
88 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 91 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
diff --git a/include/linux/module.h b/include/linux/module.h
index 6d3dc9c4ff96..792d483c9af7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -356,6 +356,9 @@ struct module
356 keeping pointers to this stuff */ 356 keeping pointers to this stuff */
357 char *args; 357 char *args;
358}; 358};
359#ifndef MODULE_ARCH_INIT
360#define MODULE_ARCH_INIT {}
361#endif
359 362
360/* FIXME: It'd be nice to isolate modules during init, too, so they 363/* FIXME: It'd be nice to isolate modules during init, too, so they
361 aren't used before they (may) fail. But presently too much code 364 aren't used before they (may) fail. But presently too much code
diff --git a/include/linux/mount.h b/include/linux/mount.h
index dab69afee2fa..6d3047d8c91c 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -33,7 +33,7 @@ struct mnt_namespace;
33 33
34#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ 34#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
35#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ 35#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
36#define MNT_PNODE_MASK 0x3000 /* propogation flag mask */ 36#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */
37 37
38struct vfsmount { 38struct vfsmount {
39 struct list_head mnt_hash; 39 struct list_head mnt_hash;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 45d482ce8397..fd64ccfbce02 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -9,10 +9,6 @@
9#ifndef __MTD_MTD_H__ 9#ifndef __MTD_MTD_H__
10#define __MTD_MTD_H__ 10#define __MTD_MTD_H__
11 11
12#ifndef __KERNEL__
13#error This is a kernel header. Perhaps include mtd-user.h instead?
14#endif
15
16#include <linux/types.h> 12#include <linux/types.h>
17#include <linux/module.h> 13#include <linux/module.h>
18#include <linux/uio.h> 14#include <linux/uio.h>
@@ -137,9 +133,6 @@ struct mtd_info {
137 int numeraseregions; 133 int numeraseregions;
138 struct mtd_erase_region_info *eraseregions; 134 struct mtd_erase_region_info *eraseregions;
139 135
140 /* This really shouldn't be here. It can go away in 2.5 */
141 u_int32_t bank_size;
142
143 int (*erase) (struct mtd_info *mtd, struct erase_info *instr); 136 int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
144 137
145 /* This stuff for eXecute-In-Place */ 138 /* This stuff for eXecute-In-Place */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index cf197ad62da6..d2365c8dcacc 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -560,6 +560,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
560 * @chip_delay: R/B delay value in us 560 * @chip_delay: R/B delay value in us
561 * @options: Option flags, e.g. 16bit buswidth 561 * @options: Option flags, e.g. 16bit buswidth
562 * @ecclayout: ecc layout info structure 562 * @ecclayout: ecc layout info structure
563 * @part_probe_types: NULL-terminated array of probe types
563 * @priv: hardware controller specific settings 564 * @priv: hardware controller specific settings
564 */ 565 */
565struct platform_nand_chip { 566struct platform_nand_chip {
@@ -570,6 +571,7 @@ struct platform_nand_chip {
570 struct nand_ecclayout *ecclayout; 571 struct nand_ecclayout *ecclayout;
571 int chip_delay; 572 int chip_delay;
572 unsigned int options; 573 unsigned int options;
574 const char **part_probe_types;
573 void *priv; 575 void *priv;
574}; 576};
575 577
@@ -578,6 +580,8 @@ struct platform_nand_chip {
578 * @hwcontrol: platform specific hardware control structure 580 * @hwcontrol: platform specific hardware control structure
579 * @dev_ready: platform specific function to read ready/busy pin 581 * @dev_ready: platform specific function to read ready/busy pin
580 * @select_chip: platform specific chip select function 582 * @select_chip: platform specific chip select function
583 * @cmd_ctrl: platform specific function for controlling
584 * ALE/CLE/nCE. Also used to write command and address
581 * @priv: private data to transport driver specific settings 585 * @priv: private data to transport driver specific settings
582 * 586 *
583 * All fields are optional and depend on the hardware driver requirements 587 * All fields are optional and depend on the hardware driver requirements
@@ -586,9 +590,21 @@ struct platform_nand_ctrl {
586 void (*hwcontrol)(struct mtd_info *mtd, int cmd); 590 void (*hwcontrol)(struct mtd_info *mtd, int cmd);
587 int (*dev_ready)(struct mtd_info *mtd); 591 int (*dev_ready)(struct mtd_info *mtd);
588 void (*select_chip)(struct mtd_info *mtd, int chip); 592 void (*select_chip)(struct mtd_info *mtd, int chip);
593 void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
594 unsigned int ctrl);
589 void *priv; 595 void *priv;
590}; 596};
591 597
598/**
599 * struct platform_nand_data - container structure for platform-specific data
600 * @chip: chip level chip structure
601 * @ctrl: controller level device structure
602 */
603struct platform_nand_data {
604 struct platform_nand_chip chip;
605 struct platform_nand_ctrl ctrl;
606};
607
592/* Some helpers to access the data structures */ 608/* Some helpers to access the data structures */
593static inline 609static inline
594struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) 610struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
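
A sketch of board support code wiring up the extended interface: cmd_ctrl drives ALE/CLE through address offsets, and part_probe_types lets the core try command-line partitions first. The register offsets and field values are board-specific assumptions:

    #include <linux/mtd/nand.h>
    #include <asm/io.h>

    static void board_cmd_ctrl(struct mtd_info *mtd, int dat,
                               unsigned int ctrl)
    {
        struct nand_chip *chip = mtd->priv;

        if (dat != NAND_CMD_NONE)
            writeb(dat, chip->IO_ADDR_W +
                        ((ctrl & NAND_CLE) ? 0x10 : 0) +
                        ((ctrl & NAND_ALE) ? 0x08 : 0));
    }

    static const char *board_part_probes[] = { "cmdlinepart", NULL };

    static struct platform_nand_data board_nand_data = {
        .chip = {
            .nr_chips         = 1,
            .chip_delay       = 20,
            .part_probe_types = board_part_probes,
        },
        .ctrl = {
            .cmd_ctrl = board_cmd_ctrl,
        },
    };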
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b81bc2adaeff..0d50ea3df689 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -121,11 +121,12 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
121 * Also see Documentation/mutex-design.txt. 121 * Also see Documentation/mutex-design.txt.
122 */ 122 */
123extern void fastcall mutex_lock(struct mutex *lock); 123extern void fastcall mutex_lock(struct mutex *lock);
124extern int fastcall mutex_lock_interruptible(struct mutex *lock); 124extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
125 125
126#ifdef CONFIG_DEBUG_LOCK_ALLOC 126#ifdef CONFIG_DEBUG_LOCK_ALLOC
127extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 127extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
128extern int mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); 128extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
129 unsigned int subclass);
129#else 130#else
130# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 131# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
131# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 132# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
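
The annotation exists because silently ignoring the -EINTR return means proceeding without the lock held; the canonical pattern:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    static int do_locked_work(void)
    {
        if (mutex_lock_interruptible(&example_lock))
            return -ERESTARTSYS;  /* signal arrived, lock NOT held */
        /* ... critical section ... */
        mutex_unlock(&example_lock);
        return 0;
    }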
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
index 409b6e02f337..c9c05a78e9bb 100644
--- a/include/linux/nfs4_acl.h
+++ b/include/linux/nfs4_acl.h
@@ -44,7 +44,6 @@
44#define NFS4_ACL_MAX 170 44#define NFS4_ACL_MAX 170
45 45
46struct nfs4_acl *nfs4_acl_new(int); 46struct nfs4_acl *nfs4_acl_new(int);
47void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
48int nfs4_acl_get_whotype(char *, u32); 47int nfs4_acl_get_whotype(char *, u32);
49int nfs4_acl_write_who(int who, char *p); 48int nfs4_acl_write_who(int who, char *p);
50int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group, 49int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 10a43ed0527e..9431101bf876 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -112,32 +112,40 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
112 112
113#ifdef __KERNEL__ 113#ifdef __KERNEL__
114 114
115extern int atomic_notifier_chain_register(struct atomic_notifier_head *, 115extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
116 struct notifier_block *); 116 struct notifier_block *nb);
117extern int blocking_notifier_chain_register(struct blocking_notifier_head *, 117extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
118 struct notifier_block *); 118 struct notifier_block *nb);
119extern int raw_notifier_chain_register(struct raw_notifier_head *, 119extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
120 struct notifier_block *); 120 struct notifier_block *nb);
121extern int srcu_notifier_chain_register(struct srcu_notifier_head *, 121extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
122 struct notifier_block *); 122 struct notifier_block *nb);
123 123
124extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *, 124extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
125 struct notifier_block *); 125 struct notifier_block *nb);
126extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *, 126extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
127 struct notifier_block *); 127 struct notifier_block *nb);
128extern int raw_notifier_chain_unregister(struct raw_notifier_head *, 128extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
129 struct notifier_block *); 129 struct notifier_block *nb);
130extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *, 130extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
131 struct notifier_block *); 131 struct notifier_block *nb);
132 132
133extern int atomic_notifier_call_chain(struct atomic_notifier_head *, 133extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
134 unsigned long val, void *v); 134 unsigned long val, void *v);
135extern int blocking_notifier_call_chain(struct blocking_notifier_head *, 135extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
136 unsigned long val, void *v, int nr_to_call, int *nr_calls);
137extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
136 unsigned long val, void *v); 138 unsigned long val, void *v);
137extern int raw_notifier_call_chain(struct raw_notifier_head *, 139extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
140 unsigned long val, void *v, int nr_to_call, int *nr_calls);
141extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
138 unsigned long val, void *v); 142 unsigned long val, void *v);
139extern int srcu_notifier_call_chain(struct srcu_notifier_head *, 143extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
144 unsigned long val, void *v, int nr_to_call, int *nr_calls);
145extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
140 unsigned long val, void *v); 146 unsigned long val, void *v);
147extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
148 unsigned long val, void *v, int nr_to_call, int *nr_calls);
141 149
142#define NOTIFY_DONE 0x0000 /* Don't care */ 150#define NOTIFY_DONE 0x0000 /* Don't care */
143#define NOTIFY_OK 0x0001 /* Suits me */ 151#define NOTIFY_OK 0x0001 /* Suits me */
@@ -186,6 +194,20 @@ extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
186#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 194#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
187#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 195#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
188#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 196#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
197#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */
198#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
199
200/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
201 * operation in progress
202 */
203#define CPU_TASKS_FROZEN 0x0010
204
205#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
206#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
207#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
208#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
209#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
210#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
189 211
190#endif /* __KERNEL__ */ 212#endif /* __KERNEL__ */
191#endif /* _LINUX_NOTIFIER_H */ 213#endif /* _LINUX_NOTIFIER_H */
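
A sketch of the new partial-chain variants (example_chain and example_event are invented): a caller can stop after nr_to_call callees and learn how many actually ran, which is what lets CPU hotplug unwind exactly the notifiers that accepted CPU_DOWN_PREPARE:

    #include <linux/notifier.h>

    static RAW_NOTIFIER_HEAD(example_chain);

    static int example_event(void *v)
    {
        int nr_calls = 0;
        int ret;

        ret = __raw_notifier_call_chain(&example_chain, CPU_DOWN_PREPARE,
                                        v, -1, &nr_calls);
        if (ret == NOTIFY_BAD)
            /* roll back only the nr_calls notifiers that ran */
            __raw_notifier_call_chain(&example_chain, CPU_DOWN_FAILED,
                                      v, nr_calls, NULL);
        return ret;
    }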
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6e8fa3049e5d..87545e0f0b58 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -107,26 +107,11 @@ typedef int __bitwise suspend_state_t;
107#define PM_SUSPEND_ON ((__force suspend_state_t) 0) 107#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
108#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1) 108#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
109#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 109#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
110#define PM_SUSPEND_DISK ((__force suspend_state_t) 4) 110#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
111#define PM_SUSPEND_MAX ((__force suspend_state_t) 5)
112
113typedef int __bitwise suspend_disk_method_t;
114
115/* invalid must be 0 so struct pm_ops initialisers can leave it out */
116#define PM_DISK_INVALID ((__force suspend_disk_method_t) 0)
117#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 1)
118#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 2)
119#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 3)
120#define PM_DISK_TEST ((__force suspend_disk_method_t) 4)
121#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 5)
122#define PM_DISK_MAX ((__force suspend_disk_method_t) 6)
123 111
124/** 112/**
125 * struct pm_ops - Callbacks for managing platform dependent suspend states. 113 * struct pm_ops - Callbacks for managing platform dependent suspend states.
126 * @valid: Callback to determine whether the given state can be entered. 114 * @valid: Callback to determine whether the given state can be entered.
127 * If %CONFIG_SOFTWARE_SUSPEND is set then %PM_SUSPEND_DISK is
128 * always valid and never passed to this call. If not assigned,
129 * no suspend states are valid.
130 * Valid states are advertised in /sys/power/state but can still 115 * Valid states are advertised in /sys/power/state but can still
131 * be rejected by prepare or enter if the conditions aren't right. 116 * be rejected by prepare or enter if the conditions aren't right.
132 * There is a %pm_valid_only_mem function available that can be assigned 117 * There is a %pm_valid_only_mem function available that can be assigned
@@ -140,24 +125,12 @@ typedef int __bitwise suspend_disk_method_t;
140 * 125 *
141 * @finish: Called when the system has left the given state and all devices 126 * @finish: Called when the system has left the given state and all devices
142 * are resumed. The return value is ignored. 127 * are resumed. The return value is ignored.
143 *
144 * @pm_disk_mode: The generic code always allows one of the shutdown methods
145 * %PM_DISK_SHUTDOWN, %PM_DISK_REBOOT, %PM_DISK_TEST and
146 * %PM_DISK_TESTPROC. If this variable is set, the mode it is set
147 * to is allowed in addition to those modes and is also made default.
148 * When this mode is sent selected, the @prepare call will be called
149 * before suspending to disk (if present), the @enter call should be
150 * present and will be called after all state has been saved and the
151 * machine is ready to be powered off; the @finish callback is called
152 * after state has been restored. All these calls are called with
153 * %PM_SUSPEND_DISK as the state.
154 */ 128 */
155struct pm_ops { 129struct pm_ops {
156 int (*valid)(suspend_state_t state); 130 int (*valid)(suspend_state_t state);
157 int (*prepare)(suspend_state_t state); 131 int (*prepare)(suspend_state_t state);
158 int (*enter)(suspend_state_t state); 132 int (*enter)(suspend_state_t state);
159 int (*finish)(suspend_state_t state); 133 int (*finish)(suspend_state_t state);
160 suspend_disk_method_t pm_disk_mode;
161}; 134};
162 135
163/** 136/**
@@ -276,8 +249,6 @@ extern void device_power_up(void);
276extern void device_resume(void); 249extern void device_resume(void);
277 250
278#ifdef CONFIG_PM 251#ifdef CONFIG_PM
279extern suspend_disk_method_t pm_disk_mode;
280
281extern int device_suspend(pm_message_t state); 252extern int device_suspend(pm_message_t state);
282extern int device_prepare_suspend(pm_message_t state); 253extern int device_prepare_suspend(pm_message_t state);
283 254
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 0deb842541ac..f9e77d2ee320 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -87,10 +87,10 @@ do { \
87 * management of their lifetimes must be completely managed by API users. 87 * management of their lifetimes must be completely managed by API users.
88 * 88 *
89 * For API usage, in general, 89 * For API usage, in general,
90 * - any function _modifying_ the the tree or tags (inserting or deleting 90 * - any function _modifying_ the tree or tags (inserting or deleting
91 * items, setting or clearing tags must exclude other modifications, and 91 * items, setting or clearing tags must exclude other modifications, and
92 * exclude any functions reading the tree. 92 * exclude any functions reading the tree.
93 * - any function _reading_ the the tree or tags (looking up items or tags, 93 * - any function _reading_ the tree or tags (looking up items or tags,
94 * gang lookups) must exclude modifications to the tree, but may occur 94 * gang lookups) must exclude modifications to the tree, but may occur
95 * concurrently with other readers. 95 * concurrently with other readers.
96 * 96 *
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c8..a121f36f4437 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -201,7 +201,6 @@ struct mddev_s
201 struct mutex reconfig_mutex; 201 struct mutex reconfig_mutex;
202 atomic_t active; 202 atomic_t active;
203 203
204 int changed; /* true if we might need to reread partition info */
205 int degraded; /* whether md should consider 204 int degraded; /* whether md should consider
206 * adding a spare 205 * adding a spare
207 */ 206 */
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 759a0f97bec2..6cd8c4425fc7 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/timer.h>
15#include <linux/wait.h> 16#include <linux/wait.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/fs.h> 18#include <linux/fs.h>
@@ -38,7 +39,7 @@ struct rchan_buf
38 size_t subbufs_consumed; /* count of sub-buffers consumed */ 39 size_t subbufs_consumed; /* count of sub-buffers consumed */
39 struct rchan *chan; /* associated channel */ 40 struct rchan *chan; /* associated channel */
40 wait_queue_head_t read_wait; /* reader wait queue */ 41 wait_queue_head_t read_wait; /* reader wait queue */
41 struct delayed_work wake_readers; /* reader wake-up work struct */ 42 struct timer_list timer; /* reader wake-up timer */
42 struct dentry *dentry; /* channel file dentry */ 43 struct dentry *dentry; /* channel file dentry */
43 struct kref kref; /* channel buffer refcount */ 44 struct kref kref; /* channel buffer refcount */
44 struct page **page_array; /* array of current buffer pages */ 45 struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index ace25acfdc97..746580c1939c 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -34,6 +34,7 @@
34 * @prim: Primitive element, index form 34 * @prim: Primitive element, index form
35 * @iprim: prim-th root of 1, index form 35 * @iprim: prim-th root of 1, index form
36 * @gfpoly: The primitive generator polynominal 36 * @gfpoly: The primitive generator polynominal
37 * @gffunc: Function to generate the field, if non-canonical representation
37 * @users: Users of this structure 38 * @users: Users of this structure
38 * @list: List entry for the rs control list 39 * @list: List entry for the rs control list
39*/ 40*/
@@ -48,6 +49,7 @@ struct rs_control {
48 int prim; 49 int prim;
49 int iprim; 50 int iprim;
50 int gfpoly; 51 int gfpoly;
52 int (*gffunc)(int);
51 int users; 53 int users;
52 struct list_head list; 54 struct list_head list;
53}; 55};
@@ -77,6 +79,8 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
77/* Create or get a matching rs control structure */ 79/* Create or get a matching rs control structure */
78struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, 80struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
79 int nroots); 81 int nroots);
82struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int),
83 int fcr, int prim, int nroots);
80 84
81/* Release a rs control structure */ 85/* Release a rs control structure */
82void free_rs(struct rs_control *rs); 86void free_rs(struct rs_control *rs);
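
A hedged sketch of the non-canonical constructor: instead of a generator polynomial, the caller supplies a function mapping an element index to its field representation. my_gffunc and the parameter values are purely illustrative:

    #include <linux/rslib.h>

    static int my_gffunc(int i)
    {
        return i;  /* a real implementation encodes the field here */
    }

    static struct rs_control *example_rs_init(void)
    {
        /* symsize 10, fcr 0, prim 1, 6 roots - example values only */
        return init_rs_non_canonical(10, my_gffunc, 0, 1, 6);
    }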
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d95c480f58d..17b72d88c4cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -817,7 +817,7 @@ struct prio_array;
817 817
818struct task_struct { 818struct task_struct {
819 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 819 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
820 struct thread_info *thread_info; 820 void *stack;
821 atomic_t usage; 821 atomic_t usage;
822 unsigned int flags; /* per process flags, defined below */ 822 unsigned int flags; /* per process flags, defined below */
823 unsigned int ptrace; 823 unsigned int ptrace;
@@ -1317,6 +1317,7 @@ extern int in_egroup_p(gid_t);
1317 1317
1318extern void proc_caches_init(void); 1318extern void proc_caches_init(void);
1319extern void flush_signals(struct task_struct *); 1319extern void flush_signals(struct task_struct *);
1320extern void ignore_signals(struct task_struct *);
1320extern void flush_signal_handlers(struct task_struct *, int force_default); 1321extern void flush_signal_handlers(struct task_struct *, int force_default);
1321extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 1322extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
1322 1323
@@ -1512,8 +1513,8 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
1512 1513
1513#ifndef __HAVE_THREAD_FUNCTIONS 1514#ifndef __HAVE_THREAD_FUNCTIONS
1514 1515
1515#define task_thread_info(task) (task)->thread_info 1516#define task_thread_info(task) ((struct thread_info *)(task)->stack)
1516#define task_stack_page(task) ((void*)((task)->thread_info)) 1517#define task_stack_page(task) ((task)->stack)
1517 1518
1518static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 1519static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1519{ 1520{
@@ -1523,7 +1524,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
1523 1524
1524static inline unsigned long *end_of_stack(struct task_struct *p) 1525static inline unsigned long *end_of_stack(struct task_struct *p)
1525{ 1526{
1526 return (unsigned long *)(p->thread_info + 1); 1527 return (unsigned long *)(task_thread_info(p) + 1);
1527} 1528}
1528 1529
1529#endif 1530#endif
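
A sketch of the accessor discipline the rename enforces (example_ti_flags() is invented): code that used to dereference ->thread_info must now go through task_thread_info()/task_stack_page():

    #include <linux/sched.h>

    static unsigned long example_ti_flags(struct task_struct *p)
    {
        /* was p->thread_info->flags before the rename */
        return task_thread_info(p)->flags;
    }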
diff --git a/include/linux/security.h b/include/linux/security.h
index 47e82c120f9a..9eb9e0fe0331 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -322,7 +322,7 @@ struct request_sock;
322 * @dir contains the inode structure of parent of the new file. 322 * @dir contains the inode structure of parent of the new file.
323 * @dentry contains the dentry structure of the new file. 323 * @dentry contains the dentry structure of the new file.
324 * @mode contains the mode of the new file. 324 * @mode contains the mode of the new file.
325 * @dev contains the the device number. 325 * @dev contains the device number.
326 * Return 0 if permission is granted. 326 * Return 0 if permission is granted.
327 * @inode_rename: 327 * @inode_rename:
328 * Check for permission to rename a file or directory. 328 * Check for permission to rename a file or directory.
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 14749056dd63..3fa0fab4a04b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -243,6 +243,131 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
243 243
244extern struct kmem_cache *sighand_cachep; 244extern struct kmem_cache *sighand_cachep;
245 245
246/*
247 * In POSIX a signal is sent either to a specific thread (Linux task)
248 * or to the process as a whole (Linux thread group). How the signal
249 * is sent determines whether it's to one thread or the whole group,
250 * which determines which signal mask(s) are involved in blocking it
251 * from being delivered until later. When the signal is delivered,
252 * either it's caught or ignored by a user handler or it has a default
253 * effect that applies to the whole thread group (POSIX process).
254 *
255 * The possible effects an unblocked signal set to SIG_DFL can have are:
256 * ignore - Nothing Happens
257 * terminate - kill the process, i.e. all threads in the group,
258 * similar to exit_group. The group leader (only) reports
259 * WIFSIGNALED status to its parent.
260 * coredump - write a core dump file describing all threads using
261 * the same mm and then kill all those threads
262 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
263 *
264 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
265 * Other signals when not blocked and set to SIG_DFL behave as follows.
266 * The job control signals also have other special effects.
267 *
268 * +--------------------+------------------+
269 * | POSIX signal | default action |
270 * +--------------------+------------------+
271 * | SIGHUP | terminate |
272 * | SIGINT | terminate |
273 * | SIGQUIT | coredump |
274 * | SIGILL | coredump |
275 * | SIGTRAP | coredump |
276 * | SIGABRT/SIGIOT | coredump |
277 * | SIGBUS | coredump |
278 * | SIGFPE | coredump |
279 * | SIGKILL | terminate(+) |
280 * | SIGUSR1 | terminate |
281 * | SIGSEGV | coredump |
282 * | SIGUSR2 | terminate |
283 * | SIGPIPE | terminate |
284 * | SIGALRM | terminate |
285 * | SIGTERM | terminate |
286 * | SIGCHLD | ignore |
287 * | SIGCONT | ignore(*) |
288 * | SIGSTOP | stop(*)(+) |
289 * | SIGTSTP | stop(*) |
290 * | SIGTTIN | stop(*) |
291 * | SIGTTOU | stop(*) |
292 * | SIGURG | ignore |
293 * | SIGXCPU | coredump |
294 * | SIGXFSZ | coredump |
295 * | SIGVTALRM | terminate |
296 * | SIGPROF | terminate |
297 * | SIGPOLL/SIGIO | terminate |
298 * | SIGSYS/SIGUNUSED | coredump |
299 * | SIGSTKFLT | terminate |
300 * | SIGWINCH | ignore |
301 * | SIGPWR | terminate |
302 * | SIGRTMIN-SIGRTMAX | terminate |
303 * +--------------------+------------------+
304 * | non-POSIX signal | default action |
305 * +--------------------+------------------+
306 * | SIGEMT | coredump |
307 * +--------------------+------------------+
308 *
309 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
310 * (*) Special job control effects:
311 * When SIGCONT is sent, it resumes the process (all threads in the group)
312 * from TASK_STOPPED state and also clears any pending/queued stop signals
313 * (any of those marked with "stop(*)"). This happens regardless of blocking,
314 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
315 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 316 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
317 * default action of stopping the process may happen later or never.
318 */
319
320#ifdef SIGEMT
321#define SIGEMT_MASK rt_sigmask(SIGEMT)
322#else
323#define SIGEMT_MASK 0
324#endif
325
326#if SIGRTMIN > BITS_PER_LONG
327#define rt_sigmask(sig) (1ULL << ((sig)-1))
328#else
329#define rt_sigmask(sig) sigmask(sig)
330#endif
331#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
332
333#define SIG_KERNEL_ONLY_MASK (\
334 rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
335
336#define SIG_KERNEL_STOP_MASK (\
337 rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \
338 rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) )
339
340#define SIG_KERNEL_COREDUMP_MASK (\
341 rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \
342 rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \
343 rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \
344 rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \
345 rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \
346 SIGEMT_MASK )
347
348#define SIG_KERNEL_IGNORE_MASK (\
349 rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
350 rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
351
352#define sig_kernel_only(sig) \
353 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
354#define sig_kernel_coredump(sig) \
355 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
356#define sig_kernel_ignore(sig) \
357 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
358#define sig_kernel_stop(sig) \
359 (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
360
361#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
362
363#define sig_user_defined(t, signr) \
364 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
365 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
366
367#define sig_fatal(t, signr) \
368 (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
369 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
370
246#endif /* __KERNEL__ */ 371#endif /* __KERNEL__ */
247 372
248#endif /* _LINUX_SIGNAL_H */ 373#endif /* _LINUX_SIGNAL_H */
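The table and mask macros added above drive the kernel's default-action handling. A minimal sketch (not part of this patch; the function name is hypothetical) of how the sig_kernel_*() classifiers map a signal number back to the table's action column:

    #include <linux/signal.h>

    /* Classify an unblocked, SIG_DFL signal using the masks defined above.
     * All realtime signals fail the (sig) < SIGRTMIN test inside each
     * macro, so they fall through to "terminate", matching the table. */
    static const char *default_action(int signr)
    {
        if (sig_kernel_ignore(signr))
            return "ignore";
        if (sig_kernel_stop(signr))
            return "stop";
        if (sig_kernel_coredump(signr))
            return "coredump";
        return "terminate";
    }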
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ba23ec8211b..3f70149eabbb 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -83,7 +83,6 @@ void smp_prepare_boot_cpu(void);
83 * These macros fold the SMP functionality into a single CPU system 83 * These macros fold the SMP functionality into a single CPU system
84 */ 84 */
85#define raw_smp_processor_id() 0 85#define raw_smp_processor_id() 0
86#define hard_smp_processor_id() 0
87static inline int up_smp_call_function(void) 86static inline int up_smp_call_function(void)
88{ 87{
89 return 0; 88 return 0;
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index f56d24734950..34d4b075f7b8 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> 6 * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
7 7
8 * Copyright (C) 2001-2002 Alcôve <www.alcove.com> 8 * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
9 * 9 *
10 * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> 10 * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
11 * 11 *
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 35fa4d5aadd0..4a7ae8ab6eb8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -396,4 +396,23 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t);
396 396
397#define RPC_MAX_ADDRBUFLEN (63U) 397#define RPC_MAX_ADDRBUFLEN (63U)
398 398
399/*
400 * When we want to reduce the size of the reserved space in the response
401 * buffer, we need to take into account the size of any checksum data that
402 * may be at the end of the packet. This is difficult to determine exactly
403 * for all cases without actually generating the checksum, so we just use a
404 * static value.
405 */
406static inline void
407svc_reserve_auth(struct svc_rqst *rqstp, int space)
408{
409 int added_space = 0;
410
411 switch(rqstp->rq_authop->flavour) {
412 case RPC_AUTH_GSS:
413 added_space = RPC_MAX_AUTH_SIZE;
414 }
415 return svc_reserve(rqstp, space + added_space);
416}
417
399#endif /* SUNRPC_SVC_H */ 418#endif /* SUNRPC_SVC_H */
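A hedged usage sketch (not from this patch): a server thread whose reply may be followed by a GSS checksum reserves space through the new wrapper instead of calling svc_reserve() directly. The rqstp variable is assumed in scope and the 512-byte figure is illustrative only:

    /* Reserve reply space; svc_reserve_auth() silently pads the request
     * by RPC_MAX_AUTH_SIZE when the auth flavour is RPC_AUTH_GSS. */
    svc_reserve_auth(rqstp, 512);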
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7909687557bf..e21dd93ac4b7 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -37,7 +37,8 @@ struct svc_sock {
37 37
38 atomic_t sk_reserved; /* space on outq that is reserved */ 38 atomic_t sk_reserved; /* space on outq that is reserved */
39 39
40 spinlock_t sk_defer_lock; /* protects sk_deferred */ 40 spinlock_t sk_lock; /* protects sk_deferred and
41 * sk_info_authunix */
41 struct list_head sk_deferred; /* deferred requests that need to 42 struct list_head sk_deferred; /* deferred requests that need to
 42 * be revisited */ 43 * be revisited */
43 struct mutex sk_mutex; /* to serialize sending data */ 44 struct mutex sk_mutex; /* to serialize sending data */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 9d2aa1a12aa0..9c7cb6430666 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -32,18 +32,51 @@ static inline int pm_prepare_console(void) { return 0; }
32static inline void pm_restore_console(void) {} 32static inline void pm_restore_console(void) {}
33#endif 33#endif
34 34
35/**
36 * struct hibernation_ops - hibernation platform support
37 *
38 * The methods in this structure allow a platform to override the default
39 * mechanism of shutting down the machine during a hibernation transition.
40 *
41 * All three methods must be assigned.
42 *
43 * @prepare: prepare system for hibernation
44 * @enter: shut down system after state has been saved to disk
45 * @finish: finish/clean up after state has been reloaded
46 */
47struct hibernation_ops {
48 int (*prepare)(void);
49 int (*enter)(void);
50 void (*finish)(void);
51};
52
35#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) 53#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
36/* kernel/power/snapshot.c */ 54/* kernel/power/snapshot.c */
37extern void __init register_nosave_region(unsigned long, unsigned long); 55extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
56static inline void register_nosave_region(unsigned long b, unsigned long e)
57{
58 __register_nosave_region(b, e, 0);
59}
60static inline void register_nosave_region_late(unsigned long b, unsigned long e)
61{
62 __register_nosave_region(b, e, 1);
63}
38extern int swsusp_page_is_forbidden(struct page *); 64extern int swsusp_page_is_forbidden(struct page *);
39extern void swsusp_set_page_free(struct page *); 65extern void swsusp_set_page_free(struct page *);
40extern void swsusp_unset_page_free(struct page *); 66extern void swsusp_unset_page_free(struct page *);
41extern unsigned long get_safe_page(gfp_t gfp_mask); 67extern unsigned long get_safe_page(gfp_t gfp_mask);
68
69extern void hibernation_set_ops(struct hibernation_ops *ops);
70extern int hibernate(void);
42#else 71#else
43static inline void register_nosave_region(unsigned long b, unsigned long e) {} 72static inline void register_nosave_region(unsigned long b, unsigned long e) {}
73static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
44static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 74static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
45static inline void swsusp_set_page_free(struct page *p) {} 75static inline void swsusp_set_page_free(struct page *p) {}
46static inline void swsusp_unset_page_free(struct page *p) {} 76static inline void swsusp_unset_page_free(struct page *p) {}
77
78static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
79static inline int hibernate(void) { return -ENOSYS; }
47#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */ 80#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */
48 81
49void save_processor_state(void); 82void save_processor_state(void);
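A minimal sketch (assumed, not from this patch; the acme_* names are hypothetical) of a platform driver wiring up the new hibernation_ops. Note the structure's own comment: all three methods must be assigned.

    #include <linux/init.h>
    #include <linux/suspend.h>

    static int acme_prepare(void)
    {
        /* Quiesce platform firmware before the image is written. */
        return 0;
    }

    static int acme_enter(void)
    {
        /* Power the machine down after the state has been saved. */
        return 0;
    }

    static void acme_finish(void)
    {
        /* Undo acme_prepare() after resume or on error. */
    }

    static struct hibernation_ops acme_hibernation_ops = {
        .prepare = acme_prepare,
        .enter   = acme_enter,
        .finish  = acme_finish,
    };

    static int __init acme_pm_init(void)
    {
        hibernation_set_ops(&acme_hibernation_ops);
        return 0;
    }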
diff --git a/include/linux/svga.h b/include/linux/svga.h
index e1cc552e04fe..13ad0b82ac28 100644
--- a/include/linux/svga.h
+++ b/include/linux/svga.h
@@ -113,6 +113,8 @@ void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); 113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor); 114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);
115int svga_get_tilemax(struct fb_info *info); 115int svga_get_tilemax(struct fb_info *info);
116void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
117 struct fb_var_screeninfo *var);
116 118
117int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); 119int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
118int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); 120int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1912c6cbef55..3139f4412297 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -576,6 +576,8 @@ asmlinkage long sys_fstatat64(int dfd, char __user *filename,
576 struct stat64 __user *statbuf, int flag); 576 struct stat64 __user *statbuf, int flag);
577asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, 577asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
578 int bufsiz); 578 int bufsiz);
579asmlinkage long sys_utimensat(int dfd, char __user *filename,
580 struct timespec __user *utimes, int flags);
579asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, 581asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
580 struct compat_timeval __user *t); 582 struct compat_timeval __user *t);
581asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, 583asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
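A hedged userspace sketch of the new utimensat syscall (not part of this patch). There is no glibc wrapper at this point, so it is invoked via syscall(); __NR_utimensat must be provided by sufficiently new kernel headers, the UTIME_* values are copied from the kernel's definitions, and the file name is hypothetical:

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <time.h>

    /* Values as defined by the kernel for this syscall. */
    #define UTIME_NOW   ((1l << 30) - 1l)
    #define UTIME_OMIT  ((1l << 30) - 2l)

    int main(void)
    {
        struct timespec ts[2] = {
            { .tv_sec = 0, .tv_nsec = UTIME_NOW },  /* atime = now */
            { .tv_sec = 0, .tv_nsec = UTIME_OMIT }, /* mtime untouched */
        };
        long ret = syscall(__NR_utimensat, AT_FDCWD, "somefile", ts, 0);
        return ret ? 1 : 0;
    }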
diff --git a/include/linux/usb.h b/include/linux/usb.h
index cfbd2bb8fa2c..94bd38a6d947 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -126,7 +126,7 @@ enum usb_interface_condition {
126 * Each interface may have alternate settings. The initial configuration 126 * Each interface may have alternate settings. The initial configuration
127 * of a device sets altsetting 0, but the device driver can change 127 * of a device sets altsetting 0, but the device driver can change
128 * that setting using usb_set_interface(). Alternate settings are often 128 * that setting using usb_set_interface(). Alternate settings are often
129 * used to control the the use of periodic endpoints, such as by having 129 * used to control the use of periodic endpoints, such as by having
130 * different endpoints use different amounts of reserved USB bandwidth. 130 * different endpoints use different amounts of reserved USB bandwidth.
131 * All standards-conformant USB devices that use isochronous endpoints 131 * All standards-conformant USB devices that use isochronous endpoints
132 * will use them in non-default settings. 132 * will use them in non-default settings.
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index acb1f105870c..d9325cf8a134 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -212,8 +212,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
212extern void __dec_zone_state(struct zone *, enum zone_stat_item); 212extern void __dec_zone_state(struct zone *, enum zone_stat_item);
213 213
214void refresh_cpu_vm_stats(int); 214void refresh_cpu_vm_stats(int);
215void refresh_vm_stats(void);
216
217#else /* CONFIG_SMP */ 215#else /* CONFIG_SMP */
218 216
219/* 217/*
@@ -260,7 +258,6 @@ static inline void __dec_zone_page_state(struct page *page,
260#define mod_zone_page_state __mod_zone_page_state 258#define mod_zone_page_state __mod_zone_page_state
261 259
262static inline void refresh_cpu_vm_stats(int cpu) { } 260static inline void refresh_cpu_vm_stats(int cpu) { }
263static inline void refresh_vm_stats(void) { }
264#endif 261#endif
265 262
266#endif /* _LINUX_VMSTAT_H */ 263#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f16ba1e0687d..d555f31c0746 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -24,15 +24,13 @@ typedef void (*work_func_t)(struct work_struct *work);
24struct work_struct { 24struct work_struct {
25 atomic_long_t data; 25 atomic_long_t data;
26#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ 26#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
27#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
28#define WORK_STRUCT_FLAG_MASK (3UL) 27#define WORK_STRUCT_FLAG_MASK (3UL)
29#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) 28#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
30 struct list_head entry; 29 struct list_head entry;
31 work_func_t func; 30 work_func_t func;
32}; 31};
33 32
34#define WORK_DATA_INIT(autorelease) \ 33#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
35 ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
36 34
37struct delayed_work { 35struct delayed_work {
38 struct work_struct work; 36 struct work_struct work;
@@ -44,14 +42,8 @@ struct execute_work {
44}; 42};
45 43
46#define __WORK_INITIALIZER(n, f) { \ 44#define __WORK_INITIALIZER(n, f) { \
47 .data = WORK_DATA_INIT(0), \ 45 .data = WORK_DATA_INIT(), \
48 .entry = { &(n).entry, &(n).entry }, \ 46 .entry = { &(n).entry, &(n).entry }, \
49 .func = (f), \
50 }
51
52#define __WORK_INITIALIZER_NAR(n, f) { \
53 .data = WORK_DATA_INIT(1), \
54 .entry = { &(n).entry, &(n).entry }, \
55 .func = (f), \ 47 .func = (f), \
56 } 48 }
57 49
@@ -60,23 +52,12 @@ struct execute_work {
60 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 52 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
61 } 53 }
62 54
63#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
64 .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
65 .timer = TIMER_INITIALIZER(NULL, 0, 0), \
66 }
67
68#define DECLARE_WORK(n, f) \ 55#define DECLARE_WORK(n, f) \
69 struct work_struct n = __WORK_INITIALIZER(n, f) 56 struct work_struct n = __WORK_INITIALIZER(n, f)
70 57
71#define DECLARE_WORK_NAR(n, f) \
72 struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
73
74#define DECLARE_DELAYED_WORK(n, f) \ 58#define DECLARE_DELAYED_WORK(n, f) \
75 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) 59 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
76 60
77#define DECLARE_DELAYED_WORK_NAR(n, f) \
78 struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
79
80/* 61/*
81 * initialize a work item's function pointer 62 * initialize a work item's function pointer
82 */ 63 */
@@ -95,16 +76,9 @@ struct execute_work {
95 * assignment of the work data initializer allows the compiler 76 * assignment of the work data initializer allows the compiler
96 * to generate better code. 77 * to generate better code.
97 */ 78 */
98#define INIT_WORK(_work, _func) \ 79#define INIT_WORK(_work, _func) \
99 do { \
100 (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \
101 INIT_LIST_HEAD(&(_work)->entry); \
102 PREPARE_WORK((_work), (_func)); \
103 } while (0)
104
105#define INIT_WORK_NAR(_work, _func) \
106 do { \ 80 do { \
107 (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \ 81 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
108 INIT_LIST_HEAD(&(_work)->entry); \ 82 INIT_LIST_HEAD(&(_work)->entry); \
109 PREPARE_WORK((_work), (_func)); \ 83 PREPARE_WORK((_work), (_func)); \
110 } while (0) 84 } while (0)
@@ -115,12 +89,6 @@ struct execute_work {
115 init_timer(&(_work)->timer); \ 89 init_timer(&(_work)->timer); \
116 } while (0) 90 } while (0)
117 91
118#define INIT_DELAYED_WORK_NAR(_work, _func) \
119 do { \
120 INIT_WORK_NAR(&(_work)->work, (_func)); \
121 init_timer(&(_work)->timer); \
122 } while (0)
123
124#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ 92#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
125 do { \ 93 do { \
126 INIT_WORK(&(_work)->work, (_func)); \ 94 INIT_WORK(&(_work)->work, (_func)); \
@@ -143,24 +111,10 @@ struct execute_work {
143 work_pending(&(w)->work) 111 work_pending(&(w)->work)
144 112
145/** 113/**
146 * work_release - Release a work item under execution 114 * work_clear_pending - for internal use only, mark a work item as not pending
147 * @work: The work item to release 115 * @work: The work item in question
148 *
149 * This is used to release a work item that has been initialised with automatic
150 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
151 * function the opportunity to grab auxiliary data from the container of the
152 * work_struct before clearing the pending bit as the work_struct may be
153 * subject to deallocation the moment the pending bit is cleared.
154 *
155 * In such a case, this should be called in the work function after it has
156 * fetched any data it may require from the containter of the work_struct.
157 * After this function has been called, the work_struct may be scheduled for
158 * further execution or it may be deallocated unless other precautions are
159 * taken.
160 *
161 * This should also be used to release a delayed work item.
162 */ 116 */
163#define work_release(work) \ 117#define work_clear_pending(work) \
164 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) 118 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
165 119
166 120
@@ -174,27 +128,28 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
174extern void destroy_workqueue(struct workqueue_struct *wq); 128extern void destroy_workqueue(struct workqueue_struct *wq);
175 129
176extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 130extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
177extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); 131extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
132 struct delayed_work *work, unsigned long delay));
178extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 133extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
179 struct delayed_work *work, unsigned long delay); 134 struct delayed_work *work, unsigned long delay);
135
180extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 136extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
137extern void flush_scheduled_work(void);
181 138
182extern int FASTCALL(schedule_work(struct work_struct *work)); 139extern int FASTCALL(schedule_work(struct work_struct *work));
183extern int FASTCALL(run_scheduled_work(struct work_struct *work)); 140extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
184extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); 141 unsigned long delay));
185 142extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
186extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); 143 unsigned long delay);
187extern int schedule_on_each_cpu(work_func_t func); 144extern int schedule_on_each_cpu(work_func_t func);
188extern void flush_scheduled_work(void);
189extern int current_is_keventd(void); 145extern int current_is_keventd(void);
190extern int keventd_up(void); 146extern int keventd_up(void);
191 147
192extern void init_workqueues(void); 148extern void init_workqueues(void);
193void cancel_rearming_delayed_work(struct delayed_work *work);
194void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
195 struct delayed_work *);
196int execute_in_process_context(work_func_t fn, struct execute_work *); 149int execute_in_process_context(work_func_t fn, struct execute_work *);
197 150
151extern void cancel_work_sync(struct work_struct *work);
152
198/* 153/*
199 * Kill off a pending schedule_delayed_work(). Note that the work callback 154 * Kill off a pending schedule_delayed_work(). Note that the work callback
200 * function may still be running on return from cancel_delayed_work(), unless 155 * function may still be running on return from cancel_delayed_work(), unless
@@ -207,8 +162,18 @@ static inline int cancel_delayed_work(struct delayed_work *work)
207 162
208 ret = del_timer(&work->timer); 163 ret = del_timer(&work->timer);
209 if (ret) 164 if (ret)
210 work_release(&work->work); 165 work_clear_pending(&work->work);
211 return ret; 166 return ret;
212} 167}
213 168
169extern void cancel_rearming_delayed_work(struct delayed_work *work);
170
171/* Obsolete. use cancel_rearming_delayed_work() */
172static inline
173void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
174 struct delayed_work *work)
175{
176 cancel_rearming_delayed_work(work);
177}
178
214#endif 179#endif
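The removal of the _NAR ("no auto-release") variants changes the teardown idiom for work items. A sketch (assumed, not from this patch; names hypothetical) of a self-rearming delayed work item stopped cleanly under the new API:

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(example_dwork, example_fn);

    static void example_fn(struct work_struct *work)
    {
        /* ... do the periodic work, then rearm ... */
        schedule_delayed_work(&example_dwork, HZ);
    }

    static void example_teardown(void)
    {
        /* Replaces the old work_release()/INIT_*_NAR dance: waits for a
         * running callback and stops it from rearming itself. */
        cancel_rearming_delayed_work(&example_dwork);
    }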
diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h
index 7a209f61c482..0df574931522 100644
--- a/include/net/irda/af_irda.h
+++ b/include/net/irda/af_irda.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 89fe534045f1..36bee441aa56 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/iriap.h b/include/net/irda/iriap.h
index 2007c5a0a43f..fcc896491a95 100644
--- a/include/net/irda/iriap.h
+++ b/include/net/irda/iriap.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/iriap_event.h b/include/net/irda/iriap_event.h
index 4ca3d2071b03..89747f06d9eb 100644
--- a/include/net/irda/iriap_event.h
+++ b/include/net/irda/iriap_event.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irias_object.h b/include/net/irda/irias_object.h
index c41196b87955..83f78081799c 100644
--- a/include/net/irda/irias_object.h
+++ b/include/net/irda/irias_object.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlan_client.h b/include/net/irda/irlan_client.h
index 736dabe211e3..fa8455eda280 100644
--- a/include/net/irda/irlan_client.h
+++ b/include/net/irda/irlan_client.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 9592c374b41d..73cacb3ac16c 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h
index 9a9b3619d305..0062347600b9 100644
--- a/include/net/irda/irlan_eth.h
+++ b/include/net/irda/irlan_eth.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h
index b9baac9eb8b6..6d9539f05806 100644
--- a/include/net/irda/irlan_event.h
+++ b/include/net/irda/irlan_event.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
index 1720539ac2c1..a5a2539485bd 100644
--- a/include/net/irda/irlan_filter.h
+++ b/include/net/irda/irlan_filter.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlan_provider.h b/include/net/irda/irlan_provider.h
index ca51d5b7c999..92f3b0e1029b 100644
--- a/include/net/irda/irlan_provider.h
+++ b/include/net/irda/irlan_provider.h
@@ -16,7 +16,7 @@
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index e77eb88d9226..f0248fb8e196 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -18,7 +18,7 @@
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
index e212b9bc2503..3ffc1d0f93d6 100644
--- a/include/net/irda/irlmp.h
+++ b/include/net/irda/irlmp.h
@@ -18,7 +18,7 @@
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h
index 03c6f81a502a..e03ae4ae3963 100644
--- a/include/net/irda/irlmp_event.h
+++ b/include/net/irda/irlmp_event.h
@@ -18,7 +18,7 @@
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
diff --git a/include/net/irda/irlmp_frame.h b/include/net/irda/irlmp_frame.h
index c463f8bca856..1906eb71422e 100644
--- a/include/net/irda/irlmp_frame.h
+++ b/include/net/irda/irlmp_frame.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/irmod.h b/include/net/irda/irmod.h
index 72b446c1e22c..86f0dbb8ee5d 100644
--- a/include/net/irda/irmod.h
+++ b/include/net/irda/irmod.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
 22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/include/net/irda/irqueue.h b/include/net/irda/irqueue.h
index 335b0ace9665..37f512bd6733 100644
--- a/include/net/irda/irqueue.h
+++ b/include/net/irda/irqueue.h
@@ -21,7 +21,7 @@
21 * published by the Free Software Foundation; either version 2 of 21 * published by the Free Software Foundation; either version 2 of
22 * the License, or (at your option) any later version. 22 * the License, or (at your option) any later version.
23 * 23 *
24 * Neither Dag Brattli nor University of Tromsø admit liability nor 24 * Neither Dag Brattli nor University of Tromsø admit liability nor
25 * provide warranty for any of this software. This material is 25 * provide warranty for any of this software. This material is
26 * provided "AS-IS" and at no charge. 26 * provided "AS-IS" and at no charge.
27 * 27 *
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h
index a899e5837be8..cf80c1af5854 100644
--- a/include/net/irda/irttp.h
+++ b/include/net/irda/irttp.h
@@ -18,7 +18,7 @@
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
index 3a605d37ddbf..c0d938847bd3 100644
--- a/include/net/irda/parameters.h
+++ b/include/net/irda/parameters.h
@@ -26,7 +26,7 @@
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA 27 * MA 02111-1307 USA
28 * 28 *
29 * Michel Dänzer <daenzer@debian.org>, 10/2001 29 * Michel Dänzer <daenzer@debian.org>, 10/2001
30 * - simplify irda_pv_t to avoid endianness issues 30 * - simplify irda_pv_t to avoid endianness issues
31 * 31 *
32 ********************************************************************/ 32 ********************************************************************/
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
index cb61568547d1..cb2615ccf761 100644
--- a/include/net/irda/timer.h
+++ b/include/net/irda/timer.h
@@ -18,7 +18,7 @@
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h
index 98768b3f9e31..2942ad6ab932 100644
--- a/include/net/irda/wrapper.h
+++ b/include/net/irda/wrapper.h
@@ -17,7 +17,7 @@
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
diff --git a/init/Kconfig b/init/Kconfig
index d0edf42f4dba..e63a017c391e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -143,9 +143,7 @@ config POSIX_MQUEUE
143 queues every message has a priority which decides about succession 143 queues every message has a priority which decides about succession
144 of receiving it by a process. If you want to compile and run 144 of receiving it by a process. If you want to compile and run
145 programs written e.g. for Solaris with use of its POSIX message 145 programs written e.g. for Solaris with use of its POSIX message
146 queues (functions mq_*) say Y here. To use this feature you will 146 queues (functions mq_*) say Y here.
147 also need mqueue library, available from
148 <http://www.mat.uni.torun.pl/~wrona/posix_ipc/>
149 147
150 POSIX message queues are visible as a filesystem called 'mqueue' 148 POSIX message queues are visible as a filesystem called 'mqueue'
151 and can be mounted somewhere if you want to do filesystem 149 and can be mounted somewhere if you want to do filesystem
@@ -308,7 +306,7 @@ config SYSFS_DEPRECATED
308 releases. 306 releases.
309 307
310 If enabled, this option will also move any device structures 308 If enabled, this option will also move any device structures
311 that belong to a class, back into the /sys/class heirachy, in 309 that belong to a class, back into the /sys/class hierarchy, in
312 order to support older versions of udev. 310 order to support older versions of udev.
313 311
314 If you are using a distro that was released in 2006 or later, 312 If you are using a distro that was released in 2006 or later,
@@ -504,6 +502,15 @@ config VM_EVENT_COUNTERS
504 on EMBEDDED systems. /proc/vmstat will only show page counts 502 on EMBEDDED systems. /proc/vmstat will only show page counts
505 if VM event counters are disabled. 503 if VM event counters are disabled.
506 504
505config SLUB_DEBUG
506 default y
507 bool "Enable SLUB debugging support" if EMBEDDED
508 help
509 SLUB has extensive debug support features. Disabling these can
510 result in significant savings in code size. This also disables
511 SLUB sysfs support. /sys/slab will not exist and there will be
512 no support for cache validation etc.
513
507choice 514choice
508 prompt "Choose SLAB allocator" 515 prompt "Choose SLAB allocator"
509 default SLAB 516 default SLAB
@@ -514,9 +521,9 @@ config SLAB
514 bool "SLAB" 521 bool "SLAB"
515 help 522 help
516 The regular slab allocator that is established and known to work 523 The regular slab allocator that is established and known to work
517 well in all environments. It organizes chache hot objects in 524 well in all environments. It organizes cache hot objects in
518 per cpu and per node queues. SLAB is the default choice for 525 per cpu and per node queues. SLAB is the default choice for
519 slab allocator. 526 a slab allocator.
520 527
521config SLUB 528config SLUB
522 depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT 529 depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
@@ -526,21 +533,20 @@ config SLUB
526 instead of managing queues of cached objects (SLAB approach). 533 instead of managing queues of cached objects (SLAB approach).
527 Per cpu caching is realized using slabs of objects instead 534 Per cpu caching is realized using slabs of objects instead
528 of queues of objects. SLUB can use memory efficiently 535 of queues of objects. SLUB can use memory efficiently
529 way and has enhanced diagnostics. 536 and has enhanced diagnostics.
530 537
531config SLOB 538config SLOB
532# 539#
533# SLOB cannot support SMP because SLAB_DESTROY_BY_RCU does not work 540# SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
534# properly.
535# 541#
536 depends on EMBEDDED && !SMP && !SPARSEMEM 542 depends on EMBEDDED && !SMP && !SPARSEMEM
537 bool "SLOB (Simple Allocator)" 543 bool "SLOB (Simple Allocator)"
538 help 544 help
539 SLOB replaces the SLAB allocator with a drastically simpler 545 SLOB replaces the SLAB allocator with a drastically simpler
 540 allocator. SLOB is more space efficient than SLAB but does not 546 allocator. SLOB is more space efficient than SLAB but does not
541 scale well (single lock for all operations) and is more susceptible 547 scale well (single lock for all operations) and is also highly
542 to fragmentation. SLOB it is a great choice to reduce 548 susceptible to fragmentation. SLUB can accomplish a higher object
543 memory usage and code size for embedded systems. 549 density. It is usually better to use SLUB instead of SLOB.
544 550
545endchoice 551endchoice
546 552
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 3f57ed4599d6..46fe407fb03e 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -7,6 +7,7 @@
7#include <linux/root_dev.h> 7#include <linux/root_dev.h>
8#include <linux/security.h> 8#include <linux/security.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/genhd.h>
10#include <linux/mount.h> 11#include <linux/mount.h>
11#include <linux/device.h> 12#include <linux/device.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -308,17 +309,21 @@ retry:
308 /* 309 /*
309 * Allow the user to distinguish between failed sys_open 310 * Allow the user to distinguish between failed sys_open
310 * and bad superblock on root device. 311 * and bad superblock on root device.
312 * and give them a list of the available devices
311 */ 313 */
312#ifdef CONFIG_BLOCK 314#ifdef CONFIG_BLOCK
313 __bdevname(ROOT_DEV, b); 315 __bdevname(ROOT_DEV, b);
314#endif 316#endif
315 printk("VFS: Cannot open root device \"%s\" or %s\n", 317 printk("VFS: Cannot open root device \"%s\" or %s\n",
316 root_device_name, b); 318 root_device_name, b);
317 printk("Please append a correct \"root=\" boot option\n"); 319 printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
318 320
321 printk_all_partitions();
319 panic("VFS: Unable to mount root fs on %s", b); 322 panic("VFS: Unable to mount root fs on %s", b);
320 } 323 }
321 324
325 printk("List of all partitions:\n");
326 printk_all_partitions();
322 printk("No filesystem could mount root, tried: "); 327 printk("No filesystem could mount root, tried: ");
323 for (p = fs_names; *p; p += strlen(p)+1) 328 for (p = fs_names; *p; p += strlen(p)+1)
324 printk(" %s", p); 329 printk(" %s", p);
diff --git a/init/main.c b/init/main.c
index c1537e0ddceb..e8d080cab443 100644
--- a/init/main.c
+++ b/init/main.c
@@ -54,6 +54,7 @@
54#include <linux/lockdep.h> 54#include <linux/lockdep.h>
55#include <linux/pid_namespace.h> 55#include <linux/pid_namespace.h>
56#include <linux/device.h> 56#include <linux/device.h>
57#include <linux/kthread.h>
57 58
58#include <asm/io.h> 59#include <asm/io.h>
59#include <asm/bugs.h> 60#include <asm/bugs.h>
@@ -425,8 +426,12 @@ static void __init setup_command_line(char *command_line)
425static void noinline rest_init(void) 426static void noinline rest_init(void)
426 __releases(kernel_lock) 427 __releases(kernel_lock)
427{ 428{
429 int pid;
430
428 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 431 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
429 numa_default_policy(); 432 numa_default_policy();
433 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
434 kthreadd_task = find_task_by_pid(pid);
430 unlock_kernel(); 435 unlock_kernel();
431 436
432 /* 437 /*
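With kthreadd spawned from rest_init() above, kernel threads are created through the kthread helpers and parented to kthreadd_task rather than forked from whatever context happens to call in. A minimal sketch (not from this patch; names hypothetical):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_thread_fn(void *data)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
        }
        return 0;
    }

    static struct task_struct *example_start(void)
    {
        /* Created via kthreadd, so it inherits sane scheduler state. */
        return kthread_run(example_thread_fn, NULL, "example");
    }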
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 0b46a5dff4c0..c64ce9c14207 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -23,7 +23,7 @@ config PREEMPT_VOLUNTARY
23 "explicit preemption points" to the kernel code. These new 23 "explicit preemption points" to the kernel code. These new
24 preemption points have been selected to reduce the maximum 24 preemption points have been selected to reduce the maximum
25 latency of rescheduling, providing faster application reactions, 25 latency of rescheduling, providing faster application reactions,
26 at the cost of slighly lower throughput. 26 at the cost of slightly lower throughput.
27 27
28 This allows reaction to interactive events by allowing a 28 This allows reaction to interactive events by allowing a
29 low priority process to voluntarily preempt itself even if it 29 low priority process to voluntarily preempt itself even if it
@@ -43,7 +43,7 @@ config PREEMPT
43 even if it is in kernel mode executing a system call and would 43 even if it is in kernel mode executing a system call and would
44 otherwise not be about to reach a natural preemption point. 44 otherwise not be about to reach a natural preemption point.
45 This allows applications to run more 'smoothly' even when the 45 This allows applications to run more 'smoothly' even when the
46 system is under load, at the cost of slighly lower throughput 46 system is under load, at the cost of slightly lower throughput
47 and a slight runtime overhead to kernel code. 47 and a slight runtime overhead to kernel code.
48 48
49 Select this if you are building a kernel for a desktop or 49 Select this if you are building a kernel for a desktop or
diff --git a/kernel/configs.c b/kernel/configs.c
index 8fa1fb28f8a7..e84d3f9c6c7b 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -61,18 +61,9 @@ static ssize_t
61ikconfig_read_current(struct file *file, char __user *buf, 61ikconfig_read_current(struct file *file, char __user *buf,
62 size_t len, loff_t * offset) 62 size_t len, loff_t * offset)
63{ 63{
64 loff_t pos = *offset; 64 return simple_read_from_buffer(buf, len, offset,
65 ssize_t count; 65 kernel_config_data + MAGIC_SIZE,
66 66 kernel_config_data_size);
67 if (pos >= kernel_config_data_size)
68 return 0;
69
70 count = min(len, (size_t)(kernel_config_data_size - pos));
71 if (copy_to_user(buf, kernel_config_data + MAGIC_SIZE + pos, count))
72 return -EFAULT;
73
74 *offset += count;
75 return count;
76} 67}
77 68
78static const struct file_operations ikconfig_file_ops = { 69static const struct file_operations ikconfig_file_ops = {
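The conversion above (and the identical one in kernel/cpuset.c later in this patch) relies on simple_read_from_buffer() doing the EOF check, the copy_to_user() and the *ppos update in one call. A sketch (assumed, not from this patch) of the typical read op after such a conversion:

    #include <linux/fs.h>

    static char example_buf[] = "hello\n";

    /* One helper call now handles bounds checking, short reads,
     * copy_to_user() failure and advancing the file position. */
    static ssize_t example_read(struct file *file, char __user *buf,
                                size_t len, loff_t *ppos)
    {
        return simple_read_from_buffer(buf, len, ppos, example_buf,
                                       sizeof(example_buf) - 1);
    }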
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 36e70845cfc3..208cf3497c10 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -97,7 +97,7 @@ static inline void check_for_tasks(int cpu)
97 (!cputime_eq(p->utime, cputime_zero) || 97 (!cputime_eq(p->utime, cputime_zero) ||
98 !cputime_eq(p->stime, cputime_zero))) 98 !cputime_eq(p->stime, cputime_zero)))
99 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\ 99 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
100 (state = %ld, flags = %lx) \n", 100 (state = %ld, flags = %x) \n",
101 p->comm, p->pid, cpu, p->state, p->flags); 101 p->comm, p->pid, cpu, p->state, p->flags);
102 } 102 }
103 write_unlock_irq(&tasklist_lock); 103 write_unlock_irq(&tasklist_lock);
@@ -120,11 +120,13 @@ static int take_cpu_down(void *unused)
120} 120}
121 121
122/* Requires cpu_add_remove_lock to be held */ 122/* Requires cpu_add_remove_lock to be held */
123static int _cpu_down(unsigned int cpu) 123static int _cpu_down(unsigned int cpu, int tasks_frozen)
124{ 124{
125 int err; 125 int err, nr_calls = 0;
126 struct task_struct *p; 126 struct task_struct *p;
127 cpumask_t old_allowed, tmp; 127 cpumask_t old_allowed, tmp;
128 void *hcpu = (void *)(long)cpu;
129 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
128 130
129 if (num_online_cpus() == 1) 131 if (num_online_cpus() == 1)
130 return -EBUSY; 132 return -EBUSY;
@@ -132,12 +134,16 @@ static int _cpu_down(unsigned int cpu)
132 if (!cpu_online(cpu)) 134 if (!cpu_online(cpu))
133 return -EINVAL; 135 return -EINVAL;
134 136
135 err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, 137 raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
136 (void *)(long)cpu); 138 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
139 hcpu, -1, &nr_calls);
137 if (err == NOTIFY_BAD) { 140 if (err == NOTIFY_BAD) {
141 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
142 hcpu, nr_calls, NULL);
138 printk("%s: attempt to take down CPU %u failed\n", 143 printk("%s: attempt to take down CPU %u failed\n",
139 __FUNCTION__, cpu); 144 __FUNCTION__, cpu);
140 return -EINVAL; 145 err = -EINVAL;
146 goto out_release;
141 } 147 }
142 148
143 /* Ensure that we are not runnable on dying cpu */ 149 /* Ensure that we are not runnable on dying cpu */
@@ -152,8 +158,8 @@ static int _cpu_down(unsigned int cpu)
152 158
153 if (IS_ERR(p) || cpu_online(cpu)) { 159 if (IS_ERR(p) || cpu_online(cpu)) {
154 /* CPU didn't die: tell everyone. Can't complain. */ 160 /* CPU didn't die: tell everyone. Can't complain. */
155 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, 161 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
156 (void *)(long)cpu) == NOTIFY_BAD) 162 hcpu) == NOTIFY_BAD)
157 BUG(); 163 BUG();
158 164
159 if (IS_ERR(p)) { 165 if (IS_ERR(p)) {
@@ -170,13 +176,9 @@ static int _cpu_down(unsigned int cpu)
170 /* This actually kills the CPU. */ 176 /* This actually kills the CPU. */
171 __cpu_die(cpu); 177 __cpu_die(cpu);
172 178
173 /* Move it here so it can run. */
174 kthread_bind(p, get_cpu());
175 put_cpu();
176
177 /* CPU is completely dead: tell everyone. Too late to complain. */ 179 /* CPU is completely dead: tell everyone. Too late to complain. */
178 if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, 180 if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
179 (void *)(long)cpu) == NOTIFY_BAD) 181 hcpu) == NOTIFY_BAD)
180 BUG(); 182 BUG();
181 183
182 check_for_tasks(cpu); 184 check_for_tasks(cpu);
@@ -185,6 +187,8 @@ out_thread:
185 err = kthread_stop(p); 187 err = kthread_stop(p);
186out_allowed: 188out_allowed:
187 set_cpus_allowed(current, old_allowed); 189 set_cpus_allowed(current, old_allowed);
190out_release:
191 raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
188 return err; 192 return err;
189} 193}
190 194
@@ -196,7 +200,7 @@ int cpu_down(unsigned int cpu)
196 if (cpu_hotplug_disabled) 200 if (cpu_hotplug_disabled)
197 err = -EBUSY; 201 err = -EBUSY;
198 else 202 else
199 err = _cpu_down(cpu); 203 err = _cpu_down(cpu, 0);
200 204
201 mutex_unlock(&cpu_add_remove_lock); 205 mutex_unlock(&cpu_add_remove_lock);
202 return err; 206 return err;
@@ -204,15 +208,18 @@ int cpu_down(unsigned int cpu)
204#endif /*CONFIG_HOTPLUG_CPU*/ 208#endif /*CONFIG_HOTPLUG_CPU*/
205 209
206/* Requires cpu_add_remove_lock to be held */ 210/* Requires cpu_add_remove_lock to be held */
207static int __cpuinit _cpu_up(unsigned int cpu) 211static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
208{ 212{
209 int ret; 213 int ret, nr_calls = 0;
210 void *hcpu = (void *)(long)cpu; 214 void *hcpu = (void *)(long)cpu;
215 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
211 216
212 if (cpu_online(cpu) || !cpu_present(cpu)) 217 if (cpu_online(cpu) || !cpu_present(cpu))
213 return -EINVAL; 218 return -EINVAL;
214 219
215 ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); 220 raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
221 ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
222 -1, &nr_calls);
216 if (ret == NOTIFY_BAD) { 223 if (ret == NOTIFY_BAD) {
217 printk("%s: attempt to bring up CPU %u failed\n", 224 printk("%s: attempt to bring up CPU %u failed\n",
218 __FUNCTION__, cpu); 225 __FUNCTION__, cpu);
@@ -229,12 +236,13 @@ static int __cpuinit _cpu_up(unsigned int cpu)
229 BUG_ON(!cpu_online(cpu)); 236 BUG_ON(!cpu_online(cpu));
230 237
231 /* Now call notifier in preparation. */ 238 /* Now call notifier in preparation. */
232 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); 239 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
233 240
234out_notify: 241out_notify:
235 if (ret != 0) 242 if (ret != 0)
236 raw_notifier_call_chain(&cpu_chain, 243 __raw_notifier_call_chain(&cpu_chain,
237 CPU_UP_CANCELED, hcpu); 244 CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
245 raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
238 246
239 return ret; 247 return ret;
240} 248}
@@ -247,19 +255,13 @@ int __cpuinit cpu_up(unsigned int cpu)
247 if (cpu_hotplug_disabled) 255 if (cpu_hotplug_disabled)
248 err = -EBUSY; 256 err = -EBUSY;
249 else 257 else
250 err = _cpu_up(cpu); 258 err = _cpu_up(cpu, 0);
251 259
252 mutex_unlock(&cpu_add_remove_lock); 260 mutex_unlock(&cpu_add_remove_lock);
253 return err; 261 return err;
254} 262}
255 263
256#ifdef CONFIG_SUSPEND_SMP 264#ifdef CONFIG_SUSPEND_SMP
257/* Needed to prevent the microcode driver from requesting firmware in its CPU
258 * hotplug notifier during the suspend/resume.
259 */
260int suspend_cpu_hotplug;
261EXPORT_SYMBOL(suspend_cpu_hotplug);
262
263static cpumask_t frozen_cpus; 265static cpumask_t frozen_cpus;
264 266
265int disable_nonboot_cpus(void) 267int disable_nonboot_cpus(void)
@@ -267,7 +269,6 @@ int disable_nonboot_cpus(void)
267 int cpu, first_cpu, error = 0; 269 int cpu, first_cpu, error = 0;
268 270
269 mutex_lock(&cpu_add_remove_lock); 271 mutex_lock(&cpu_add_remove_lock);
270 suspend_cpu_hotplug = 1;
271 first_cpu = first_cpu(cpu_online_map); 272 first_cpu = first_cpu(cpu_online_map);
272 /* We take down all of the non-boot CPUs in one shot to avoid races 273 /* We take down all of the non-boot CPUs in one shot to avoid races
273 * with the userspace trying to use the CPU hotplug at the same time 274 * with the userspace trying to use the CPU hotplug at the same time
@@ -277,7 +278,7 @@ int disable_nonboot_cpus(void)
277 for_each_online_cpu(cpu) { 278 for_each_online_cpu(cpu) {
278 if (cpu == first_cpu) 279 if (cpu == first_cpu)
279 continue; 280 continue;
280 error = _cpu_down(cpu); 281 error = _cpu_down(cpu, 1);
281 if (!error) { 282 if (!error) {
282 cpu_set(cpu, frozen_cpus); 283 cpu_set(cpu, frozen_cpus);
283 printk("CPU%d is down\n", cpu); 284 printk("CPU%d is down\n", cpu);
@@ -294,7 +295,6 @@ int disable_nonboot_cpus(void)
294 } else { 295 } else {
295 printk(KERN_ERR "Non-boot CPUs are not disabled\n"); 296 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
296 } 297 }
297 suspend_cpu_hotplug = 0;
298 mutex_unlock(&cpu_add_remove_lock); 298 mutex_unlock(&cpu_add_remove_lock);
299 return error; 299 return error;
300} 300}
@@ -309,10 +309,9 @@ void enable_nonboot_cpus(void)
309 if (cpus_empty(frozen_cpus)) 309 if (cpus_empty(frozen_cpus))
310 goto out; 310 goto out;
311 311
312 suspend_cpu_hotplug = 1;
313 printk("Enabling non-boot CPUs ...\n"); 312 printk("Enabling non-boot CPUs ...\n");
314 for_each_cpu_mask(cpu, frozen_cpus) { 313 for_each_cpu_mask(cpu, frozen_cpus) {
315 error = _cpu_up(cpu); 314 error = _cpu_up(cpu, 1);
316 if (!error) { 315 if (!error) {
317 printk("CPU%d is up\n", cpu); 316 printk("CPU%d is up\n", cpu);
318 continue; 317 continue;
@@ -320,7 +319,6 @@ void enable_nonboot_cpus(void)
320 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 319 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
321 } 320 }
322 cpus_clear(frozen_cpus); 321 cpus_clear(frozen_cpus);
323 suspend_cpu_hotplug = 0;
324out: 322out:
325 mutex_unlock(&cpu_add_remove_lock); 323 mutex_unlock(&cpu_add_remove_lock);
326} 324}
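
With this hunk the suspend path stops exporting a global suspend_cpu_hotplug flag; instead, _cpu_down()/_cpu_up() OR CPU_TASKS_FROZEN into the event they broadcast, so notifiers can tell suspend/resume transitions apart from ordinary hotplug. A minimal sketch of a callback written against that convention (the callback name and per-cpu payload are hypothetical):

	static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
						  unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		/* Fold the _FROZEN (suspend/resume) variants of each event
		 * into the ordinary hotplug cases. */
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			/* allocate per-cpu state for 'cpu' here */
			break;
		case CPU_UP_CANCELED:
		case CPU_DEAD:
			/* tear down per-cpu state for 'cpu' here */
			break;
		}
		return NOTIFY_OK;
	}

Drivers such as the microcode loader, which previously tested suspend_cpu_hotplug, can key off the CPU_TASKS_FROZEN bit instead.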
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 88b416dfbc72..f57854b08922 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1772,12 +1772,7 @@ static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
1772{ 1772{
1773 struct ctr_struct *ctr = file->private_data; 1773 struct ctr_struct *ctr = file->private_data;
1774 1774
1775 if (*ppos + nbytes > ctr->bufsz) 1775 return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
1776 nbytes = ctr->bufsz - *ppos;
1777 if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
1778 return -EFAULT;
1779 *ppos += nbytes;
1780 return nbytes;
1781} 1776}
1782 1777
1783static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) 1778static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
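
The open-coded bounds check and copy_to_user() above had no guard against a negative *ppos; simple_read_from_buffer() centralizes that. Roughly, the helper behaves like this sketch (a paraphrase of the fs/libfs.c routine, not the verbatim source):

	static ssize_t example_read_from_buffer(void __user *to, size_t count,
						loff_t *ppos, const void *from,
						size_t available)
	{
		loff_t pos = *ppos;

		if (pos < 0)
			return -EINVAL;
		if (pos >= available)
			return 0;		/* EOF */
		if (count > available - pos)
			count = available - pos;
		if (copy_to_user(to, from + pos, count))
			return -EFAULT;
		*ppos = pos + count;
		return count;
	}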
diff --git a/kernel/exit.c b/kernel/exit.c
index f5a7abb621f3..b0c6f0c3a2df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -26,6 +26,7 @@
26#include <linux/profile.h> 26#include <linux/profile.h>
27#include <linux/mount.h> 27#include <linux/mount.h>
28#include <linux/proc_fs.h> 28#include <linux/proc_fs.h>
29#include <linux/kthread.h>
29#include <linux/mempolicy.h> 30#include <linux/mempolicy.h>
30#include <linux/taskstats_kern.h> 31#include <linux/taskstats_kern.h>
31#include <linux/delayacct.h> 32#include <linux/delayacct.h>
@@ -254,26 +255,25 @@ static int has_stopped_jobs(struct pid *pgrp)
254} 255}
255 256
256/** 257/**
257 * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to. 258 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
258 * 259 *
259 * If a kernel thread is launched as a result of a system call, or if 260 * If a kernel thread is launched as a result of a system call, or if
260 * it ever exits, it should generally reparent itself to init so that 261 * it ever exits, it should generally reparent itself to kthreadd so it
261 * it is correctly cleaned up on exit. 262 * isn't in the way of other processes and is correctly cleaned up on exit.
262 * 263 *
263 * The various task states such as scheduling policy and priority may have 264 * The various task states such as scheduling policy and priority may have
264 * been inherited from a user process, so we reset them to sane values here. 265 * been inherited from a user process, so we reset them to sane values here.
265 * 266 *
266 * NOTE that reparent_to_init() gives the caller full capabilities. 267 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
267 */ 268 */
268static void reparent_to_init(void) 269static void reparent_to_kthreadd(void)
269{ 270{
270 write_lock_irq(&tasklist_lock); 271 write_lock_irq(&tasklist_lock);
271 272
272 ptrace_unlink(current); 273 ptrace_unlink(current);
273 /* Reparent to init */ 274 /* Reparent to kthreadd */
274 remove_parent(current); 275 remove_parent(current);
275 current->parent = child_reaper(current); 276 current->real_parent = current->parent = kthreadd_task;
276 current->real_parent = child_reaper(current);
277 add_parent(current); 277 add_parent(current);
278 278
279 /* Set the exit signal to SIGCHLD so we signal init on exit */ 279 /* Set the exit signal to SIGCHLD so we signal init on exit */
@@ -347,7 +347,7 @@ int disallow_signal(int sig)
347 return -EINVAL; 347 return -EINVAL;
348 348
349 spin_lock_irq(&current->sighand->siglock); 349 spin_lock_irq(&current->sighand->siglock);
350 sigaddset(&current->blocked, sig); 350 current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
351 recalc_sigpending(); 351 recalc_sigpending();
352 spin_unlock_irq(&current->sighand->siglock); 352 spin_unlock_irq(&current->sighand->siglock);
353 return 0; 353 return 0;
@@ -400,7 +400,7 @@ void daemonize(const char *name, ...)
400 current->files = init_task.files; 400 current->files = init_task.files;
401 atomic_inc(&current->files->count); 401 atomic_inc(&current->files->count);
402 402
403 reparent_to_init(); 403 reparent_to_kthreadd();
404} 404}
405 405
406EXPORT_SYMBOL(daemonize); 406EXPORT_SYMBOL(daemonize);
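
Both hunks in this file adjust the legacy daemonize() path: the thread now reparents to kthreadd rather than init, and disallow_signal() sets the handler to SIG_IGN instead of merely blocking the signal. A hypothetical 2.6-era kernel-thread body using these helpers (the thread itself is illustrative):

	static int example_kthread(void *unused)
	{
		daemonize("example");	/* reparents to kthreadd, per the hunk above */
		allow_signal(SIGTERM);	/* opt back in to a single signal */

		while (!signal_pending(current)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);	/* periodic work goes here */
		}
		return 0;
	}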
diff --git a/kernel/fork.c b/kernel/fork.c
index a8dd75d4992b..5dd3979747f5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -105,7 +105,7 @@ static struct kmem_cache *mm_cachep;
105 105
106void free_task(struct task_struct *tsk) 106void free_task(struct task_struct *tsk)
107{ 107{
108 free_thread_info(tsk->thread_info); 108 free_thread_info(tsk->stack);
109 rt_mutex_debug_task_free(tsk); 109 rt_mutex_debug_task_free(tsk);
110 free_task_struct(tsk); 110 free_task_struct(tsk);
111} 111}
@@ -175,7 +175,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
175 } 175 }
176 176
177 *tsk = *orig; 177 *tsk = *orig;
178 tsk->thread_info = ti; 178 tsk->stack = ti;
179 setup_thread_stack(tsk, orig); 179 setup_thread_stack(tsk, orig);
180 180
181#ifdef CONFIG_CC_STACKPROTECTOR 181#ifdef CONFIG_CC_STACKPROTECTOR
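
The thread_info -> stack rename here is mechanical; the interesting part is the accompanying accessor change in <linux/sched.h>, which (roughly, as the same patch series defines it) hides the cast:

	#define task_thread_info(task)	((struct thread_info *)(task)->stack)
	#define task_stack_page(task)	((task)->stack)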
diff --git a/kernel/futex.c b/kernel/futex.c
index 600bc9d801f2..b7ce15c67e32 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -16,6 +16,9 @@
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> 17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 * 18 *
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
19 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly 22 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
20 * enough at me, Linus for the original (flawed) idea, Matthew 23 * enough at me, Linus for the original (flawed) idea, Matthew
21 * Kirkwood for proof-of-concept implementation. 24 * Kirkwood for proof-of-concept implementation.
@@ -53,6 +56,12 @@
53 56
54#include "rtmutex_common.h" 57#include "rtmutex_common.h"
55 58
59#ifdef CONFIG_DEBUG_RT_MUTEXES
60# include "rtmutex-debug.h"
61#else
62# include "rtmutex.h"
63#endif
64
56#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) 65#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
57 66
58/* 67/*
@@ -81,12 +90,12 @@ struct futex_pi_state {
81 * we can wake only the relevant ones (hashed queues may be shared). 90 * we can wake only the relevant ones (hashed queues may be shared).
82 * 91 *
83 * A futex_q has a woken state, just like tasks have TASK_RUNNING. 92 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
84 * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0. 93 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
85 * The order of wakeup is always to make the first condition true, then 94 * The order of wakeup is always to make the first condition true, then
86 * wake up q->waiters, then make the second condition true. 95 * wake up q->waiters, then make the second condition true.
87 */ 96 */
88struct futex_q { 97struct futex_q {
89 struct list_head list; 98 struct plist_node list;
90 wait_queue_head_t waiters; 99 wait_queue_head_t waiters;
91 100
92 /* Which hash list lock to use: */ 101 /* Which hash list lock to use: */
@@ -102,14 +111,20 @@ struct futex_q {
102 /* Optional priority inheritance state: */ 111 /* Optional priority inheritance state: */
103 struct futex_pi_state *pi_state; 112 struct futex_pi_state *pi_state;
104 struct task_struct *task; 113 struct task_struct *task;
114
115 /*
116 * This waiter is used in case of requeue from a
117 * normal futex to a PI-futex
118 */
119 struct rt_mutex_waiter waiter;
105}; 120};
106 121
107/* 122/*
108 * Split the global futex_lock into every hash list lock. 123 * Split the global futex_lock into every hash list lock.
109 */ 124 */
110struct futex_hash_bucket { 125struct futex_hash_bucket {
111 spinlock_t lock; 126 spinlock_t lock;
112 struct list_head chain; 127 struct plist_head chain;
113}; 128};
114 129
115static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS]; 130static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
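
From this point on each hash chain is a priority-sorted list (lib/plist.c) rather than a plain FIFO, so real-time waiters can be woken strictly in priority order. The plist calls used throughout the rest of this diff, in isolation (everything except the plist and futex_hash_bucket API is made up):

	struct example_waiter {
		struct plist_node node;
	};

	static void example_enqueue(struct futex_hash_bucket *hb,
				    struct example_waiter *w, int prio)
	{
		plist_node_init(&w->node, prio);	/* lower value = higher prio */
	#ifdef CONFIG_DEBUG_PI_LIST
		w->node.plist.lock = &hb->lock;		/* debug-only back-pointer */
	#endif
		plist_add(&w->node, &hb->chain);	/* sorted insert */
	}

	static void example_dequeue(struct futex_hash_bucket *hb,
				    struct example_waiter *w)
	{
		plist_del(&w->node, &hb->chain);
	}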
@@ -138,19 +153,26 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
138 && key1->both.offset == key2->both.offset); 153 && key1->both.offset == key2->both.offset);
139} 154}
140 155
141/* 156/**
142 * Get parameters which are the keys for a futex. 157 * get_futex_key - Get parameters which are the keys for a futex.
158 * @uaddr: virtual address of the futex
159 * @shared: NULL for a PROCESS_PRIVATE futex,
160 * &current->mm->mmap_sem for a PROCESS_SHARED futex
161 * @key: address where result is stored.
162 *
163 * Returns a negative error code or 0
164 * The key words are stored in *key on success.
143 * 165 *
144 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode, 166 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
145 * offset_within_page). For private mappings, it's (uaddr, current->mm). 167 * offset_within_page). For private mappings, it's (uaddr, current->mm).
146 * We can usually work out the index without swapping in the page. 168 * We can usually work out the index without swapping in the page.
147 * 169 *
148 * Returns: 0, or negative error code. 170 * fshared is NULL for PROCESS_PRIVATE futexes
149 * The key words are stored in *key on success. 171 * For other futexes, it points to &current->mm->mmap_sem and
150 * 172 * caller must have taken the reader lock, but NOT any spinlocks.
151 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
152 */ 173 */
153int get_futex_key(u32 __user *uaddr, union futex_key *key) 174int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
175 union futex_key *key)
154{ 176{
155 unsigned long address = (unsigned long)uaddr; 177 unsigned long address = (unsigned long)uaddr;
156 struct mm_struct *mm = current->mm; 178 struct mm_struct *mm = current->mm;
@@ -162,11 +184,25 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key)
162 * The futex address must be "naturally" aligned. 184 * The futex address must be "naturally" aligned.
163 */ 185 */
164 key->both.offset = address % PAGE_SIZE; 186 key->both.offset = address % PAGE_SIZE;
165 if (unlikely((key->both.offset % sizeof(u32)) != 0)) 187 if (unlikely((address % sizeof(u32)) != 0))
166 return -EINVAL; 188 return -EINVAL;
167 address -= key->both.offset; 189 address -= key->both.offset;
168 190
169 /* 191 /*
192 * PROCESS_PRIVATE futexes are fast.
193 * As the mm cannot disappear under us and the 'key' only needs the
194 * virtual address, we don't even have to find the underlying vma.
195 * Note: we do have to check that 'uaddr' is a valid user address,
196 * but access_ok() should be faster than find_vma()
197 */
198 if (!fshared) {
199 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
200 return -EFAULT;
201 key->private.mm = mm;
202 key->private.address = address;
203 return 0;
204 }
205 /*
170 * The futex is hashed differently depending on whether 206 * The futex is hashed differently depending on whether
171 * it's in a shared or private mapping. So check vma first. 207 * it's in a shared or private mapping. So check vma first.
172 */ 208 */
@@ -180,6 +216,9 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key)
180 if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) 216 if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
181 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; 217 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
182 218
219 /* Save the user address in the key */
220 key->uaddr = uaddr;
221
183 /* 222 /*
184 * Private mappings are handled in a simple way. 223 * Private mappings are handled in a simple way.
185 * 224 *
@@ -190,6 +229,7 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key)
190 * mappings of _writable_ handles. 229 * mappings of _writable_ handles.
191 */ 230 */
192 if (likely(!(vma->vm_flags & VM_MAYSHARE))) { 231 if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
232 key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
193 key->private.mm = mm; 233 key->private.mm = mm;
194 key->private.address = address; 234 key->private.address = address;
195 return 0; 235 return 0;
@@ -199,7 +239,7 @@ int get_futex_key(u32 __user *uaddr, union futex_key *key)
199 * Linear file mappings are also simple. 239 * Linear file mappings are also simple.
200 */ 240 */
201 key->shared.inode = vma->vm_file->f_path.dentry->d_inode; 241 key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
202 key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ 242 key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
203 if (likely(!(vma->vm_flags & VM_NONLINEAR))) { 243 if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
204 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT) 244 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
205 + vma->vm_pgoff); 245 + vma->vm_pgoff);
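
The fast path added above is the heart of the PROCESS_PRIVATE optimization: no mmap_sem, no vma walk, just an access_ok() check and the (mm, virtual address) pair as the hash key. User space selects the private variant per operation; a hedged sketch against the FUTEX_PRIVATE_FLAG ABI this series introduces:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Wait on a process-private futex word (illustrative wrapper). */
	static long futex_wait_private(int *uaddr, int val)
	{
		return syscall(SYS_futex, uaddr,
			       FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
			       val, NULL, NULL, 0);
	}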
@@ -227,16 +267,18 @@ EXPORT_SYMBOL_GPL(get_futex_key);
227 * Take a reference to the resource addressed by a key. 267 * Take a reference to the resource addressed by a key.
228 * Can be called while holding spinlocks. 268 * Can be called while holding spinlocks.
229 * 269 *
230 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
231 * function, if it is called at all. mmap_sem keeps key->shared.inode valid.
232 */ 270 */
233inline void get_futex_key_refs(union futex_key *key) 271inline void get_futex_key_refs(union futex_key *key)
234{ 272{
235 if (key->both.ptr != 0) { 273 if (key->both.ptr == 0)
236 if (key->both.offset & 1) 274 return;
275 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
276 case FUT_OFF_INODE:
237 atomic_inc(&key->shared.inode->i_count); 277 atomic_inc(&key->shared.inode->i_count);
238 else 278 break;
279 case FUT_OFF_MMSHARED:
239 atomic_inc(&key->private.mm->mm_count); 280 atomic_inc(&key->private.mm->mm_count);
281 break;
240 } 282 }
241} 283}
242EXPORT_SYMBOL_GPL(get_futex_key_refs); 284EXPORT_SYMBOL_GPL(get_futex_key_refs);
@@ -247,11 +289,15 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
247 */ 289 */
248void drop_futex_key_refs(union futex_key *key) 290void drop_futex_key_refs(union futex_key *key)
249{ 291{
250 if (key->both.ptr != 0) { 292 if (key->both.ptr == 0)
251 if (key->both.offset & 1) 293 return;
294 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
295 case FUT_OFF_INODE:
252 iput(key->shared.inode); 296 iput(key->shared.inode);
253 else 297 break;
298 case FUT_OFF_MMSHARED:
254 mmdrop(key->private.mm); 299 mmdrop(key->private.mm);
300 break;
255 } 301 }
256} 302}
257EXPORT_SYMBOL_GPL(drop_futex_key_refs); 303EXPORT_SYMBOL_GPL(drop_futex_key_refs);
@@ -268,28 +314,38 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
268} 314}
269 315
270/* 316/*
271 * Fault handling. Called with current->mm->mmap_sem held. 317 * Fault handling.
318 * if fshared is non NULL, current->mm->mmap_sem is already held
272 */ 319 */
273static int futex_handle_fault(unsigned long address, int attempt) 320static int futex_handle_fault(unsigned long address,
321 struct rw_semaphore *fshared, int attempt)
274{ 322{
275 struct vm_area_struct * vma; 323 struct vm_area_struct * vma;
276 struct mm_struct *mm = current->mm; 324 struct mm_struct *mm = current->mm;
325 int ret = -EFAULT;
277 326
278 if (attempt > 2 || !(vma = find_vma(mm, address)) || 327 if (attempt > 2)
279 vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) 328 return ret;
280 return -EFAULT;
281 329
282 switch (handle_mm_fault(mm, vma, address, 1)) { 330 if (!fshared)
283 case VM_FAULT_MINOR: 331 down_read(&mm->mmap_sem);
284 current->min_flt++; 332 vma = find_vma(mm, address);
285 break; 333 if (vma && address >= vma->vm_start &&
286 case VM_FAULT_MAJOR: 334 (vma->vm_flags & VM_WRITE)) {
287 current->maj_flt++; 335 switch (handle_mm_fault(mm, vma, address, 1)) {
288 break; 336 case VM_FAULT_MINOR:
289 default: 337 ret = 0;
290 return -EFAULT; 338 current->min_flt++;
339 break;
340 case VM_FAULT_MAJOR:
341 ret = 0;
342 current->maj_flt++;
343 break;
344 }
291 } 345 }
292 return 0; 346 if (!fshared)
347 up_read(&mm->mmap_sem);
348 return ret;
293} 349}
294 350
295/* 351/*
@@ -439,18 +495,19 @@ void exit_pi_state_list(struct task_struct *curr)
439} 495}
440 496
441static int 497static int
442lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) 498lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
499 union futex_key *key, struct futex_pi_state **ps)
443{ 500{
444 struct futex_pi_state *pi_state = NULL; 501 struct futex_pi_state *pi_state = NULL;
445 struct futex_q *this, *next; 502 struct futex_q *this, *next;
446 struct list_head *head; 503 struct plist_head *head;
447 struct task_struct *p; 504 struct task_struct *p;
448 pid_t pid; 505 pid_t pid;
449 506
450 head = &hb->chain; 507 head = &hb->chain;
451 508
452 list_for_each_entry_safe(this, next, head, list) { 509 plist_for_each_entry_safe(this, next, head, list) {
453 if (match_futex(&this->key, &me->key)) { 510 if (match_futex(&this->key, key)) {
454 /* 511 /*
455 * Another waiter already exists - bump up 512 * Another waiter already exists - bump up
456 * the refcount and return its pi_state: 513 * the refcount and return its pi_state:
@@ -465,7 +522,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
465 WARN_ON(!atomic_read(&pi_state->refcount)); 522 WARN_ON(!atomic_read(&pi_state->refcount));
466 523
467 atomic_inc(&pi_state->refcount); 524 atomic_inc(&pi_state->refcount);
468 me->pi_state = pi_state; 525 *ps = pi_state;
469 526
470 return 0; 527 return 0;
471 } 528 }
@@ -492,7 +549,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
492 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); 549 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
493 550
494 /* Store the key for possible exit cleanups: */ 551 /* Store the key for possible exit cleanups: */
495 pi_state->key = me->key; 552 pi_state->key = *key;
496 553
497 spin_lock_irq(&p->pi_lock); 554 spin_lock_irq(&p->pi_lock);
498 WARN_ON(!list_empty(&pi_state->list)); 555 WARN_ON(!list_empty(&pi_state->list));
@@ -502,7 +559,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
502 559
503 put_task_struct(p); 560 put_task_struct(p);
504 561
505 me->pi_state = pi_state; 562 *ps = pi_state;
506 563
507 return 0; 564 return 0;
508} 565}
@@ -513,12 +570,12 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
513 */ 570 */
514static void wake_futex(struct futex_q *q) 571static void wake_futex(struct futex_q *q)
515{ 572{
516 list_del_init(&q->list); 573 plist_del(&q->list, &q->list.plist);
517 if (q->filp) 574 if (q->filp)
518 send_sigio(&q->filp->f_owner, q->fd, POLL_IN); 575 send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
519 /* 576 /*
520 * The lock in wake_up_all() is a crucial memory barrier after the 577 * The lock in wake_up_all() is a crucial memory barrier after the
521 * list_del_init() and also before assigning to q->lock_ptr. 578 * plist_del() and also before assigning to q->lock_ptr.
522 */ 579 */
523 wake_up_all(&q->waiters); 580 wake_up_all(&q->waiters);
524 /* 581 /*
@@ -562,6 +619,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
562 */ 619 */
563 if (!(uval & FUTEX_OWNER_DIED)) { 620 if (!(uval & FUTEX_OWNER_DIED)) {
564 newval = FUTEX_WAITERS | new_owner->pid; 621 newval = FUTEX_WAITERS | new_owner->pid;
622 /* Keep the FUTEX_WAITER_REQUEUED flag if it was set */
623 newval |= (uval & FUTEX_WAITER_REQUEUED);
565 624
566 pagefault_disable(); 625 pagefault_disable();
567 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); 626 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
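
The wake and requeue paths that follow take two hash-bucket locks at once; double_lock_hb() (unchanged by this patch, visible as context in the next hunk) avoids ABBA deadlock by always acquiring the lower-addressed bucket first. Equivalent logic, as a sketch:

	static inline void example_double_lock(struct futex_hash_bucket *hb1,
					       struct futex_hash_bucket *hb2)
	{
		if (hb1 > hb2) {		/* order the pair by address */
			struct futex_hash_bucket *tmp = hb1;
			hb1 = hb2;
			hb2 = tmp;
		}
		spin_lock(&hb1->lock);
		if (hb1 != hb2)			/* same bucket: lock only once */
			spin_lock(&hb2->lock);
	}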
@@ -629,17 +688,19 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
629 * Wake up all waiters hashed on the physical page that is mapped 688 * Wake up all waiters hashed on the physical page that is mapped
630 * to this virtual address: 689 * to this virtual address:
631 */ 690 */
632static int futex_wake(u32 __user *uaddr, int nr_wake) 691static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
692 int nr_wake)
633{ 693{
634 struct futex_hash_bucket *hb; 694 struct futex_hash_bucket *hb;
635 struct futex_q *this, *next; 695 struct futex_q *this, *next;
636 struct list_head *head; 696 struct plist_head *head;
637 union futex_key key; 697 union futex_key key;
638 int ret; 698 int ret;
639 699
640 down_read(&current->mm->mmap_sem); 700 if (fshared)
701 down_read(fshared);
641 702
642 ret = get_futex_key(uaddr, &key); 703 ret = get_futex_key(uaddr, fshared, &key);
643 if (unlikely(ret != 0)) 704 if (unlikely(ret != 0))
644 goto out; 705 goto out;
645 706
@@ -647,7 +708,7 @@ static int futex_wake(u32 __user *uaddr, int nr_wake)
647 spin_lock(&hb->lock); 708 spin_lock(&hb->lock);
648 head = &hb->chain; 709 head = &hb->chain;
649 710
650 list_for_each_entry_safe(this, next, head, list) { 711 plist_for_each_entry_safe(this, next, head, list) {
651 if (match_futex (&this->key, &key)) { 712 if (match_futex (&this->key, &key)) {
652 if (this->pi_state) { 713 if (this->pi_state) {
653 ret = -EINVAL; 714 ret = -EINVAL;
@@ -661,7 +722,261 @@ static int futex_wake(u32 __user *uaddr, int nr_wake)
661 722
662 spin_unlock(&hb->lock); 723 spin_unlock(&hb->lock);
663out: 724out:
664 up_read(&current->mm->mmap_sem); 725 if (fshared)
726 up_read(fshared);
727 return ret;
728}
729
730/*
731 * Called from futex_requeue_pi.
732 * Set FUTEX_WAITERS and FUTEX_WAITER_REQUEUED flags on the
733 * PI-futex value; look up its associated pi_state if an owner exists,
734 * or create a new one without an owner.
735 */
736static inline int
737lookup_pi_state_for_requeue(u32 __user *uaddr, struct futex_hash_bucket *hb,
738 union futex_key *key,
739 struct futex_pi_state **pi_state)
740{
741 u32 curval, uval, newval;
742
743retry:
744 /*
745 * We can't handle a fault cleanly because we can't
746 * release the locks here. Simply return the fault.
747 */
748 if (get_futex_value_locked(&curval, uaddr))
749 return -EFAULT;
750
751 /* set the flags FUTEX_WAITERS and FUTEX_WAITER_REQUEUED */
752 if ((curval & (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED))
753 != (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) {
754 /*
755 * No waiters yet, we prepare the futex to have some waiters.
756 */
757
758 uval = curval;
759 newval = uval | FUTEX_WAITERS | FUTEX_WAITER_REQUEUED;
760
761 pagefault_disable();
762 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
763 pagefault_enable();
764
765 if (unlikely(curval == -EFAULT))
766 return -EFAULT;
767 if (unlikely(curval != uval))
768 goto retry;
769 }
770
771 if (!(curval & FUTEX_TID_MASK)
772 || lookup_pi_state(curval, hb, key, pi_state)) {
773 /* the futex has no owner (yet) or the lookup failed:
774 allocate one pi_state without owner */
775
776 *pi_state = alloc_pi_state();
777
778 /* Already stores the key: */
779 (*pi_state)->key = *key;
780
781 /* init the mutex without owner */
782 __rt_mutex_init(&(*pi_state)->pi_mutex, NULL);
783 }
784
785 return 0;
786}
787
788/*
789 * Wake up the first nr_wake waiters hashed on futex1 (one physical
790 * page), then requeue the next nr_requeue waiters onto another
791 * physical page (the PI-futex uaddr2).
792 */
793static int futex_requeue_pi(u32 __user *uaddr1,
794 struct rw_semaphore *fshared,
795 u32 __user *uaddr2,
796 int nr_wake, int nr_requeue, u32 *cmpval)
797{
798 union futex_key key1, key2;
799 struct futex_hash_bucket *hb1, *hb2;
800 struct plist_head *head1;
801 struct futex_q *this, *next;
802 struct futex_pi_state *pi_state2 = NULL;
803 struct rt_mutex_waiter *waiter, *top_waiter = NULL;
804 struct rt_mutex *lock2 = NULL;
805 int ret, drop_count = 0;
806
807 if (refill_pi_state_cache())
808 return -ENOMEM;
809
810retry:
811 /*
812 * First take all the futex related locks:
813 */
814 if (fshared)
815 down_read(fshared);
816
817 ret = get_futex_key(uaddr1, fshared, &key1);
818 if (unlikely(ret != 0))
819 goto out;
820 ret = get_futex_key(uaddr2, fshared, &key2);
821 if (unlikely(ret != 0))
822 goto out;
823
824 hb1 = hash_futex(&key1);
825 hb2 = hash_futex(&key2);
826
827 double_lock_hb(hb1, hb2);
828
829 if (likely(cmpval != NULL)) {
830 u32 curval;
831
832 ret = get_futex_value_locked(&curval, uaddr1);
833
834 if (unlikely(ret)) {
835 spin_unlock(&hb1->lock);
836 if (hb1 != hb2)
837 spin_unlock(&hb2->lock);
838
839 /*
840 * If we would have faulted, release mmap_sem, fault
841 * it in and start all over again.
842 */
843 if (fshared)
844 up_read(fshared);
845
846 ret = get_user(curval, uaddr1);
847
848 if (!ret)
849 goto retry;
850
851 return ret;
852 }
853 if (curval != *cmpval) {
854 ret = -EAGAIN;
855 goto out_unlock;
856 }
857 }
858
859 head1 = &hb1->chain;
860 plist_for_each_entry_safe(this, next, head1, list) {
861 if (!match_futex (&this->key, &key1))
862 continue;
863 if (++ret <= nr_wake) {
864 wake_futex(this);
865 } else {
866 /*
867 * FIRST: get and set the pi_state
868 */
869 if (!pi_state2) {
870 int s;
871 /* do this only the first time we requeue someone */
872 s = lookup_pi_state_for_requeue(uaddr2, hb2,
873 &key2, &pi_state2);
874 if (s) {
875 ret = s;
876 goto out_unlock;
877 }
878
879 lock2 = &pi_state2->pi_mutex;
880 spin_lock(&lock2->wait_lock);
881
882 /* Save the top waiter of the wait_list */
883 if (rt_mutex_has_waiters(lock2))
884 top_waiter = rt_mutex_top_waiter(lock2);
885 } else
886 atomic_inc(&pi_state2->refcount);
887
888
889 this->pi_state = pi_state2;
890
891 /*
892 * SECOND: requeue futex_q to the correct hashbucket
893 */
894
895 /*
896 * If key1 and key2 hash to the same bucket, no need to
897 * requeue.
898 */
899 if (likely(head1 != &hb2->chain)) {
900 plist_del(&this->list, &hb1->chain);
901 plist_add(&this->list, &hb2->chain);
902 this->lock_ptr = &hb2->lock;
903#ifdef CONFIG_DEBUG_PI_LIST
904 this->list.plist.lock = &hb2->lock;
905#endif
906 }
907 this->key = key2;
908 get_futex_key_refs(&key2);
909 drop_count++;
910
911
912 /*
913 * THIRD: queue it to lock2
914 */
915 spin_lock_irq(&this->task->pi_lock);
916 waiter = &this->waiter;
917 waiter->task = this->task;
918 waiter->lock = lock2;
919 plist_node_init(&waiter->list_entry, this->task->prio);
920 plist_node_init(&waiter->pi_list_entry, this->task->prio);
921 plist_add(&waiter->list_entry, &lock2->wait_list);
922 this->task->pi_blocked_on = waiter;
923 spin_unlock_irq(&this->task->pi_lock);
924
925 if (ret - nr_wake >= nr_requeue)
926 break;
927 }
928 }
929
930 /* If we've requeued some tasks and the top_waiter of the rt_mutex
931 has changed, we must adjust the priority of the owner, if any */
932 if (drop_count) {
933 struct task_struct *owner = rt_mutex_owner(lock2);
934 if (owner &&
935 (top_waiter != (waiter = rt_mutex_top_waiter(lock2)))) {
936 int chain_walk = 0;
937
938 spin_lock_irq(&owner->pi_lock);
939 if (top_waiter)
940 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
941 else
942 /*
943 * There were no waiters before the requeue,
944 * so the flag must be updated
945 */
946 mark_rt_mutex_waiters(lock2);
947
948 plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
949 __rt_mutex_adjust_prio(owner);
950 if (owner->pi_blocked_on) {
951 chain_walk = 1;
952 get_task_struct(owner);
953 }
954
955 spin_unlock_irq(&owner->pi_lock);
956 spin_unlock(&lock2->wait_lock);
957
958 if (chain_walk)
959 rt_mutex_adjust_prio_chain(owner, 0, lock2, NULL,
960 current);
961 } else {
962 /* No owner or the top_waiter does not change */
963 mark_rt_mutex_waiters(lock2);
964 spin_unlock(&lock2->wait_lock);
965 }
966 }
967
968out_unlock:
969 spin_unlock(&hb1->lock);
970 if (hb1 != hb2)
971 spin_unlock(&hb2->lock);
972
973 /* drop_futex_key_refs() must be called outside the spinlocks. */
974 while (--drop_count >= 0)
975 drop_futex_key_refs(&key1);
976
977out:
978 if (fshared)
979 up_read(fshared);
665 return ret; 980 return ret;
666} 981}
667 982
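
lookup_pi_state_for_requeue() above, like fixup_pi_state_owner() later in this diff, relies on the standard futex update idiom: read the user-space word, cmpxchg it with page faults disabled, and retry if another task changed it in between. Distilled into a standalone sketch (the helpers are the ones used above; the wrapper is illustrative):

	static int example_set_bits(u32 __user *uaddr, u32 bits)
	{
		u32 uval, curval;

		if (get_user(uval, uaddr))
			return -EFAULT;
		for (;;) {
			pagefault_disable();	/* the cmpxchg must not sleep */
			curval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
							       uval | bits);
			pagefault_enable();

			if (curval == -EFAULT)
				return -EFAULT;
			if (curval == uval)
				return 0;	/* we won the race */
			uval = curval;		/* lost it: retry with the new value */
		}
	}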
@@ -670,22 +985,24 @@ out:
670 * to this virtual address: 985 * to this virtual address:
671 */ 986 */
672static int 987static int
673futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2, 988futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
989 u32 __user *uaddr2,
674 int nr_wake, int nr_wake2, int op) 990 int nr_wake, int nr_wake2, int op)
675{ 991{
676 union futex_key key1, key2; 992 union futex_key key1, key2;
677 struct futex_hash_bucket *hb1, *hb2; 993 struct futex_hash_bucket *hb1, *hb2;
678 struct list_head *head; 994 struct plist_head *head;
679 struct futex_q *this, *next; 995 struct futex_q *this, *next;
680 int ret, op_ret, attempt = 0; 996 int ret, op_ret, attempt = 0;
681 997
682retryfull: 998retryfull:
683 down_read(&current->mm->mmap_sem); 999 if (fshared)
1000 down_read(fshared);
684 1001
685 ret = get_futex_key(uaddr1, &key1); 1002 ret = get_futex_key(uaddr1, fshared, &key1);
686 if (unlikely(ret != 0)) 1003 if (unlikely(ret != 0))
687 goto out; 1004 goto out;
688 ret = get_futex_key(uaddr2, &key2); 1005 ret = get_futex_key(uaddr2, fshared, &key2);
689 if (unlikely(ret != 0)) 1006 if (unlikely(ret != 0))
690 goto out; 1007 goto out;
691 1008
@@ -725,11 +1042,10 @@ retry:
725 * still holding the mmap_sem. 1042 * still holding the mmap_sem.
726 */ 1043 */
727 if (attempt++) { 1044 if (attempt++) {
728 if (futex_handle_fault((unsigned long)uaddr2, 1045 ret = futex_handle_fault((unsigned long)uaddr2,
729 attempt)) { 1046 fshared, attempt);
730 ret = -EFAULT; 1047 if (ret)
731 goto out; 1048 goto out;
732 }
733 goto retry; 1049 goto retry;
734 } 1050 }
735 1051
@@ -737,7 +1053,8 @@ retry:
737 * If we would have faulted, release mmap_sem, 1053 * If we would have faulted, release mmap_sem,
738 * fault it in and start all over again. 1054 * fault it in and start all over again.
739 */ 1055 */
740 up_read(&current->mm->mmap_sem); 1056 if (fshared)
1057 up_read(fshared);
741 1058
742 ret = get_user(dummy, uaddr2); 1059 ret = get_user(dummy, uaddr2);
743 if (ret) 1060 if (ret)
@@ -748,7 +1065,7 @@ retry:
748 1065
749 head = &hb1->chain; 1066 head = &hb1->chain;
750 1067
751 list_for_each_entry_safe(this, next, head, list) { 1068 plist_for_each_entry_safe(this, next, head, list) {
752 if (match_futex (&this->key, &key1)) { 1069 if (match_futex (&this->key, &key1)) {
753 wake_futex(this); 1070 wake_futex(this);
754 if (++ret >= nr_wake) 1071 if (++ret >= nr_wake)
@@ -760,7 +1077,7 @@ retry:
760 head = &hb2->chain; 1077 head = &hb2->chain;
761 1078
762 op_ret = 0; 1079 op_ret = 0;
763 list_for_each_entry_safe(this, next, head, list) { 1080 plist_for_each_entry_safe(this, next, head, list) {
764 if (match_futex (&this->key, &key2)) { 1081 if (match_futex (&this->key, &key2)) {
765 wake_futex(this); 1082 wake_futex(this);
766 if (++op_ret >= nr_wake2) 1083 if (++op_ret >= nr_wake2)
@@ -774,7 +1091,8 @@ retry:
774 if (hb1 != hb2) 1091 if (hb1 != hb2)
775 spin_unlock(&hb2->lock); 1092 spin_unlock(&hb2->lock);
776out: 1093out:
777 up_read(&current->mm->mmap_sem); 1094 if (fshared)
1095 up_read(fshared);
778 return ret; 1096 return ret;
779} 1097}
780 1098
@@ -782,22 +1100,24 @@ out:
782 * Requeue all waiters hashed on one physical page to another 1100 * Requeue all waiters hashed on one physical page to another
783 * physical page. 1101 * physical page.
784 */ 1102 */
785static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2, 1103static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
1104 u32 __user *uaddr2,
786 int nr_wake, int nr_requeue, u32 *cmpval) 1105 int nr_wake, int nr_requeue, u32 *cmpval)
787{ 1106{
788 union futex_key key1, key2; 1107 union futex_key key1, key2;
789 struct futex_hash_bucket *hb1, *hb2; 1108 struct futex_hash_bucket *hb1, *hb2;
790 struct list_head *head1; 1109 struct plist_head *head1;
791 struct futex_q *this, *next; 1110 struct futex_q *this, *next;
792 int ret, drop_count = 0; 1111 int ret, drop_count = 0;
793 1112
794 retry: 1113 retry:
795 down_read(&current->mm->mmap_sem); 1114 if (fshared)
1115 down_read(fshared);
796 1116
797 ret = get_futex_key(uaddr1, &key1); 1117 ret = get_futex_key(uaddr1, fshared, &key1);
798 if (unlikely(ret != 0)) 1118 if (unlikely(ret != 0))
799 goto out; 1119 goto out;
800 ret = get_futex_key(uaddr2, &key2); 1120 ret = get_futex_key(uaddr2, fshared, &key2);
801 if (unlikely(ret != 0)) 1121 if (unlikely(ret != 0))
802 goto out; 1122 goto out;
803 1123
@@ -820,7 +1140,8 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
820 * If we would have faulted, release mmap_sem, fault 1140 * If we would have faulted, release mmap_sem, fault
821 * it in and start all over again. 1141 * it in and start all over again.
822 */ 1142 */
823 up_read(&current->mm->mmap_sem); 1143 if (fshared)
1144 up_read(fshared);
824 1145
825 ret = get_user(curval, uaddr1); 1146 ret = get_user(curval, uaddr1);
826 1147
@@ -836,7 +1157,7 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
836 } 1157 }
837 1158
838 head1 = &hb1->chain; 1159 head1 = &hb1->chain;
839 list_for_each_entry_safe(this, next, head1, list) { 1160 plist_for_each_entry_safe(this, next, head1, list) {
840 if (!match_futex (&this->key, &key1)) 1161 if (!match_futex (&this->key, &key1))
841 continue; 1162 continue;
842 if (++ret <= nr_wake) { 1163 if (++ret <= nr_wake) {
@@ -847,9 +1168,13 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
847 * requeue. 1168 * requeue.
848 */ 1169 */
849 if (likely(head1 != &hb2->chain)) { 1170 if (likely(head1 != &hb2->chain)) {
850 list_move_tail(&this->list, &hb2->chain); 1171 plist_del(&this->list, &hb1->chain);
1172 plist_add(&this->list, &hb2->chain);
851 this->lock_ptr = &hb2->lock; 1173 this->lock_ptr = &hb2->lock;
852 } 1174#ifdef CONFIG_DEBUG_PI_LIST
1175 this->list.plist.lock = &hb2->lock;
1176#endif
1177 }
853 this->key = key2; 1178 this->key = key2;
854 get_futex_key_refs(&key2); 1179 get_futex_key_refs(&key2);
855 drop_count++; 1180 drop_count++;
@@ -869,7 +1194,8 @@ out_unlock:
869 drop_futex_key_refs(&key1); 1194 drop_futex_key_refs(&key1);
870 1195
871out: 1196out:
872 up_read(&current->mm->mmap_sem); 1197 if (fshared)
1198 up_read(fshared);
873 return ret; 1199 return ret;
874} 1200}
875 1201
@@ -894,7 +1220,23 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
894 1220
895static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) 1221static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
896{ 1222{
897 list_add_tail(&q->list, &hb->chain); 1223 int prio;
1224
1225 /*
1226 * The priority used to register this element is
1227 * - either the real thread-priority for the real-time threads
1228 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1229 * - or MAX_RT_PRIO for non-RT threads.
1230 * Thus, all RT-threads are woken first in priority order, and
1231 * the others are woken last, in FIFO order.
1232 */
1233 prio = min(current->normal_prio, MAX_RT_PRIO);
1234
1235 plist_node_init(&q->list, prio);
1236#ifdef CONFIG_DEBUG_PI_LIST
1237 q->list.plist.lock = &hb->lock;
1238#endif
1239 plist_add(&q->list, &hb->chain);
898 q->task = current; 1240 q->task = current;
899 spin_unlock(&hb->lock); 1241 spin_unlock(&hb->lock);
900} 1242}
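
Isolated, the priority computation above behaves like this (illustrative helper): real-time waiters keep their own priority and sort ahead of everyone else, while all SCHED_NORMAL waiters tie at MAX_RT_PRIO and therefore remain FIFO among themselves.

	static int example_queue_prio(struct task_struct *p)
	{
		/* p->normal_prio < MAX_RT_PRIO only for RT tasks */
		return min(p->normal_prio, MAX_RT_PRIO);
	}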
@@ -949,8 +1291,8 @@ static int unqueue_me(struct futex_q *q)
949 spin_unlock(lock_ptr); 1291 spin_unlock(lock_ptr);
950 goto retry; 1292 goto retry;
951 } 1293 }
952 WARN_ON(list_empty(&q->list)); 1294 WARN_ON(plist_node_empty(&q->list));
953 list_del(&q->list); 1295 plist_del(&q->list, &q->list.plist);
954 1296
955 BUG_ON(q->pi_state); 1297 BUG_ON(q->pi_state);
956 1298
@@ -964,39 +1306,104 @@ static int unqueue_me(struct futex_q *q)
964 1306
965/* 1307/*
966 * PI futexes cannot be requeued and must remove themselves from the 1308 * PI futexes cannot be requeued and must remove themselves from the
967 * hash bucket. The hash bucket lock is held on entry and dropped here. 1309 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1310 * and dropped here.
968 */ 1311 */
969static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb) 1312static void unqueue_me_pi(struct futex_q *q)
970{ 1313{
971 WARN_ON(list_empty(&q->list)); 1314 WARN_ON(plist_node_empty(&q->list));
972 list_del(&q->list); 1315 plist_del(&q->list, &q->list.plist);
973 1316
974 BUG_ON(!q->pi_state); 1317 BUG_ON(!q->pi_state);
975 free_pi_state(q->pi_state); 1318 free_pi_state(q->pi_state);
976 q->pi_state = NULL; 1319 q->pi_state = NULL;
977 1320
978 spin_unlock(&hb->lock); 1321 spin_unlock(q->lock_ptr);
979 1322
980 drop_futex_key_refs(&q->key); 1323 drop_futex_key_refs(&q->key);
981} 1324}
982 1325
1326/*
1327 * Fix up the pi_state owner to be current.
1328 *
1329 * The cur->mm semaphore must be held; it is released before this
1330 * function returns.
1331 */
1332static int fixup_pi_state_owner(u32 __user *uaddr, struct rw_semaphore *fshared,
1333 struct futex_q *q,
1334 struct futex_hash_bucket *hb,
1335 struct task_struct *curr)
1336{
1337 u32 newtid = curr->pid | FUTEX_WAITERS;
1338 struct futex_pi_state *pi_state = q->pi_state;
1339 u32 uval, curval, newval;
1340 int ret;
1341
1342 /* Owner died? */
1343 if (pi_state->owner != NULL) {
1344 spin_lock_irq(&pi_state->owner->pi_lock);
1345 WARN_ON(list_empty(&pi_state->list));
1346 list_del_init(&pi_state->list);
1347 spin_unlock_irq(&pi_state->owner->pi_lock);
1348 } else
1349 newtid |= FUTEX_OWNER_DIED;
1350
1351 pi_state->owner = curr;
1352
1353 spin_lock_irq(&curr->pi_lock);
1354 WARN_ON(!list_empty(&pi_state->list));
1355 list_add(&pi_state->list, &curr->pi_state_list);
1356 spin_unlock_irq(&curr->pi_lock);
1357
1358 /* Unqueue and drop the lock */
1359 unqueue_me_pi(q);
1360 if (fshared)
1361 up_read(fshared);
1362 /*
1363 * We own it, so we have to replace the pending owner
1364 * TID. This must be atomic as we have to preserve the
1365 * owner died bit here.
1366 */
1367 ret = get_user(uval, uaddr);
1368 while (!ret) {
1369 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1370 newval |= (uval & FUTEX_WAITER_REQUEUED);
1371 curval = futex_atomic_cmpxchg_inatomic(uaddr,
1372 uval, newval);
1373 if (curval == -EFAULT)
1374 ret = -EFAULT;
1375 if (curval == uval)
1376 break;
1377 uval = curval;
1378 }
1379 return ret;
1380}
1381
1382/*
1383 * In case we must use restart_block to restart a futex_wait,
1384 * we encode the 'shared' capability in arg3
1385 */
1386#define ARG3_SHARED 1
1387
983static long futex_wait_restart(struct restart_block *restart); 1388static long futex_wait_restart(struct restart_block *restart);
984static int futex_wait_abstime(u32 __user *uaddr, u32 val, 1389static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
985 int timed, unsigned long abs_time) 1390 u32 val, ktime_t *abs_time)
986{ 1391{
987 struct task_struct *curr = current; 1392 struct task_struct *curr = current;
988 DECLARE_WAITQUEUE(wait, curr); 1393 DECLARE_WAITQUEUE(wait, curr);
989 struct futex_hash_bucket *hb; 1394 struct futex_hash_bucket *hb;
990 struct futex_q q; 1395 struct futex_q q;
991 unsigned long time_left = 0;
992 u32 uval; 1396 u32 uval;
993 int ret; 1397 int ret;
1398 struct hrtimer_sleeper t, *to = NULL;
1399 int rem = 0;
994 1400
995 q.pi_state = NULL; 1401 q.pi_state = NULL;
996 retry: 1402 retry:
997 down_read(&curr->mm->mmap_sem); 1403 if (fshared)
1404 down_read(fshared);
998 1405
999 ret = get_futex_key(uaddr, &q.key); 1406 ret = get_futex_key(uaddr, fshared, &q.key);
1000 if (unlikely(ret != 0)) 1407 if (unlikely(ret != 0))
1001 goto out_release_sem; 1408 goto out_release_sem;
1002 1409
@@ -1019,8 +1426,8 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1019 * a wakeup when *uaddr != val on entry to the syscall. This is 1426 * a wakeup when *uaddr != val on entry to the syscall. This is
1020 * rare, but normal. 1427 * rare, but normal.
1021 * 1428 *
1022 * We hold the mmap semaphore, so the mapping cannot have changed 1429 * For shared futexes, we hold the mmap semaphore, so the mapping
1023 * since we looked it up in get_futex_key. 1430 * cannot have changed since we looked it up in get_futex_key.
1024 */ 1431 */
1025 ret = get_futex_value_locked(&uval, uaddr); 1432 ret = get_futex_value_locked(&uval, uaddr);
1026 1433
@@ -1031,7 +1438,8 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1031 * If we would have faulted, release mmap_sem, fault it in and 1438 * If we would have faulted, release mmap_sem, fault it in and
1032 * start all over again. 1439 * start all over again.
1033 */ 1440 */
1034 up_read(&curr->mm->mmap_sem); 1441 if (fshared)
1442 up_read(fshared);
1035 1443
1036 ret = get_user(uval, uaddr); 1444 ret = get_user(uval, uaddr);
1037 1445
@@ -1043,6 +1451,14 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1043 if (uval != val) 1451 if (uval != val)
1044 goto out_unlock_release_sem; 1452 goto out_unlock_release_sem;
1045 1453
1454 /*
1455 * This rt_mutex_waiter structure is prepared here and will
1456 * be used only if this task is requeued from a normal futex to
1457 * a PI-futex with futex_requeue_pi.
1458 */
1459 debug_rt_mutex_init_waiter(&q.waiter);
1460 q.waiter.task = NULL;
1461
1046 /* Only actually queue if *uaddr contained val. */ 1462 /* Only actually queue if *uaddr contained val. */
1047 __queue_me(&q, hb); 1463 __queue_me(&q, hb);
1048 1464
@@ -1050,7 +1466,8 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1050 * Now the futex is queued and we have checked the data, we 1466 * Now the futex is queued and we have checked the data, we
1051 * don't want to hold mmap_sem while we sleep. 1467 * don't want to hold mmap_sem while we sleep.
1052 */ 1468 */
1053 up_read(&curr->mm->mmap_sem); 1469 if (fshared)
1470 up_read(fshared);
1054 1471
1055 /* 1472 /*
1056 * There might have been scheduling since the queue_me(), as we 1473 * There might have been scheduling since the queue_me(), as we
@@ -1065,23 +1482,33 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1065 __set_current_state(TASK_INTERRUPTIBLE); 1482 __set_current_state(TASK_INTERRUPTIBLE);
1066 add_wait_queue(&q.waiters, &wait); 1483 add_wait_queue(&q.waiters, &wait);
1067 /* 1484 /*
1068 * !list_empty() is safe here without any lock. 1485 * !plist_node_empty() is safe here without any lock.
1069 * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 1486 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1070 */ 1487 */
1071 time_left = 0; 1488 if (likely(!plist_node_empty(&q.list))) {
1072 if (likely(!list_empty(&q.list))) { 1489 if (!abs_time)
1073 unsigned long rel_time; 1490 schedule();
1074 1491 else {
1075 if (timed) { 1492 to = &t;
1076 unsigned long now = jiffies; 1493 hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1077 if (time_after(now, abs_time)) 1494 hrtimer_init_sleeper(&t, current);
1078 rel_time = 0; 1495 t.timer.expires = *abs_time;
1079 else
1080 rel_time = abs_time - now;
1081 } else
1082 rel_time = MAX_SCHEDULE_TIMEOUT;
1083 1496
1084 time_left = schedule_timeout(rel_time); 1497 hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
1498
1499 /*
1500 * The timer could have already expired, in which
1501 * case current would be flagged for rescheduling.
1502 * Don't bother calling schedule.
1503 */
1504 if (likely(t.task))
1505 schedule();
1506
1507 hrtimer_cancel(&t.timer);
1508
1509 /* Flag if a timeout occurred */
1510 rem = (t.task == NULL);
1511 }
1085 } 1512 }
1086 __set_current_state(TASK_RUNNING); 1513 __set_current_state(TASK_RUNNING);
1087 1514
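
The jiffies arithmetic and schedule_timeout() are gone; futex_wait() now sleeps on an hrtimer_sleeper armed with an absolute ktime_t. The pattern in isolation (a sketch against the same 2.6-era hrtimer API used above):

	/* Returns 1 if the absolute timeout expired, 0 if we were woken early. */
	static int example_sleep_until(ktime_t *abs_time)
	{
		struct hrtimer_sleeper t;

		hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(&t, current);
		t.timer.expires = *abs_time;

		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);

		/* The timer may already have fired and cleared t.task;
		 * don't bother calling schedule() in that case. */
		if (likely(t.task))
			schedule();

		hrtimer_cancel(&t.timer);
		__set_current_state(TASK_RUNNING);

		return t.task == NULL;
	}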
@@ -1090,17 +1517,80 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1090 * we are the only user of it. 1517 * we are the only user of it.
1091 */ 1518 */
1092 1519
1520 if (q.pi_state) {
1521 /*
1522 * We were woken but have been requeued on a PI-futex.
1523 * We have to complete the lock acquisition by taking
1524 * the rtmutex.
1525 */
1526
1527 struct rt_mutex *lock = &q.pi_state->pi_mutex;
1528
1529 spin_lock(&lock->wait_lock);
1530 if (unlikely(q.waiter.task)) {
1531 remove_waiter(lock, &q.waiter);
1532 }
1533 spin_unlock(&lock->wait_lock);
1534
1535 if (rem)
1536 ret = -ETIMEDOUT;
1537 else
1538 ret = rt_mutex_timed_lock(lock, to, 1);
1539
1540 if (fshared)
1541 down_read(fshared);
1542 spin_lock(q.lock_ptr);
1543
1544 /*
1545 * Got the lock. We might not be the anticipated owner if we
1546 * did a lock-steal - fix up the PI-state in that case.
1547 */
1548 if (!ret && q.pi_state->owner != curr) {
1549 /*
1550 * We MUST play with the futex we were requeued on,
1551 * NOT the current futex.
1552 * We can retrieve it from the key of the pi_state
1553 */
1554 uaddr = q.pi_state->key.uaddr;
1555
1556 /* mmap_sem and the hash bucket lock are
1557 released before this function returns */
1558 ret = fixup_pi_state_owner(uaddr, fshared,
1559 &q, hb, curr);
1560 } else {
1561 /*
1562 * Catch the rare case, where the lock was released
1563 * when we were on the way back before we locked
1564 * the hash bucket.
1565 */
1566 if (ret && q.pi_state->owner == curr) {
1567 if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1568 ret = 0;
1569 }
1570 /* Unqueue and drop the lock */
1571 unqueue_me_pi(&q);
1572 if (fshared)
1573 up_read(fshared);
1574 }
1575
1576 debug_rt_mutex_free_waiter(&q.waiter);
1577
1578 return ret;
1579 }
1580
1581 debug_rt_mutex_free_waiter(&q.waiter);
1582
1093 /* If we were woken (and unqueued), we succeeded, whatever. */ 1583 /* If we were woken (and unqueued), we succeeded, whatever. */
1094 if (!unqueue_me(&q)) 1584 if (!unqueue_me(&q))
1095 return 0; 1585 return 0;
1096 if (time_left == 0) 1586 if (rem)
1097 return -ETIMEDOUT; 1587 return -ETIMEDOUT;
1098 1588
1099 /* 1589 /*
1100 * We expect signal_pending(current), but another thread may 1590 * We expect signal_pending(current), but another thread may
1101 * have handled it for us already. 1591 * have handled it for us already.
1102 */ 1592 */
1103 if (time_left == MAX_SCHEDULE_TIMEOUT) 1593 if (!abs_time)
1104 return -ERESTARTSYS; 1594 return -ERESTARTSYS;
1105 else { 1595 else {
1106 struct restart_block *restart; 1596 struct restart_block *restart;
@@ -1108,8 +1598,10 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1108 restart->fn = futex_wait_restart; 1598 restart->fn = futex_wait_restart;
1109 restart->arg0 = (unsigned long)uaddr; 1599 restart->arg0 = (unsigned long)uaddr;
1110 restart->arg1 = (unsigned long)val; 1600 restart->arg1 = (unsigned long)val;
1111 restart->arg2 = (unsigned long)timed; 1601 restart->arg2 = (unsigned long)abs_time;
1112 restart->arg3 = abs_time; 1602 restart->arg3 = 0;
1603 if (fshared)
1604 restart->arg3 |= ARG3_SHARED;
1113 return -ERESTART_RESTARTBLOCK; 1605 return -ERESTART_RESTARTBLOCK;
1114 } 1606 }
1115 1607
@@ -1117,65 +1609,111 @@ static int futex_wait_abstime(u32 __user *uaddr, u32 val,
1117 queue_unlock(&q, hb); 1609 queue_unlock(&q, hb);
1118 1610
1119 out_release_sem: 1611 out_release_sem:
1120 up_read(&curr->mm->mmap_sem); 1612 if (fshared)
1613 up_read(fshared);
1121 return ret; 1614 return ret;
1122} 1615}
1123 1616
1124static int futex_wait(u32 __user *uaddr, u32 val, unsigned long rel_time)
1125{
1126 int timed = (rel_time != MAX_SCHEDULE_TIMEOUT);
1127 return futex_wait_abstime(uaddr, val, timed, jiffies+rel_time);
1128}
1129 1617
1130static long futex_wait_restart(struct restart_block *restart) 1618static long futex_wait_restart(struct restart_block *restart)
1131{ 1619{
1132 u32 __user *uaddr = (u32 __user *)restart->arg0; 1620 u32 __user *uaddr = (u32 __user *)restart->arg0;
1133 u32 val = (u32)restart->arg1; 1621 u32 val = (u32)restart->arg1;
1134 int timed = (int)restart->arg2; 1622 ktime_t *abs_time = (ktime_t *)restart->arg2;
1135 unsigned long abs_time = restart->arg3; 1623 struct rw_semaphore *fshared = NULL;
1136 1624
1137 restart->fn = do_no_restart_syscall; 1625 restart->fn = do_no_restart_syscall;
1138 return (long)futex_wait_abstime(uaddr, val, timed, abs_time); 1626 if (restart->arg3 & ARG3_SHARED)
1627 fshared = &current->mm->mmap_sem;
1628 return (long)futex_wait(uaddr, fshared, val, abs_time);
1139} 1629}
1140 1630
1141 1631
1632static void set_pi_futex_owner(struct futex_hash_bucket *hb,
1633 union futex_key *key, struct task_struct *p)
1634{
1635 struct plist_head *head;
1636 struct futex_q *this, *next;
1637 struct futex_pi_state *pi_state = NULL;
1638 struct rt_mutex *lock;
1639
1640 /* Search for a waiter that should already exist */
1641
1642 head = &hb->chain;
1643
1644 plist_for_each_entry_safe(this, next, head, list) {
1645 if (match_futex (&this->key, key)) {
1646 pi_state = this->pi_state;
1647 break;
1648 }
1649 }
1650
1651 BUG_ON(!pi_state);
1652
1653 /* set p as pi_state's owner */
1654 lock = &pi_state->pi_mutex;
1655
1656 spin_lock(&lock->wait_lock);
1657 spin_lock_irq(&p->pi_lock);
1658
1659 list_add(&pi_state->list, &p->pi_state_list);
1660 pi_state->owner = p;
1661
1662
1663 /* set p as pi_mutex's owner */
1664 debug_rt_mutex_proxy_lock(lock, p);
1665 WARN_ON(rt_mutex_owner(lock));
1666 rt_mutex_set_owner(lock, p, 0);
1667 rt_mutex_deadlock_account_lock(lock, p);
1668
1669 plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
1670 &p->pi_waiters);
1671 __rt_mutex_adjust_prio(p);
1672
1673 spin_unlock_irq(&p->pi_lock);
1674 spin_unlock(&lock->wait_lock);
1675}
1676
1142/* 1677/*
1143 * Userspace tried a 0 -> TID atomic transition of the futex value 1678 * Userspace tried a 0 -> TID atomic transition of the futex value
1144 * and failed. The kernel side here does the whole locking operation: 1679 * and failed. The kernel side here does the whole locking operation:
1145 * if there are waiters then it will block, it does PI, etc. (Due to 1680 * if there are waiters then it will block, it does PI, etc. (Due to
1146 * races the kernel might see a 0 value of the futex too.) 1681 * races the kernel might see a 0 value of the futex too.)
1147 */ 1682 */
1148static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec, 1683static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1149 long nsec, int trylock) 1684 int detect, ktime_t *time, int trylock)
1150{ 1685{
1151 struct hrtimer_sleeper timeout, *to = NULL; 1686 struct hrtimer_sleeper timeout, *to = NULL;
1152 struct task_struct *curr = current; 1687 struct task_struct *curr = current;
1153 struct futex_hash_bucket *hb; 1688 struct futex_hash_bucket *hb;
1154 u32 uval, newval, curval; 1689 u32 uval, newval, curval;
1155 struct futex_q q; 1690 struct futex_q q;
1156 int ret, attempt = 0; 1691 int ret, lock_held, attempt = 0;
1157 1692
1158 if (refill_pi_state_cache()) 1693 if (refill_pi_state_cache())
1159 return -ENOMEM; 1694 return -ENOMEM;
1160 1695
1161 if (sec != MAX_SCHEDULE_TIMEOUT) { 1696 if (time) {
1162 to = &timeout; 1697 to = &timeout;
1163 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 1698 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
1164 hrtimer_init_sleeper(to, current); 1699 hrtimer_init_sleeper(to, current);
1165 to->timer.expires = ktime_set(sec, nsec); 1700 to->timer.expires = *time;
1166 } 1701 }
1167 1702
1168 q.pi_state = NULL; 1703 q.pi_state = NULL;
1169 retry: 1704 retry:
1170 down_read(&curr->mm->mmap_sem); 1705 if (fshared)
1706 down_read(fshared);
1171 1707
1172 ret = get_futex_key(uaddr, &q.key); 1708 ret = get_futex_key(uaddr, fshared, &q.key);
1173 if (unlikely(ret != 0)) 1709 if (unlikely(ret != 0))
1174 goto out_release_sem; 1710 goto out_release_sem;
1175 1711
1176 hb = queue_lock(&q, -1, NULL); 1712 hb = queue_lock(&q, -1, NULL);
1177 1713
1178 retry_locked: 1714 retry_locked:
1715 lock_held = 0;
1716
1179 /* 1717 /*
1180 * To avoid races, we attempt to take the lock here again 1718 * To avoid races, we attempt to take the lock here again
1181 * (by doing a 0 -> TID atomic cmpxchg), while holding all 1719 * (by doing a 0 -> TID atomic cmpxchg), while holding all
@@ -1194,7 +1732,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1194 if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) { 1732 if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
1195 if (!detect && 0) 1733 if (!detect && 0)
1196 force_sig(SIGKILL, current); 1734 force_sig(SIGKILL, current);
1197 ret = -EDEADLK; 1735 /*
1736 * Normally, this check is done in user space.
1737 * After a requeue, the owner may attempt to lock this futex,
1738 * even if ownership has already been handed over by the
1739 * previous waker.
1740 * Normally that would be a deadlock, but it is not in the
1741 * case of REQUEUE_PI.
1742 */
1743 if (!(curval & FUTEX_WAITER_REQUEUED))
1744 ret = -EDEADLK;
1198 goto out_unlock_release_sem; 1745 goto out_unlock_release_sem;
1199 } 1746 }
1200 1747
@@ -1206,7 +1753,18 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1206 goto out_unlock_release_sem; 1753 goto out_unlock_release_sem;
1207 1754
1208 uval = curval; 1755 uval = curval;
1209 newval = uval | FUTEX_WAITERS; 1756 /*
1757 * In case of a requeue, check if there already is an owner.
1758 * If not, just take the futex.
1759 */
1760 if ((curval & FUTEX_WAITER_REQUEUED) && !(curval & FUTEX_TID_MASK)) {
1761 /* set current as futex owner */
1762 newval = curval | current->pid;
1763 lock_held = 1;
1764 } else
1765 /* Set the WAITERS flag, so the owner will know it has someone
1766 to wake at next unlock */
1767 newval = curval | FUTEX_WAITERS;
1210 1768
1211 pagefault_disable(); 1769 pagefault_disable();
1212 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); 1770 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
@@ -1217,11 +1775,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1217 if (unlikely(curval != uval)) 1775 if (unlikely(curval != uval))
1218 goto retry_locked; 1776 goto retry_locked;
1219 1777
1778 if (lock_held) {
1779 set_pi_futex_owner(hb, &q.key, curr);
1780 goto out_unlock_release_sem;
1781 }
1782
1220 /* 1783 /*
1221 * We dont have the lock. Look up the PI state (or create it if 1784 * We dont have the lock. Look up the PI state (or create it if
1222 * we are the first waiter): 1785 * we are the first waiter):
1223 */ 1786 */
1224 ret = lookup_pi_state(uval, hb, &q); 1787 ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
1225 1788
1226 if (unlikely(ret)) { 1789 if (unlikely(ret)) {
1227 /* 1790 /*
@@ -1263,7 +1826,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1263 * Now the futex is queued and we have checked the data, we 1826 * Now the futex is queued and we have checked the data, we
1264 * don't want to hold mmap_sem while we sleep. 1827 * don't want to hold mmap_sem while we sleep.
1265 */ 1828 */
1266 up_read(&curr->mm->mmap_sem); 1829 if (fshared)
1830 up_read(fshared);
1267 1831
1268 WARN_ON(!q.pi_state); 1832 WARN_ON(!q.pi_state);
1269 /* 1833 /*
@@ -1277,52 +1841,18 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1277 ret = ret ? 0 : -EWOULDBLOCK; 1841 ret = ret ? 0 : -EWOULDBLOCK;
1278 } 1842 }
1279 1843
1280 down_read(&curr->mm->mmap_sem); 1844 if (fshared)
1845 down_read(fshared);
1281 spin_lock(q.lock_ptr); 1846 spin_lock(q.lock_ptr);
1282 1847
1283 /* 1848 /*
1284 * Got the lock. We might not be the anticipated owner if we 1849 * Got the lock. We might not be the anticipated owner if we
1285 * did a lock-steal - fix up the PI-state in that case. 1850 * did a lock-steal - fix up the PI-state in that case.
1286 */ 1851 */
1287 if (!ret && q.pi_state->owner != curr) { 1852 if (!ret && q.pi_state->owner != curr)
1288 u32 newtid = current->pid | FUTEX_WAITERS; 1853 /* mmap_sem is unlocked at return of this function */
1289 1854 ret = fixup_pi_state_owner(uaddr, fshared, &q, hb, curr);
1290 /* Owner died? */ 1855 else {
1291 if (q.pi_state->owner != NULL) {
1292 spin_lock_irq(&q.pi_state->owner->pi_lock);
1293 WARN_ON(list_empty(&q.pi_state->list));
1294 list_del_init(&q.pi_state->list);
1295 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1296 } else
1297 newtid |= FUTEX_OWNER_DIED;
1298
1299 q.pi_state->owner = current;
1300
1301 spin_lock_irq(&current->pi_lock);
1302 WARN_ON(!list_empty(&q.pi_state->list));
1303 list_add(&q.pi_state->list, &current->pi_state_list);
1304 spin_unlock_irq(&current->pi_lock);
1305
1306 /* Unqueue and drop the lock */
1307 unqueue_me_pi(&q, hb);
1308 up_read(&curr->mm->mmap_sem);
1309 /*
1310 * We own it, so we have to replace the pending owner
1311 * TID. This must be atomic as we have preserve the
1312 * owner died bit here.
1313 */
1314 ret = get_user(uval, uaddr);
1315 while (!ret) {
1316 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1317 curval = futex_atomic_cmpxchg_inatomic(uaddr,
1318 uval, newval);
1319 if (curval == -EFAULT)
1320 ret = -EFAULT;
1321 if (curval == uval)
1322 break;
1323 uval = curval;
1324 }
1325 } else {
1326 /* 1856 /*
1327 * Catch the rare case, where the lock was released 1857 * Catch the rare case, where the lock was released
1328 * when we were on the way back before we locked 1858 * when we were on the way back before we locked
@@ -1333,8 +1863,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1333 ret = 0; 1863 ret = 0;
1334 } 1864 }
1335 /* Unqueue and drop the lock */ 1865 /* Unqueue and drop the lock */
1336 unqueue_me_pi(&q, hb); 1866 unqueue_me_pi(&q);
1337 up_read(&curr->mm->mmap_sem); 1867 if (fshared)
1868 up_read(fshared);
1338 } 1869 }
1339 1870
1340 if (!detect && ret == -EDEADLK && 0) 1871 if (!detect && ret == -EDEADLK && 0)
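Every mmap_sem operation in these hunks is now gated on fshared, which is NULL for process-private futexes. The patch open-codes the test at each call site; wrapped as helpers purely for illustration, it amounts to:

	#include <linux/rwsem.h>

	static void futex_lock_mm(struct rw_semaphore *fshared)
	{
		if (fshared)		/* NULL: private futex, no mm walk needed */
			down_read(fshared);
	}

	static void futex_unlock_mm(struct rw_semaphore *fshared)
	{
		if (fshared)
			up_read(fshared);
	}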
@@ -1346,7 +1877,8 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1346 queue_unlock(&q, hb); 1877 queue_unlock(&q, hb);
1347 1878
1348 out_release_sem: 1879 out_release_sem:
1349 up_read(&curr->mm->mmap_sem); 1880 if (fshared)
1881 up_read(fshared);
1350 return ret; 1882 return ret;
1351 1883
1352 uaddr_faulted: 1884 uaddr_faulted:
@@ -1357,15 +1889,16 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1357 * still holding the mmap_sem. 1889 * still holding the mmap_sem.
1358 */ 1890 */
1359 if (attempt++) { 1891 if (attempt++) {
1360 if (futex_handle_fault((unsigned long)uaddr, attempt)) { 1892 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1361 ret = -EFAULT; 1893 attempt);
1894 if (ret)
1362 goto out_unlock_release_sem; 1895 goto out_unlock_release_sem;
1363 }
1364 goto retry_locked; 1896 goto retry_locked;
1365 } 1897 }
1366 1898
1367 queue_unlock(&q, hb); 1899 queue_unlock(&q, hb);
1368 up_read(&curr->mm->mmap_sem); 1900 if (fshared)
1901 up_read(fshared);
1369 1902
1370 ret = get_user(uval, uaddr); 1903 ret = get_user(uval, uaddr);
1371 if (!ret && (uval != -EFAULT)) 1904 if (!ret && (uval != -EFAULT))
@@ -1379,12 +1912,12 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1379 * This is the in-kernel slowpath: we look up the PI state (if any), 1912 * This is the in-kernel slowpath: we look up the PI state (if any),
1380 * and do the rt-mutex unlock. 1913 * and do the rt-mutex unlock.
1381 */ 1914 */
1382static int futex_unlock_pi(u32 __user *uaddr) 1915static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
1383{ 1916{
1384 struct futex_hash_bucket *hb; 1917 struct futex_hash_bucket *hb;
1385 struct futex_q *this, *next; 1918 struct futex_q *this, *next;
1386 u32 uval; 1919 u32 uval;
1387 struct list_head *head; 1920 struct plist_head *head;
1388 union futex_key key; 1921 union futex_key key;
1389 int ret, attempt = 0; 1922 int ret, attempt = 0;
1390 1923
@@ -1399,9 +1932,10 @@ retry:
1399 /* 1932 /*
1400 * First take all the futex related locks: 1933 * First take all the futex related locks:
1401 */ 1934 */
1402 down_read(&current->mm->mmap_sem); 1935 if (fshared)
1936 down_read(fshared);
1403 1937
1404 ret = get_futex_key(uaddr, &key); 1938 ret = get_futex_key(uaddr, fshared, &key);
1405 if (unlikely(ret != 0)) 1939 if (unlikely(ret != 0))
1406 goto out; 1940 goto out;
1407 1941
@@ -1435,7 +1969,7 @@ retry_locked:
1435 */ 1969 */
1436 head = &hb->chain; 1970 head = &hb->chain;
1437 1971
1438 list_for_each_entry_safe(this, next, head, list) { 1972 plist_for_each_entry_safe(this, next, head, list) {
1439 if (!match_futex (&this->key, &key)) 1973 if (!match_futex (&this->key, &key))
1440 continue; 1974 continue;
1441 ret = wake_futex_pi(uaddr, uval, this); 1975 ret = wake_futex_pi(uaddr, uval, this);
@@ -1460,7 +1994,8 @@ retry_locked:
1460out_unlock: 1994out_unlock:
1461 spin_unlock(&hb->lock); 1995 spin_unlock(&hb->lock);
1462out: 1996out:
1463 up_read(&current->mm->mmap_sem); 1997 if (fshared)
1998 up_read(fshared);
1464 1999
1465 return ret; 2000 return ret;
1466 2001
@@ -1472,15 +2007,16 @@ pi_faulted:
1472 * still holding the mmap_sem. 2007 * still holding the mmap_sem.
1473 */ 2008 */
1474 if (attempt++) { 2009 if (attempt++) {
1475 if (futex_handle_fault((unsigned long)uaddr, attempt)) { 2010 ret = futex_handle_fault((unsigned long)uaddr, fshared,
1476 ret = -EFAULT; 2011 attempt);
2012 if (ret)
1477 goto out_unlock; 2013 goto out_unlock;
1478 }
1479 goto retry_locked; 2014 goto retry_locked;
1480 } 2015 }
1481 2016
1482 spin_unlock(&hb->lock); 2017 spin_unlock(&hb->lock);
1483 up_read(&current->mm->mmap_sem); 2018 if (fshared)
2019 up_read(fshared);
1484 2020
1485 ret = get_user(uval, uaddr); 2021 ret = get_user(uval, uaddr);
1486 if (!ret && (uval != -EFAULT)) 2022 if (!ret && (uval != -EFAULT))
@@ -1509,10 +2045,10 @@ static unsigned int futex_poll(struct file *filp,
1509 poll_wait(filp, &q->waiters, wait); 2045 poll_wait(filp, &q->waiters, wait);
1510 2046
1511 /* 2047 /*
1512 * list_empty() is safe here without any lock. 2048 * plist_node_empty() is safe here without any lock.
1513 * q->lock_ptr != 0 is not safe, because of ordering against wakeup. 2049 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
1514 */ 2050 */
1515 if (list_empty(&q->list)) 2051 if (plist_node_empty(&q->list))
1516 ret = POLLIN | POLLRDNORM; 2052 ret = POLLIN | POLLRDNORM;
1517 2053
1518 return ret; 2054 return ret;
@@ -1532,6 +2068,7 @@ static int futex_fd(u32 __user *uaddr, int signal)
1532 struct futex_q *q; 2068 struct futex_q *q;
1533 struct file *filp; 2069 struct file *filp;
1534 int ret, err; 2070 int ret, err;
2071 struct rw_semaphore *fshared;
1535 static unsigned long printk_interval; 2072 static unsigned long printk_interval;
1536 2073
1537 if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { 2074 if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
@@ -1573,11 +2110,12 @@ static int futex_fd(u32 __user *uaddr, int signal)
1573 } 2110 }
1574 q->pi_state = NULL; 2111 q->pi_state = NULL;
1575 2112
1576 down_read(&current->mm->mmap_sem); 2113 fshared = &current->mm->mmap_sem;
1577 err = get_futex_key(uaddr, &q->key); 2114 down_read(fshared);
2115 err = get_futex_key(uaddr, fshared, &q->key);
1578 2116
1579 if (unlikely(err != 0)) { 2117 if (unlikely(err != 0)) {
1580 up_read(&current->mm->mmap_sem); 2118 up_read(fshared);
1581 kfree(q); 2119 kfree(q);
1582 goto error; 2120 goto error;
1583 } 2121 }
@@ -1589,7 +2127,7 @@ static int futex_fd(u32 __user *uaddr, int signal)
1589 filp->private_data = q; 2127 filp->private_data = q;
1590 2128
1591 queue_me(q, ret, filp); 2129 queue_me(q, ret, filp);
1592 up_read(&current->mm->mmap_sem); 2130 up_read(fshared);
1593 2131
1594 /* Now we map fd to filp, so userspace can access it */ 2132 /* Now we map fd to filp, so userspace can access it */
1595 fd_install(ret, filp); 2133 fd_install(ret, filp);
@@ -1702,6 +2240,8 @@ retry:
1702 * userspace. 2240 * userspace.
1703 */ 2241 */
1704 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; 2242 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2243 /* Also keep the FUTEX_WAITER_REQUEUED flag if set */
2244 mval |= (uval & FUTEX_WAITER_REQUEUED);
1705 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); 2245 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1706 2246
1707 if (nval == -EFAULT) 2247 if (nval == -EFAULT)
@@ -1716,7 +2256,7 @@ retry:
1716 */ 2256 */
1717 if (!pi) { 2257 if (!pi) {
1718 if (uval & FUTEX_WAITERS) 2258 if (uval & FUTEX_WAITERS)
1719 futex_wake(uaddr, 1); 2259 futex_wake(uaddr, &curr->mm->mmap_sem, 1);
1720 } 2260 }
1721 } 2261 }
1722 return 0; 2262 return 0;
@@ -1772,7 +2312,8 @@ void exit_robust_list(struct task_struct *curr)
1772 return; 2312 return;
1773 2313
1774 if (pending) 2314 if (pending)
1775 handle_futex_death((void __user *)pending + futex_offset, curr, pip); 2315 handle_futex_death((void __user *)pending + futex_offset,
2316 curr, pip);
1776 2317
1777 while (entry != &head->list) { 2318 while (entry != &head->list) {
1778 /* 2319 /*
@@ -1798,39 +2339,47 @@ void exit_robust_list(struct task_struct *curr)
1798 } 2339 }
1799} 2340}
1800 2341
1801long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 2342long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1802 u32 __user *uaddr2, u32 val2, u32 val3) 2343 u32 __user *uaddr2, u32 val2, u32 val3)
1803{ 2344{
1804 int ret; 2345 int ret;
2346 int cmd = op & FUTEX_CMD_MASK;
2347 struct rw_semaphore *fshared = NULL;
2348
2349 if (!(op & FUTEX_PRIVATE_FLAG))
2350 fshared = &current->mm->mmap_sem;
1805 2351
1806 switch (op) { 2352 switch (cmd) {
1807 case FUTEX_WAIT: 2353 case FUTEX_WAIT:
1808 ret = futex_wait(uaddr, val, timeout); 2354 ret = futex_wait(uaddr, fshared, val, timeout);
1809 break; 2355 break;
1810 case FUTEX_WAKE: 2356 case FUTEX_WAKE:
1811 ret = futex_wake(uaddr, val); 2357 ret = futex_wake(uaddr, fshared, val);
1812 break; 2358 break;
1813 case FUTEX_FD: 2359 case FUTEX_FD:
1814 /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */ 2360 /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
1815 ret = futex_fd(uaddr, val); 2361 ret = futex_fd(uaddr, val);
1816 break; 2362 break;
1817 case FUTEX_REQUEUE: 2363 case FUTEX_REQUEUE:
1818 ret = futex_requeue(uaddr, uaddr2, val, val2, NULL); 2364 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
1819 break; 2365 break;
1820 case FUTEX_CMP_REQUEUE: 2366 case FUTEX_CMP_REQUEUE:
1821 ret = futex_requeue(uaddr, uaddr2, val, val2, &val3); 2367 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
1822 break; 2368 break;
1823 case FUTEX_WAKE_OP: 2369 case FUTEX_WAKE_OP:
1824 ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); 2370 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
1825 break; 2371 break;
1826 case FUTEX_LOCK_PI: 2372 case FUTEX_LOCK_PI:
1827 ret = futex_lock_pi(uaddr, val, timeout, val2, 0); 2373 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
1828 break; 2374 break;
1829 case FUTEX_UNLOCK_PI: 2375 case FUTEX_UNLOCK_PI:
1830 ret = futex_unlock_pi(uaddr); 2376 ret = futex_unlock_pi(uaddr, fshared);
1831 break; 2377 break;
1832 case FUTEX_TRYLOCK_PI: 2378 case FUTEX_TRYLOCK_PI:
1833 ret = futex_lock_pi(uaddr, 0, timeout, val2, 1); 2379 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2380 break;
2381 case FUTEX_CMP_REQUEUE_PI:
2382 ret = futex_requeue_pi(uaddr, fshared, uaddr2, val, val2, &val3);
1834 break; 2383 break;
1835 default: 2384 default:
1836 ret = -ENOSYS; 2385 ret = -ENOSYS;
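do_futex() now strips the op through FUTEX_CMD_MASK and only takes mmap_sem when FUTEX_PRIVATE_FLAG is absent. A userspace sketch of opting in, assuming a libc that exposes SYS_futex and headers carrying the new flag:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static long futex_wait_private(unsigned int *uaddr, unsigned int val)
	{
		/* The flag is ORed into op; the kernel masks it off before dispatch. */
		return syscall(SYS_futex, uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
			       val, NULL, NULL, 0);
	}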
@@ -1843,29 +2392,30 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
1843 struct timespec __user *utime, u32 __user *uaddr2, 2392 struct timespec __user *utime, u32 __user *uaddr2,
1844 u32 val3) 2393 u32 val3)
1845{ 2394{
1846 struct timespec t; 2395 struct timespec ts;
1847 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 2396 ktime_t t, *tp = NULL;
1848 u32 val2 = 0; 2397 u32 val2 = 0;
2398 int cmd = op & FUTEX_CMD_MASK;
1849 2399
1850 if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { 2400 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) {
1851 if (copy_from_user(&t, utime, sizeof(t)) != 0) 2401 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
1852 return -EFAULT; 2402 return -EFAULT;
1853 if (!timespec_valid(&t)) 2403 if (!timespec_valid(&ts))
1854 return -EINVAL; 2404 return -EINVAL;
1855 if (op == FUTEX_WAIT) 2405
1856 timeout = timespec_to_jiffies(&t) + 1; 2406 t = timespec_to_ktime(ts);
1857 else { 2407 if (cmd == FUTEX_WAIT)
1858 timeout = t.tv_sec; 2408 t = ktime_add(ktime_get(), t);
1859 val2 = t.tv_nsec; 2409 tp = &t;
1860 }
1861 } 2410 }
1862 /* 2411 /*
1863 * requeue parameter in 'utime' if op == FUTEX_REQUEUE. 2412 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
1864 */ 2413 */
1865 if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) 2414 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE
2415 || cmd == FUTEX_CMP_REQUEUE_PI)
1866 val2 = (u32) (unsigned long) utime; 2416 val2 = (u32) (unsigned long) utime;
1867 2417
1868 return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); 2418 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1869} 2419}
1870 2420
1871static int futexfs_get_sb(struct file_system_type *fs_type, 2421static int futexfs_get_sb(struct file_system_type *fs_type,
@@ -1895,7 +2445,7 @@ static int __init init(void)
1895 } 2445 }
1896 2446
1897 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { 2447 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
1898 INIT_LIST_HEAD(&futex_queues[i].chain); 2448 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
1899 spin_lock_init(&futex_queues[i].lock); 2449 spin_lock_init(&futex_queues[i].lock);
1900 } 2450 }
1901 return 0; 2451 return 0;
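The plist_head_init() conversion in init() is what the whole plist switch hangs on: hash-bucket chains are now kept sorted by priority, so wake-one picks the highest-priority waiter. A self-contained sketch of the API as used here (a lower prio value means higher priority; the spinlock argument is only checked under CONFIG_DEBUG_PI_LIST):

	#include <linux/plist.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static struct plist_head demo_head;

	static void plist_demo(void)
	{
		struct plist_node a, b;

		plist_head_init(&demo_head, &demo_lock);
		plist_node_init(&a, 10);
		plist_node_init(&b, 5);		/* higher priority than a */
		plist_add(&a, &demo_head);
		plist_add(&b, &demo_head);
		/* plist_first(&demo_head) is now &b: iteration sees b before a */
	}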
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 50f24eea6cd0..338a9b489fbc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -141,24 +141,24 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
141 struct compat_timespec __user *utime, u32 __user *uaddr2, 141 struct compat_timespec __user *utime, u32 __user *uaddr2,
142 u32 val3) 142 u32 val3)
143{ 143{
144 struct timespec t; 144 struct timespec ts;
145 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 145 ktime_t t, *tp = NULL;
146 int val2 = 0; 146 int val2 = 0;
147 147
148 if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) { 148 if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
149 if (get_compat_timespec(&t, utime)) 149 if (get_compat_timespec(&ts, utime))
150 return -EFAULT; 150 return -EFAULT;
151 if (!timespec_valid(&t)) 151 if (!timespec_valid(&ts))
152 return -EINVAL; 152 return -EINVAL;
153
154 t = timespec_to_ktime(ts);
153 if (op == FUTEX_WAIT) 155 if (op == FUTEX_WAIT)
154 timeout = timespec_to_jiffies(&t) + 1; 156 t = ktime_add(ktime_get(), t);
155 else { 157 tp = &t;
156 timeout = t.tv_sec;
157 val2 = t.tv_nsec;
158 }
159 } 158 }
160 if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE) 159 if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE
160 || op == FUTEX_CMP_REQUEUE_PI)
161 val2 = (int) (unsigned long) utime; 161 val2 = (int) (unsigned long) utime;
162 162
163 return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3); 163 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
164} 164}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c9f4f044a8a8..23c03f43e196 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1411,11 +1411,13 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
1411 switch (action) { 1411 switch (action) {
1412 1412
1413 case CPU_UP_PREPARE: 1413 case CPU_UP_PREPARE:
1414 case CPU_UP_PREPARE_FROZEN:
1414 init_hrtimers_cpu(cpu); 1415 init_hrtimers_cpu(cpu);
1415 break; 1416 break;
1416 1417
1417#ifdef CONFIG_HOTPLUG_CPU 1418#ifdef CONFIG_HOTPLUG_CPU
1418 case CPU_DEAD: 1419 case CPU_DEAD:
1420 case CPU_DEAD_FROZEN:
1419 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); 1421 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
1420 migrate_hrtimers(cpu); 1422 migrate_hrtimers(cpu);
1421 break; 1423 break;
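From here on, one mechanical pattern repeats across hrtimer, profile, rcupdate and relay: suspend/resume now replays CPU hotplug events with _FROZEN variants, and each notifier simply gains the matching case labels. The shape of the change, as a sketch:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int demo_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_UP_PREPARE:
		case CPU_UP_PREPARE_FROZEN:	/* same work during suspend/resume */
			/* allocate per-cpu state */
			break;
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			/* tear it down */
			break;
		}
		return NOTIFY_OK;
	}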
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 32e1ab1477d1..e391cbb1f566 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -22,7 +22,6 @@
22 * handle_bad_irq - handle spurious and unhandled irqs 22 * handle_bad_irq - handle spurious and unhandled irqs
23 * @irq: the interrupt number 23 * @irq: the interrupt number
24 * @desc: description of the interrupt 24 * @desc: description of the interrupt
25 * @regs: pointer to a register structure
26 * 25 *
 27 * Handles spurious and unhandled IRQs. It also prints a debug message. 26 * Handles spurious and unhandled IRQs. It also prints a debug message.
28 */ 27 */
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 49cc4b9c1a8d..4d32eb077179 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -135,7 +135,6 @@ static int ____call_usermodehelper(void *data)
135 135
136 /* Unblock all signals and set the session keyring. */ 136 /* Unblock all signals and set the session keyring. */
137 new_session = key_get(sub_info->ring); 137 new_session = key_get(sub_info->ring);
138 flush_signals(current);
139 spin_lock_irq(&current->sighand->siglock); 138 spin_lock_irq(&current->sighand->siglock);
140 old_session = __install_session_keyring(current, new_session); 139 old_session = __install_session_keyring(current, new_session);
141 flush_signal_handlers(current, 1); 140 flush_signal_handlers(current, 1);
@@ -186,14 +185,9 @@ static int wait_for_helper(void *data)
186{ 185{
187 struct subprocess_info *sub_info = data; 186 struct subprocess_info *sub_info = data;
188 pid_t pid; 187 pid_t pid;
189 struct k_sigaction sa;
190 188
191 /* Install a handler: if SIGCLD isn't handled sys_wait4 won't 189 /* Install a handler: if SIGCLD isn't handled sys_wait4 won't
192 * populate the status, but will return -ECHILD. */ 190 * populate the status, but will return -ECHILD. */
193 sa.sa.sa_handler = SIG_IGN;
194 sa.sa.sa_flags = 0;
195 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
196 do_sigaction(SIGCHLD, &sa, NULL);
197 allow_signal(SIGCHLD); 191 allow_signal(SIGCHLD);
198 192
199 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD); 193 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
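allow_signal() subsumes the deleted boilerplate: for a kernel thread it unblocks SIGCHLD and keeps the kernel from silently discarding it, which is all the hand-rolled sigaction setup achieved. The resulting idiom, sketched:

	#include <linux/sched.h>
	#include <linux/syscalls.h>

	static void reap_child_sketch(pid_t pid)
	{
		int status = 0;

		allow_signal(SIGCHLD);		/* sys_wait4() can now fill status */
		sys_wait4(pid, (int __user *)&status, 0, NULL);
		disallow_signal(SIGCHLD);
	}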
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 87c50ccd1d4e..df8a8e8f6ca4 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1,7 +1,7 @@
1/* Kernel thread helper functions. 1/* Kernel thread helper functions.
2 * Copyright (C) 2004 IBM Corporation, Rusty Russell. 2 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
3 * 3 *
4 * Creation is done via keventd, so that we get a clean environment 4 * Creation is done via kthreadd, so that we get a clean environment
5 * even if we're invoked from userspace (think modprobe, hotplug cpu, 5 * even if we're invoked from userspace (think modprobe, hotplug cpu,
6 * etc.). 6 * etc.).
7 */ 7 */
@@ -15,24 +15,22 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <asm/semaphore.h> 16#include <asm/semaphore.h>
17 17
18/* 18static DEFINE_SPINLOCK(kthread_create_lock);
19 * We dont want to execute off keventd since it might 19static LIST_HEAD(kthread_create_list);
20 * hold a semaphore our callers hold too: 20struct task_struct *kthreadd_task;
21 */
22static struct workqueue_struct *helper_wq;
23 21
24struct kthread_create_info 22struct kthread_create_info
25{ 23{
26 /* Information passed to kthread() from keventd. */ 24 /* Information passed to kthread() from kthreadd. */
27 int (*threadfn)(void *data); 25 int (*threadfn)(void *data);
28 void *data; 26 void *data;
29 struct completion started; 27 struct completion started;
30 28
31 /* Result passed back to kthread_create() from keventd. */ 29 /* Result passed back to kthread_create() from kthreadd. */
32 struct task_struct *result; 30 struct task_struct *result;
33 struct completion done; 31 struct completion done;
34 32
35 struct work_struct work; 33 struct list_head list;
36}; 34};
37 35
38struct kthread_stop_info 36struct kthread_stop_info
@@ -60,42 +58,17 @@ int kthread_should_stop(void)
60} 58}
61EXPORT_SYMBOL(kthread_should_stop); 59EXPORT_SYMBOL(kthread_should_stop);
62 60
63static void kthread_exit_files(void)
64{
65 struct fs_struct *fs;
66 struct task_struct *tsk = current;
67
68 exit_fs(tsk); /* current->fs->count--; */
69 fs = init_task.fs;
70 tsk->fs = fs;
71 atomic_inc(&fs->count);
72 exit_files(tsk);
73 current->files = init_task.files;
74 atomic_inc(&tsk->files->count);
75}
76
77static int kthread(void *_create) 61static int kthread(void *_create)
78{ 62{
79 struct kthread_create_info *create = _create; 63 struct kthread_create_info *create = _create;
80 int (*threadfn)(void *data); 64 int (*threadfn)(void *data);
81 void *data; 65 void *data;
82 sigset_t blocked;
83 int ret = -EINTR; 66 int ret = -EINTR;
84 67
85 kthread_exit_files(); 68 /* Copy data: it's on kthread's stack */
86
87 /* Copy data: it's on keventd's stack */
88 threadfn = create->threadfn; 69 threadfn = create->threadfn;
89 data = create->data; 70 data = create->data;
90 71
91 /* Block and flush all signals (in case we're not from keventd). */
92 sigfillset(&blocked);
93 sigprocmask(SIG_BLOCK, &blocked, NULL);
94 flush_signals(current);
95
96 /* By default we can run anywhere, unlike keventd. */
97 set_cpus_allowed(current, CPU_MASK_ALL);
98
99 /* OK, tell user we're spawned, wait for stop or wakeup */ 72 /* OK, tell user we're spawned, wait for stop or wakeup */
100 __set_current_state(TASK_INTERRUPTIBLE); 73 __set_current_state(TASK_INTERRUPTIBLE);
101 complete(&create->started); 74 complete(&create->started);
@@ -112,11 +85,8 @@ static int kthread(void *_create)
112 return 0; 85 return 0;
113} 86}
114 87
115/* We are keventd: create a thread. */ 88static void create_kthread(struct kthread_create_info *create)
116static void keventd_create_kthread(struct work_struct *work)
117{ 89{
118 struct kthread_create_info *create =
119 container_of(work, struct kthread_create_info, work);
120 int pid; 90 int pid;
121 91
122 /* We want our own signal handler (we take no signals by default). */ 92 /* We want our own signal handler (we take no signals by default). */
@@ -162,17 +132,14 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
162 create.data = data; 132 create.data = data;
163 init_completion(&create.started); 133 init_completion(&create.started);
164 init_completion(&create.done); 134 init_completion(&create.done);
165 INIT_WORK(&create.work, keventd_create_kthread); 135
166 136 spin_lock(&kthread_create_lock);
167 /* 137 list_add_tail(&create.list, &kthread_create_list);
168 * The workqueue needs to start up first: 138 wake_up_process(kthreadd_task);
169 */ 139 spin_unlock(&kthread_create_lock);
170 if (!helper_wq) 140
171 create.work.func(&create.work); 141 wait_for_completion(&create.done);
172 else { 142
173 queue_work(helper_wq, &create.work);
174 wait_for_completion(&create.done);
175 }
176 if (!IS_ERR(create.result)) { 143 if (!IS_ERR(create.result)) {
177 va_list args; 144 va_list args;
178 va_start(args, namefmt); 145 va_start(args, namefmt);
@@ -180,7 +147,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
180 namefmt, args); 147 namefmt, args);
181 va_end(args); 148 va_end(args);
182 } 149 }
183
184 return create.result; 150 return create.result;
185} 151}
186EXPORT_SYMBOL(kthread_create); 152EXPORT_SYMBOL(kthread_create);
@@ -245,12 +211,47 @@ int kthread_stop(struct task_struct *k)
245} 211}
246EXPORT_SYMBOL(kthread_stop); 212EXPORT_SYMBOL(kthread_stop);
247 213
248static __init int helper_init(void) 214
215static __init void kthreadd_setup(void)
249{ 216{
250 helper_wq = create_singlethread_workqueue("kthread"); 217 struct task_struct *tsk = current;
251 BUG_ON(!helper_wq);
252 218
253 return 0; 219 set_task_comm(tsk, "kthreadd");
220
221 ignore_signals(tsk);
222
223 set_user_nice(tsk, -5);
224 set_cpus_allowed(tsk, CPU_MASK_ALL);
254} 225}
255 226
256core_initcall(helper_init); 227int kthreadd(void *unused)
228{
229 /* Setup a clean context for our children to inherit. */
230 kthreadd_setup();
231
232 current->flags |= PF_NOFREEZE;
233
234 for (;;) {
235 set_current_state(TASK_INTERRUPTIBLE);
236 if (list_empty(&kthread_create_list))
237 schedule();
238 __set_current_state(TASK_RUNNING);
239
240 spin_lock(&kthread_create_lock);
241 while (!list_empty(&kthread_create_list)) {
242 struct kthread_create_info *create;
243
244 create = list_entry(kthread_create_list.next,
245 struct kthread_create_info, list);
246 list_del_init(&create->list);
247 spin_unlock(&kthread_create_lock);
248
249 create_kthread(create);
250
251 spin_lock(&kthread_create_lock);
252 }
253 spin_unlock(&kthread_create_lock);
254 }
255
256 return 0;
257}
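Callers are untouched by the keventd-to-kthreadd switch; the contract below is unchanged, including the detail that kthread_create() returns the thread stopped. A usage sketch:

	#include <linux/err.h>
	#include <linux/kthread.h>

	static int demo_thread_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static void demo_start_thread(void)
	{
		struct task_struct *t = kthread_create(demo_thread_fn, NULL,
						       "demo/%d", 0);
		if (!IS_ERR(t))
			wake_up_process(t);	/* created sleeping; kick it off */
	}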
diff --git a/kernel/module.c b/kernel/module.c
index d36e45477fac..9bd93de01f4a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -96,9 +96,9 @@ static inline void add_taint_module(struct module *mod, unsigned flag)
96 mod->taints |= flag; 96 mod->taints |= flag;
97} 97}
98 98
99/* A thread that wants to hold a reference to a module only while it 99/*
100 * is running can call ths to safely exit. 100 * A thread that wants to hold a reference to a module only while it
101 * nfsd and lockd use this. 101 * is running can call this to safely exit. nfsd and lockd use this.
102 */ 102 */
103void __module_put_and_exit(struct module *mod, long code) 103void __module_put_and_exit(struct module *mod, long code)
104{ 104{
@@ -1199,7 +1199,7 @@ static int __unlink_module(void *_mod)
1199 return 0; 1199 return 0;
1200} 1200}
1201 1201
1202/* Free a module, remove from lists, etc (must hold module mutex). */ 1202/* Free a module, remove from lists, etc (must hold module_mutex). */
1203static void free_module(struct module *mod) 1203static void free_module(struct module *mod)
1204{ 1204{
1205 /* Delete from various lists */ 1205 /* Delete from various lists */
@@ -1246,7 +1246,7 @@ EXPORT_SYMBOL_GPL(__symbol_get);
1246 1246
1247/* 1247/*
1248 * Ensure that an exported symbol [global namespace] does not already exist 1248 * Ensure that an exported symbol [global namespace] does not already exist
1249 * in the Kernel or in some other modules exported symbol table. 1249 * in the kernel or in some other module's exported symbol table.
1250 */ 1250 */
1251static int verify_export_symbols(struct module *mod) 1251static int verify_export_symbols(struct module *mod)
1252{ 1252{
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e7cbbb82765b..303eab18484b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -133,7 +133,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
133 133
134 debug_mutex_lock_common(lock, &waiter); 134 debug_mutex_lock_common(lock, &waiter);
135 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 135 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
136 debug_mutex_add_waiter(lock, &waiter, task->thread_info); 136 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
137 137
138 /* add waiting tasks to the end of the waitqueue (FIFO): */ 138 /* add waiting tasks to the end of the waitqueue (FIFO): */
139 list_add_tail(&waiter.list, &lock->wait_list); 139 list_add_tail(&waiter.list, &lock->wait_list);
@@ -159,7 +159,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
159 */ 159 */
160 if (unlikely(state == TASK_INTERRUPTIBLE && 160 if (unlikely(state == TASK_INTERRUPTIBLE &&
161 signal_pending(task))) { 161 signal_pending(task))) {
162 mutex_remove_waiter(lock, &waiter, task->thread_info); 162 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
163 mutex_release(&lock->dep_map, 1, _RET_IP_); 163 mutex_release(&lock->dep_map, 1, _RET_IP_);
164 spin_unlock_mutex(&lock->wait_lock, flags); 164 spin_unlock_mutex(&lock->wait_lock, flags);
165 165
@@ -175,8 +175,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
175 } 175 }
176 176
177 /* got the lock - rejoice! */ 177 /* got the lock - rejoice! */
178 mutex_remove_waiter(lock, &waiter, task->thread_info); 178 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
179 debug_mutex_set_owner(lock, task->thread_info); 179 debug_mutex_set_owner(lock, task_thread_info(task));
180 180
181 /* set it to 0 if there are no waiters left: */ 181 /* set it to 0 if there are no waiters left: */
182 if (likely(list_empty(&lock->wait_list))) 182 if (likely(list_empty(&lock->wait_list)))
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 06331374d862..b5f0543ed84d 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -30,30 +30,69 @@ char resume_file[256] = CONFIG_PM_STD_PARTITION;
30dev_t swsusp_resume_device; 30dev_t swsusp_resume_device;
31sector_t swsusp_resume_block; 31sector_t swsusp_resume_block;
32 32
33enum {
34 HIBERNATION_INVALID,
35 HIBERNATION_PLATFORM,
36 HIBERNATION_TEST,
37 HIBERNATION_TESTPROC,
38 HIBERNATION_SHUTDOWN,
39 HIBERNATION_REBOOT,
40 /* keep last */
41 __HIBERNATION_AFTER_LAST
42};
43#define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1)
44#define HIBERNATION_FIRST (HIBERNATION_INVALID + 1)
45
46static int hibernation_mode = HIBERNATION_SHUTDOWN;
47
48struct hibernation_ops *hibernation_ops;
49
50/**
51 * hibernation_set_ops - set the global hibernate operations
52 * @ops: the hibernation operations to use in subsequent hibernation transitions
53 */
54
55void hibernation_set_ops(struct hibernation_ops *ops)
56{
57 if (ops && !(ops->prepare && ops->enter && ops->finish)) {
58 WARN_ON(1);
59 return;
60 }
61 mutex_lock(&pm_mutex);
62 hibernation_ops = ops;
63 if (ops)
64 hibernation_mode = HIBERNATION_PLATFORM;
65 else if (hibernation_mode == HIBERNATION_PLATFORM)
66 hibernation_mode = HIBERNATION_SHUTDOWN;
67
68 mutex_unlock(&pm_mutex);
69}
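For reference, registration under the new interface looks like the sketch below. The demo_* callbacks are placeholders, the prepare/enter/finish triple mirrors the validation in hibernation_set_ops() above, and the header housing struct hibernation_ops is assumed:

	#include <linux/pm.h>	/* assumed home of struct hibernation_ops here */

	static int demo_prepare(void) { return 0; }
	static int demo_enter(void) { return 0; }
	static void demo_finish(void) { }

	static struct hibernation_ops demo_hibernation_ops = {
		.prepare = demo_prepare,
		.enter   = demo_enter,
		.finish  = demo_finish,
	};

	/* hibernation_set_ops(&demo_hibernation_ops): mode becomes 'platform';
	 * hibernation_set_ops(NULL): mode falls back to 'shutdown'. */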
70
71
33/** 72/**
34 * platform_prepare - prepare the machine for hibernation using the 73 * platform_prepare - prepare the machine for hibernation using the
35 * platform driver if so configured and return an error code if it fails 74 * platform driver if so configured and return an error code if it fails
36 */ 75 */
37 76
38static inline int platform_prepare(void) 77static int platform_prepare(void)
39{ 78{
40 int error = 0; 79 return (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops) ?
80 hibernation_ops->prepare() : 0;
81}
41 82
42 switch (pm_disk_mode) { 83/**
43 case PM_DISK_TEST: 84 * platform_finish - switch the machine to the normal mode of operation
44 case PM_DISK_TESTPROC: 85 * using the platform driver (must be called after platform_prepare())
45 case PM_DISK_SHUTDOWN: 86 */
46 case PM_DISK_REBOOT: 87
47 break; 88static void platform_finish(void)
48 default: 89{
49 if (pm_ops && pm_ops->prepare) 90 if (hibernation_mode == HIBERNATION_PLATFORM && hibernation_ops)
50 error = pm_ops->prepare(PM_SUSPEND_DISK); 91 hibernation_ops->finish();
51 }
52 return error;
53} 92}
54 93
55/** 94/**
56 * power_down - Shut machine down for hibernate. 95 * power_down - Shut the machine down for hibernation.
57 * 96 *
58 * Use the platform driver, if configured so; otherwise try 97 * Use the platform driver, if configured so; otherwise try
59 * to power off or reboot. 98 * to power off or reboot.
@@ -61,20 +100,20 @@ static inline int platform_prepare(void)
61 100
62static void power_down(void) 101static void power_down(void)
63{ 102{
64 switch (pm_disk_mode) { 103 switch (hibernation_mode) {
65 case PM_DISK_TEST: 104 case HIBERNATION_TEST:
66 case PM_DISK_TESTPROC: 105 case HIBERNATION_TESTPROC:
67 break; 106 break;
68 case PM_DISK_SHUTDOWN: 107 case HIBERNATION_SHUTDOWN:
69 kernel_power_off(); 108 kernel_power_off();
70 break; 109 break;
71 case PM_DISK_REBOOT: 110 case HIBERNATION_REBOOT:
72 kernel_restart(NULL); 111 kernel_restart(NULL);
73 break; 112 break;
74 default: 113 case HIBERNATION_PLATFORM:
75 if (pm_ops && pm_ops->enter) { 114 if (hibernation_ops) {
76 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); 115 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
77 pm_ops->enter(PM_SUSPEND_DISK); 116 hibernation_ops->enter();
78 break; 117 break;
79 } 118 }
80 } 119 }
@@ -87,20 +126,6 @@ static void power_down(void)
87 while(1); 126 while(1);
88} 127}
89 128
90static inline void platform_finish(void)
91{
92 switch (pm_disk_mode) {
93 case PM_DISK_TEST:
94 case PM_DISK_TESTPROC:
95 case PM_DISK_SHUTDOWN:
96 case PM_DISK_REBOOT:
97 break;
98 default:
99 if (pm_ops && pm_ops->finish)
100 pm_ops->finish(PM_SUSPEND_DISK);
101 }
102}
103
104static void unprepare_processes(void) 129static void unprepare_processes(void)
105{ 130{
106 thaw_processes(); 131 thaw_processes();
@@ -120,13 +145,10 @@ static int prepare_processes(void)
120} 145}
121 146
122/** 147/**
123 * pm_suspend_disk - The granpappy of hibernation power management. 148 * hibernate - The granpappy of the built-in hibernation management
124 *
125 * If not, then call swsusp to do its thing, then figure out how
126 * to power down the system.
127 */ 149 */
128 150
129int pm_suspend_disk(void) 151int hibernate(void)
130{ 152{
131 int error; 153 int error;
132 154
@@ -143,7 +165,8 @@ int pm_suspend_disk(void)
143 if (error) 165 if (error)
144 goto Finish; 166 goto Finish;
145 167
146 if (pm_disk_mode == PM_DISK_TESTPROC) { 168 mutex_lock(&pm_mutex);
169 if (hibernation_mode == HIBERNATION_TESTPROC) {
147 printk("swsusp debug: Waiting for 5 seconds.\n"); 170 printk("swsusp debug: Waiting for 5 seconds.\n");
148 mdelay(5000); 171 mdelay(5000);
149 goto Thaw; 172 goto Thaw;
@@ -168,7 +191,7 @@ int pm_suspend_disk(void)
168 if (error) 191 if (error)
169 goto Enable_cpus; 192 goto Enable_cpus;
170 193
171 if (pm_disk_mode == PM_DISK_TEST) { 194 if (hibernation_mode == HIBERNATION_TEST) {
172 printk("swsusp debug: Waiting for 5 seconds.\n"); 195 printk("swsusp debug: Waiting for 5 seconds.\n");
173 mdelay(5000); 196 mdelay(5000);
174 goto Enable_cpus; 197 goto Enable_cpus;
@@ -205,6 +228,7 @@ int pm_suspend_disk(void)
205 device_resume(); 228 device_resume();
206 resume_console(); 229 resume_console();
207 Thaw: 230 Thaw:
231 mutex_unlock(&pm_mutex);
208 unprepare_processes(); 232 unprepare_processes();
209 Finish: 233 Finish:
210 free_basic_memory_bitmaps(); 234 free_basic_memory_bitmaps();
@@ -220,7 +244,7 @@ int pm_suspend_disk(void)
220 * Called as a late_initcall (so all devices are discovered and 244 * Called as a late_initcall (so all devices are discovered and
221 * initialized), we call swsusp to see if we have a saved image or not. 245 * initialized), we call swsusp to see if we have a saved image or not.
 222 * If so, we quiesce devices, then restore the saved image. We will 246 * If so, we quiesce devices, then restore the saved image. We will
223 * return above (in pm_suspend_disk() ) if everything goes well. 247 * return above (in hibernate() ) if everything goes well.
224 * Otherwise, we fail gracefully and return to the normally 248 * Otherwise, we fail gracefully and return to the normally
225 * scheduled program. 249 * scheduled program.
226 * 250 *
@@ -315,25 +339,26 @@ static int software_resume(void)
315late_initcall(software_resume); 339late_initcall(software_resume);
316 340
317 341
318static const char * const pm_disk_modes[] = { 342static const char * const hibernation_modes[] = {
319 [PM_DISK_PLATFORM] = "platform", 343 [HIBERNATION_PLATFORM] = "platform",
320 [PM_DISK_SHUTDOWN] = "shutdown", 344 [HIBERNATION_SHUTDOWN] = "shutdown",
321 [PM_DISK_REBOOT] = "reboot", 345 [HIBERNATION_REBOOT] = "reboot",
322 [PM_DISK_TEST] = "test", 346 [HIBERNATION_TEST] = "test",
323 [PM_DISK_TESTPROC] = "testproc", 347 [HIBERNATION_TESTPROC] = "testproc",
324}; 348};
325 349
326/** 350/**
327 * disk - Control suspend-to-disk mode 351 * disk - Control hibernation mode
328 * 352 *
329 * Suspend-to-disk can be handled in several ways. We have a few options 353 * Suspend-to-disk can be handled in several ways. We have a few options
330 * for putting the system to sleep - using the platform driver (e.g. ACPI 354 * for putting the system to sleep - using the platform driver (e.g. ACPI
331 * or other pm_ops), powering off the system or rebooting the system 355 * or other hibernation_ops), powering off the system or rebooting the
332 * (for testing) as well as the two test modes. 356 * system (for testing) as well as the two test modes.
333 * 357 *
334 * The system can support 'platform', and that is known a priori (and 358 * The system can support 'platform', and that is known a priori (and
335 * encoded in pm_ops). However, the user may choose 'shutdown' or 'reboot' 359 * encoded by the presence of hibernation_ops). However, the user may
 336 * as alternatives, as well as the test modes 'test' and 'testproc'. 360 * choose 'shutdown' or 'reboot' as alternatives, as well as one of the
361 * test modes, 'test' or 'testproc'.
337 * 362 *
338 * show() will display what the mode is currently set to. 363 * show() will display what the mode is currently set to.
339 * store() will accept one of 364 * store() will accept one of
@@ -345,7 +370,7 @@ static const char * const pm_disk_modes[] = {
345 * 'testproc' 370 * 'testproc'
346 * 371 *
347 * It will only change to 'platform' if the system 372 * It will only change to 'platform' if the system
348 * supports it (as determined from pm_ops->pm_disk_mode). 373 * supports it (as determined by having hibernation_ops).
349 */ 374 */
350 375
351static ssize_t disk_show(struct kset *kset, char *buf) 376static ssize_t disk_show(struct kset *kset, char *buf)
@@ -353,28 +378,25 @@ static ssize_t disk_show(struct kset *kset, char *buf)
353 int i; 378 int i;
354 char *start = buf; 379 char *start = buf;
355 380
356 for (i = PM_DISK_PLATFORM; i < PM_DISK_MAX; i++) { 381 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
357 if (!pm_disk_modes[i]) 382 if (!hibernation_modes[i])
358 continue; 383 continue;
359 switch (i) { 384 switch (i) {
360 case PM_DISK_SHUTDOWN: 385 case HIBERNATION_SHUTDOWN:
361 case PM_DISK_REBOOT: 386 case HIBERNATION_REBOOT:
362 case PM_DISK_TEST: 387 case HIBERNATION_TEST:
363 case PM_DISK_TESTPROC: 388 case HIBERNATION_TESTPROC:
364 break; 389 break;
365 default: 390 case HIBERNATION_PLATFORM:
366 if (pm_ops && pm_ops->enter && 391 if (hibernation_ops)
367 (i == pm_ops->pm_disk_mode))
368 break; 392 break;
369 /* not a valid mode, continue with loop */ 393 /* not a valid mode, continue with loop */
370 continue; 394 continue;
371 } 395 }
372 if (i == pm_disk_mode) 396 if (i == hibernation_mode)
373 buf += sprintf(buf, "[%s]", pm_disk_modes[i]); 397 buf += sprintf(buf, "[%s] ", hibernation_modes[i]);
374 else 398 else
375 buf += sprintf(buf, "%s", pm_disk_modes[i]); 399 buf += sprintf(buf, "%s ", hibernation_modes[i]);
376 if (i+1 != PM_DISK_MAX)
377 buf += sprintf(buf, " ");
378 } 400 }
379 buf += sprintf(buf, "\n"); 401 buf += sprintf(buf, "\n");
380 return buf-start; 402 return buf-start;
@@ -387,39 +409,38 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
387 int i; 409 int i;
388 int len; 410 int len;
389 char *p; 411 char *p;
390 suspend_disk_method_t mode = 0; 412 int mode = HIBERNATION_INVALID;
391 413
392 p = memchr(buf, '\n', n); 414 p = memchr(buf, '\n', n);
393 len = p ? p - buf : n; 415 len = p ? p - buf : n;
394 416
395 mutex_lock(&pm_mutex); 417 mutex_lock(&pm_mutex);
396 for (i = PM_DISK_PLATFORM; i < PM_DISK_MAX; i++) { 418 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
397 if (!strncmp(buf, pm_disk_modes[i], len)) { 419 if (!strncmp(buf, hibernation_modes[i], len)) {
398 mode = i; 420 mode = i;
399 break; 421 break;
400 } 422 }
401 } 423 }
402 if (mode) { 424 if (mode != HIBERNATION_INVALID) {
403 switch (mode) { 425 switch (mode) {
404 case PM_DISK_SHUTDOWN: 426 case HIBERNATION_SHUTDOWN:
405 case PM_DISK_REBOOT: 427 case HIBERNATION_REBOOT:
406 case PM_DISK_TEST: 428 case HIBERNATION_TEST:
407 case PM_DISK_TESTPROC: 429 case HIBERNATION_TESTPROC:
408 pm_disk_mode = mode; 430 hibernation_mode = mode;
409 break; 431 break;
410 default: 432 case HIBERNATION_PLATFORM:
411 if (pm_ops && pm_ops->enter && 433 if (hibernation_ops)
412 (mode == pm_ops->pm_disk_mode)) 434 hibernation_mode = mode;
413 pm_disk_mode = mode;
414 else 435 else
415 error = -EINVAL; 436 error = -EINVAL;
416 } 437 }
417 } else { 438 } else
418 error = -EINVAL; 439 error = -EINVAL;
419 }
420 440
421 pr_debug("PM: suspend-to-disk mode set to '%s'\n", 441 if (!error)
422 pm_disk_modes[mode]); 442 pr_debug("PM: suspend-to-disk mode set to '%s'\n",
443 hibernation_modes[mode]);
423 mutex_unlock(&pm_mutex); 444 mutex_unlock(&pm_mutex);
424 return error ? error : n; 445 return error ? error : n;
425} 446}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f6dda685e7e2..40d56a31245e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -30,7 +30,6 @@
30DEFINE_MUTEX(pm_mutex); 30DEFINE_MUTEX(pm_mutex);
31 31
32struct pm_ops *pm_ops; 32struct pm_ops *pm_ops;
33suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
34 33
35/** 34/**
36 * pm_set_ops - Set the global power method table. 35 * pm_set_ops - Set the global power method table.
@@ -41,10 +40,6 @@ void pm_set_ops(struct pm_ops * ops)
41{ 40{
42 mutex_lock(&pm_mutex); 41 mutex_lock(&pm_mutex);
43 pm_ops = ops; 42 pm_ops = ops;
44 if (ops && ops->pm_disk_mode != PM_DISK_INVALID) {
45 pm_disk_mode = ops->pm_disk_mode;
46 } else
47 pm_disk_mode = PM_DISK_SHUTDOWN;
48 mutex_unlock(&pm_mutex); 43 mutex_unlock(&pm_mutex);
49} 44}
50 45
@@ -184,24 +179,12 @@ static void suspend_finish(suspend_state_t state)
184static const char * const pm_states[PM_SUSPEND_MAX] = { 179static const char * const pm_states[PM_SUSPEND_MAX] = {
185 [PM_SUSPEND_STANDBY] = "standby", 180 [PM_SUSPEND_STANDBY] = "standby",
186 [PM_SUSPEND_MEM] = "mem", 181 [PM_SUSPEND_MEM] = "mem",
187 [PM_SUSPEND_DISK] = "disk",
188}; 182};
189 183
190static inline int valid_state(suspend_state_t state) 184static inline int valid_state(suspend_state_t state)
191{ 185{
192 /* Suspend-to-disk does not really need low-level support. 186 /* All states need lowlevel support and need to be valid
193 * It can work with shutdown/reboot if needed. If it isn't 187 * to the lowlevel implementation, no valid callback
194 * configured, then it cannot be supported.
195 */
196 if (state == PM_SUSPEND_DISK)
197#ifdef CONFIG_SOFTWARE_SUSPEND
198 return 1;
199#else
200 return 0;
201#endif
202
203 /* all other states need lowlevel support and need to be
204 * valid to the lowlevel implementation, no valid callback
205 * implies that none are valid. */ 188 * implies that none are valid. */
206 if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state)) 189 if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state))
207 return 0; 190 return 0;
@@ -229,11 +212,6 @@ static int enter_state(suspend_state_t state)
229 if (!mutex_trylock(&pm_mutex)) 212 if (!mutex_trylock(&pm_mutex))
230 return -EBUSY; 213 return -EBUSY;
231 214
232 if (state == PM_SUSPEND_DISK) {
233 error = pm_suspend_disk();
234 goto Unlock;
235 }
236
237 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 215 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
238 if ((error = suspend_prepare(state))) 216 if ((error = suspend_prepare(state)))
239 goto Unlock; 217 goto Unlock;
@@ -251,7 +229,7 @@ static int enter_state(suspend_state_t state)
251 229
252/** 230/**
253 * pm_suspend - Externally visible function for suspending system. 231 * pm_suspend - Externally visible function for suspending system.
254 * @state: Enumarted value of state to enter. 232 * @state: Enumerated value of state to enter.
255 * 233 *
256 * Determine whether or not value is within range, get state 234 * Determine whether or not value is within range, get state
257 * structure, and enter (above). 235 * structure, and enter (above).
@@ -289,7 +267,13 @@ static ssize_t state_show(struct kset *kset, char *buf)
289 if (pm_states[i] && valid_state(i)) 267 if (pm_states[i] && valid_state(i))
290 s += sprintf(s,"%s ", pm_states[i]); 268 s += sprintf(s,"%s ", pm_states[i]);
291 } 269 }
292 s += sprintf(s,"\n"); 270#ifdef CONFIG_SOFTWARE_SUSPEND
271 s += sprintf(s, "%s\n", "disk");
272#else
273 if (s != buf)
274 /* convert the last space to a newline */
275 *(s-1) = '\n';
276#endif
293 return (s - buf); 277 return (s - buf);
294} 278}
295 279
@@ -304,6 +288,12 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
304 p = memchr(buf, '\n', n); 288 p = memchr(buf, '\n', n);
305 len = p ? p - buf : n; 289 len = p ? p - buf : n;
306 290
291 /* First, check if we are requested to hibernate */
292 if (!strncmp(buf, "disk", len)) {
293 error = hibernate();
294 return error ? error : n;
295 }
296
307 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { 297 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
308 if (*s && !strncmp(buf, *s, len)) 298 if (*s && !strncmp(buf, *s, len))
309 break; 299 break;
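Userspace is unaffected: writing the same string still hibernates, only the kernel-side routing moved from enter_state() to a direct hibernate() call. The trigger, sketched:

	#include <fcntl.h>
	#include <unistd.h>

	static void request_hibernation(void)
	{
		int fd = open("/sys/power/state", O_WRONLY);

		if (fd >= 0) {
			(void)write(fd, "disk", 4);	/* dispatches to hibernate() */
			close(fd);
		}
	}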
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 34b43542785a..51381487103f 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -25,12 +25,7 @@ struct swsusp_info {
25 */ 25 */
26#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) 26#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
27 27
28extern int pm_suspend_disk(void); 28extern struct hibernation_ops *hibernation_ops;
29#else
30static inline int pm_suspend_disk(void)
31{
32 return -EPERM;
33}
34#endif 29#endif
35 30
36extern int pfn_is_nosave(unsigned long); 31extern int pfn_is_nosave(unsigned long);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index b7039772b05c..a3b7854b8f7c 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -607,7 +607,8 @@ static LIST_HEAD(nosave_regions);
607 */ 607 */
608 608
609void __init 609void __init
610register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) 610__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
611 int use_kmalloc)
611{ 612{
612 struct nosave_region *region; 613 struct nosave_region *region;
613 614
@@ -623,8 +624,13 @@ register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
623 goto Report; 624 goto Report;
624 } 625 }
625 } 626 }
626 /* This allocation cannot fail */ 627 if (use_kmalloc) {
627 region = alloc_bootmem_low(sizeof(struct nosave_region)); 628 /* during init, this shouldn't fail */
629 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
630 BUG_ON(!region);
631 } else
632 /* This allocation cannot fail */
633 region = alloc_bootmem_low(sizeof(struct nosave_region));
628 region->start_pfn = start_pfn; 634 region->start_pfn = start_pfn;
629 region->end_pfn = end_pfn; 635 region->end_pfn = end_pfn;
630 list_add_tail(&region->list, &nosave_regions); 636 list_add_tail(&region->list, &nosave_regions);
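The new use_kmalloc flag implies two registration flavors, early (bootmem) and post-init (kmalloc). The header side is not in this diff; presumably it wraps the call along these lines, with the wrapper names assumed:

	static inline void register_nosave_region(unsigned long b, unsigned long e)
	{
		__register_nosave_region(b, e, 0);	/* early boot: bootmem */
	}

	static inline void register_nosave_region_late(unsigned long b,
						       unsigned long e)
	{
		__register_nosave_region(b, e, 1);	/* after init: kmalloc */
	}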
@@ -1227,7 +1233,7 @@ asmlinkage int swsusp_save(void)
1227 nr_copy_pages = nr_pages; 1233 nr_copy_pages = nr_pages;
1228 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); 1234 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1229 1235
1230 printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); 1236 printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
1231 1237
1232 return 0; 1238 return 0;
1233} 1239}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 040560d9c312..24d7d78e6f42 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -130,16 +130,16 @@ static inline int platform_prepare(void)
130{ 130{
131 int error = 0; 131 int error = 0;
132 132
133 if (pm_ops && pm_ops->prepare) 133 if (hibernation_ops)
134 error = pm_ops->prepare(PM_SUSPEND_DISK); 134 error = hibernation_ops->prepare();
135 135
136 return error; 136 return error;
137} 137}
138 138
139static inline void platform_finish(void) 139static inline void platform_finish(void)
140{ 140{
141 if (pm_ops && pm_ops->finish) 141 if (hibernation_ops)
142 pm_ops->finish(PM_SUSPEND_DISK); 142 hibernation_ops->finish();
143} 143}
144 144
145static inline int snapshot_suspend(int platform_suspend) 145static inline int snapshot_suspend(int platform_suspend)
@@ -384,7 +384,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
384 switch (arg) { 384 switch (arg) {
385 385
386 case PMOPS_PREPARE: 386 case PMOPS_PREPARE:
387 if (pm_ops && pm_ops->enter) { 387 if (hibernation_ops) {
388 data->platform_suspend = 1; 388 data->platform_suspend = 1;
389 error = 0; 389 error = 0;
390 } else { 390 } else {
@@ -395,8 +395,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
395 case PMOPS_ENTER: 395 case PMOPS_ENTER:
396 if (data->platform_suspend) { 396 if (data->platform_suspend) {
397 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); 397 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
398 error = pm_ops->enter(PM_SUSPEND_DISK); 398 error = hibernation_ops->enter();
399 error = 0;
400 } 399 }
401 break; 400 break;
402 401
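For orientation, the userland sequence these ioctls serve, as s2disk-style tools drive it. PMOPS_PREPARE and PMOPS_ENTER appear in the hunks above; the SNAPSHOT_PMOPS request name and the /dev/snapshot node are assumed for this era:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	/* SNAPSHOT_PMOPS, PMOPS_*: from this era's <linux/suspend.h>, assumed */

	static void pmops_sketch(void)
	{
		int fd = open("/dev/snapshot", O_RDONLY);

		if (fd < 0)
			return;
		ioctl(fd, SNAPSHOT_PMOPS, PMOPS_PREPARE); /* fails without hibernation_ops */
		/* ... snapshot and save the image ... */
		ioctl(fd, SNAPSHOT_PMOPS, PMOPS_ENTER);   /* hibernation_ops->enter() */
		close(fd);
	}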
diff --git a/kernel/profile.c b/kernel/profile.c
index 9bfadb248dd8..cc91b9bf759d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -340,6 +340,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
340 340
341 switch (action) { 341 switch (action) {
342 case CPU_UP_PREPARE: 342 case CPU_UP_PREPARE:
343 case CPU_UP_PREPARE_FROZEN:
343 node = cpu_to_node(cpu); 344 node = cpu_to_node(cpu);
344 per_cpu(cpu_profile_flip, cpu) = 0; 345 per_cpu(cpu_profile_flip, cpu) = 0;
345 if (!per_cpu(cpu_profile_hits, cpu)[1]) { 346 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
@@ -365,10 +366,13 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
365 __free_page(page); 366 __free_page(page);
366 return NOTIFY_BAD; 367 return NOTIFY_BAD;
367 case CPU_ONLINE: 368 case CPU_ONLINE:
369 case CPU_ONLINE_FROZEN:
368 cpu_set(cpu, prof_cpu_mask); 370 cpu_set(cpu, prof_cpu_mask);
369 break; 371 break;
370 case CPU_UP_CANCELED: 372 case CPU_UP_CANCELED:
373 case CPU_UP_CANCELED_FROZEN:
371 case CPU_DEAD: 374 case CPU_DEAD:
375 case CPU_DEAD_FROZEN:
372 cpu_clear(cpu, prof_cpu_mask); 376 cpu_clear(cpu, prof_cpu_mask);
373 if (per_cpu(cpu_profile_hits, cpu)[0]) { 377 if (per_cpu(cpu_profile_hits, cpu)[0]) {
374 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 378 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 3554b76da84c..2c2dd8410dc4 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -558,9 +558,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
558 long cpu = (long)hcpu; 558 long cpu = (long)hcpu;
559 switch (action) { 559 switch (action) {
560 case CPU_UP_PREPARE: 560 case CPU_UP_PREPARE:
561 case CPU_UP_PREPARE_FROZEN:
561 rcu_online_cpu(cpu); 562 rcu_online_cpu(cpu);
562 break; 563 break;
563 case CPU_DEAD: 564 case CPU_DEAD:
565 case CPU_DEAD_FROZEN:
564 rcu_offline_cpu(cpu); 566 rcu_offline_cpu(cpu);
565 break; 567 break;
566 default: 568 default:
diff --git a/kernel/relay.c b/kernel/relay.c
index 577f251c7e28..4311101b0ca7 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -310,16 +310,13 @@ static struct rchan_callbacks default_channel_callbacks = {
310 310
311/** 311/**
312 * wakeup_readers - wake up readers waiting on a channel 312 * wakeup_readers - wake up readers waiting on a channel
313 * @work: work struct that contains the the channel buffer 313 * @data: contains the channel buffer
314 * 314 *
315 * This is the work function used to defer reader waking. The 315 * This is the timer function used to defer reader waking.
316 * reason waking is deferred is that calling directly from write
317 * causes problems if you're writing from say the scheduler.
318 */ 316 */
319static void wakeup_readers(struct work_struct *work) 317static void wakeup_readers(unsigned long data)
320{ 318{
321 struct rchan_buf *buf = 319 struct rchan_buf *buf = (struct rchan_buf *)data;
322 container_of(work, struct rchan_buf, wake_readers.work);
323 wake_up_interruptible(&buf->read_wait); 320 wake_up_interruptible(&buf->read_wait);
324} 321}
325 322
@@ -337,11 +334,9 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
337 if (init) { 334 if (init) {
338 init_waitqueue_head(&buf->read_wait); 335 init_waitqueue_head(&buf->read_wait);
339 kref_init(&buf->kref); 336 kref_init(&buf->kref);
340 INIT_DELAYED_WORK(&buf->wake_readers, NULL); 337 setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
341 } else { 338 } else
342 cancel_delayed_work(&buf->wake_readers); 339 del_timer_sync(&buf->timer);
343 flush_scheduled_work();
344 }
345 340
346 buf->subbufs_produced = 0; 341 buf->subbufs_produced = 0;
347 buf->subbufs_consumed = 0; 342 buf->subbufs_consumed = 0;
@@ -447,8 +442,7 @@ end:
447static void relay_close_buf(struct rchan_buf *buf) 442static void relay_close_buf(struct rchan_buf *buf)
448{ 443{
449 buf->finalized = 1; 444 buf->finalized = 1;
450 cancel_delayed_work(&buf->wake_readers); 445 del_timer_sync(&buf->timer);
451 flush_scheduled_work();
452 kref_put(&buf->kref, relay_remove_buf); 446 kref_put(&buf->kref, relay_remove_buf);
453} 447}
454 448
@@ -490,6 +484,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
490 484
491 switch(action) { 485 switch(action) {
492 case CPU_UP_PREPARE: 486 case CPU_UP_PREPARE:
487 case CPU_UP_PREPARE_FROZEN:
493 mutex_lock(&relay_channels_mutex); 488 mutex_lock(&relay_channels_mutex);
494 list_for_each_entry(chan, &relay_channels, list) { 489 list_for_each_entry(chan, &relay_channels, list) {
495 if (chan->buf[hotcpu]) 490 if (chan->buf[hotcpu])
@@ -506,6 +501,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
506 mutex_unlock(&relay_channels_mutex); 501 mutex_unlock(&relay_channels_mutex);
507 break; 502 break;
508 case CPU_DEAD: 503 case CPU_DEAD:
504 case CPU_DEAD_FROZEN:
509 /* No need to flush the cpu : will be flushed upon 505 /* No need to flush the cpu : will be flushed upon
510 * final relay_flush() call. */ 506 * final relay_flush() call. */
511 break; 507 break;
@@ -608,11 +604,14 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
608 buf->dentry->d_inode->i_size += buf->chan->subbuf_size - 604 buf->dentry->d_inode->i_size += buf->chan->subbuf_size -
609 buf->padding[old_subbuf]; 605 buf->padding[old_subbuf];
610 smp_mb(); 606 smp_mb();
611 if (waitqueue_active(&buf->read_wait)) { 607 if (waitqueue_active(&buf->read_wait))
612 PREPARE_DELAYED_WORK(&buf->wake_readers, 608 /*
613 wakeup_readers); 609 * Calling wake_up_interruptible() from here
614 schedule_delayed_work(&buf->wake_readers, 1); 610 * will deadlock if we happen to be logging
615 } 611 * from the scheduler (trying to re-grab
612 * rq->lock), so defer it.
613 */
614 __mod_timer(&buf->timer, jiffies + 1);
616 } 615 }
617 616
618 old = buf->data; 617 old = buf->data;
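The pattern adopted here generalizes: when a wakeup must not run at the call site (the writer may hold rq->lock), arm a one-jiffy timer and let the timer softirq do it. Self-contained sketch:

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static struct timer_list demo_timer;

	static void demo_wake(unsigned long data)
	{
		wake_up_interruptible((wait_queue_head_t *)data);
	}

	static void demo_defer_wakeup(void)
	{
		setup_timer(&demo_timer, demo_wake, (unsigned long)&demo_wq);
		mod_timer(&demo_timer, jiffies + 1);	/* runs from timer softirq */
	}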
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 180978cb2f75..12879f6c1ec3 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -56,7 +56,7 @@
56 * state. 56 * state.
57 */ 57 */
58 58
59static void 59void
60rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner, 60rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
61 unsigned long mask) 61 unsigned long mask)
62{ 62{
@@ -81,29 +81,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
81} 81}
82 82
83/* 83/*
84 * We can speed up the acquire/release, if the architecture
85 * supports cmpxchg and if there's no debugging state to be set up
86 */
87#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
88# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
89static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
90{
91 unsigned long owner, *p = (unsigned long *) &lock->owner;
92
93 do {
94 owner = *p;
95 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
96}
97#else
98# define rt_mutex_cmpxchg(l,c,n) (0)
99static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
100{
101 lock->owner = (struct task_struct *)
102 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
103}
104#endif
105
106/*
107 * Calculate task priority from the waiter list priority 84 * Calculate task priority from the waiter list priority
108 * 85 *
109 * Return task->normal_prio when the waiter list is empty or when 86 * Return task->normal_prio when the waiter list is empty or when
@@ -123,7 +100,7 @@ int rt_mutex_getprio(struct task_struct *task)
123 * 100 *
124 * This can be both boosting and unboosting. task->pi_lock must be held. 101 * This can be both boosting and unboosting. task->pi_lock must be held.
125 */ 102 */
126static void __rt_mutex_adjust_prio(struct task_struct *task) 103void __rt_mutex_adjust_prio(struct task_struct *task)
127{ 104{
128 int prio = rt_mutex_getprio(task); 105 int prio = rt_mutex_getprio(task);
129 106
@@ -159,11 +136,11 @@ int max_lock_depth = 1024;
159 * Decreases task's usage by one - may thus free the task. 136 * Decreases task's usage by one - may thus free the task.
160 * Returns 0 or -EDEADLK. 137 * Returns 0 or -EDEADLK.
161 */ 138 */
162static int rt_mutex_adjust_prio_chain(struct task_struct *task, 139int rt_mutex_adjust_prio_chain(struct task_struct *task,
163 int deadlock_detect, 140 int deadlock_detect,
164 struct rt_mutex *orig_lock, 141 struct rt_mutex *orig_lock,
165 struct rt_mutex_waiter *orig_waiter, 142 struct rt_mutex_waiter *orig_waiter,
166 struct task_struct *top_task) 143 struct task_struct *top_task)
167{ 144{
168 struct rt_mutex *lock; 145 struct rt_mutex *lock;
169 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; 146 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -524,8 +501,8 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
524 * 501 *
525 * Must be called with lock->wait_lock held 502 * Must be called with lock->wait_lock held
526 */ 503 */
527static void remove_waiter(struct rt_mutex *lock, 504void remove_waiter(struct rt_mutex *lock,
528 struct rt_mutex_waiter *waiter) 505 struct rt_mutex_waiter *waiter)
529{ 506{
530 int first = (waiter == rt_mutex_top_waiter(lock)); 507 int first = (waiter == rt_mutex_top_waiter(lock));
531 struct task_struct *owner = rt_mutex_owner(lock); 508 struct task_struct *owner = rt_mutex_owner(lock);
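
The rtmutex changes above drop `static` from rt_mutex_set_owner(), __rt_mutex_adjust_prio(), rt_mutex_adjust_prio_chain() and remove_waiter() so code outside rtmutex.c (the futex PI paths) can drive the boost chain directly. A hedged sketch of the caller shape this enables; boost_owner_chain() is hypothetical, not the actual kernel/futex.c logic:

    #include "rtmutex_common.h"	/* in-tree header, per the next diff */

    /* Hypothetical caller: after queueing 'waiter' on 'lock' on behalf
     * of 'task', walk the lock chain to propagate the priority boost.
     * The caller must hold a reference on 'task'; the chain walk drops
     * it and returns 0 or -EDEADLK (deadlock detection enabled here). */
    static int boost_owner_chain(struct task_struct *task,
    			     struct rt_mutex *lock,
    			     struct rt_mutex_waiter *waiter)
    {
    	return rt_mutex_adjust_prio_chain(task, 1, lock, waiter, current);
    }
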
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 9c75856e791e..242ec7ee740b 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -113,6 +113,29 @@ static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
113} 113}
114 114
115/* 115/*
116 * We can speed up the acquire/release, if the architecture
117 * supports cmpxchg and if there's no debugging state to be set up
118 */
119#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
120# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
121static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
122{
123 unsigned long owner, *p = (unsigned long *) &lock->owner;
124
125 do {
126 owner = *p;
127 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
128}
129#else
130# define rt_mutex_cmpxchg(l,c,n) (0)
131static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
132{
133 lock->owner = (struct task_struct *)
134 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
135}
136#endif
137
138/*
116 * PI-futex support (proxy locking functions, etc.): 139 * PI-futex support (proxy locking functions, etc.):
117 */ 140 */
118extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); 141extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
@@ -120,4 +143,15 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
120 struct task_struct *proxy_owner); 143 struct task_struct *proxy_owner);
121extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, 144extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
122 struct task_struct *proxy_owner); 145 struct task_struct *proxy_owner);
146
147extern void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
148 unsigned long mask);
149extern void __rt_mutex_adjust_prio(struct task_struct *task);
150extern int rt_mutex_adjust_prio_chain(struct task_struct *task,
151 int deadlock_detect,
152 struct rt_mutex *orig_lock,
153 struct rt_mutex_waiter *orig_waiter,
154 struct task_struct *top_task);
155extern void remove_waiter(struct rt_mutex *lock,
156 struct rt_mutex_waiter *waiter);
123#endif 157#endif
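
Moving rt_mutex_cmpxchg() and mark_rt_mutex_waiters() into rtmutex_common.h lets every rtmutex user share the lockless fast path. A minimal sketch of what that fast path looks like, assuming the helpers above and the convention that an unlocked rt_mutex encodes its owner as NULL; this is illustrative, not rt_mutex_lock() itself:

    static inline int rt_mutex_fast_trylock(struct rt_mutex *lock)
    {
    	/* Claim the unlocked mutex in one atomic step. The macro
    	 * degrades to 0 (always take the slow path) when the
    	 * architecture lacks cmpxchg or rtmutex debugging is on. */
    	return rt_mutex_cmpxchg(lock, NULL, current);
    }
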
diff --git a/kernel/sched.c b/kernel/sched.c
index 66bd7ff23f18..799d23b4e35d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -305,6 +305,7 @@ struct rq {
305}; 305};
306 306
307static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp; 307static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
308static DEFINE_MUTEX(sched_hotcpu_mutex);
308 309
309static inline int cpu_of(struct rq *rq) 310static inline int cpu_of(struct rq *rq)
310{ 311{
@@ -4520,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4520 struct task_struct *p; 4521 struct task_struct *p;
4521 int retval; 4522 int retval;
4522 4523
4523 lock_cpu_hotplug(); 4524 mutex_lock(&sched_hotcpu_mutex);
4524 read_lock(&tasklist_lock); 4525 read_lock(&tasklist_lock);
4525 4526
4526 p = find_process_by_pid(pid); 4527 p = find_process_by_pid(pid);
4527 if (!p) { 4528 if (!p) {
4528 read_unlock(&tasklist_lock); 4529 read_unlock(&tasklist_lock);
4529 unlock_cpu_hotplug(); 4530 mutex_unlock(&sched_hotcpu_mutex);
4530 return -ESRCH; 4531 return -ESRCH;
4531 } 4532 }
4532 4533
@@ -4553,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4553 4554
4554out_unlock: 4555out_unlock:
4555 put_task_struct(p); 4556 put_task_struct(p);
4556 unlock_cpu_hotplug(); 4557 mutex_unlock(&sched_hotcpu_mutex);
4557 return retval; 4558 return retval;
4558} 4559}
4559 4560
@@ -4610,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
4610 struct task_struct *p; 4611 struct task_struct *p;
4611 int retval; 4612 int retval;
4612 4613
4613 lock_cpu_hotplug(); 4614 mutex_lock(&sched_hotcpu_mutex);
4614 read_lock(&tasklist_lock); 4615 read_lock(&tasklist_lock);
4615 4616
4616 retval = -ESRCH; 4617 retval = -ESRCH;
@@ -4626,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
4626 4627
4627out_unlock: 4628out_unlock:
4628 read_unlock(&tasklist_lock); 4629 read_unlock(&tasklist_lock);
4629 unlock_cpu_hotplug(); 4630 mutex_unlock(&sched_hotcpu_mutex);
4630 if (retval) 4631 if (retval)
4631 return retval; 4632 return retval;
4632 4633
@@ -5388,7 +5389,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5388 struct rq *rq; 5389 struct rq *rq;
5389 5390
5390 switch (action) { 5391 switch (action) {
5392 case CPU_LOCK_ACQUIRE:
5393 mutex_lock(&sched_hotcpu_mutex);
5394 break;
5395
5391 case CPU_UP_PREPARE: 5396 case CPU_UP_PREPARE:
5397 case CPU_UP_PREPARE_FROZEN:
5392 p = kthread_create(migration_thread, hcpu, "migration/%d",cpu); 5398 p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
5393 if (IS_ERR(p)) 5399 if (IS_ERR(p))
5394 return NOTIFY_BAD; 5400 return NOTIFY_BAD;
@@ -5402,12 +5408,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5402 break; 5408 break;
5403 5409
5404 case CPU_ONLINE: 5410 case CPU_ONLINE:
5411 case CPU_ONLINE_FROZEN:
5405 /* Strictly unnecessary, as first user will wake it. */ 5412 /* Strictly unnecessary, as first user will wake it. */
5406 wake_up_process(cpu_rq(cpu)->migration_thread); 5413 wake_up_process(cpu_rq(cpu)->migration_thread);
5407 break; 5414 break;
5408 5415
5409#ifdef CONFIG_HOTPLUG_CPU 5416#ifdef CONFIG_HOTPLUG_CPU
5410 case CPU_UP_CANCELED: 5417 case CPU_UP_CANCELED:
5418 case CPU_UP_CANCELED_FROZEN:
5411 if (!cpu_rq(cpu)->migration_thread) 5419 if (!cpu_rq(cpu)->migration_thread)
5412 break; 5420 break;
5413 /* Unbind it from offline cpu so it can run. Fall thru. */ 5421 /* Unbind it from offline cpu so it can run. Fall thru. */
@@ -5418,6 +5426,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5418 break; 5426 break;
5419 5427
5420 case CPU_DEAD: 5428 case CPU_DEAD:
5429 case CPU_DEAD_FROZEN:
5421 migrate_live_tasks(cpu); 5430 migrate_live_tasks(cpu);
5422 rq = cpu_rq(cpu); 5431 rq = cpu_rq(cpu);
5423 kthread_stop(rq->migration_thread); 5432 kthread_stop(rq->migration_thread);
@@ -5433,7 +5442,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5433 BUG_ON(rq->nr_running != 0); 5442 BUG_ON(rq->nr_running != 0);
5434 5443
5435 /* No need to migrate the tasks: it was best-effort if 5444 /* No need to migrate the tasks: it was best-effort if
5436 * they didn't do lock_cpu_hotplug(). Just wake up 5445 * they didn't take sched_hotcpu_mutex. Just wake up
5437 * the requestors. */ 5446 * the requestors. */
5438 spin_lock_irq(&rq->lock); 5447 spin_lock_irq(&rq->lock);
5439 while (!list_empty(&rq->migration_queue)) { 5448 while (!list_empty(&rq->migration_queue)) {
@@ -5447,6 +5456,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5447 spin_unlock_irq(&rq->lock); 5456 spin_unlock_irq(&rq->lock);
5448 break; 5457 break;
5449#endif 5458#endif
5459 case CPU_LOCK_RELEASE:
5460 mutex_unlock(&sched_hotcpu_mutex);
5461 break;
5450 } 5462 }
5451 return NOTIFY_OK; 5463 return NOTIFY_OK;
5452} 5464}
@@ -6822,10 +6834,10 @@ int arch_reinit_sched_domains(void)
6822{ 6834{
6823 int err; 6835 int err;
6824 6836
6825 lock_cpu_hotplug(); 6837 mutex_lock(&sched_hotcpu_mutex);
6826 detach_destroy_domains(&cpu_online_map); 6838 detach_destroy_domains(&cpu_online_map);
6827 err = arch_init_sched_domains(&cpu_online_map); 6839 err = arch_init_sched_domains(&cpu_online_map);
6828 unlock_cpu_hotplug(); 6840 mutex_unlock(&sched_hotcpu_mutex);
6829 6841
6830 return err; 6842 return err;
6831} 6843}
@@ -6904,14 +6916,20 @@ static int update_sched_domains(struct notifier_block *nfb,
6904{ 6916{
6905 switch (action) { 6917 switch (action) {
6906 case CPU_UP_PREPARE: 6918 case CPU_UP_PREPARE:
6919 case CPU_UP_PREPARE_FROZEN:
6907 case CPU_DOWN_PREPARE: 6920 case CPU_DOWN_PREPARE:
6921 case CPU_DOWN_PREPARE_FROZEN:
6908 detach_destroy_domains(&cpu_online_map); 6922 detach_destroy_domains(&cpu_online_map);
6909 return NOTIFY_OK; 6923 return NOTIFY_OK;
6910 6924
6911 case CPU_UP_CANCELED: 6925 case CPU_UP_CANCELED:
6926 case CPU_UP_CANCELED_FROZEN:
6912 case CPU_DOWN_FAILED: 6927 case CPU_DOWN_FAILED:
6928 case CPU_DOWN_FAILED_FROZEN:
6913 case CPU_ONLINE: 6929 case CPU_ONLINE:
6930 case CPU_ONLINE_FROZEN:
6914 case CPU_DEAD: 6931 case CPU_DEAD:
6932 case CPU_DEAD_FROZEN:
6915 /* 6933 /*
6916 * Fall through and re-initialise the domains. 6934 * Fall through and re-initialise the domains.
6917 */ 6935 */
@@ -6930,12 +6948,12 @@ void __init sched_init_smp(void)
6930{ 6948{
6931 cpumask_t non_isolated_cpus; 6949 cpumask_t non_isolated_cpus;
6932 6950
6933 lock_cpu_hotplug(); 6951 mutex_lock(&sched_hotcpu_mutex);
6934 arch_init_sched_domains(&cpu_online_map); 6952 arch_init_sched_domains(&cpu_online_map);
6935 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); 6953 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
6936 if (cpus_empty(non_isolated_cpus)) 6954 if (cpus_empty(non_isolated_cpus))
6937 cpu_set(smp_processor_id(), non_isolated_cpus); 6955 cpu_set(smp_processor_id(), non_isolated_cpus);
6938 unlock_cpu_hotplug(); 6956 mutex_unlock(&sched_hotcpu_mutex);
6939 /* XXX: Theoretical race here - CPU may be hotplugged now */ 6957 /* XXX: Theoretical race here - CPU may be hotplugged now */
6940 hotcpu_notifier(update_sched_domains, 0); 6958 hotcpu_notifier(update_sched_domains, 0);
6941 6959
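
sched.c replaces the global lock_cpu_hotplug() with a subsystem-private sched_hotcpu_mutex, taken and dropped through the new CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier events that bracket every hotplug operation. A sketch of the same pattern for a hypothetical subsystem:

    #include <linux/cpu.h>
    #include <linux/mutex.h>
    #include <linux/notifier.h>

    static DEFINE_MUTEX(my_hotcpu_mutex);

    static int my_cpu_callback(struct notifier_block *nb,
    			   unsigned long action, void *hcpu)
    {
    	switch (action) {
    	case CPU_LOCK_ACQUIRE:		/* sent before any hotplug work */
    		mutex_lock(&my_hotcpu_mutex);
    		break;
    	case CPU_LOCK_RELEASE:		/* sent after all hotplug work */
    		mutex_unlock(&my_hotcpu_mutex);
    		break;
    	}
    	return NOTIFY_OK;
    }

Any code path that must not race with hotplug then takes my_hotcpu_mutex itself, as sched_setaffinity() and sched_getaffinity() now do with sched_hotcpu_mutex above.
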
diff --git a/kernel/signal.c b/kernel/signal.c
index 1368e67c8482..2ac3a668d9dd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -38,125 +38,6 @@
38 38
39static struct kmem_cache *sigqueue_cachep; 39static struct kmem_cache *sigqueue_cachep;
40 40
41/*
42 * In POSIX a signal is sent either to a specific thread (Linux task)
43 * or to the process as a whole (Linux thread group). How the signal
44 * is sent determines whether it's to one thread or the whole group,
45 * which determines which signal mask(s) are involved in blocking it
46 * from being delivered until later. When the signal is delivered,
47 * either it's caught or ignored by a user handler or it has a default
48 * effect that applies to the whole thread group (POSIX process).
49 *
50 * The possible effects an unblocked signal set to SIG_DFL can have are:
51 * ignore - Nothing Happens
52 * terminate - kill the process, i.e. all threads in the group,
53 * similar to exit_group. The group leader (only) reports
54 * WIFSIGNALED status to its parent.
55 * coredump - write a core dump file describing all threads using
56 * the same mm and then kill all those threads
57 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
58 *
59 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
60 * Other signals when not blocked and set to SIG_DFL behaves as follows.
61 * The job control signals also have other special effects.
62 *
63 * +--------------------+------------------+
64 * | POSIX signal | default action |
65 * +--------------------+------------------+
66 * | SIGHUP | terminate |
67 * | SIGINT | terminate |
68 * | SIGQUIT | coredump |
69 * | SIGILL | coredump |
70 * | SIGTRAP | coredump |
71 * | SIGABRT/SIGIOT | coredump |
72 * | SIGBUS | coredump |
73 * | SIGFPE | coredump |
74 * | SIGKILL | terminate(+) |
75 * | SIGUSR1 | terminate |
76 * | SIGSEGV | coredump |
77 * | SIGUSR2 | terminate |
78 * | SIGPIPE | terminate |
79 * | SIGALRM | terminate |
80 * | SIGTERM | terminate |
81 * | SIGCHLD | ignore |
82 * | SIGCONT | ignore(*) |
83 * | SIGSTOP | stop(*)(+) |
84 * | SIGTSTP | stop(*) |
85 * | SIGTTIN | stop(*) |
86 * | SIGTTOU | stop(*) |
87 * | SIGURG | ignore |
88 * | SIGXCPU | coredump |
89 * | SIGXFSZ | coredump |
90 * | SIGVTALRM | terminate |
91 * | SIGPROF | terminate |
92 * | SIGPOLL/SIGIO | terminate |
93 * | SIGSYS/SIGUNUSED | coredump |
94 * | SIGSTKFLT | terminate |
95 * | SIGWINCH | ignore |
96 * | SIGPWR | terminate |
97 * | SIGRTMIN-SIGRTMAX | terminate |
98 * +--------------------+------------------+
99 * | non-POSIX signal | default action |
100 * +--------------------+------------------+
101 * | SIGEMT | coredump |
102 * +--------------------+------------------+
103 *
104 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
105 * (*) Special job control effects:
106 * When SIGCONT is sent, it resumes the process (all threads in the group)
107 * from TASK_STOPPED state and also clears any pending/queued stop signals
108 * (any of those marked with "stop(*)"). This happens regardless of blocking,
109 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
110 * any pending/queued SIGCONT signals; this happens regardless of blocking,
111 * catching, or ignored the stop signal, though (except for SIGSTOP) the
112 * default action of stopping the process may happen later or never.
113 */
114
115#ifdef SIGEMT
116#define M_SIGEMT M(SIGEMT)
117#else
118#define M_SIGEMT 0
119#endif
120
121#if SIGRTMIN > BITS_PER_LONG
122#define M(sig) (1ULL << ((sig)-1))
123#else
124#define M(sig) (1UL << ((sig)-1))
125#endif
126#define T(sig, mask) (M(sig) & (mask))
127
128#define SIG_KERNEL_ONLY_MASK (\
129 M(SIGKILL) | M(SIGSTOP) )
130
131#define SIG_KERNEL_STOP_MASK (\
132 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
133
134#define SIG_KERNEL_COREDUMP_MASK (\
135 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
136 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
137 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
138
139#define SIG_KERNEL_IGNORE_MASK (\
140 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
141
142#define sig_kernel_only(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
144#define sig_kernel_coredump(sig) \
145 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
146#define sig_kernel_ignore(sig) \
147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
148#define sig_kernel_stop(sig) \
149 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
150
151#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
152
153#define sig_user_defined(t, signr) \
154 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
155 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
156
157#define sig_fatal(t, signr) \
158 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
159 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
160 41
161static int sig_ignored(struct task_struct *t, int sig) 42static int sig_ignored(struct task_struct *t, int sig)
162{ 43{
@@ -328,6 +209,16 @@ void flush_signals(struct task_struct *t)
328 spin_unlock_irqrestore(&t->sighand->siglock, flags); 209 spin_unlock_irqrestore(&t->sighand->siglock, flags);
329} 210}
330 211
212void ignore_signals(struct task_struct *t)
213{
214 int i;
215
216 for (i = 0; i < _NSIG; ++i)
217 t->sighand->action[i].sa.sa_handler = SIG_IGN;
218
219 flush_signals(t);
220}
221
331/* 222/*
332 * Flush all handlers for a task. 223 * Flush all handlers for a task.
333 */ 224 */
@@ -1032,17 +923,6 @@ void zap_other_threads(struct task_struct *p)
1032 if (t->exit_state) 923 if (t->exit_state)
1033 continue; 924 continue;
1034 925
1035 /*
1036 * We don't want to notify the parent, since we are
1037 * killed as part of a thread group due to another
1038 * thread doing an execve() or similar. So set the
1039 * exit signal to -1 to allow immediate reaping of
1040 * the process. But don't detach the thread group
1041 * leader.
1042 */
1043 if (t != p->group_leader)
1044 t->exit_signal = -1;
1045
1046 /* SIGKILL will be handled before any pending SIGSTOP */ 926 /* SIGKILL will be handled before any pending SIGSTOP */
1047 sigaddset(&t->pending.signal, SIGKILL); 927 sigaddset(&t->pending.signal, SIGKILL);
1048 signal_wake_up(t, 1); 928 signal_wake_up(t, 1);
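
The new ignore_signals() helper sets every handler to SIG_IGN and flushes anything already queued, which is exactly what kernel threads want. A sketch of a kthread using it; the workqueue hunks further down delete the open-coded sigfillset()/sigprocmask()/flush_signals() sequence this replaces:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/signal.h>

    static int my_kthread(void *unused)
    {
    	ignore_signals(current);	/* SIG_IGN everything, flush queue */

    	while (!kthread_should_stop())
    		schedule_timeout_interruptible(HZ);
    	return 0;
    }
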
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8b75008e2bd8..0b9886a00e74 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -593,6 +593,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
593 593
594 switch (action) { 594 switch (action) {
595 case CPU_UP_PREPARE: 595 case CPU_UP_PREPARE:
596 case CPU_UP_PREPARE_FROZEN:
596 p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); 597 p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
597 if (IS_ERR(p)) { 598 if (IS_ERR(p)) {
598 printk("ksoftirqd for %i failed\n", hotcpu); 599 printk("ksoftirqd for %i failed\n", hotcpu);
@@ -602,16 +603,19 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
602 per_cpu(ksoftirqd, hotcpu) = p; 603 per_cpu(ksoftirqd, hotcpu) = p;
603 break; 604 break;
604 case CPU_ONLINE: 605 case CPU_ONLINE:
606 case CPU_ONLINE_FROZEN:
605 wake_up_process(per_cpu(ksoftirqd, hotcpu)); 607 wake_up_process(per_cpu(ksoftirqd, hotcpu));
606 break; 608 break;
607#ifdef CONFIG_HOTPLUG_CPU 609#ifdef CONFIG_HOTPLUG_CPU
608 case CPU_UP_CANCELED: 610 case CPU_UP_CANCELED:
611 case CPU_UP_CANCELED_FROZEN:
609 if (!per_cpu(ksoftirqd, hotcpu)) 612 if (!per_cpu(ksoftirqd, hotcpu))
610 break; 613 break;
611 /* Unbind so it can run. Fall thru. */ 614 /* Unbind so it can run. Fall thru. */
612 kthread_bind(per_cpu(ksoftirqd, hotcpu), 615 kthread_bind(per_cpu(ksoftirqd, hotcpu),
613 any_online_cpu(cpu_online_map)); 616 any_online_cpu(cpu_online_map));
614 case CPU_DEAD: 617 case CPU_DEAD:
618 case CPU_DEAD_FROZEN:
615 p = per_cpu(ksoftirqd, hotcpu); 619 p = per_cpu(ksoftirqd, hotcpu);
616 per_cpu(ksoftirqd, hotcpu) = NULL; 620 per_cpu(ksoftirqd, hotcpu) = NULL;
617 kthread_stop(p); 621 kthread_stop(p);
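
The *_FROZEN actions added throughout this patch are the same hotplug events, reported while tasks are frozen for suspend or resume. A notifier that handles both alike does not need to double every case label; it can mask the flag instead (sketch, assuming the CPU_TASKS_FROZEN bit the *_FROZEN values are built from in <linux/notifier.h>):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int my_hotplug_callback(struct notifier_block *nfb,
    			       unsigned long action, void *hcpu)
    {
    	switch (action & ~CPU_TASKS_FROZEN) {
    	case CPU_UP_PREPARE:	/* also matches CPU_UP_PREPARE_FROZEN */
    		/* ... allocate per-cpu state ... */
    		break;
    	case CPU_DEAD:		/* also matches CPU_DEAD_FROZEN */
    		/* ... tear it down ... */
    		break;
    	}
    	return NOTIFY_OK;
    }
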
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 8fa7040247ad..0131e296ffb4 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -146,6 +146,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
146 146
147 switch (action) { 147 switch (action) {
148 case CPU_UP_PREPARE: 148 case CPU_UP_PREPARE:
149 case CPU_UP_PREPARE_FROZEN:
149 BUG_ON(per_cpu(watchdog_task, hotcpu)); 150 BUG_ON(per_cpu(watchdog_task, hotcpu));
150 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); 151 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
151 if (IS_ERR(p)) { 152 if (IS_ERR(p)) {
@@ -157,16 +158,19 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
157 kthread_bind(p, hotcpu); 158 kthread_bind(p, hotcpu);
158 break; 159 break;
159 case CPU_ONLINE: 160 case CPU_ONLINE:
161 case CPU_ONLINE_FROZEN:
160 wake_up_process(per_cpu(watchdog_task, hotcpu)); 162 wake_up_process(per_cpu(watchdog_task, hotcpu));
161 break; 163 break;
162#ifdef CONFIG_HOTPLUG_CPU 164#ifdef CONFIG_HOTPLUG_CPU
163 case CPU_UP_CANCELED: 165 case CPU_UP_CANCELED:
166 case CPU_UP_CANCELED_FROZEN:
164 if (!per_cpu(watchdog_task, hotcpu)) 167 if (!per_cpu(watchdog_task, hotcpu))
165 break; 168 break;
166 /* Unbind so it can run. Fall thru. */ 169 /* Unbind so it can run. Fall thru. */
167 kthread_bind(per_cpu(watchdog_task, hotcpu), 170 kthread_bind(per_cpu(watchdog_task, hotcpu),
168 any_online_cpu(cpu_online_map)); 171 any_online_cpu(cpu_online_map));
169 case CPU_DEAD: 172 case CPU_DEAD:
173 case CPU_DEAD_FROZEN:
170 p = per_cpu(watchdog_task, hotcpu); 174 p = per_cpu(watchdog_task, hotcpu);
171 per_cpu(watchdog_task, hotcpu) = NULL; 175 per_cpu(watchdog_task, hotcpu) = NULL;
172 kthread_stop(p); 176 kthread_stop(p);
diff --git a/kernel/sys.c b/kernel/sys.c
index 926bf9d7ac45..cdb7e9457ba6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -134,19 +134,39 @@ static int notifier_chain_unregister(struct notifier_block **nl,
134 return -ENOENT; 134 return -ENOENT;
135} 135}
136 136
137/**
138 * notifier_call_chain - Informs the registered notifiers about an event.
 139 * @nl: Pointer to head of the notifier chain
 140 * @val: Value passed unmodified to notifier function
 141 * @v: Pointer passed unmodified to notifier function
 142 * @nr_to_call: Number of notifier functions to be called; pass -1 to
 143 * call all of them.
 144 * @nr_calls: Records the number of notifications sent; may be NULL if
 145 * the caller does not need the count.
146 * @returns: notifier_call_chain returns the value returned by the
147 * last notifier function called.
148 */
149
137static int __kprobes notifier_call_chain(struct notifier_block **nl, 150static int __kprobes notifier_call_chain(struct notifier_block **nl,
138 unsigned long val, void *v) 151 unsigned long val, void *v,
152 int nr_to_call, int *nr_calls)
139{ 153{
140 int ret = NOTIFY_DONE; 154 int ret = NOTIFY_DONE;
141 struct notifier_block *nb, *next_nb; 155 struct notifier_block *nb, *next_nb;
142 156
143 nb = rcu_dereference(*nl); 157 nb = rcu_dereference(*nl);
144 while (nb) { 158
159 while (nb && nr_to_call) {
145 next_nb = rcu_dereference(nb->next); 160 next_nb = rcu_dereference(nb->next);
146 ret = nb->notifier_call(nb, val, v); 161 ret = nb->notifier_call(nb, val, v);
162
163 if (nr_calls)
164 (*nr_calls)++;
165
147 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) 166 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
148 break; 167 break;
149 nb = next_nb; 168 nb = next_nb;
169 nr_to_call--;
150 } 170 }
151 return ret; 171 return ret;
152} 172}
@@ -205,10 +225,12 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
205EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); 225EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
206 226
207/** 227/**
208 * atomic_notifier_call_chain - Call functions in an atomic notifier chain 228 * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
209 * @nh: Pointer to head of the atomic notifier chain 229 * @nh: Pointer to head of the atomic notifier chain
210 * @val: Value passed unmodified to notifier function 230 * @val: Value passed unmodified to notifier function
211 * @v: Pointer passed unmodified to notifier function 231 * @v: Pointer passed unmodified to notifier function
232 * @nr_to_call: See the comment for notifier_call_chain.
233 * @nr_calls: See the comment for notifier_call_chain.
212 * 234 *
213 * Calls each function in a notifier chain in turn. The functions 235 * Calls each function in a notifier chain in turn. The functions
214 * run in an atomic context, so they must not block. 236 * run in an atomic context, so they must not block.
@@ -222,19 +244,27 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
222 * of the last notifier function called. 244 * of the last notifier function called.
223 */ 245 */
224 246
225int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, 247int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
226 unsigned long val, void *v) 248 unsigned long val, void *v,
249 int nr_to_call, int *nr_calls)
227{ 250{
228 int ret; 251 int ret;
229 252
230 rcu_read_lock(); 253 rcu_read_lock();
231 ret = notifier_call_chain(&nh->head, val, v); 254 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
232 rcu_read_unlock(); 255 rcu_read_unlock();
233 return ret; 256 return ret;
234} 257}
235 258
236EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); 259EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
260
261int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
262 unsigned long val, void *v)
263{
264 return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
265}
237 266
267EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
238/* 268/*
239 * Blocking notifier chain routines. All access to the chain is 269 * Blocking notifier chain routines. All access to the chain is
240 * synchronized by an rwsem. 270 * synchronized by an rwsem.
@@ -304,10 +334,12 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
304EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); 334EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
305 335
306/** 336/**
307 * blocking_notifier_call_chain - Call functions in a blocking notifier chain 337 * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
308 * @nh: Pointer to head of the blocking notifier chain 338 * @nh: Pointer to head of the blocking notifier chain
309 * @val: Value passed unmodified to notifier function 339 * @val: Value passed unmodified to notifier function
310 * @v: Pointer passed unmodified to notifier function 340 * @v: Pointer passed unmodified to notifier function
341 * @nr_to_call: See comment for notifier_call_chain.
342 * @nr_calls: See comment for notifier_call_chain.
311 * 343 *
312 * Calls each function in a notifier chain in turn. The functions 344 * Calls each function in a notifier chain in turn. The functions
313 * run in a process context, so they are allowed to block. 345 * run in a process context, so they are allowed to block.
@@ -320,8 +352,9 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
320 * of the last notifier function called. 352 * of the last notifier function called.
321 */ 353 */
322 354
323int blocking_notifier_call_chain(struct blocking_notifier_head *nh, 355int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
324 unsigned long val, void *v) 356 unsigned long val, void *v,
357 int nr_to_call, int *nr_calls)
325{ 358{
326 int ret = NOTIFY_DONE; 359 int ret = NOTIFY_DONE;
327 360
@@ -332,12 +365,19 @@ int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
332 */ 365 */
333 if (rcu_dereference(nh->head)) { 366 if (rcu_dereference(nh->head)) {
334 down_read(&nh->rwsem); 367 down_read(&nh->rwsem);
335 ret = notifier_call_chain(&nh->head, val, v); 368 ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
369 nr_calls);
336 up_read(&nh->rwsem); 370 up_read(&nh->rwsem);
337 } 371 }
338 return ret; 372 return ret;
339} 373}
374EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
340 375
376int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
377 unsigned long val, void *v)
378{
379 return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
380}
341EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); 381EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
342 382
343/* 383/*
@@ -383,10 +423,12 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
383EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); 423EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
384 424
385/** 425/**
386 * raw_notifier_call_chain - Call functions in a raw notifier chain 426 * __raw_notifier_call_chain - Call functions in a raw notifier chain
387 * @nh: Pointer to head of the raw notifier chain 427 * @nh: Pointer to head of the raw notifier chain
388 * @val: Value passed unmodified to notifier function 428 * @val: Value passed unmodified to notifier function
389 * @v: Pointer passed unmodified to notifier function 429 * @v: Pointer passed unmodified to notifier function
430 * @nr_to_call: See comment for notifier_call_chain.
 431 * @nr_calls: See comment for notifier_call_chain.
390 * 432 *
391 * Calls each function in a notifier chain in turn. The functions 433 * Calls each function in a notifier chain in turn. The functions
392 * run in an undefined context. 434 * run in an undefined context.
@@ -400,10 +442,19 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
400 * of the last notifier function called. 442 * of the last notifier function called.
401 */ 443 */
402 444
445int __raw_notifier_call_chain(struct raw_notifier_head *nh,
446 unsigned long val, void *v,
447 int nr_to_call, int *nr_calls)
448{
449 return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
450}
451
452EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
453
403int raw_notifier_call_chain(struct raw_notifier_head *nh, 454int raw_notifier_call_chain(struct raw_notifier_head *nh,
404 unsigned long val, void *v) 455 unsigned long val, void *v)
405{ 456{
406 return notifier_call_chain(&nh->head, val, v); 457 return __raw_notifier_call_chain(nh, val, v, -1, NULL);
407} 458}
408 459
409EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 460EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
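
The point of the nr_to_call/nr_calls plumbing introduced above: a caller can learn how many notifiers actually ran and, if one of them vetoes, replay only those for the matching cancel event. A sketch of that rollback pattern, the shape the CPU hotplug core adopts with these helpers:

    static int notify_prepare(struct raw_notifier_head *chain, void *hcpu)
    {
    	int nr_calls = 0;
    	int ret;

    	ret = __raw_notifier_call_chain(chain, CPU_UP_PREPARE, hcpu,
    					-1, &nr_calls);
    	if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) {
    		/* The nr_calls-th notifier failed; cancel only the
    		 * ones that completed before it. */
    		__raw_notifier_call_chain(chain, CPU_UP_CANCELED, hcpu,
    					  nr_calls - 1, NULL);
    	}
    	return ret;
    }
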
@@ -478,10 +529,12 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
478EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); 529EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
479 530
480/** 531/**
481 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain 532 * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
482 * @nh: Pointer to head of the SRCU notifier chain 533 * @nh: Pointer to head of the SRCU notifier chain
483 * @val: Value passed unmodified to notifier function 534 * @val: Value passed unmodified to notifier function
484 * @v: Pointer passed unmodified to notifier function 535 * @v: Pointer passed unmodified to notifier function
536 * @nr_to_call: See comment for notifier_call_chain.
 537 * @nr_calls: See comment for notifier_call_chain.
485 * 538 *
486 * Calls each function in a notifier chain in turn. The functions 539 * Calls each function in a notifier chain in turn. The functions
487 * run in a process context, so they are allowed to block. 540 * run in a process context, so they are allowed to block.
@@ -494,18 +547,25 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
494 * of the last notifier function called. 547 * of the last notifier function called.
495 */ 548 */
496 549
497int srcu_notifier_call_chain(struct srcu_notifier_head *nh, 550int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
498 unsigned long val, void *v) 551 unsigned long val, void *v,
552 int nr_to_call, int *nr_calls)
499{ 553{
500 int ret; 554 int ret;
501 int idx; 555 int idx;
502 556
503 idx = srcu_read_lock(&nh->srcu); 557 idx = srcu_read_lock(&nh->srcu);
504 ret = notifier_call_chain(&nh->head, val, v); 558 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
505 srcu_read_unlock(&nh->srcu, idx); 559 srcu_read_unlock(&nh->srcu, idx);
506 return ret; 560 return ret;
507} 561}
562EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
508 563
564int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
565 unsigned long val, void *v)
566{
567 return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
568}
509EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); 569EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
510 570
511/** 571/**
@@ -881,7 +941,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
881#ifdef CONFIG_SOFTWARE_SUSPEND 941#ifdef CONFIG_SOFTWARE_SUSPEND
882 case LINUX_REBOOT_CMD_SW_SUSPEND: 942 case LINUX_REBOOT_CMD_SW_SUSPEND:
883 { 943 {
884 int ret = pm_suspend(PM_SUSPEND_DISK); 944 int ret = hibernate();
885 unlock_kernel(); 945 unlock_kernel();
886 return ret; 946 return ret;
887 } 947 }
@@ -1292,7 +1352,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
1292} 1352}
1293 1353
1294/* 1354/*
1295 * Samma på svenska.. 1355 * Samma på svenska..
1296 */ 1356 */
1297asmlinkage long sys_setfsgid(gid_t gid) 1357asmlinkage long sys_setfsgid(gid_t gid)
1298{ 1358{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f0664bd5011c..4073353abd4f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -77,6 +77,7 @@ extern int sysctl_drop_caches;
77extern int percpu_pagelist_fraction; 77extern int percpu_pagelist_fraction;
78extern int compat_log; 78extern int compat_log;
79extern int maps_protect; 79extern int maps_protect;
80extern int sysctl_stat_interval;
80 81
81/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ 82/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
82static int maxolduid = 65535; 83static int maxolduid = 65535;
@@ -857,6 +858,17 @@ static ctl_table vm_table[] = {
857 .extra2 = &one_hundred, 858 .extra2 = &one_hundred,
858 }, 859 },
859#endif 860#endif
861#ifdef CONFIG_SMP
862 {
863 .ctl_name = CTL_UNNUMBERED,
864 .procname = "stat_interval",
865 .data = &sysctl_stat_interval,
866 .maxlen = sizeof(sysctl_stat_interval),
867 .mode = 0644,
868 .proc_handler = &proc_dointvec_jiffies,
869 .strategy = &sysctl_jiffies,
870 },
871#endif
860#if defined(CONFIG_X86_32) || \ 872#if defined(CONFIG_X86_32) || \
861 (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) 873 (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
862 { 874 {
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index fe5c7db24247..3db5c3c460d7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -74,15 +74,17 @@ static struct clocksource *watchdog;
74static struct timer_list watchdog_timer; 74static struct timer_list watchdog_timer;
75static DEFINE_SPINLOCK(watchdog_lock); 75static DEFINE_SPINLOCK(watchdog_lock);
76static cycle_t watchdog_last; 76static cycle_t watchdog_last;
77static int watchdog_resumed;
78
77/* 79/*
78 * Interval: 0.5sec Treshold: 0.0625s 80 * Interval: 0.5sec Threshold: 0.0625s
79 */ 81 */
80#define WATCHDOG_INTERVAL (HZ >> 1) 82#define WATCHDOG_INTERVAL (HZ >> 1)
81#define WATCHDOG_TRESHOLD (NSEC_PER_SEC >> 4) 83#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
82 84
83static void clocksource_ratewd(struct clocksource *cs, int64_t delta) 85static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
84{ 86{
85 if (delta > -WATCHDOG_TRESHOLD && delta < WATCHDOG_TRESHOLD) 87 if (delta > -WATCHDOG_THRESHOLD && delta < WATCHDOG_THRESHOLD)
86 return; 88 return;
87 89
88 printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", 90 printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
98 struct clocksource *cs, *tmp; 100 struct clocksource *cs, *tmp;
99 cycle_t csnow, wdnow; 101 cycle_t csnow, wdnow;
100 int64_t wd_nsec, cs_nsec; 102 int64_t wd_nsec, cs_nsec;
103 int resumed;
101 104
102 spin_lock(&watchdog_lock); 105 spin_lock(&watchdog_lock);
103 106
107 resumed = watchdog_resumed;
108 if (unlikely(resumed))
109 watchdog_resumed = 0;
110
104 wdnow = watchdog->read(); 111 wdnow = watchdog->read();
105 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); 112 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
106 watchdog_last = wdnow; 113 watchdog_last = wdnow;
107 114
108 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { 115 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
109 csnow = cs->read(); 116 csnow = cs->read();
117
118 if (unlikely(resumed)) {
119 cs->wd_last = csnow;
120 continue;
121 }
122
110 /* Initialized ? */ 123 /* Initialized ? */
111 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { 124 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
112 if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && 125 if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
136 } 149 }
137 spin_unlock(&watchdog_lock); 150 spin_unlock(&watchdog_lock);
138} 151}
152static void clocksource_resume_watchdog(void)
153{
154 spin_lock(&watchdog_lock);
155 watchdog_resumed = 1;
156 spin_unlock(&watchdog_lock);
157}
158
139static void clocksource_check_watchdog(struct clocksource *cs) 159static void clocksource_check_watchdog(struct clocksource *cs)
140{ 160{
141 struct clocksource *cse; 161 struct clocksource *cse;
@@ -182,9 +202,34 @@ static void clocksource_check_watchdog(struct clocksource *cs)
182 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) 202 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
183 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; 203 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
184} 204}
205
206static inline void clocksource_resume_watchdog(void) { }
185#endif 207#endif
186 208
187/** 209/**
210 * clocksource_resume - resume the clocksource(s)
211 */
212void clocksource_resume(void)
213{
214 struct list_head *tmp;
215 unsigned long flags;
216
217 spin_lock_irqsave(&clocksource_lock, flags);
218
219 list_for_each(tmp, &clocksource_list) {
220 struct clocksource *cs;
221
222 cs = list_entry(tmp, struct clocksource, list);
223 if (cs->resume)
224 cs->resume();
225 }
226
227 clocksource_resume_watchdog();
228
229 spin_unlock_irqrestore(&clocksource_lock, flags);
230}
231
232/**
188 * clocksource_get_next - Returns the selected clocksource 233 * clocksource_get_next - Returns the selected clocksource
189 * 234 *
190 */ 235 */
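
The new clocksource_resume() walks every registered clocksource and invokes its resume callback, then tells the watchdog to skip one comparison interval so a counter that stopped across suspend is not flagged as unstable. A sketch of a driver opting in; it assumes this release also adds a .resume member to struct clocksource, which clocksource_resume() calls:

    #include <linux/clocksource.h>

    static cycle_t my_counter_read(void)
    {
    	return 0;	/* read the hardware counter here */
    }

    static void my_counter_resume(void)
    {
    	/* reprogram the counter after S3, before timekeeping and the
    	 * watchdog trust its readings again */
    }

    static struct clocksource my_counter_cs = {
    	.name	= "my_counter",
    	.rating	= 200,
    	.read	= my_counter_read,
    	.resume	= my_counter_resume,
    	.mask	= CLOCKSOURCE_MASK(32),
    };
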
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index b734ca4bc75e..8bbcfb77f7d2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -65,7 +65,7 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
65 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); 65 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
66#endif 66#endif
67 SEQ_printf(m, "\n"); 67 SEQ_printf(m, "\n");
68 SEQ_printf(m, " # expires at %Ld nsecs [in %Ld nsecs]\n", 68 SEQ_printf(m, " # expires at %Lu nsecs [in %Lu nsecs]\n",
69 (unsigned long long)ktime_to_ns(timer->expires), 69 (unsigned long long)ktime_to_ns(timer->expires),
70 (unsigned long long)(ktime_to_ns(timer->expires) - now)); 70 (unsigned long long)(ktime_to_ns(timer->expires) - now));
71} 71}
@@ -111,14 +111,14 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
111{ 111{
112 SEQ_printf(m, " .index: %d\n", 112 SEQ_printf(m, " .index: %d\n",
113 base->index); 113 base->index);
114 SEQ_printf(m, " .resolution: %Ld nsecs\n", 114 SEQ_printf(m, " .resolution: %Lu nsecs\n",
115 (unsigned long long)ktime_to_ns(base->resolution)); 115 (unsigned long long)ktime_to_ns(base->resolution));
116 SEQ_printf(m, " .get_time: "); 116 SEQ_printf(m, " .get_time: ");
117 print_name_offset(m, base->get_time); 117 print_name_offset(m, base->get_time);
118 SEQ_printf(m, "\n"); 118 SEQ_printf(m, "\n");
119#ifdef CONFIG_HIGH_RES_TIMERS 119#ifdef CONFIG_HIGH_RES_TIMERS
120 SEQ_printf(m, " .offset: %Ld nsecs\n", 120 SEQ_printf(m, " .offset: %Lu nsecs\n",
121 ktime_to_ns(base->offset)); 121 (unsigned long long) ktime_to_ns(base->offset));
122#endif 122#endif
123 SEQ_printf(m, "active timers:\n"); 123 SEQ_printf(m, "active timers:\n");
124 print_active_timers(m, base, now); 124 print_active_timers(m, base, now);
@@ -135,10 +135,11 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
135 print_base(m, cpu_base->clock_base + i, now); 135 print_base(m, cpu_base->clock_base + i, now);
136 } 136 }
137#define P(x) \ 137#define P(x) \
138 SEQ_printf(m, " .%-15s: %Ld\n", #x, (u64)(cpu_base->x)) 138 SEQ_printf(m, " .%-15s: %Lu\n", #x, \
139 (unsigned long long)(cpu_base->x))
139#define P_ns(x) \ 140#define P_ns(x) \
140 SEQ_printf(m, " .%-15s: %Ld nsecs\n", #x, \ 141 SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \
141 (u64)(ktime_to_ns(cpu_base->x))) 142 (unsigned long long)(ktime_to_ns(cpu_base->x)))
142 143
143#ifdef CONFIG_HIGH_RES_TIMERS 144#ifdef CONFIG_HIGH_RES_TIMERS
144 P_ns(expires_next); 145 P_ns(expires_next);
@@ -150,10 +151,11 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
150 151
151#ifdef CONFIG_TICK_ONESHOT 152#ifdef CONFIG_TICK_ONESHOT
152# define P(x) \ 153# define P(x) \
153 SEQ_printf(m, " .%-15s: %Ld\n", #x, (u64)(ts->x)) 154 SEQ_printf(m, " .%-15s: %Lu\n", #x, \
155 (unsigned long long)(ts->x))
154# define P_ns(x) \ 156# define P_ns(x) \
155 SEQ_printf(m, " .%-15s: %Ld nsecs\n", #x, \ 157 SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \
156 (u64)(ktime_to_ns(ts->x))) 158 (unsigned long long)(ktime_to_ns(ts->x)))
157 { 159 {
158 struct tick_sched *ts = tick_get_tick_sched(cpu); 160 struct tick_sched *ts = tick_get_tick_sched(cpu);
159 P(nohz_mode); 161 P(nohz_mode);
@@ -167,7 +169,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
167 P(last_jiffies); 169 P(last_jiffies);
168 P(next_jiffies); 170 P(next_jiffies);
169 P_ns(idle_expires); 171 P_ns(idle_expires);
170 SEQ_printf(m, "jiffies: %Ld\n", (u64)jiffies); 172 SEQ_printf(m, "jiffies: %Lu\n",
173 (unsigned long long)jiffies);
171 } 174 }
172#endif 175#endif
173 176
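
The timer_list.c hunks all apply one rule: a 64-bit quantity gets cast to unsigned long long and printed with an unsigned conversion (%Lu), which is correct on 32-bit and 64-bit builds alike and fixes the signed %Ld misuse. The rule in isolation (sketch):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void print_expiry(u64 ns)
    {
    	/* u64 may be 'unsigned long' or 'unsigned long long' depending
    	 * on the architecture; the cast makes %llu always correct. */
    	printk(KERN_DEBUG "expires in %llu nsecs\n",
    	       (unsigned long long)ns);
    }
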
diff --git a/kernel/timer.c b/kernel/timer.c
index 7a6448340f90..59a28b1752f8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -92,24 +92,24 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
92/* Functions below help us manage 'deferrable' flag */ 92/* Functions below help us manage 'deferrable' flag */
93static inline unsigned int tbase_get_deferrable(tvec_base_t *base) 93static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
94{ 94{
95 return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); 95 return (unsigned int)((unsigned long)base & TBASE_DEFERRABLE_FLAG);
96} 96}
97 97
98static inline tvec_base_t *tbase_get_base(tvec_base_t *base) 98static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
99{ 99{
100 return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); 100 return (tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG);
101} 101}
102 102
103static inline void timer_set_deferrable(struct timer_list *timer) 103static inline void timer_set_deferrable(struct timer_list *timer)
104{ 104{
105 timer->base = ((tvec_base_t *)((unsigned long)(timer->base) | 105 timer->base = (tvec_base_t *)((unsigned long)timer->base |
106 TBASE_DEFERRABLE_FLAG)); 106 TBASE_DEFERRABLE_FLAG);
107} 107}
108 108
109static inline void 109static inline void
110timer_set_base(struct timer_list *timer, tvec_base_t *new_base) 110timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
111{ 111{
112 timer->base = (tvec_base_t *)((unsigned long)(new_base) | 112 timer->base = (tvec_base_t *)((unsigned long)new_base |
113 tbase_get_deferrable(timer->base)); 113 tbase_get_deferrable(timer->base));
114} 114}
115 115
@@ -1293,11 +1293,13 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1293 long cpu = (long)hcpu; 1293 long cpu = (long)hcpu;
1294 switch(action) { 1294 switch(action) {
1295 case CPU_UP_PREPARE: 1295 case CPU_UP_PREPARE:
1296 case CPU_UP_PREPARE_FROZEN:
1296 if (init_timers_cpu(cpu) < 0) 1297 if (init_timers_cpu(cpu) < 0)
1297 return NOTIFY_BAD; 1298 return NOTIFY_BAD;
1298 break; 1299 break;
1299#ifdef CONFIG_HOTPLUG_CPU 1300#ifdef CONFIG_HOTPLUG_CPU
1300 case CPU_DEAD: 1301 case CPU_DEAD:
1302 case CPU_DEAD_FROZEN:
1301 migrate_timers(cpu); 1303 migrate_timers(cpu);
1302 break; 1304 break;
1303#endif 1305#endif
@@ -1497,6 +1499,8 @@ unregister_time_interpolator(struct time_interpolator *ti)
1497 prev = &curr->next; 1499 prev = &curr->next;
1498 } 1500 }
1499 1501
1502 clocksource_resume();
1503
1500 write_seqlock_irqsave(&xtime_lock, flags); 1504 write_seqlock_irqsave(&xtime_lock, flags);
1501 if (ti == time_interpolator) { 1505 if (ti == time_interpolator) {
1502 /* we lost the best time-interpolator: */ 1506 /* we lost the best time-interpolator: */
diff --git a/kernel/wait.c b/kernel/wait.c
index 59a82f63275d..444ddbfaefc4 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(remove_wait_queue);
61 * The spin_unlock() itself is semi-permeable and only protects 61 * The spin_unlock() itself is semi-permeable and only protects
62 * one way (it only protects stuff inside the critical region and 62 * one way (it only protects stuff inside the critical region and
63 * stops them from bleeding out - it would still allow subsequent 63 * stops them from bleeding out - it would still allow subsequent
64 * loads to move into the the critical region). 64 * loads to move into the critical region).
65 */ 65 */
66void fastcall 66void fastcall
67prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) 67prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b6fa5e63085d..fb56fedd5c02 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -36,30 +36,20 @@
36/* 36/*
37 * The per-CPU workqueue (if single thread, we always use the first 37 * The per-CPU workqueue (if single thread, we always use the first
38 * possible cpu). 38 * possible cpu).
39 *
40 * The sequence counters are for flush_scheduled_work(). It wants to wait
41 * until all currently-scheduled works are completed, but it doesn't
42 * want to be livelocked by new, incoming ones. So it waits until
43 * remove_sequence is >= the insert_sequence which pertained when
44 * flush_scheduled_work() was called.
45 */ 39 */
46struct cpu_workqueue_struct { 40struct cpu_workqueue_struct {
47 41
48 spinlock_t lock; 42 spinlock_t lock;
49 43
50 long remove_sequence; /* Least-recently added (next to run) */
51 long insert_sequence; /* Next to add */
52
53 struct list_head worklist; 44 struct list_head worklist;
54 wait_queue_head_t more_work; 45 wait_queue_head_t more_work;
55 wait_queue_head_t work_done; 46 struct work_struct *current_work;
56 47
57 struct workqueue_struct *wq; 48 struct workqueue_struct *wq;
58 struct task_struct *thread; 49 struct task_struct *thread;
50 int should_stop;
59 51
60 int run_depth; /* Detect run_workqueue() recursion depth */ 52 int run_depth; /* Detect run_workqueue() recursion depth */
61
62 int freezeable; /* Freeze the thread during suspend */
63} ____cacheline_aligned; 53} ____cacheline_aligned;
64 54
65/* 55/*
@@ -68,8 +58,10 @@ struct cpu_workqueue_struct {
68 */ 58 */
69struct workqueue_struct { 59struct workqueue_struct {
70 struct cpu_workqueue_struct *cpu_wq; 60 struct cpu_workqueue_struct *cpu_wq;
61 struct list_head list;
71 const char *name; 62 const char *name;
72 struct list_head list; /* Empty if single thread */ 63 int singlethread;
64 int freezeable; /* Freeze threads during suspend */
73}; 65};
74 66
75/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove 67/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -77,106 +69,68 @@ struct workqueue_struct {
77static DEFINE_MUTEX(workqueue_mutex); 69static DEFINE_MUTEX(workqueue_mutex);
78static LIST_HEAD(workqueues); 70static LIST_HEAD(workqueues);
79 71
80static int singlethread_cpu; 72static int singlethread_cpu __read_mostly;
73static cpumask_t cpu_singlethread_map __read_mostly;
74/* optimization, we could use cpu_possible_map */
75static cpumask_t cpu_populated_map __read_mostly;
81 76
82/* If it's single threaded, it isn't in the list of workqueues. */ 77/* If it's single threaded, it isn't in the list of workqueues. */
83static inline int is_single_threaded(struct workqueue_struct *wq) 78static inline int is_single_threaded(struct workqueue_struct *wq)
84{ 79{
85 return list_empty(&wq->list); 80 return wq->singlethread;
81}
82
83static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
84{
85 return is_single_threaded(wq)
86 ? &cpu_singlethread_map : &cpu_populated_map;
87}
88
89static
90struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
91{
92 if (unlikely(is_single_threaded(wq)))
93 cpu = singlethread_cpu;
94 return per_cpu_ptr(wq->cpu_wq, cpu);
86} 95}
87 96
88/* 97/*
89 * Set the workqueue on which a work item is to be run 98 * Set the workqueue on which a work item is to be run
90 * - Must *only* be called if the pending flag is set 99 * - Must *only* be called if the pending flag is set
91 */ 100 */
92static inline void set_wq_data(struct work_struct *work, void *wq) 101static inline void set_wq_data(struct work_struct *work,
102 struct cpu_workqueue_struct *cwq)
93{ 103{
94 unsigned long new; 104 unsigned long new;
95 105
96 BUG_ON(!work_pending(work)); 106 BUG_ON(!work_pending(work));
97 107
98 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); 108 new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
99 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); 109 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
100 atomic_long_set(&work->data, new); 110 atomic_long_set(&work->data, new);
101} 111}
102 112
103static inline void *get_wq_data(struct work_struct *work) 113static inline
114struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
104{ 115{
105 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); 116 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
106} 117}
107 118
108static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) 119static void insert_work(struct cpu_workqueue_struct *cwq,
120 struct work_struct *work, int tail)
109{ 121{
110 int ret = 0; 122 set_wq_data(work, cwq);
111 unsigned long flags;
112
113 spin_lock_irqsave(&cwq->lock, flags);
114 /* 123 /*
115 * We need to re-validate the work info after we've gotten 124 * Ensure that we get the right work->data if we see the
116 * the cpu_workqueue lock. We can run the work now iff: 125 * result of list_add() below, see try_to_grab_pending().
117 *
118 * - the wq_data still matches the cpu_workqueue_struct
119 * - AND the work is still marked pending
120 * - AND the work is still on a list (which will be this
121 * workqueue_struct list)
122 *
123 * All these conditions are important, because we
124 * need to protect against the work being run right
125 * now on another CPU (all but the last one might be
126 * true if it's currently running and has not been
127 * released yet, for example).
128 */ 126 */
129 if (get_wq_data(work) == cwq 127 smp_wmb();
130 && work_pending(work) 128 if (tail)
131 && !list_empty(&work->entry)) { 129 list_add_tail(&work->entry, &cwq->worklist);
132 work_func_t f = work->func; 130 else
133 list_del_init(&work->entry); 131 list_add(&work->entry, &cwq->worklist);
134 spin_unlock_irqrestore(&cwq->lock, flags); 132 wake_up(&cwq->more_work);
135
136 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
137 work_release(work);
138 f(work);
139
140 spin_lock_irqsave(&cwq->lock, flags);
141 cwq->remove_sequence++;
142 wake_up(&cwq->work_done);
143 ret = 1;
144 }
145 spin_unlock_irqrestore(&cwq->lock, flags);
146 return ret;
147}
148
149/**
150 * run_scheduled_work - run scheduled work synchronously
151 * @work: work to run
152 *
153 * This checks if the work was pending, and runs it
154 * synchronously if so. It returns a boolean to indicate
155 * whether it had any scheduled work to run or not.
156 *
157 * NOTE! This _only_ works for normal work_structs. You
158 * CANNOT use this for delayed work, because the wq data
159 * for delayed work will not point properly to the per-
160 * CPU workqueue struct, but will change!
161 */
162int fastcall run_scheduled_work(struct work_struct *work)
163{
164 for (;;) {
165 struct cpu_workqueue_struct *cwq;
166
167 if (!work_pending(work))
168 return 0;
169 if (list_empty(&work->entry))
170 return 0;
171 /* NOTE! This depends intimately on __queue_work! */
172 cwq = get_wq_data(work);
173 if (!cwq)
174 return 0;
175 if (__run_work(cwq, work))
176 return 1;
177 }
178} 133}
179EXPORT_SYMBOL(run_scheduled_work);
180 134
181/* Preempt must be disabled. */ 135/* Preempt must be disabled. */
182static void __queue_work(struct cpu_workqueue_struct *cwq, 136static void __queue_work(struct cpu_workqueue_struct *cwq,
@@ -185,10 +139,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
185 unsigned long flags; 139 unsigned long flags;
186 140
187 spin_lock_irqsave(&cwq->lock, flags); 141 spin_lock_irqsave(&cwq->lock, flags);
188 set_wq_data(work, cwq); 142 insert_work(cwq, work, 1);
189 list_add_tail(&work->entry, &cwq->worklist);
190 cwq->insert_sequence++;
191 wake_up(&cwq->more_work);
192 spin_unlock_irqrestore(&cwq->lock, flags); 143 spin_unlock_irqrestore(&cwq->lock, flags);
193} 144}
194 145
@@ -204,16 +155,14 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
204 */ 155 */
205int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) 156int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
206{ 157{
207 int ret = 0, cpu = get_cpu(); 158 int ret = 0;
208 159
209 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 160 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
210 if (unlikely(is_single_threaded(wq)))
211 cpu = singlethread_cpu;
212 BUG_ON(!list_empty(&work->entry)); 161 BUG_ON(!list_empty(&work->entry));
213 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 162 __queue_work(wq_per_cpu(wq, get_cpu()), work);
163 put_cpu();
214 ret = 1; 164 ret = 1;
215 } 165 }
216 put_cpu();
217 return ret; 166 return ret;
218} 167}
219EXPORT_SYMBOL_GPL(queue_work); 168EXPORT_SYMBOL_GPL(queue_work);
@@ -221,13 +170,10 @@ EXPORT_SYMBOL_GPL(queue_work);
221void delayed_work_timer_fn(unsigned long __data) 170void delayed_work_timer_fn(unsigned long __data)
222{ 171{
223 struct delayed_work *dwork = (struct delayed_work *)__data; 172 struct delayed_work *dwork = (struct delayed_work *)__data;
224 struct workqueue_struct *wq = get_wq_data(&dwork->work); 173 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
225 int cpu = smp_processor_id(); 174 struct workqueue_struct *wq = cwq->wq;
226 175
227 if (unlikely(is_single_threaded(wq))) 176 __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
228 cpu = singlethread_cpu;
229
230 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
231} 177}
232 178
233/** 179/**
@@ -241,27 +187,11 @@ void delayed_work_timer_fn(unsigned long __data)
241int fastcall queue_delayed_work(struct workqueue_struct *wq, 187int fastcall queue_delayed_work(struct workqueue_struct *wq,
242 struct delayed_work *dwork, unsigned long delay) 188 struct delayed_work *dwork, unsigned long delay)
243{ 189{
244 int ret = 0; 190 timer_stats_timer_set_start_info(&dwork->timer);
245 struct timer_list *timer = &dwork->timer;
246 struct work_struct *work = &dwork->work;
247
248 timer_stats_timer_set_start_info(timer);
249 if (delay == 0) 191 if (delay == 0)
250 return queue_work(wq, work); 192 return queue_work(wq, &dwork->work);
251
252 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
253 BUG_ON(timer_pending(timer));
254 BUG_ON(!list_empty(&work->entry));
255 193
256 /* This stores wq for the moment, for the timer_fn */ 194 return queue_delayed_work_on(-1, wq, dwork, delay);
257 set_wq_data(work, wq);
258 timer->expires = jiffies + delay;
259 timer->data = (unsigned long)dwork;
260 timer->function = delayed_work_timer_fn;
261 add_timer(timer);
262 ret = 1;
263 }
264 return ret;
265} 195}
266EXPORT_SYMBOL_GPL(queue_delayed_work); 196EXPORT_SYMBOL_GPL(queue_delayed_work);
267 197
@@ -285,12 +215,16 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
285 BUG_ON(timer_pending(timer)); 215 BUG_ON(timer_pending(timer));
286 BUG_ON(!list_empty(&work->entry)); 216 BUG_ON(!list_empty(&work->entry));
287 217
288 /* This stores wq for the moment, for the timer_fn */ 218 /* This stores cwq for the moment, for the timer_fn */
289 set_wq_data(work, wq); 219 set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
290 timer->expires = jiffies + delay; 220 timer->expires = jiffies + delay;
291 timer->data = (unsigned long)dwork; 221 timer->data = (unsigned long)dwork;
292 timer->function = delayed_work_timer_fn; 222 timer->function = delayed_work_timer_fn;
293 add_timer_on(timer, cpu); 223
224 if (unlikely(cpu >= 0))
225 add_timer_on(timer, cpu);
226 else
227 add_timer(timer);
294 ret = 1; 228 ret = 1;
295 } 229 }
296 return ret; 230 return ret;
@@ -299,13 +233,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
299 233
300static void run_workqueue(struct cpu_workqueue_struct *cwq) 234static void run_workqueue(struct cpu_workqueue_struct *cwq)
301{ 235{
302 unsigned long flags; 236 spin_lock_irq(&cwq->lock);
303
304 /*
305 * Keep taking off work from the queue until
306 * done.
307 */
308 spin_lock_irqsave(&cwq->lock, flags);
309 cwq->run_depth++; 237 cwq->run_depth++;
310 if (cwq->run_depth > 3) { 238 if (cwq->run_depth > 3) {
311 /* morton gets to eat his hat */ 239 /* morton gets to eat his hat */
@@ -318,12 +246,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
318 struct work_struct, entry); 246 struct work_struct, entry);
319 work_func_t f = work->func; 247 work_func_t f = work->func;
320 248
249 cwq->current_work = work;
321 list_del_init(cwq->worklist.next); 250 list_del_init(cwq->worklist.next);
322 spin_unlock_irqrestore(&cwq->lock, flags); 251 spin_unlock_irq(&cwq->lock);
323 252
324 BUG_ON(get_wq_data(work) != cwq); 253 BUG_ON(get_wq_data(work) != cwq);
325 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work))) 254 work_clear_pending(work);
326 work_release(work);
327 f(work); 255 f(work);
328 256
329 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 257 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
@@ -337,63 +265,81 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
337 dump_stack(); 265 dump_stack();
338 } 266 }
339 267
340 spin_lock_irqsave(&cwq->lock, flags); 268 spin_lock_irq(&cwq->lock);
341 cwq->remove_sequence++; 269 cwq->current_work = NULL;
342 wake_up(&cwq->work_done);
343 } 270 }
344 cwq->run_depth--; 271 cwq->run_depth--;
345 spin_unlock_irqrestore(&cwq->lock, flags); 272 spin_unlock_irq(&cwq->lock);
273}
274
275/*
276 * NOTE: the caller must not touch *cwq if this func returns true
277 */
278static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
279{
280 int should_stop = cwq->should_stop;
281
282 if (unlikely(should_stop)) {
283 spin_lock_irq(&cwq->lock);
284 should_stop = cwq->should_stop && list_empty(&cwq->worklist);
285 if (should_stop)
286 cwq->thread = NULL;
287 spin_unlock_irq(&cwq->lock);
288 }
289
290 return should_stop;
346} 291}
347 292
348static int worker_thread(void *__cwq) 293static int worker_thread(void *__cwq)
349{ 294{
350 struct cpu_workqueue_struct *cwq = __cwq; 295 struct cpu_workqueue_struct *cwq = __cwq;
351 DECLARE_WAITQUEUE(wait, current); 296 DEFINE_WAIT(wait);
352 struct k_sigaction sa;
353 sigset_t blocked;
354 297
355 if (!cwq->freezeable) 298 if (!cwq->wq->freezeable)
356 current->flags |= PF_NOFREEZE; 299 current->flags |= PF_NOFREEZE;
357 300
358 set_user_nice(current, -5); 301 set_user_nice(current, -5);
359 302
360 /* Block and flush all signals */ 303 for (;;) {
361 sigfillset(&blocked); 304 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
362 sigprocmask(SIG_BLOCK, &blocked, NULL); 305 if (!freezing(current) && !cwq->should_stop
363 flush_signals(current); 306 && list_empty(&cwq->worklist))
364 307 schedule();
365 /* 308 finish_wait(&cwq->more_work, &wait);
366 * We inherited MPOL_INTERLEAVE from the booting kernel.
367 * Set MPOL_DEFAULT to insure node local allocations.
368 */
369 numa_default_policy();
370
371 /* SIG_IGN makes children autoreap: see do_notify_parent(). */
372 sa.sa.sa_handler = SIG_IGN;
373 sa.sa.sa_flags = 0;
374 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
375 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
376 309
377 set_current_state(TASK_INTERRUPTIBLE); 310 try_to_freeze();
378 while (!kthread_should_stop()) {
379 if (cwq->freezeable)
380 try_to_freeze();
381 311
382 add_wait_queue(&cwq->more_work, &wait); 312 if (cwq_should_stop(cwq))
383 if (list_empty(&cwq->worklist)) 313 break;
384 schedule();
385 else
386 __set_current_state(TASK_RUNNING);
387 remove_wait_queue(&cwq->more_work, &wait);
388 314
389 if (!list_empty(&cwq->worklist)) 315 run_workqueue(cwq);
390 run_workqueue(cwq);
391 set_current_state(TASK_INTERRUPTIBLE);
392 } 316 }
393 __set_current_state(TASK_RUNNING); 317
394 return 0; 318 return 0;
395} 319}
396 320
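The reworked worker_thread() above replaces the open-coded add_wait_queue()/remove_wait_queue() dance with the prepare_to_wait()/finish_wait() idiom. The general shape of that idiom, as a sketch with a hypothetical wait queue head my_waitq and condition my_cond:

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();
	}
	finish_wait(&my_waitq, &wait);

Because prepare_to_wait() queues the task before the condition is re-tested, a wakeup that slips in between the test and schedule() simply makes schedule() return immediately; no wakeup can be lost.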
321struct wq_barrier {
322 struct work_struct work;
323 struct completion done;
324};
325
326static void wq_barrier_func(struct work_struct *work)
327{
328 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
329 complete(&barr->done);
330}
331
332static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
333 struct wq_barrier *barr, int tail)
334{
335 INIT_WORK(&barr->work, wq_barrier_func);
336 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
337
338 init_completion(&barr->done);
339
340 insert_work(cwq, &barr->work, tail);
341}
342
397static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 343static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
398{ 344{
399 if (cwq->thread == current) { 345 if (cwq->thread == current) {
@@ -403,21 +349,18 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
403 */ 349 */
404 run_workqueue(cwq); 350 run_workqueue(cwq);
405 } else { 351 } else {
406 DEFINE_WAIT(wait); 352 struct wq_barrier barr;
407 long sequence_needed; 353 int active = 0;
408 354
409 spin_lock_irq(&cwq->lock); 355 spin_lock_irq(&cwq->lock);
410 sequence_needed = cwq->insert_sequence; 356 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
411 357 insert_wq_barrier(cwq, &barr, 1);
412 while (sequence_needed - cwq->remove_sequence > 0) { 358 active = 1;
413 prepare_to_wait(&cwq->work_done, &wait,
414 TASK_UNINTERRUPTIBLE);
415 spin_unlock_irq(&cwq->lock);
416 schedule();
417 spin_lock_irq(&cwq->lock);
418 } 359 }
419 finish_wait(&cwq->work_done, &wait);
420 spin_unlock_irq(&cwq->lock); 360 spin_unlock_irq(&cwq->lock);
361
362 if (active)
363 wait_for_completion(&barr.done);
421 } 364 }
422} 365}
423 366
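flush_cpu_workqueue() above no longer sleeps on the insert_sequence/remove_sequence counters; it queues a barrier work item and waits on its completion. The same pattern can be open-coded against the public API; a sketch only, with my_barrier and my_barrier_fn being hypothetical names, not part of this patch:

	struct my_barrier {
		struct work_struct work;
		struct completion done;
	};

	static void my_barrier_fn(struct work_struct *work)
	{
		struct my_barrier *b = container_of(work, struct my_barrier, work);
		complete(&b->done);
	}

	static void my_flush(void)
	{
		struct my_barrier b;

		INIT_WORK(&b.work, my_barrier_fn);
		init_completion(&b.done);
		schedule_work(&b.work);		/* runs after all work queued before it */
		wait_for_completion(&b.done);
	}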
@@ -428,151 +371,145 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
428 * Forces execution of the workqueue and blocks until its completion. 371 * Forces execution of the workqueue and blocks until its completion.
429 * This is typically used in driver shutdown handlers. 372 * This is typically used in driver shutdown handlers.
430 * 373 *
431 * This function will sample each workqueue's current insert_sequence number and 374 * We sleep until all works which were queued on entry have been handled,
432 * will sleep until the head sequence is greater than or equal to that. This 375 * but we are not livelocked by new incoming ones.
433 * means that we sleep until all works which were queued on entry have been
434 * handled, but we are not livelocked by new incoming ones.
435 * 376 *
436 * This function used to run the workqueues itself. Now we just wait for the 377 * This function used to run the workqueues itself. Now we just wait for the
437 * helper threads to do it. 378 * helper threads to do it.
438 */ 379 */
439void fastcall flush_workqueue(struct workqueue_struct *wq) 380void fastcall flush_workqueue(struct workqueue_struct *wq)
440{ 381{
382 const cpumask_t *cpu_map = wq_cpu_map(wq);
383 int cpu;
384
441 might_sleep(); 385 might_sleep();
386 for_each_cpu_mask(cpu, *cpu_map)
387 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
388}
389EXPORT_SYMBOL_GPL(flush_workqueue);
442 390
443 if (is_single_threaded(wq)) { 391/*
444 /* Always use first cpu's area. */ 392 * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
445 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu)); 393 * so this work can't be re-armed in any way.
446 } else { 394 */
447 int cpu; 395static int try_to_grab_pending(struct work_struct *work)
396{
397 struct cpu_workqueue_struct *cwq;
398 int ret = 0;
448 399
449 mutex_lock(&workqueue_mutex); 400 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
450 for_each_online_cpu(cpu) 401 return 1;
451 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 402
452 mutex_unlock(&workqueue_mutex); 403 /*
404 * The queueing is in progress, or it is already queued. Try to
405 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
406 */
407
408 cwq = get_wq_data(work);
409 if (!cwq)
410 return ret;
411
412 spin_lock_irq(&cwq->lock);
413 if (!list_empty(&work->entry)) {
414 /*
415 * This work is queued, but perhaps we locked the wrong cwq.
416 * In that case we must see the new value after rmb(), see
417 * insert_work()->wmb().
418 */
419 smp_rmb();
420 if (cwq == get_wq_data(work)) {
421 list_del_init(&work->entry);
422 ret = 1;
423 }
453 } 424 }
425 spin_unlock_irq(&cwq->lock);
426
427 return ret;
454} 428}
455EXPORT_SYMBOL_GPL(flush_workqueue);
456 429
457static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, 430static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
458 int cpu, int freezeable) 431 struct work_struct *work)
459{ 432{
460 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); 433 struct wq_barrier barr;
461 struct task_struct *p; 434 int running = 0;
462 435
463 spin_lock_init(&cwq->lock); 436 spin_lock_irq(&cwq->lock);
464 cwq->wq = wq; 437 if (unlikely(cwq->current_work == work)) {
465 cwq->thread = NULL; 438 insert_wq_barrier(cwq, &barr, 0);
466 cwq->insert_sequence = 0; 439 running = 1;
467 cwq->remove_sequence = 0; 440 }
468 cwq->freezeable = freezeable; 441 spin_unlock_irq(&cwq->lock);
469 INIT_LIST_HEAD(&cwq->worklist);
470 init_waitqueue_head(&cwq->more_work);
471 init_waitqueue_head(&cwq->work_done);
472 442
473 if (is_single_threaded(wq)) 443 if (unlikely(running))
474 p = kthread_create(worker_thread, cwq, "%s", wq->name); 444 wait_for_completion(&barr.done);
475 else
476 p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
477 if (IS_ERR(p))
478 return NULL;
479 cwq->thread = p;
480 return p;
481} 445}
482 446
483struct workqueue_struct *__create_workqueue(const char *name, 447static void wait_on_work(struct work_struct *work)
484 int singlethread, int freezeable)
485{ 448{
486 int cpu, destroy = 0; 449 struct cpu_workqueue_struct *cwq;
487 struct workqueue_struct *wq; 450 struct workqueue_struct *wq;
488 struct task_struct *p; 451 const cpumask_t *cpu_map;
452 int cpu;
489 453
490 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 454 might_sleep();
491 if (!wq)
492 return NULL;
493 455
494 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); 456 cwq = get_wq_data(work);
495 if (!wq->cpu_wq) { 457 if (!cwq)
496 kfree(wq); 458 return;
497 return NULL;
498 }
499 459
500 wq->name = name; 460 wq = cwq->wq;
501 mutex_lock(&workqueue_mutex); 461 cpu_map = wq_cpu_map(wq);
502 if (singlethread) {
503 INIT_LIST_HEAD(&wq->list);
504 p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
505 if (!p)
506 destroy = 1;
507 else
508 wake_up_process(p);
509 } else {
510 list_add(&wq->list, &workqueues);
511 for_each_online_cpu(cpu) {
512 p = create_workqueue_thread(wq, cpu, freezeable);
513 if (p) {
514 kthread_bind(p, cpu);
515 wake_up_process(p);
516 } else
517 destroy = 1;
518 }
519 }
520 mutex_unlock(&workqueue_mutex);
521 462
522 /* 463 for_each_cpu_mask(cpu, *cpu_map)
523 * Was there any error during startup? If yes then clean up: 464 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
524 */
525 if (destroy) {
526 destroy_workqueue(wq);
527 wq = NULL;
528 }
529 return wq;
530} 465}
531EXPORT_SYMBOL_GPL(__create_workqueue);
532 466
533static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) 467/**
468 * cancel_work_sync - block until a work_struct's callback has terminated
469 * @work: the work which is to be flushed
470 *
471 * cancel_work_sync() will cancel the work if it is queued. If the work's
472 * callback appears to be running, cancel_work_sync() will block until it
473 * has completed.
474 *
475 * It is possible to use this function if the work re-queues itself. It can
476 * cancel the work even if it migrates to another workqueue, however in that
477 * case it only guarantees that work->func() has completed on the last queued
478 * workqueue.
479 *
480 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
481 * pending, otherwise it goes into a busy-wait loop until the timer expires.
482 *
483 * The caller must ensure that workqueue_struct on which this work was last
484 * queued can't be destroyed before this function returns.
485 */
486void cancel_work_sync(struct work_struct *work)
534{ 487{
535 struct cpu_workqueue_struct *cwq; 488 while (!try_to_grab_pending(work))
536 unsigned long flags; 489 cpu_relax();
537 struct task_struct *p; 490 wait_on_work(work);
538 491 work_clear_pending(work);
539 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
540 spin_lock_irqsave(&cwq->lock, flags);
541 p = cwq->thread;
542 cwq->thread = NULL;
543 spin_unlock_irqrestore(&cwq->lock, flags);
544 if (p)
545 kthread_stop(p);
546} 492}
493EXPORT_SYMBOL_GPL(cancel_work_sync);
547 494
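A typical use of the new cancel_work_sync() in a driver teardown path, as a sketch; my_dev and its members are hypothetical:

	static void my_remove(struct my_dev *dev)
	{
		/* After this returns the handler is neither queued nor running. */
		cancel_work_sync(&dev->reset_work);
		free_irq(dev->irq, dev);
		kfree(dev);
	}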
548/** 495/**
549 * destroy_workqueue - safely terminate a workqueue 496 * cancel_rearming_delayed_work - reliably kill off a delayed work.
550 * @wq: target workqueue 497 * @dwork: the delayed work struct
551 * 498 *
552 * Safely destroy a workqueue. All work currently pending will be done first. 499 * It is possible to use this function if @dwork rearms itself via queue_work()
500 * or queue_delayed_work(). See also the comment for cancel_work_sync().
553 */ 501 */
554void destroy_workqueue(struct workqueue_struct *wq) 502void cancel_rearming_delayed_work(struct delayed_work *dwork)
555{ 503{
556 int cpu; 504 while (!del_timer(&dwork->timer) &&
557 505 !try_to_grab_pending(&dwork->work))
558 flush_workqueue(wq); 506 cpu_relax();
559 507 wait_on_work(&dwork->work);
560 /* We don't need the distraction of CPUs appearing and vanishing. */ 508 work_clear_pending(&dwork->work);
561 mutex_lock(&workqueue_mutex);
562 if (is_single_threaded(wq))
563 cleanup_workqueue_thread(wq, singlethread_cpu);
564 else {
565 for_each_online_cpu(cpu)
566 cleanup_workqueue_thread(wq, cpu);
567 list_del(&wq->list);
568 }
569 mutex_unlock(&workqueue_mutex);
570 free_percpu(wq->cpu_wq);
571 kfree(wq);
572} 509}
573EXPORT_SYMBOL_GPL(destroy_workqueue); 510EXPORT_SYMBOL(cancel_rearming_delayed_work);
574 511
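A sketch of the self-rearming case that cancel_rearming_delayed_work() is meant for; my_poll and my_dwork are hypothetical:

	static void my_poll(struct work_struct *work)
	{
		struct delayed_work *dwork =
			container_of(work, struct delayed_work, work);

		/* ... poll the hardware ... */
		schedule_delayed_work(dwork, HZ);	/* rearm */
	}

	static DECLARE_DELAYED_WORK(my_dwork, my_poll);

	/* Teardown: the handler keeps requeueing itself, yet this returns
	 * with the timer stopped, the work unqueued and the handler done. */
	cancel_rearming_delayed_work(&my_dwork);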
575static struct workqueue_struct *keventd_wq; 512static struct workqueue_struct *keventd_wq __read_mostly;
576 513
577/** 514/**
578 * schedule_work - put work task in global workqueue 515 * schedule_work - put work task in global workqueue
@@ -638,7 +575,7 @@ int schedule_on_each_cpu(work_func_t func)
638 if (!works) 575 if (!works)
639 return -ENOMEM; 576 return -ENOMEM;
640 577
641 mutex_lock(&workqueue_mutex); 578 preempt_disable(); /* CPU hotplug */
642 for_each_online_cpu(cpu) { 579 for_each_online_cpu(cpu) {
643 struct work_struct *work = per_cpu_ptr(works, cpu); 580 struct work_struct *work = per_cpu_ptr(works, cpu);
644 581
@@ -646,7 +583,7 @@ int schedule_on_each_cpu(work_func_t func)
646 set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); 583 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
647 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work); 584 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
648 } 585 }
649 mutex_unlock(&workqueue_mutex); 586 preempt_enable();
650 flush_workqueue(keventd_wq); 587 flush_workqueue(keventd_wq);
651 free_percpu(works); 588 free_percpu(works);
652 return 0; 589 return 0;
@@ -659,29 +596,6 @@ void flush_scheduled_work(void)
659EXPORT_SYMBOL(flush_scheduled_work); 596EXPORT_SYMBOL(flush_scheduled_work);
660 597
661/** 598/**
662 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
663 * @wq: the controlling workqueue structure
664 * @dwork: the delayed work struct
665 */
666void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
667 struct delayed_work *dwork)
668{
669 while (!cancel_delayed_work(dwork))
670 flush_workqueue(wq);
671}
672EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
673
674/**
675 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
676 * @dwork: the delayed work struct
677 */
678void cancel_rearming_delayed_work(struct delayed_work *dwork)
679{
680 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
681}
682EXPORT_SYMBOL(cancel_rearming_delayed_work);
683
684/**
685 * execute_in_process_context - reliably execute the routine with user context 599 * execute_in_process_context - reliably execute the routine with user context
686 * @fn: the function to execute 600 * @fn: the function to execute
687 * @ew: guaranteed storage for the execute work structure (must 601 * @ew: guaranteed storage for the execute work structure (must
@@ -728,94 +642,209 @@ int current_is_keventd(void)
728 642
729} 643}
730 644
731/* Take the work from this (downed) CPU. */ 645static struct cpu_workqueue_struct *
732static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) 646init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
733{ 647{
734 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); 648 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
735 struct list_head list;
736 struct work_struct *work;
737 649
738 spin_lock_irq(&cwq->lock); 650 cwq->wq = wq;
739 list_replace_init(&cwq->worklist, &list); 651 spin_lock_init(&cwq->lock);
652 INIT_LIST_HEAD(&cwq->worklist);
653 init_waitqueue_head(&cwq->more_work);
654
655 return cwq;
656}
657
658static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
659{
660 struct workqueue_struct *wq = cwq->wq;
661 const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
662 struct task_struct *p;
663
664 p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
665 /*
666 * Nobody can add the work_struct to this cwq,
667 * if (caller is __create_workqueue)
668 * nobody should see this wq
669 * else // caller is CPU_UP_PREPARE
670 * cpu is not on cpu_online_map
671 * so we can abort safely.
672 */
673 if (IS_ERR(p))
674 return PTR_ERR(p);
675
676 cwq->thread = p;
677 cwq->should_stop = 0;
678
679 return 0;
680}
681
682static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
683{
684 struct task_struct *p = cwq->thread;
740 685
741 while (!list_empty(&list)) { 686 if (p != NULL) {
742 printk("Taking work for %s\n", wq->name); 687 if (cpu >= 0)
743 work = list_entry(list.next,struct work_struct,entry); 688 kthread_bind(p, cpu);
744 list_del(&work->entry); 689 wake_up_process(p);
745 __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
746 } 690 }
747 spin_unlock_irq(&cwq->lock);
748} 691}
749 692
750/* We're holding the cpucontrol mutex here */ 693struct workqueue_struct *__create_workqueue(const char *name,
751static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 694 int singlethread, int freezeable)
752 unsigned long action,
753 void *hcpu)
754{ 695{
755 unsigned int hotcpu = (unsigned long)hcpu;
756 struct workqueue_struct *wq; 696 struct workqueue_struct *wq;
697 struct cpu_workqueue_struct *cwq;
698 int err = 0, cpu;
757 699
758 switch (action) { 700 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
759 case CPU_UP_PREPARE: 701 if (!wq)
760 mutex_lock(&workqueue_mutex); 702 return NULL;
761 /* Create a new workqueue thread for it. */
762 list_for_each_entry(wq, &workqueues, list) {
763 if (!create_workqueue_thread(wq, hotcpu, 0)) {
764 printk("workqueue for %i failed\n", hotcpu);
765 return NOTIFY_BAD;
766 }
767 }
768 break;
769 703
770 case CPU_ONLINE: 704 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
771 /* Kick off worker threads. */ 705 if (!wq->cpu_wq) {
772 list_for_each_entry(wq, &workqueues, list) { 706 kfree(wq);
773 struct cpu_workqueue_struct *cwq; 707 return NULL;
708 }
774 709
775 cwq = per_cpu_ptr(wq->cpu_wq, hotcpu); 710 wq->name = name;
776 kthread_bind(cwq->thread, hotcpu); 711 wq->singlethread = singlethread;
777 wake_up_process(cwq->thread); 712 wq->freezeable = freezeable;
778 } 713 INIT_LIST_HEAD(&wq->list);
779 mutex_unlock(&workqueue_mutex);
780 break;
781 714
782 case CPU_UP_CANCELED: 715 if (singlethread) {
783 list_for_each_entry(wq, &workqueues, list) { 716 cwq = init_cpu_workqueue(wq, singlethread_cpu);
784 if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread) 717 err = create_workqueue_thread(cwq, singlethread_cpu);
718 start_workqueue_thread(cwq, -1);
719 } else {
720 mutex_lock(&workqueue_mutex);
721 list_add(&wq->list, &workqueues);
722
723 for_each_possible_cpu(cpu) {
724 cwq = init_cpu_workqueue(wq, cpu);
725 if (err || !cpu_online(cpu))
785 continue; 726 continue;
786 /* Unbind so it can run. */ 727 err = create_workqueue_thread(cwq, cpu);
787 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread, 728 start_workqueue_thread(cwq, cpu);
788 any_online_cpu(cpu_online_map));
789 cleanup_workqueue_thread(wq, hotcpu);
790 } 729 }
791 mutex_unlock(&workqueue_mutex); 730 mutex_unlock(&workqueue_mutex);
792 break; 731 }
732
733 if (err) {
734 destroy_workqueue(wq);
735 wq = NULL;
736 }
737 return wq;
738}
739EXPORT_SYMBOL_GPL(__create_workqueue);
740
741static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
742{
743 struct wq_barrier barr;
744 int alive = 0;
745
746 spin_lock_irq(&cwq->lock);
747 if (cwq->thread != NULL) {
748 insert_wq_barrier(cwq, &barr, 1);
749 cwq->should_stop = 1;
750 alive = 1;
751 }
752 spin_unlock_irq(&cwq->lock);
753
754 if (alive) {
755 wait_for_completion(&barr.done);
793 756
794 case CPU_DOWN_PREPARE: 757 while (unlikely(cwq->thread != NULL))
758 cpu_relax();
759 /*
760 * Wait until cwq->thread unlocks cwq->lock,
761 * it won't touch *cwq after that.
762 */
763 smp_rmb();
764 spin_unlock_wait(&cwq->lock);
765 }
766}
767
768/**
769 * destroy_workqueue - safely terminate a workqueue
770 * @wq: target workqueue
771 *
772 * Safely destroy a workqueue. All work currently pending will be done first.
773 */
774void destroy_workqueue(struct workqueue_struct *wq)
775{
776 const cpumask_t *cpu_map = wq_cpu_map(wq);
777 struct cpu_workqueue_struct *cwq;
778 int cpu;
779
780 mutex_lock(&workqueue_mutex);
781 list_del(&wq->list);
782 mutex_unlock(&workqueue_mutex);
783
784 for_each_cpu_mask(cpu, *cpu_map) {
785 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
786 cleanup_workqueue_thread(cwq, cpu);
787 }
788
789 free_percpu(wq->cpu_wq);
790 kfree(wq);
791}
792EXPORT_SYMBOL_GPL(destroy_workqueue);
793
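The usual lifetime against the rewritten create/destroy pair, via the public wrappers around __create_workqueue(); my_wq, my_work and my_handler are hypothetical:

	static void my_handler(struct work_struct *work);
	static DECLARE_WORK(my_work, my_handler);
	static struct workqueue_struct *my_wq;

	static int __init my_init(void)
	{
		my_wq = create_singlethread_workqueue("mydrv");
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}

	static void __exit my_exit(void)
	{
		destroy_workqueue(my_wq);	/* pending work is run first */
	}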
794static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
795 unsigned long action,
796 void *hcpu)
797{
798 unsigned int cpu = (unsigned long)hcpu;
799 struct cpu_workqueue_struct *cwq;
800 struct workqueue_struct *wq;
801
802 action &= ~CPU_TASKS_FROZEN;
803
804 switch (action) {
805 case CPU_LOCK_ACQUIRE:
795 mutex_lock(&workqueue_mutex); 806 mutex_lock(&workqueue_mutex);
796 break; 807 return NOTIFY_OK;
797 808
798 case CPU_DOWN_FAILED: 809 case CPU_LOCK_RELEASE:
799 mutex_unlock(&workqueue_mutex); 810 mutex_unlock(&workqueue_mutex);
800 break; 811 return NOTIFY_OK;
801 812
802 case CPU_DEAD: 813 case CPU_UP_PREPARE:
803 list_for_each_entry(wq, &workqueues, list) 814 cpu_set(cpu, cpu_populated_map);
804 cleanup_workqueue_thread(wq, hotcpu); 815 }
805 list_for_each_entry(wq, &workqueues, list) 816
806 take_over_work(wq, hotcpu); 817 list_for_each_entry(wq, &workqueues, list) {
807 mutex_unlock(&workqueue_mutex); 818 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
808 break; 819
820 switch (action) {
821 case CPU_UP_PREPARE:
822 if (!create_workqueue_thread(cwq, cpu))
823 break;
824 printk(KERN_ERR "workqueue for %i failed\n", cpu);
825 return NOTIFY_BAD;
826
827 case CPU_ONLINE:
828 start_workqueue_thread(cwq, cpu);
829 break;
830
831 case CPU_UP_CANCELED:
832 start_workqueue_thread(cwq, -1);
833 case CPU_DEAD:
834 cleanup_workqueue_thread(cwq, cpu);
835 break;
836 }
809 } 837 }
810 838
811 return NOTIFY_OK; 839 return NOTIFY_OK;
812} 840}
813 841
814void init_workqueues(void) 842void __init init_workqueues(void)
815{ 843{
844 cpu_populated_map = cpu_online_map;
816 singlethread_cpu = first_cpu(cpu_possible_map); 845 singlethread_cpu = first_cpu(cpu_possible_map);
846 cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
817 hotcpu_notifier(workqueue_cpu_callback, 0); 847 hotcpu_notifier(workqueue_cpu_callback, 0);
818 keventd_wq = create_workqueue("events"); 848 keventd_wq = create_workqueue("events");
819 BUG_ON(!keventd_wq); 849 BUG_ON(!keventd_wq);
820} 850}
821
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index d69ddbe43865..402eb4eb6b23 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1004,7 +1004,7 @@ static int radix_tree_callback(struct notifier_block *nfb,
1004 struct radix_tree_preload *rtp; 1004 struct radix_tree_preload *rtp;
1005 1005
1006 /* Free per-cpu pool of preloaded nodes */ 1006 /* Free per-cpu pool of preloaded nodes */
1007 if (action == CPU_DEAD) { 1007 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1008 rtp = &per_cpu(radix_tree_preloads, cpu); 1008 rtp = &per_cpu(radix_tree_preloads, cpu);
1009 while (rtp->nr) { 1009 while (rtp->nr) {
1010 kmem_cache_free(radix_tree_node_cachep, 1010 kmem_cache_free(radix_tree_node_cachep,
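The CPU_DEAD || CPU_DEAD_FROZEN test here and the action &= ~CPU_TASKS_FROZEN mask in workqueue_cpu_callback() above are the two idioms this series uses for the new *_FROZEN hotplug events, which notifiers now also see during suspend/resume. A sketch of both styles in one callback; my_cpu_callback is hypothetical:

	static int my_cpu_callback(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {	/* style 1: mask once */
		case CPU_UP_PREPARE:
			printk(KERN_DEBUG "cpu %u coming up\n", cpu);
			break;
		case CPU_DEAD:		/* also catches CPU_DEAD_FROZEN */
			printk(KERN_DEBUG "cpu %u is gone\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}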
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index a4b730a2180c..5b0d8522b7ca 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -56,6 +56,7 @@ static DEFINE_MUTEX(rslistlock);
56 * rs_init - Initialize a Reed-Solomon codec 56 * rs_init - Initialize a Reed-Solomon codec
57 * @symsize: symbol size, bits (1-8) 57 * @symsize: symbol size, bits (1-8)
58 * @gfpoly: Field generator polynomial coefficients 58 * @gfpoly: Field generator polynomial coefficients
59 * @gffunc: Field generator function
59 * @fcr: first root of RS code generator polynomial, index form 60 * @fcr: first root of RS code generator polynomial, index form
60 * @prim: primitive element to generate polynomial roots 61 * @prim: primitive element to generate polynomial roots
61 * @nroots: RS code generator polynomial degree (number of roots) 62 * @nroots: RS code generator polynomial degree (number of roots)
@@ -63,8 +64,8 @@ static DEFINE_MUTEX(rslistlock);
63 * Allocate a control structure and the polynomial arrays for faster 64 * Allocate a control structure and the polynomial arrays for faster
64 * en/decoding. Fill the arrays according to the given parameters. 65 * en/decoding. Fill the arrays according to the given parameters.
65 */ 66 */
66static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, 67static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
67 int prim, int nroots) 68 int fcr, int prim, int nroots)
68{ 69{
69 struct rs_control *rs; 70 struct rs_control *rs;
70 int i, j, sr, root, iprim; 71 int i, j, sr, root, iprim;
@@ -82,6 +83,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
82 rs->prim = prim; 83 rs->prim = prim;
83 rs->nroots = nroots; 84 rs->nroots = nroots;
84 rs->gfpoly = gfpoly; 85 rs->gfpoly = gfpoly;
86 rs->gffunc = gffunc;
85 87
86 /* Allocate the arrays */ 88 /* Allocate the arrays */
87 rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); 89 rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
@@ -99,17 +101,26 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
99 /* Generate Galois field lookup tables */ 101 /* Generate Galois field lookup tables */
100 rs->index_of[0] = rs->nn; /* log(zero) = -inf */ 102 rs->index_of[0] = rs->nn; /* log(zero) = -inf */
101 rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */ 103 rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */
102 sr = 1; 104 if (gfpoly) {
103 for (i = 0; i < rs->nn; i++) { 105 sr = 1;
104 rs->index_of[sr] = i; 106 for (i = 0; i < rs->nn; i++) {
105 rs->alpha_to[i] = sr; 107 rs->index_of[sr] = i;
106 sr <<= 1; 108 rs->alpha_to[i] = sr;
107 if (sr & (1 << symsize)) 109 sr <<= 1;
108 sr ^= gfpoly; 110 if (sr & (1 << symsize))
109 sr &= rs->nn; 111 sr ^= gfpoly;
112 sr &= rs->nn;
113 }
114 } else {
115 sr = gffunc(0);
116 for (i = 0; i < rs->nn; i++) {
117 rs->index_of[sr] = i;
118 rs->alpha_to[i] = sr;
119 sr = gffunc(sr);
120 }
110 } 121 }
111 /* If it's not primitive, exit */ 122 /* If it's not primitive, exit */
112 if(sr != 1) 123 if(sr != rs->alpha_to[0])
113 goto errpol; 124 goto errpol;
114 125
115 /* Find prim-th root of 1, used in decoding */ 126 /* Find prim-th root of 1, used in decoding */
@@ -173,18 +184,22 @@ void free_rs(struct rs_control *rs)
173} 184}
174 185
175/** 186/**
176 * init_rs - Find a matching or allocate a new rs control structure 187 * init_rs_internal - Find a matching or allocate a new rs control structure
177 * @symsize: the symbol size (number of bits) 188 * @symsize: the symbol size (number of bits)
178 * @gfpoly: the extended Galois field generator polynomial coefficients, 189 * @gfpoly: the extended Galois field generator polynomial coefficients,
179 * with the 0th coefficient in the low order bit. The polynomial 190 * with the 0th coefficient in the low order bit. The polynomial
180 * must be primitive; 191 * must be primitive;
192 * @gffunc: pointer to function to generate the next field element,
193 * or the multiplicative identity element if given 0. Used
194 * instead of gfpoly if gfpoly is 0
181 * @fcr: the first consecutive root of the rs code generator polynomial 195 * @fcr: the first consecutive root of the rs code generator polynomial
182 * in index form 196 * in index form
183 * @prim: primitive element to generate polynomial roots 197 * @prim: primitive element to generate polynomial roots
184 * @nroots: RS code generator polynomial degree (number of roots) 198 * @nroots: RS code generator polynomial degree (number of roots)
185 */ 199 */
186struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, 200static struct rs_control *init_rs_internal(int symsize, int gfpoly,
187 int nroots) 201 int (*gffunc)(int), int fcr,
202 int prim, int nroots)
188{ 203{
189 struct list_head *tmp; 204 struct list_head *tmp;
190 struct rs_control *rs; 205 struct rs_control *rs;
@@ -208,6 +223,8 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
208 continue; 223 continue;
209 if (gfpoly != rs->gfpoly) 224 if (gfpoly != rs->gfpoly)
210 continue; 225 continue;
226 if (gffunc != rs->gffunc)
227 continue;
211 if (fcr != rs->fcr) 228 if (fcr != rs->fcr)
212 continue; 229 continue;
213 if (prim != rs->prim) 230 if (prim != rs->prim)
@@ -220,7 +237,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
220 } 237 }
221 238
222 /* Create a new one */ 239 /* Create a new one */
223 rs = rs_init(symsize, gfpoly, fcr, prim, nroots); 240 rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots);
224 if (rs) { 241 if (rs) {
225 rs->users = 1; 242 rs->users = 1;
226 list_add(&rs->list, &rslist); 243 list_add(&rs->list, &rslist);
@@ -230,6 +247,42 @@ out:
230 return rs; 247 return rs;
231} 248}
232 249
250/**
251 * init_rs - Find a matching or allocate a new rs control structure
252 * @symsize: the symbol size (number of bits)
253 * @gfpoly: the extended Galois field generator polynomial coefficients,
254 * with the 0th coefficient in the low order bit. The polynomial
255 * must be primitive;
256 * @fcr: the first consecutive root of the rs code generator polynomial
257 * in index form
258 * @prim: primitive element to generate polynomial roots
259 * @nroots: RS code generator polynomial degree (number of roots)
260 */
261struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
262 int nroots)
263{
264 return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots);
265}
266
267/**
268 * init_rs_non_canonical - Find a matching or allocate a new rs control
269 * structure, for fields with non-canonical
270 * representation
271 * @symsize: the symbol size (number of bits)
272 * @gffunc: pointer to function to generate the next field element,
273 * or the multiplicative identity element if given 0. Used
274 * instead of gfpoly if gfpoly is 0
275 * @fcr: the first consecutive root of the rs code generator polynomial
276 * in index form
277 * @prim: primitive element to generate polynomial roots
278 * @nroots: RS code generator polynomial degree (number of roots)
279 */
280struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
281 int fcr, int prim, int nroots)
282{
283 return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots);
284}
285
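A sketch of a caller of the new entry point; my_next_element and my_hw_gf_mul are hypothetical and stand in for hardware-specific field arithmetic. Per the kernel-doc above, the generator function must return the multiplicative identity when given 0 and otherwise step through all non-zero field elements:

	static int my_next_element(int x)
	{
		if (x == 0)
			return 1;		/* multiplicative identity */
		return my_hw_gf_mul(x, 2);	/* hypothetical: multiply by a
						 * primitive element */
	}

	struct rs_control *rs;

	rs = init_rs_non_canonical(10, my_next_element, 0, 1, 8);
	if (rs) {
		/* encode_rs16()/decode_rs16() can be used as before */
	}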
233#ifdef CONFIG_REED_SOLOMON_ENC8 286#ifdef CONFIG_REED_SOLOMON_ENC8
234/** 287/**
235 * encode_rs8 - Calculate the parity for data values (8bit data width) 288 * encode_rs8 - Calculate the parity for data values (8bit data width)
@@ -321,6 +374,7 @@ EXPORT_SYMBOL_GPL(decode_rs16);
321#endif 374#endif
322 375
323EXPORT_SYMBOL_GPL(init_rs); 376EXPORT_SYMBOL_GPL(init_rs);
377EXPORT_SYMBOL_GPL(init_rs_non_canonical);
324EXPORT_SYMBOL_GPL(free_rs); 378EXPORT_SYMBOL_GPL(free_rs);
325 379
326MODULE_LICENSE("GPL"); 380MODULE_LICENSE("GPL");
diff --git a/mm/Kconfig b/mm/Kconfig
index 1ac718f636ec..a17da8bafe62 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -166,5 +166,5 @@ config ZONE_DMA_FLAG
166config NR_QUICK 166config NR_QUICK
167 int 167 int
168 depends on QUICKLIST 168 depends on QUICKLIST
169 default "2" if SUPERH
169 default "1" 170 default "1"
170
diff --git a/mm/filemap.c b/mm/filemap.c
index 9cbf4fea4a59..7b48b2ad00e7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -750,6 +750,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
750 read_unlock_irq(&mapping->tree_lock); 750 read_unlock_irq(&mapping->tree_lock);
751 return i; 751 return i;
752} 752}
753EXPORT_SYMBOL(find_get_pages_contig);
753 754
754/** 755/**
755 * find_get_pages_tag - find and return pages that match @tag 756 * find_get_pages_tag - find and return pages that match @tag
@@ -778,6 +779,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
778 read_unlock_irq(&mapping->tree_lock); 779 read_unlock_irq(&mapping->tree_lock);
779 return ret; 780 return ret;
780} 781}
782EXPORT_SYMBOL(find_get_pages_tag);
781 783
782/** 784/**
783 * grab_cache_page_nowait - returns locked page at given index in given cache 785 * grab_cache_page_nowait - returns locked page at given index in given cache
@@ -1782,7 +1784,7 @@ struct page *read_cache_page_async(struct address_space *mapping,
1782retry: 1784retry:
1783 page = __read_cache_page(mapping, index, filler, data); 1785 page = __read_cache_page(mapping, index, filler, data);
1784 if (IS_ERR(page)) 1786 if (IS_ERR(page))
1785 goto out; 1787 return page;
1786 mark_page_accessed(page); 1788 mark_page_accessed(page);
1787 if (PageUptodate(page)) 1789 if (PageUptodate(page))
1788 goto out; 1790 goto out;
@@ -1800,9 +1802,9 @@ retry:
1800 err = filler(data, page); 1802 err = filler(data, page);
1801 if (err < 0) { 1803 if (err < 0) {
1802 page_cache_release(page); 1804 page_cache_release(page);
1803 page = ERR_PTR(err); 1805 return ERR_PTR(err);
1804 } 1806 }
1805 out: 1807out:
1806 mark_page_accessed(page); 1808 mark_page_accessed(page);
1807 return page; 1809 return page;
1808} 1810}
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index cbb335813ec0..1b49dab9b25d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -434,7 +434,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
434 unsigned blocksize; 434 unsigned blocksize;
435 unsigned length; 435 unsigned length;
436 struct page *page; 436 struct page *page;
437 void *kaddr;
438 437
439 BUG_ON(!mapping->a_ops->get_xip_page); 438 BUG_ON(!mapping->a_ops->get_xip_page);
440 439
@@ -458,11 +457,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
458 else 457 else
459 return PTR_ERR(page); 458 return PTR_ERR(page);
460 } 459 }
461 kaddr = kmap_atomic(page, KM_USER0); 460 zero_user_page(page, offset, length, KM_USER0);
462 memset(kaddr + offset, 0, length);
463 kunmap_atomic(kaddr, KM_USER0);
464
465 flush_dcache_page(page);
466 return 0; 461 return 0;
467} 462}
468EXPORT_SYMBOL_GPL(xip_truncate_page); 463EXPORT_SYMBOL_GPL(xip_truncate_page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 36db012b38dd..eb7180db3033 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -140,6 +140,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
140 return page; 140 return page;
141 141
142fail: 142fail:
143 if (vma->vm_flags & VM_MAYSHARE)
144 resv_huge_pages++;
143 spin_unlock(&hugetlb_lock); 145 spin_unlock(&hugetlb_lock);
144 return NULL; 146 return NULL;
145} 147}
@@ -172,6 +174,17 @@ static int __init hugetlb_setup(char *s)
172} 174}
173__setup("hugepages=", hugetlb_setup); 175__setup("hugepages=", hugetlb_setup);
174 176
177static unsigned int cpuset_mems_nr(unsigned int *array)
178{
179 int node;
180 unsigned int nr = 0;
181
182 for_each_node_mask(node, cpuset_current_mems_allowed)
183 nr += array[node];
184
185 return nr;
186}
187
175#ifdef CONFIG_SYSCTL 188#ifdef CONFIG_SYSCTL
176static void update_and_free_page(struct page *page) 189static void update_and_free_page(struct page *page)
177{ 190{
@@ -817,6 +830,26 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
817 chg = region_chg(&inode->i_mapping->private_list, from, to); 830 chg = region_chg(&inode->i_mapping->private_list, from, to);
818 if (chg < 0) 831 if (chg < 0)
819 return chg; 832 return chg;
833 /*
834 * When cpuset is configured, it breaks the strict hugetlb page
835 * reservation as the accounting is done on a global variable. Such
836 * reservation is completely rubbish in the presence of cpuset because
837 * the reservation is not checked against page availability for the
838 * current cpuset. An application can still potentially be OOM'ed
839 * by the kernel due to a lack of free htlb pages in the cpuset the
840 * task is in. Attempting to enforce strict accounting with cpuset
841 * is almost impossible (or too ugly) because cpuset is so fluid
842 * that a task or memory node can be dynamically moved between cpusets.
843 *
844 * The change of semantics for shared hugetlb mapping with cpuset is
845 * undesirable. However, in order to preserve some of the semantics,
846 * we fall back to check against current free page availability as
847 * a best attempt and hopefully to minimize the impact of changing
848 * semantics that cpuset has.
849 */
850 if (chg > cpuset_mems_nr(free_huge_pages_node))
851 return -ENOMEM;
852
820 ret = hugetlb_acct_memory(chg); 853 ret = hugetlb_acct_memory(chg);
821 if (ret < 0) 854 if (ret < 0)
822 return ret; 855 return ret;
diff --git a/mm/mmap.c b/mm/mmap.c
index cc1f543eb1b8..68b9ad2ef1d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1720,7 +1720,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1720 1720
1721/* 1721/*
1722 * Split a vma into two pieces at address 'addr', a new vma is allocated 1722 * Split a vma into two pieces at address 'addr', a new vma is allocated
1723 * either for the first part or the the tail. 1723 * either for the first part or the tail.
1724 */ 1724 */
1725int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, 1725int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1726 unsigned long addr, int new_below) 1726 unsigned long addr, int new_below)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6fd0b7455b0b..f9b5d6d5f4d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -691,43 +691,26 @@ static void __init setup_nr_node_ids(void) {}
691 691
692#ifdef CONFIG_NUMA 692#ifdef CONFIG_NUMA
693/* 693/*
694 * Called from the slab reaper to drain pagesets on a particular node that 694 * Called from the vmstat counter updater to drain pagesets of this
695 * belongs to the currently executing processor. 695 * currently executing processor on remote nodes after they have
696 * expired.
697 *
696 * Note that this function must be called with the thread pinned to 698 * Note that this function must be called with the thread pinned to
697 * a single processor. 699 * a single processor.
698 */ 700 */
699void drain_node_pages(int nodeid) 701void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
700{ 702{
701 int i;
702 enum zone_type z;
703 unsigned long flags; 703 unsigned long flags;
704 int to_drain;
704 705
705 for (z = 0; z < MAX_NR_ZONES; z++) { 706 local_irq_save(flags);
706 struct zone *zone = NODE_DATA(nodeid)->node_zones + z; 707 if (pcp->count >= pcp->batch)
707 struct per_cpu_pageset *pset; 708 to_drain = pcp->batch;
708 709 else
709 if (!populated_zone(zone)) 710 to_drain = pcp->count;
710 continue; 711 free_pages_bulk(zone, to_drain, &pcp->list, 0);
711 712 pcp->count -= to_drain;
712 pset = zone_pcp(zone, smp_processor_id()); 713 local_irq_restore(flags);
713 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
714 struct per_cpu_pages *pcp;
715
716 pcp = &pset->pcp[i];
717 if (pcp->count) {
718 int to_drain;
719
720 local_irq_save(flags);
721 if (pcp->count >= pcp->batch)
722 to_drain = pcp->batch;
723 else
724 to_drain = pcp->count;
725 free_pages_bulk(zone, to_drain, &pcp->list, 0);
726 pcp->count -= to_drain;
727 local_irq_restore(flags);
728 }
729 }
730 }
731} 714}
732#endif 715#endif
733 716
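The calling convention for the new drain_zone_pages(): the caller (the vmstat counter updater) must stay pinned to one CPU and hands over the specific pcp list of a remote zone to drain. A sketch under those assumptions, with illustrative zone and list choices:

	/* Assumes preemption is disabled or the thread is CPU-bound. */
	struct per_cpu_pageset *pset = zone_pcp(zone, smp_processor_id());
	struct per_cpu_pages *pcp = &pset->pcp[0];	/* hot list */

	if (pcp->count)
		drain_zone_pages(zone, pcp);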
@@ -2148,11 +2131,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2148 2131
2149 switch (action) { 2132 switch (action) {
2150 case CPU_UP_PREPARE: 2133 case CPU_UP_PREPARE:
2134 case CPU_UP_PREPARE_FROZEN:
2151 if (process_zones(cpu)) 2135 if (process_zones(cpu))
2152 ret = NOTIFY_BAD; 2136 ret = NOTIFY_BAD;
2153 break; 2137 break;
2154 case CPU_UP_CANCELED: 2138 case CPU_UP_CANCELED:
2139 case CPU_UP_CANCELED_FROZEN:
2155 case CPU_DEAD: 2140 case CPU_DEAD:
2141 case CPU_DEAD_FROZEN:
2156 free_zone_pagesets(cpu); 2142 free_zone_pagesets(cpu);
2157 break; 2143 break;
2158 default: 2144 default:
@@ -3012,7 +2998,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
3012{ 2998{
3013 int cpu = (unsigned long)hcpu; 2999 int cpu = (unsigned long)hcpu;
3014 3000
3015 if (action == CPU_DEAD) { 3001 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
3016 local_irq_disable(); 3002 local_irq_disable();
3017 __drain_pages(cpu); 3003 __drain_pages(cpu);
3018 vm_events_fold_cpu(cpu); 3004 vm_events_fold_cpu(cpu);
diff --git a/mm/slab.c b/mm/slab.c
index acda7e2d66e4..944b20581f8c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -928,12 +928,6 @@ static void next_reap_node(void)
928{ 928{
929 int node = __get_cpu_var(reap_node); 929 int node = __get_cpu_var(reap_node);
930 930
931 /*
932 * Also drain per cpu pages on remote zones
933 */
934 if (node != numa_node_id())
935 drain_node_pages(node);
936
937 node = next_node(node, node_online_map); 931 node = next_node(node, node_online_map);
938 if (unlikely(node >= MAX_NUMNODES)) 932 if (unlikely(node >= MAX_NUMNODES))
939 node = first_node(node_online_map); 933 node = first_node(node_online_map);
@@ -1186,8 +1180,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1186 int memsize = sizeof(struct kmem_list3); 1180 int memsize = sizeof(struct kmem_list3);
1187 1181
1188 switch (action) { 1182 switch (action) {
1189 case CPU_UP_PREPARE: 1183 case CPU_LOCK_ACQUIRE:
1190 mutex_lock(&cache_chain_mutex); 1184 mutex_lock(&cache_chain_mutex);
1185 break;
1186 case CPU_UP_PREPARE:
1187 case CPU_UP_PREPARE_FROZEN:
1191 /* 1188 /*
1192 * We need to do this right in the beginning since 1189 * We need to do this right in the beginning since
1193 * alloc_arraycache's are going to use this list. 1190 * alloc_arraycache's are going to use this list.
@@ -1274,17 +1271,28 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1274 } 1271 }
1275 break; 1272 break;
1276 case CPU_ONLINE: 1273 case CPU_ONLINE:
1277 mutex_unlock(&cache_chain_mutex); 1274 case CPU_ONLINE_FROZEN:
1278 start_cpu_timer(cpu); 1275 start_cpu_timer(cpu);
1279 break; 1276 break;
1280#ifdef CONFIG_HOTPLUG_CPU 1277#ifdef CONFIG_HOTPLUG_CPU
1281 case CPU_DOWN_PREPARE: 1278 case CPU_DOWN_PREPARE:
1282 mutex_lock(&cache_chain_mutex); 1279 case CPU_DOWN_PREPARE_FROZEN:
1283 break; 1280 /*
1284 case CPU_DOWN_FAILED: 1281 * Shutdown cache reaper. Note that the cache_chain_mutex is
1285 mutex_unlock(&cache_chain_mutex); 1282 * held so that if cache_reap() is invoked it cannot do
1286 break; 1283 * anything expensive but will only modify reap_work
1284 * and reschedule the timer.
1285 */
1286 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1287 /* Now the cache_reaper is guaranteed to be not running. */
1288 per_cpu(reap_work, cpu).work.func = NULL;
1289 break;
1290 case CPU_DOWN_FAILED:
1291 case CPU_DOWN_FAILED_FROZEN:
1292 start_cpu_timer(cpu);
1293 break;
1287 case CPU_DEAD: 1294 case CPU_DEAD:
1295 case CPU_DEAD_FROZEN:
1288 /* 1296 /*
1289 * Even if all the cpus of a node are down, we don't free the 1297 * Even if all the cpus of a node are down, we don't free the
1290 * kmem_list3 of any cache. This to avoid a race between 1298 * kmem_list3 of any cache. This to avoid a race between
@@ -1296,6 +1304,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1296 /* fall thru */ 1304 /* fall thru */
1297#endif 1305#endif
1298 case CPU_UP_CANCELED: 1306 case CPU_UP_CANCELED:
1307 case CPU_UP_CANCELED_FROZEN:
1299 list_for_each_entry(cachep, &cache_chain, next) { 1308 list_for_each_entry(cachep, &cache_chain, next) {
1300 struct array_cache *nc; 1309 struct array_cache *nc;
1301 struct array_cache *shared; 1310 struct array_cache *shared;
@@ -1354,6 +1363,8 @@ free_array_cache:
1354 continue; 1363 continue;
1355 drain_freelist(cachep, l3, l3->free_objects); 1364 drain_freelist(cachep, l3, l3->free_objects);
1356 } 1365 }
1366 break;
1367 case CPU_LOCK_RELEASE:
1357 mutex_unlock(&cache_chain_mutex); 1368 mutex_unlock(&cache_chain_mutex);
1358 break; 1369 break;
1359 } 1370 }
@@ -3742,7 +3753,6 @@ EXPORT_SYMBOL(__kmalloc);
3742 3753
3743/** 3754/**
3744 * krealloc - reallocate memory. The contents will remain unchanged. 3755 * krealloc - reallocate memory. The contents will remain unchanged.
3745 *
3746 * @p: object to reallocate memory for. 3756 * @p: object to reallocate memory for.
3747 * @new_size: how many bytes of memory are required. 3757 * @new_size: how many bytes of memory are required.
3748 * @flags: the type of memory to allocate. 3758 * @flags: the type of memory to allocate.
@@ -4140,7 +4150,6 @@ next:
4140 check_irq_on(); 4150 check_irq_on();
4141 mutex_unlock(&cache_chain_mutex); 4151 mutex_unlock(&cache_chain_mutex);
4142 next_reap_node(); 4152 next_reap_node();
4143 refresh_cpu_vm_stats(smp_processor_id());
4144out: 4153out:
4145 /* Set up the next iteration */ 4154 /* Set up the next iteration */
4146 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); 4155 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
diff --git a/mm/slub.c b/mm/slub.c
index 5db3da5a60bf..bd2efae02bcd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -66,11 +66,11 @@
66 * SLUB assigns one slab for allocation to each processor. 66 * SLUB assigns one slab for allocation to each processor.
67 * Allocations only occur from these slabs called cpu slabs. 67 * Allocations only occur from these slabs called cpu slabs.
68 * 68 *
69 * Slabs with free elements are kept on a partial list. 69 * Slabs with free elements are kept on a partial list and during regular
70 * There is no list for full slabs. If an object in a full slab is 70 * operations no list for full slabs is used. If an object in a full slab is
71 * freed then the slab will show up again on the partial lists. 71 * freed then the slab will show up again on the partial lists.
72 * Otherwise there is no need to track full slabs unless we have to 72 * We track full slabs for debugging purposes though because otherwise we
73 * track full slabs for debugging purposes. 73 * cannot scan all objects.
74 * 74 *
75 * Slabs are freed when they become empty. Teardown and setup is 75 * Slabs are freed when they become empty. Teardown and setup is
76 * minimal so we rely on the page allocators per cpu caches for 76 * minimal so we rely on the page allocators per cpu caches for
@@ -87,13 +87,36 @@
87 * the fast path. 87 * the fast path.
88 */ 88 */
89 89
90static inline int SlabDebug(struct page *page)
91{
92#ifdef CONFIG_SLUB_DEBUG
93 return PageError(page);
94#else
95 return 0;
96#endif
97}
98
99static inline void SetSlabDebug(struct page *page)
100{
101#ifdef CONFIG_SLUB_DEBUG
102 SetPageError(page);
103#endif
104}
105
106static inline void ClearSlabDebug(struct page *page)
107{
108#ifdef CONFIG_SLUB_DEBUG
109 ClearPageError(page);
110#endif
111}
112
90/* 113/*
91 * Issues still to be resolved: 114 * Issues still to be resolved:
92 * 115 *
93 * - The per cpu array is updated for each new slab and is a remote 116 * - The per cpu array is updated for each new slab and is a remote
94 * cacheline for most nodes. This could become a bouncing cacheline given 117 * cacheline for most nodes. This could become a bouncing cacheline given
95 * enough frequent updates. There are 16 pointers in a cacheline.so at 118 * enough frequent updates. There are 16 pointers in a cacheline, so at
96 * max 16 cpus could compete. Likely okay. 119 * max 16 cpus could compete for the cacheline which may be okay.
97 * 120 *
98 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 121 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
99 * 122 *
@@ -137,6 +160,7 @@
137 160
138#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ 161#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
139 SLAB_POISON | SLAB_STORE_USER) 162 SLAB_POISON | SLAB_STORE_USER)
163
140/* 164/*
141 * Set of flags that will prevent slab merging 165 * Set of flags that will prevent slab merging
142 */ 166 */
@@ -157,6 +181,11 @@
157/* Internal SLUB flags */ 181/* Internal SLUB flags */
158#define __OBJECT_POISON 0x80000000 /* Poison object */ 182#define __OBJECT_POISON 0x80000000 /* Poison object */
159 183
184/* Not all arches define cache_line_size */
185#ifndef cache_line_size
186#define cache_line_size() L1_CACHE_BYTES
187#endif
188
160static int kmem_size = sizeof(struct kmem_cache); 189static int kmem_size = sizeof(struct kmem_cache);
161 190
162#ifdef CONFIG_SMP 191#ifdef CONFIG_SMP
@@ -166,7 +195,7 @@ static struct notifier_block slab_notifier;
166static enum { 195static enum {
167 DOWN, /* No slab functionality available */ 196 DOWN, /* No slab functionality available */
168 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 197 PARTIAL, /* kmem_cache_open() works but kmalloc does not */
169 UP, /* Everything works */ 198 UP, /* Everything works but does not show up in sysfs */
170 SYSFS /* Sysfs up */ 199 SYSFS /* Sysfs up */
171} slab_state = DOWN; 200} slab_state = DOWN;
172 201
@@ -174,7 +203,19 @@ static enum {
174static DECLARE_RWSEM(slub_lock); 203static DECLARE_RWSEM(slub_lock);
175LIST_HEAD(slab_caches); 204LIST_HEAD(slab_caches);
176 205
177#ifdef CONFIG_SYSFS 206/*
207 * Tracking user of a slab.
208 */
209struct track {
210 void *addr; /* Called from address */
211 int cpu; /* Was running on cpu */
212 int pid; /* Pid context */
213 unsigned long when; /* When did the operation occur */
214};
215
216enum track_item { TRACK_ALLOC, TRACK_FREE };
217
218#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
178static int sysfs_slab_add(struct kmem_cache *); 219static int sysfs_slab_add(struct kmem_cache *);
179static int sysfs_slab_alias(struct kmem_cache *, const char *); 220static int sysfs_slab_alias(struct kmem_cache *, const char *);
180static void sysfs_slab_remove(struct kmem_cache *); 221static void sysfs_slab_remove(struct kmem_cache *);
@@ -202,6 +243,63 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
202#endif 243#endif
203} 244}
204 245
246static inline int check_valid_pointer(struct kmem_cache *s,
247 struct page *page, const void *object)
248{
249 void *base;
250
251 if (!object)
252 return 1;
253
254 base = page_address(page);
255 if (object < base || object >= base + s->objects * s->size ||
256 (object - base) % s->size) {
257 return 0;
258 }
259
260 return 1;
261}
262
263/*
264 * Slow version of get and set free pointer.
265 *
266 * This version requires touching the cache lines of kmem_cache which
267 * we avoid to do in the fast alloc free paths. There we obtain the offset
268 * from the page struct.
269 */
270static inline void *get_freepointer(struct kmem_cache *s, void *object)
271{
272 return *(void **)(object + s->offset);
273}
274
275static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
276{
277 *(void **)(object + s->offset) = fp;
278}
279
280/* Loop over all objects in a slab */
281#define for_each_object(__p, __s, __addr) \
282 for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
283 __p += (__s)->size)
284
285/* Scan freelist */
286#define for_each_free_object(__p, __s, __free) \
287 for (__p = (__free); __p; __p = get_freepointer((__s), __p))
288
289/* Determine object index from a given position */
290static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
291{
292 return (p - addr) / s->size;
293}
294
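These relocated helpers are what the debug code below iterates with. A sketch of walking one slab page, assuming s and page are a matching cache and slab and the slab lock is held for a consistent view:

	void *addr = page_address(page);
	void *p;

	for_each_object(p, s, addr)
		printk(KERN_DEBUG "object %d at %p\n",
		       slab_index(p, s, addr), p);

	/* Or walk only the free objects, starting from the page freelist. */
	for_each_free_object(p, s, page->freelist)
		printk(KERN_DEBUG "free object at %p\n", p);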
295#ifdef CONFIG_SLUB_DEBUG
296/*
297 * Debug settings:
298 */
299static int slub_debug;
300
301static char *slub_debug_slabs;
302
205/* 303/*
206 * Object debugging 304 * Object debugging
207 */ 305 */
@@ -237,35 +335,6 @@ static void print_section(char *text, u8 *addr, unsigned int length)
237 } 335 }
238} 336}
239 337
240/*
241 * Slow version of get and set free pointer.
242 *
243 * This requires touching the cache lines of kmem_cache.
244 * The offset can also be obtained from the page. In that
245 * case it is in the cacheline that we already need to touch.
246 */
247static void *get_freepointer(struct kmem_cache *s, void *object)
248{
249 return *(void **)(object + s->offset);
250}
251
252static void set_freepointer(struct kmem_cache *s, void *object, void *fp)
253{
254 *(void **)(object + s->offset) = fp;
255}
256
257/*
258 * Tracking user of a slab.
259 */
260struct track {
261 void *addr; /* Called from address */
262 int cpu; /* Was running on cpu */
263 int pid; /* Pid context */
264 unsigned long when; /* When did the operation occur */
265};
266
267enum track_item { TRACK_ALLOC, TRACK_FREE };
268
269static struct track *get_track(struct kmem_cache *s, void *object, 338static struct track *get_track(struct kmem_cache *s, void *object,
270 enum track_item alloc) 339 enum track_item alloc)
271{ 340{
@@ -400,24 +469,6 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
400 return 1; 469 return 1;
401} 470}
402 471
403
404static int check_valid_pointer(struct kmem_cache *s, struct page *page,
405 void *object)
406{
407 void *base;
408
409 if (!object)
410 return 1;
411
412 base = page_address(page);
413 if (object < base || object >= base + s->objects * s->size ||
414 (object - base) % s->size) {
415 return 0;
416 }
417
418 return 1;
419}
420
421/* 472/*
422 * Object layout: 473 * Object layout:
423 * 474 *
@@ -425,26 +476,34 @@ static int check_valid_pointer(struct kmem_cache *s, struct page *page,
425 * Bytes of the object to be managed. 476 * Bytes of the object to be managed.
426 * If the freepointer may overlay the object then the free 477 * If the freepointer may overlay the object then the free
427 * pointer is the first word of the object. 478 * pointer is the first word of the object.
479 *
428 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 480 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
429 * 0xa5 (POISON_END) 481 * 0xa5 (POISON_END)
430 * 482 *
431 * object + s->objsize 483 * object + s->objsize
432 * Padding to reach word boundary. This is also used for Redzoning. 484 * Padding to reach word boundary. This is also used for Redzoning.
433 * Padding is extended to word size if Redzoning is enabled 485 * Padding is extended by another word if Redzoning is enabled and
434 * and objsize == inuse. 486 * objsize == inuse.
487 *
435 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 488 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
436 * 0xcc (RED_ACTIVE) for objects in use. 489 * 0xcc (RED_ACTIVE) for objects in use.
437 * 490 *
438 * object + s->inuse 491 * object + s->inuse
492 * Meta data starts here.
493 *
439 * A. Free pointer (if we cannot overwrite object on free) 494 * A. Free pointer (if we cannot overwrite object on free)
440 * B. Tracking data for SLAB_STORE_USER 495 * B. Tracking data for SLAB_STORE_USER
441 * C. Padding to reach required alignment boundary 496 * C. Padding to reach required alignment boundary or at minimum
442 * Padding is done using 0x5a (POISON_INUSE) 497 * one word if debugging is on to be able to detect writes
498 * before the word boundary.
499 *
500 * Padding is done using 0x5a (POISON_INUSE)
443 * 501 *
444 * object + s->size 502 * object + s->size
503 * Nothing is used beyond s->size.
445 * 504 *
446 * If slabcaches are merged then the objsize and inuse boundaries are to 505 * If slabcaches are merged then the objsize and inuse boundaries are mostly
447 * be ignored. And therefore no slab options that rely on these boundaries 506 * ignored. And therefore no slab options that rely on these boundaries
448 * may be used with merged slabcaches. 507 * may be used with merged slabcaches.
449 */ 508 */
450 509
@@ -570,8 +629,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
570 /* 629 /*
571 * No choice but to zap it and thus lose the remainder 630 * No choice but to zap it and thus lose the remainder
572 * of the free objects in this slab. May cause 631 * of the free objects in this slab. May cause
573 * another error because the object count maybe 632 * another error because the object count is now wrong.
574 * wrong now.
575 */ 633 */
576 set_freepointer(s, p, NULL); 634 set_freepointer(s, p, NULL);
577 return 0; 635 return 0;
@@ -611,9 +669,8 @@ static int check_slab(struct kmem_cache *s, struct page *page)
611} 669}
612 670
613/* 671/*
614 * Determine if a certain object on a page is on the freelist and 672 * Determine if a certain object on a page is on the freelist. Must hold the
615 * therefore free. Must hold the slab lock for cpu slabs to 673 * slab lock to guarantee that the chains are in a consistent state.
616 * guarantee that the chains are consistent.
617 */ 674 */
618static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 675static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
619{ 676{
@@ -659,7 +716,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
659} 716}
660 717
661/* 718/*
662 * Tracking of fully allocated slabs for debugging 719 * Tracking of fully allocated slabs for debugging purposes.
663 */ 720 */
664static void add_full(struct kmem_cache_node *n, struct page *page) 721static void add_full(struct kmem_cache_node *n, struct page *page)
665{ 722{
@@ -710,7 +767,7 @@ bad:
710 /* 767 /*
711 * If this is a slab page then lets do the best we can 768 * If this is a slab page then lets do the best we can
712 * to avoid issues in the future. Marking all objects 769 * to avoid issues in the future. Marking all objects
713 * as used avoids touching the remainder. 770 * as used avoids touching the remaining objects.
714 */ 771 */
715 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n", 772 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
716 s->name, page); 773 s->name, page);
@@ -764,6 +821,113 @@ fail:
764 return 0; 821 return 0;
765} 822}
766 823
824static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
825{
826 if (s->flags & SLAB_TRACE) {
827 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
828 s->name,
829 alloc ? "alloc" : "free",
830 object, page->inuse,
831 page->freelist);
832
833 if (!alloc)
834 print_section("Object", (void *)object, s->objsize);
835
836 dump_stack();
837 }
838}
839
840static int __init setup_slub_debug(char *str)
841{
842 if (!str || *str != '=')
843 slub_debug = DEBUG_DEFAULT_FLAGS;
844 else {
845 str++;
846 if (*str == 0 || *str == ',')
847 slub_debug = DEBUG_DEFAULT_FLAGS;
848 else
849 for( ;*str && *str != ','; str++)
850 switch (*str) {
851 case 'f' : case 'F' :
852 slub_debug |= SLAB_DEBUG_FREE;
853 break;
854 case 'z' : case 'Z' :
855 slub_debug |= SLAB_RED_ZONE;
856 break;
857 case 'p' : case 'P' :
858 slub_debug |= SLAB_POISON;
859 break;
860 case 'u' : case 'U' :
861 slub_debug |= SLAB_STORE_USER;
862 break;
863 case 't' : case 'T' :
864 slub_debug |= SLAB_TRACE;
865 break;
866 default:
867 printk(KERN_ERR "slub_debug option '%c' "
868 "unknown. skipped\n",*str);
869 }
870 }
871
872 if (*str == ',')
873 slub_debug_slabs = str + 1;
874 return 1;
875}
876
877__setup("slub_debug", setup_slub_debug);
878
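Usage sketch for the parser above: booting with

	slub_debug=FZ,dentry

enables sanity checks on free ('F') and red zoning ('Z') only for caches whose name begins with "dentry" (the text after the comma becomes slub_debug_slabs and is prefix-matched via strncmp), while a bare

	slub_debug

selects DEBUG_DEFAULT_FLAGS for all slab caches.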
879static void kmem_cache_open_debug_check(struct kmem_cache *s)
880{
881 /*
882 * The page->offset field is only 16 bit wide. This is an offset
883 * in units of words from the beginning of an object. If the slab
884 * size is bigger than that, we cannot move the free pointer behind the
885 * object anymore.
886 *
887 * On 32 bit platforms the limit is 256k. On 64bit platforms
888 * the limit is 512k.
889 *
890 * Debugging or ctor/dtors may create a need to move the free
891 * pointer. Fail if this happens.
892 */
893 if (s->size >= 65535 * sizeof(void *)) {
894 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
895 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
896 BUG_ON(s->ctor || s->dtor);
897 }
898 else
899 /*
900 * Enable debugging if selected on the kernel commandline.
901 */
902 if (slub_debug && (!slub_debug_slabs ||
903 strncmp(slub_debug_slabs, s->name,
904 strlen(slub_debug_slabs)) == 0))
905 s->flags |= slub_debug;
906}
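Checking the arithmetic behind the guard above: page->offset is counted in words, so a 16 bit field can reach at most 65535 * sizeof(void *) bytes past the object start, which is roughly 256k with 4 byte pointers and 512k with 8 byte pointers. A cache at or beyond that size must leave the free pointer at offset 0, so every option that would relocate it (red zoning, poisoning, user tracking, RCU freeing, ctors/dtors) trips the BUG_ONs.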
907#else
908
909static inline int alloc_object_checks(struct kmem_cache *s,
910 struct page *page, void *object) { return 0; }
911
912static inline int free_object_checks(struct kmem_cache *s,
913 struct page *page, void *object) { return 0; }
914
915static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
916static inline void remove_full(struct kmem_cache *s, struct page *page) {}
917static inline void trace(struct kmem_cache *s, struct page *page,
918 void *object, int alloc) {}
919static inline void init_object(struct kmem_cache *s,
920 void *object, int active) {}
921static inline void init_tracking(struct kmem_cache *s, void *object) {}
922static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
923 { return 1; }
924static inline int check_object(struct kmem_cache *s, struct page *page,
925 void *object, int active) { return 1; }
926static inline void set_track(struct kmem_cache *s, void *object,
927 enum track_item alloc, void *addr) {}
928static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
929#define slub_debug 0
930#endif
767/* 931/*
768 * Slab allocation and freeing 932 * Slab allocation and freeing
769 */ 933 */
@@ -797,7 +961,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
797static void setup_object(struct kmem_cache *s, struct page *page, 961static void setup_object(struct kmem_cache *s, struct page *page,
798 void *object) 962 void *object)
799{ 963{
800 if (PageError(page)) { 964 if (SlabDebug(page)) {
801 init_object(s, object, 0); 965 init_object(s, object, 0);
802 init_tracking(s, object); 966 init_tracking(s, object);
803 } 967 }
@@ -832,7 +996,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
832 page->flags |= 1 << PG_slab; 996 page->flags |= 1 << PG_slab;
833 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 997 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
834 SLAB_STORE_USER | SLAB_TRACE)) 998 SLAB_STORE_USER | SLAB_TRACE))
835 page->flags |= 1 << PG_error; 999 SetSlabDebug(page);
836 1000
837 start = page_address(page); 1001 start = page_address(page);
838 end = start + s->objects * s->size; 1002 end = start + s->objects * s->size;
@@ -841,7 +1005,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
841 memset(start, POISON_INUSE, PAGE_SIZE << s->order); 1005 memset(start, POISON_INUSE, PAGE_SIZE << s->order);
842 1006
843 last = start; 1007 last = start;
844 for (p = start + s->size; p < end; p += s->size) { 1008 for_each_object(p, s, start) {
845 setup_object(s, page, last); 1009 setup_object(s, page, last);
846 set_freepointer(s, last, p); 1010 set_freepointer(s, last, p);
847 last = p; 1011 last = p;
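for_each_object() is introduced elsewhere in this patch; a sketch of the likely definition (an assumption, the real macro lives near the top of slub.c) is

	/* Walk all objects of a slab given its start address */
	#define for_each_object(__p, __s, __addr) \
		for (__p = (__addr); \
		     __p < (__addr) + (__s)->objects * (__s)->size; \
		     __p += (__s)->size)

which replaces the open-coded pointer loops; the companion for_each_free_object() used further down walks the freelist chain via get_freepointer() instead.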
@@ -861,13 +1025,11 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
861{ 1025{
862 int pages = 1 << s->order; 1026 int pages = 1 << s->order;
863 1027
864 if (unlikely(PageError(page) || s->dtor)) { 1028 if (unlikely(SlabDebug(page) || s->dtor)) {
865 void *start = page_address(page);
866 void *end = start + (pages << PAGE_SHIFT);
867 void *p; 1029 void *p;
868 1030
869 slab_pad_check(s, page); 1031 slab_pad_check(s, page);
870 for (p = start; p <= end - s->size; p += s->size) { 1032 for_each_object(p, s, page_address(page)) {
871 if (s->dtor) 1033 if (s->dtor)
872 s->dtor(p, s, 0); 1034 s->dtor(p, s, 0);
873 check_object(s, page, p, 0); 1035 check_object(s, page, p, 0);
@@ -910,7 +1072,8 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
910 1072
911 atomic_long_dec(&n->nr_slabs); 1073 atomic_long_dec(&n->nr_slabs);
912 reset_page_mapcount(page); 1074 reset_page_mapcount(page);
913 page->flags &= ~(1 << PG_slab | 1 << PG_error); 1075 ClearSlabDebug(page);
1076 __ClearPageSlab(page);
914 free_slab(s, page); 1077 free_slab(s, page);
915} 1078}
916 1079
@@ -966,9 +1129,9 @@ static void remove_partial(struct kmem_cache *s,
966} 1129}
967 1130
968/* 1131/*
969 * Lock page and remove it from the partial list 1132 * Lock slab and remove from the partial list.
970 * 1133 *
971 * Must hold list_lock 1134 * Must hold list_lock.
972 */ 1135 */
973static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) 1136static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
974{ 1137{
@@ -981,7 +1144,7 @@ static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
981} 1144}
982 1145
983/* 1146/*
984 * Try to get a partial slab from a specific node 1147 * Try to allocate a partial slab from a specific node.
985 */ 1148 */
986static struct page *get_partial_node(struct kmem_cache_node *n) 1149static struct page *get_partial_node(struct kmem_cache_node *n)
987{ 1150{
@@ -990,7 +1153,8 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
990 /* 1153 /*
991 * Racy check. If we mistakenly see no partial slabs then we 1154 * Racy check. If we mistakenly see no partial slabs then we
992 * just allocate an empty slab. If we mistakenly try to get a 1155 * just allocate an empty slab. If we mistakenly try to get a
993 * partial slab then get_partials() will return NULL. 1156 * partial slab and there is none available then get_partials()
1157 * will return NULL.
994 */ 1158 */
995 if (!n || !n->nr_partial) 1159 if (!n || !n->nr_partial)
996 return NULL; 1160 return NULL;
@@ -1006,8 +1170,7 @@ out:
1006} 1170}
1007 1171
1008/* 1172/*
1009 * Get a page from somewhere. Search in increasing NUMA 1173 * Get a page from somewhere. Search in increasing NUMA distances.
1010 * distances.
1011 */ 1174 */
1012static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1175static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1013{ 1176{
@@ -1017,24 +1180,22 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1017 struct page *page; 1180 struct page *page;
1018 1181
1019 /* 1182 /*
1020 * The defrag ratio allows to configure the tradeoffs between 1183 * The defrag ratio allows a configuration of the tradeoffs between
1021 * inter node defragmentation and node local allocations. 1184 * inter node defragmentation and node local allocations. A lower
1022 * A lower defrag_ratio increases the tendency to do local 1185 * defrag_ratio increases the tendency to do local allocations
1023 * allocations instead of scanning throught the partial 1186 * instead of attempting to obtain partial slabs from other nodes.
1024 * lists on other nodes.
1025 *
1026 * If defrag_ratio is set to 0 then kmalloc() always
1027 * returns node local objects. If its higher then kmalloc()
1028 * may return off node objects in order to avoid fragmentation.
1029 * 1187 *
1030 * A higher ratio means slabs may be taken from other nodes 1188 * If the defrag_ratio is set to 0 then kmalloc() always
1031 * thus reducing the number of partial slabs on those nodes. 1189 * returns node local objects. If the ratio is higher then kmalloc()
1190 * may return off node objects because partial slabs are obtained
1191 * from other nodes and filled up.
1032 * 1192 *
1033 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes 1193 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
1034 * defrag_ratio = 1000) then every (well almost) allocation 1194 * defrag_ratio = 1000) then every (well almost) allocation will
1035 * will first attempt to defrag slab caches on other nodes. This 1195 * first attempt to defrag slab caches on other nodes. This means
1036 * means scanning over all nodes to look for partial slabs which 1196 * scanning over all nodes to look for partial slabs which may be
1037 * may be a bit expensive to do on every slab allocation. 1197 * expensive if we do it every time we are trying to find a slab
1198 * with available objects.
1038 */ 1199 */
1039 if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio) 1200 if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
1040 return NULL; 1201 return NULL;
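In numbers: the sysfs value is stored times ten (sysfs 100 becomes defrag_ratio = 1000, as the comment notes), and the check get_cycles() % 1024 > s->defrag_ratio lets the remote-node scan proceed with probability of roughly defrag_ratio / 1024. A sysfs setting of 10 therefore permits the scan on about 10 percent of the allocations that reach this point, with get_cycles() serving as a cheap pseudo-random source.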
@@ -1087,18 +1248,19 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
1087 1248
1088 if (page->freelist) 1249 if (page->freelist)
1089 add_partial(n, page); 1250 add_partial(n, page);
1090 else if (PageError(page) && (s->flags & SLAB_STORE_USER)) 1251 else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1091 add_full(n, page); 1252 add_full(n, page);
1092 slab_unlock(page); 1253 slab_unlock(page);
1093 1254
1094 } else { 1255 } else {
1095 if (n->nr_partial < MIN_PARTIAL) { 1256 if (n->nr_partial < MIN_PARTIAL) {
1096 /* 1257 /*
1097 * Adding an empty page to the partial slabs in order 1258 * Adding an empty slab to the partial slabs in order
1098 * to avoid page allocator overhead. This page needs to 1259 * to avoid page allocator overhead. This slab needs
1099 * come after all the others that are not fully empty 1260 * to come after the other slabs with objects in
1100 * in order to make sure that we do maximum 1261 * order to fill them up. That way the size of the
1101 * defragmentation. 1262 * partial list stays small. kmem_cache_shrink can
1263 * reclaim empty slabs from the partial list.
1102 */ 1264 */
1103 add_partial_tail(n, page); 1265 add_partial_tail(n, page);
1104 slab_unlock(page); 1266 slab_unlock(page);
@@ -1166,11 +1328,11 @@ static void flush_all(struct kmem_cache *s)
1166 * 1. The page struct 1328 * 1. The page struct
1167 * 2. The first cacheline of the object to be allocated. 1329 * 2. The first cacheline of the object to be allocated.
1168 * 1330 *
1169 * The only cache lines that are read (apart from code) is the 1331 * The only other cache lines that are read (apart from code) is the
1170 * per cpu array in the kmem_cache struct. 1332 * per cpu array in the kmem_cache struct.
1171 * 1333 *
1172 * Fastpath is not possible if we need to get a new slab or have 1334 * Fastpath is not possible if we need to get a new slab or have
1173 * debugging enabled (which means all slabs are marked with PageError) 1335 * debugging enabled (which means all slabs are marked with SlabDebug)
1174 */ 1336 */
1175static void *slab_alloc(struct kmem_cache *s, 1337static void *slab_alloc(struct kmem_cache *s,
1176 gfp_t gfpflags, int node, void *addr) 1338 gfp_t gfpflags, int node, void *addr)
@@ -1193,7 +1355,7 @@ redo:
1193 object = page->freelist; 1355 object = page->freelist;
1194 if (unlikely(!object)) 1356 if (unlikely(!object))
1195 goto another_slab; 1357 goto another_slab;
1196 if (unlikely(PageError(page))) 1358 if (unlikely(SlabDebug(page)))
1197 goto debug; 1359 goto debug;
1198 1360
1199have_object: 1361have_object:
@@ -1220,9 +1382,11 @@ have_slab:
1220 cpu = smp_processor_id(); 1382 cpu = smp_processor_id();
1221 if (s->cpu_slab[cpu]) { 1383 if (s->cpu_slab[cpu]) {
1222 /* 1384 /*
1223 * Someone else populated the cpu_slab while we enabled 1385 * Someone else populated the cpu_slab while we
1224 * interrupts, or we have got scheduled on another cpu. 1386 * enabled interrupts, or we have gotten scheduled
1225 * The page may not be on the requested node. 1387 * on another cpu. The page may not be on the
1388 * requested node even if __GFP_THISNODE was
1389 * specified. So we need to recheck.
1226 */ 1390 */
1227 if (node == -1 || 1391 if (node == -1 ||
1228 page_to_nid(s->cpu_slab[cpu]) == node) { 1392 page_to_nid(s->cpu_slab[cpu]) == node) {
@@ -1235,7 +1399,7 @@ have_slab:
1235 slab_lock(page); 1399 slab_lock(page);
1236 goto redo; 1400 goto redo;
1237 } 1401 }
1238 /* Dump the current slab */ 1402 /* New slab does not fit our expectations */
1239 flush_slab(s, s->cpu_slab[cpu], cpu); 1403 flush_slab(s, s->cpu_slab[cpu], cpu);
1240 } 1404 }
1241 slab_lock(page); 1405 slab_lock(page);
@@ -1248,12 +1412,7 @@ debug:
1248 goto another_slab; 1412 goto another_slab;
1249 if (s->flags & SLAB_STORE_USER) 1413 if (s->flags & SLAB_STORE_USER)
1250 set_track(s, object, TRACK_ALLOC, addr); 1414 set_track(s, object, TRACK_ALLOC, addr);
1251 if (s->flags & SLAB_TRACE) { 1415 trace(s, page, object, 1);
1252 printk(KERN_INFO "TRACE %s alloc 0x%p inuse=%d fp=0x%p\n",
1253 s->name, object, page->inuse,
1254 page->freelist);
1255 dump_stack();
1256 }
1257 init_object(s, object, 1); 1416 init_object(s, object, 1);
1258 goto have_object; 1417 goto have_object;
1259} 1418}
@@ -1276,7 +1435,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
1276 * The fastpath only writes the cacheline of the page struct and the first 1435 * The fastpath only writes the cacheline of the page struct and the first
1277 * cacheline of the object. 1436 * cacheline of the object.
1278 * 1437 *
1279 * No special cachelines need to be read 1438 * We read the cpu_slab cacheline to check if the slab is the per cpu
1439 * slab for this processor.
1280 */ 1440 */
1281static void slab_free(struct kmem_cache *s, struct page *page, 1441static void slab_free(struct kmem_cache *s, struct page *page,
1282 void *x, void *addr) 1442 void *x, void *addr)
@@ -1288,7 +1448,7 @@ static void slab_free(struct kmem_cache *s, struct page *page,
1288 local_irq_save(flags); 1448 local_irq_save(flags);
1289 slab_lock(page); 1449 slab_lock(page);
1290 1450
1291 if (unlikely(PageError(page))) 1451 if (unlikely(SlabDebug(page)))
1292 goto debug; 1452 goto debug;
1293checks_ok: 1453checks_ok:
1294 prior = object[page->offset] = page->freelist; 1454 prior = object[page->offset] = page->freelist;
@@ -1321,7 +1481,7 @@ out_unlock:
1321slab_empty: 1481slab_empty:
1322 if (prior) 1482 if (prior)
1323 /* 1483 /*
1324 * Slab on the partial list. 1484 * Slab still on the partial list.
1325 */ 1485 */
1326 remove_partial(s, page); 1486 remove_partial(s, page);
1327 1487
@@ -1337,13 +1497,7 @@ debug:
1337 remove_full(s, page); 1497 remove_full(s, page);
1338 if (s->flags & SLAB_STORE_USER) 1498 if (s->flags & SLAB_STORE_USER)
1339 set_track(s, x, TRACK_FREE, addr); 1499 set_track(s, x, TRACK_FREE, addr);
1340 if (s->flags & SLAB_TRACE) { 1500 trace(s, page, object, 0);
1341 printk(KERN_INFO "TRACE %s free 0x%p inuse=%d fp=0x%p\n",
1342 s->name, object, page->inuse,
1343 page->freelist);
1344 print_section("Object", (void *)object, s->objsize);
1345 dump_stack();
1346 }
1347 init_object(s, object, 0); 1501 init_object(s, object, 0);
1348 goto checks_ok; 1502 goto checks_ok;
1349} 1503}
@@ -1370,22 +1524,16 @@ static struct page *get_object_page(const void *x)
1370} 1524}
1371 1525
1372/* 1526/*
1373 * kmem_cache_open produces objects aligned at "size" and the first object 1527 * Object placement in a slab is made very easy because we always start at
1374 * is placed at offset 0 in the slab (We have no metainformation on the 1528 * offset 0. If we tune the size of the object to the alignment then we can
1375 * slab, all slabs are in essence "off slab"). 1529 * get the required alignment by putting one properly sized object after
1376 * 1530 * another.
1377 * In order to get the desired alignment one just needs to align the
1378 * size.
1379 * 1531 *
1380 * Notice that the allocation order determines the sizes of the per cpu 1532 * Notice that the allocation order determines the sizes of the per cpu
1381 * caches. Each processor has always one slab available for allocations. 1533 * caches. Each processor has always one slab available for allocations.
1382 * Increasing the allocation order reduces the number of times that slabs 1534 * Increasing the allocation order reduces the number of times that slabs
1383 * must be moved on and off the partial lists and therefore may influence 1535 * must be moved on and off the partial lists and is therefore a factor in
1384 * locking overhead. 1536 * locking overhead.
1385 *
1386 * The offset is used to relocate the free list link in each object. It is
1387 * therefore possible to move the free list link behind the object. This
1388 * is necessary for RCU to work properly and also useful for debugging.
1389 */ 1537 */
1390 1538
1391/* 1539/*
@@ -1396,76 +1544,110 @@ static struct page *get_object_page(const void *x)
1396 */ 1544 */
1397static int slub_min_order; 1545static int slub_min_order;
1398static int slub_max_order = DEFAULT_MAX_ORDER; 1546static int slub_max_order = DEFAULT_MAX_ORDER;
1399
1400/*
1401 * Minimum number of objects per slab. This is necessary in order to
1402 * reduce locking overhead. Similar to the queue size in SLAB.
1403 */
1404static int slub_min_objects = DEFAULT_MIN_OBJECTS; 1547static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1405 1548
1406/* 1549/*
1407 * Merge control. If this is set then no merging of slab caches will occur. 1550 * Merge control. If this is set then no merging of slab caches will occur.
1551 * (Could be removed. This was introduced to pacify the merge skeptics.)
1408 */ 1552 */
1409static int slub_nomerge; 1553static int slub_nomerge;
1410 1554
1411/* 1555/*
1412 * Debug settings:
1413 */
1414static int slub_debug;
1415
1416static char *slub_debug_slabs;
1417
1418/*
1419 * Calculate the order of allocation given a slab object size. 1556 * Calculate the order of allocation given a slab object size.
1420 * 1557 *
1421 * The order of allocation has significant impact on other elements 1558 * The order of allocation has significant impact on performance and other
1422 * of the system. Generally order 0 allocations should be preferred 1559 * system components. Generally order 0 allocations should be preferred since
1423 * since they do not cause fragmentation in the page allocator. Larger 1560 * order 0 does not cause fragmentation in the page allocator. Larger objects
1424 * objects may have problems with order 0 because there may be too much 1561 * can be problematic to put into order 0 slabs because there may be too much
1425 * space left unused in a slab. We go to a higher order if more than 1/8th 1562 * unused space left. We go to a higher order if more than 1/8th of the slab
1426 * of the slab would be wasted. 1563 * would be wasted.
1427 * 1564 *
1428 * In order to reach satisfactory performance we must ensure that 1565 * In order to reach satisfactory performance we must ensure that a minimum
1429 * a minimum number of objects is in one slab. Otherwise we may 1566 * number of objects is in one slab. Otherwise we may generate too much
1430 * generate too much activity on the partial lists. This is less a 1567 * activity on the partial lists which requires taking the list_lock. This is
1431 * concern for large slabs though. slub_max_order specifies the order 1568 * less a concern for large slabs though which are rarely used.
1432 * where we begin to stop considering the number of objects in a slab.
1433 * 1569 *
1434 * Higher order allocations also allow the placement of more objects 1570 * slub_max_order specifies the order where we begin to stop considering the
1435 * in a slab and thereby reduce object handling overhead. If the user 1571 * number of objects in a slab as critical. If we reach slub_max_order then
1436 * has requested a higher mininum order then we start with that one 1572 * we try to keep the page order as low as possible. So we accept more waste
1437 * instead of zero. 1573 * of space in favor of a small page order.
1574 *
1575 * Higher order allocations also allow the placement of more objects in a
1576 * slab and thereby reduce object handling overhead. If the user has
1577 * requested a higher minimum order then we start with that one instead of
1578 * the smallest order which will fit the object.
1438 */ 1579 */
1439static int calculate_order(int size) 1580static inline int slab_order(int size, int min_objects,
1581 int max_order, int fract_leftover)
1440{ 1582{
1441 int order; 1583 int order;
1442 int rem; 1584 int rem;
1443 1585
1444 for (order = max(slub_min_order, fls(size - 1) - PAGE_SHIFT); 1586 for (order = max(slub_min_order,
1445 order < MAX_ORDER; order++) { 1587 fls(min_objects * size - 1) - PAGE_SHIFT);
1446 unsigned long slab_size = PAGE_SIZE << order; 1588 order <= max_order; order++) {
1447 1589
1448 if (slub_max_order > order && 1590 unsigned long slab_size = PAGE_SIZE << order;
1449 slab_size < slub_min_objects * size)
1450 continue;
1451 1591
1452 if (slab_size < size) 1592 if (slab_size < min_objects * size)
1453 continue; 1593 continue;
1454 1594
1455 rem = slab_size % size; 1595 rem = slab_size % size;
1456 1596
1457 if (rem <= (PAGE_SIZE << order) / 8) 1597 if (rem <= slab_size / fract_leftover)
1458 break; 1598 break;
1459 1599
1460 } 1600 }
1461 if (order >= MAX_ORDER) 1601
1462 return -E2BIG;
1463 return order; 1602 return order;
1464} 1603}
1465 1604
1605static inline int calculate_order(int size)
1606{
1607 int order;
1608 int min_objects;
1609 int fraction;
1610
1611 /*
1612 * Attempt to find best configuration for a slab. This
1613 * works by first attempting to generate a layout with
1614 * the best configuration and backing off gradually.
1615 *
1616 * First we reduce the acceptable waste in a slab. Then
1617 * we reduce the minimum objects required in a slab.
1618 */
1619 min_objects = slub_min_objects;
1620 while (min_objects > 1) {
1621 fraction = 8;
1622 while (fraction >= 4) {
1623 order = slab_order(size, min_objects,
1624 slub_max_order, fraction);
1625 if (order <= slub_max_order)
1626 return order;
1627 fraction /= 2;
1628 }
1629 min_objects /= 2;
1630 }
1631
1632 /*
1633 * We were unable to place multiple objects in a slab. Now
1634 * lets see if we can place a single object there.
1635 */
1636 order = slab_order(size, 1, slub_max_order, 1);
1637 if (order <= slub_max_order)
1638 return order;
1639
1640 /*
1641 * Doh this slab cannot be placed using slub_max_order.
1642 */
1643 order = slab_order(size, 1, MAX_ORDER, 1);
1644 if (order <= MAX_ORDER)
1645 return order;
1646 return -ENOSYS;
1647}
1648
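A worked example of the search (assuming PAGE_SIZE = 4096 and slub_min_order = 0): for size = 3000 and min_objects = 4, slab_order() starts at order = fls(4 * 3000 - 1) - PAGE_SHIFT = 14 - 12 = 2. An order 2 slab holds 16384 bytes, i.e. 5 objects with 16384 % 3000 = 1384 bytes left over, and 1384 <= 16384 / 8, so order 2 is accepted on the very first fraction step. Only when no (min_objects, fraction) pair fits below slub_max_order does calculate_order() fall back to single-object slabs, and finally to -ENOSYS.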
1466/* 1649/*
1467 * Function to figure out which alignment to use from the 1650 * Figure out what the alignment of the objects will be.
1468 * various ways of specifying it.
1469 */ 1651 */
1470static unsigned long calculate_alignment(unsigned long flags, 1652static unsigned long calculate_alignment(unsigned long flags,
1471 unsigned long align, unsigned long size) 1653 unsigned long align, unsigned long size)
@@ -1480,8 +1662,8 @@ static unsigned long calculate_alignment(unsigned long flags,
1480 * then use it. 1662 * then use it.
1481 */ 1663 */
1482 if ((flags & SLAB_HWCACHE_ALIGN) && 1664 if ((flags & SLAB_HWCACHE_ALIGN) &&
1483 size > L1_CACHE_BYTES / 2) 1665 size > cache_line_size() / 2)
1484 return max_t(unsigned long, align, L1_CACHE_BYTES); 1666 return max_t(unsigned long, align, cache_line_size());
1485 1667
1486 if (align < ARCH_SLAB_MINALIGN) 1668 if (align < ARCH_SLAB_MINALIGN)
1487 return ARCH_SLAB_MINALIGN; 1669 return ARCH_SLAB_MINALIGN;
@@ -1619,22 +1801,23 @@ static int calculate_sizes(struct kmem_cache *s)
1619 */ 1801 */
1620 size = ALIGN(size, sizeof(void *)); 1802 size = ALIGN(size, sizeof(void *));
1621 1803
1804#ifdef CONFIG_SLUB_DEBUG
1622 /* 1805 /*
1623 * If we are redzoning then check if there is some space between the 1806 * If we are Redzoning then check if there is some space between the
1624 * end of the object and the free pointer. If not then add an 1807 * end of the object and the free pointer. If not then add an
1625 * additional word, so that we can establish a redzone between 1808 * additional word to have some bytes to store Redzone information.
1626 * the object and the freepointer to be able to check for overwrites.
1627 */ 1809 */
1628 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 1810 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
1629 size += sizeof(void *); 1811 size += sizeof(void *);
1812#endif
1630 1813
1631 /* 1814 /*
1632 * With that we have determined how much of the slab is in actual 1815 * With that we have determined the number of bytes in actual use
1633 * use by the object. This is the potential offset to the free 1816 * by the object. This is the potential offset to the free pointer.
1634 * pointer.
1635 */ 1817 */
1636 s->inuse = size; 1818 s->inuse = size;
1637 1819
1820#ifdef CONFIG_SLUB_DEBUG
1638 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1821 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
1639 s->ctor || s->dtor)) { 1822 s->ctor || s->dtor)) {
1640 /* 1823 /*
@@ -1656,7 +1839,7 @@ static int calculate_sizes(struct kmem_cache *s)
1656 */ 1839 */
1657 size += 2 * sizeof(struct track); 1840 size += 2 * sizeof(struct track);
1658 1841
1659 if (flags & DEBUG_DEFAULT_FLAGS) 1842 if (flags & SLAB_RED_ZONE)
1660 /* 1843 /*
1661 * Add some empty padding so that we can catch 1844 * Add some empty padding so that we can catch
1662 * overwrites from earlier objects rather than let 1845 * overwrites from earlier objects rather than let
@@ -1665,10 +1848,12 @@ static int calculate_sizes(struct kmem_cache *s)
1665 * of the object. 1848 * of the object.
1666 */ 1849 */
1667 size += sizeof(void *); 1850 size += sizeof(void *);
1851#endif
1852
1668 /* 1853 /*
1669 * Determine the alignment based on various parameters that the 1854 * Determine the alignment based on various parameters that the
1670 * user specified (this is unecessarily complex due to the attempt 1855 * user specified and the dynamic determination of cache line size
1671 * to be compatible with SLAB. Should be cleaned up some day). 1856 * on bootup.
1672 */ 1857 */
1673 align = calculate_alignment(flags, align, s->objsize); 1858 align = calculate_alignment(flags, align, s->objsize);
1674 1859
@@ -1700,23 +1885,6 @@ static int calculate_sizes(struct kmem_cache *s)
1700 1885
1701} 1886}
1702 1887
1703static int __init finish_bootstrap(void)
1704{
1705 struct list_head *h;
1706 int err;
1707
1708 slab_state = SYSFS;
1709
1710 list_for_each(h, &slab_caches) {
1711 struct kmem_cache *s =
1712 container_of(h, struct kmem_cache, list);
1713
1714 err = sysfs_slab_add(s);
1715 BUG_ON(err);
1716 }
1717 return 0;
1718}
1719
1720static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1888static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1721 const char *name, size_t size, 1889 const char *name, size_t size,
1722 size_t align, unsigned long flags, 1890 size_t align, unsigned long flags,
@@ -1730,32 +1898,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1730 s->objsize = size; 1898 s->objsize = size;
1731 s->flags = flags; 1899 s->flags = flags;
1732 s->align = align; 1900 s->align = align;
1733 1901 kmem_cache_open_debug_check(s);
1734 /*
1735 * The page->offset field is only 16 bit wide. This is an offset
1736 * in units of words from the beginning of an object. If the slab
1737 * size is bigger then we cannot move the free pointer behind the
1738 * object anymore.
1739 *
1740 * On 32 bit platforms the limit is 256k. On 64bit platforms
1741 * the limit is 512k.
1742 *
1743 * Debugging or ctor/dtors may create a need to move the free
1744 * pointer. Fail if this happens.
1745 */
1746 if (s->size >= 65535 * sizeof(void *)) {
1747 BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
1748 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
1749 BUG_ON(ctor || dtor);
1750 }
1751 else
1752 /*
1753 * Enable debugging if selected on the kernel commandline.
1754 */
1755 if (slub_debug && (!slub_debug_slabs ||
1756 strncmp(slub_debug_slabs, name,
1757 strlen(slub_debug_slabs)) == 0))
1758 s->flags |= slub_debug;
1759 1902
1760 if (!calculate_sizes(s)) 1903 if (!calculate_sizes(s))
1761 goto error; 1904 goto error;
@@ -1783,7 +1926,6 @@ EXPORT_SYMBOL(kmem_cache_open);
1783int kmem_ptr_validate(struct kmem_cache *s, const void *object) 1926int kmem_ptr_validate(struct kmem_cache *s, const void *object)
1784{ 1927{
1785 struct page * page; 1928 struct page * page;
1786 void *addr;
1787 1929
1788 page = get_object_page(object); 1930 page = get_object_page(object);
1789 1931
@@ -1791,13 +1933,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object)
1791 /* No slab or wrong slab */ 1933 /* No slab or wrong slab */
1792 return 0; 1934 return 0;
1793 1935
1794 addr = page_address(page); 1936 if (!check_valid_pointer(s, page, object))
1795 if (object < addr || object >= addr + s->objects * s->size)
1796 /* Out of bounds */
1797 return 0;
1798
1799 if ((object - addr) % s->size)
1800 /* Improperly aligned */
1801 return 0; 1937 return 0;
1802 1938
1803 /* 1939 /*
@@ -1826,7 +1962,8 @@ const char *kmem_cache_name(struct kmem_cache *s)
1826EXPORT_SYMBOL(kmem_cache_name); 1962EXPORT_SYMBOL(kmem_cache_name);
1827 1963
1828/* 1964/*
1829 * Attempt to free all slabs on a node 1965 * Attempt to free all slabs on a node. Return the number of slabs we
1966 * were unable to free.
1830 */ 1967 */
1831static int free_list(struct kmem_cache *s, struct kmem_cache_node *n, 1968static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
1832 struct list_head *list) 1969 struct list_head *list)
@@ -1847,7 +1984,7 @@ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
1847} 1984}
1848 1985
1849/* 1986/*
1850 * Release all resources used by slab cache 1987 * Release all resources used by a slab cache.
1851 */ 1988 */
1852static int kmem_cache_close(struct kmem_cache *s) 1989static int kmem_cache_close(struct kmem_cache *s)
1853{ 1990{
@@ -1932,45 +2069,6 @@ static int __init setup_slub_nomerge(char *str)
1932 2069
1933__setup("slub_nomerge", setup_slub_nomerge); 2070__setup("slub_nomerge", setup_slub_nomerge);
1934 2071
1935static int __init setup_slub_debug(char *str)
1936{
1937 if (!str || *str != '=')
1938 slub_debug = DEBUG_DEFAULT_FLAGS;
1939 else {
1940 str++;
1941 if (*str == 0 || *str == ',')
1942 slub_debug = DEBUG_DEFAULT_FLAGS;
1943 else
1944 for( ;*str && *str != ','; str++)
1945 switch (*str) {
1946 case 'f' : case 'F' :
1947 slub_debug |= SLAB_DEBUG_FREE;
1948 break;
1949 case 'z' : case 'Z' :
1950 slub_debug |= SLAB_RED_ZONE;
1951 break;
1952 case 'p' : case 'P' :
1953 slub_debug |= SLAB_POISON;
1954 break;
1955 case 'u' : case 'U' :
1956 slub_debug |= SLAB_STORE_USER;
1957 break;
1958 case 't' : case 'T' :
1959 slub_debug |= SLAB_TRACE;
1960 break;
1961 default:
1962 printk(KERN_ERR "slub_debug option '%c' "
1963 "unknown. skipped\n",*str);
1964 }
1965 }
1966
1967 if (*str == ',')
1968 slub_debug_slabs = str + 1;
1969 return 1;
1970}
1971
1972__setup("slub_debug", setup_slub_debug);
1973
1974static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2072static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
1975 const char *name, int size, gfp_t gfp_flags) 2073 const char *name, int size, gfp_t gfp_flags)
1976{ 2074{
@@ -2108,13 +2206,14 @@ void kfree(const void *x)
2108EXPORT_SYMBOL(kfree); 2206EXPORT_SYMBOL(kfree);
2109 2207
2110/* 2208/*
2111 * kmem_cache_shrink removes empty slabs from the partial lists 2209 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2112 * and then sorts the partially allocated slabs by the number 2210 * the remaining slabs by the number of items in use. The slabs with the
2113 * of items in use. The slabs with the most items in use 2211 * most items in use come first. New allocations will then fill those up
2114 * come first. New allocations will remove these from the 2212 * and thus they can be removed from the partial lists.
2115 * partial list because they are full. The slabs with the 2213 *
2116 * least items are placed last. If it happens that the objects 2214 * The slabs with the least items are placed last. This results in them
2117 * are freed then the page can be returned to the page allocator. 2215 * being allocated from last increasing the chance that the last objects
2216 * are freed in them.
2118 */ 2217 */
2119int kmem_cache_shrink(struct kmem_cache *s) 2218int kmem_cache_shrink(struct kmem_cache *s)
2120{ 2219{
@@ -2143,12 +2242,10 @@ int kmem_cache_shrink(struct kmem_cache *s)
2143 spin_lock_irqsave(&n->list_lock, flags); 2242 spin_lock_irqsave(&n->list_lock, flags);
2144 2243
2145 /* 2244 /*
2146 * Build lists indexed by the items in use in 2245 * Build lists indexed by the items in use in each slab.
2147 * each slab or free slabs if empty.
2148 * 2246 *
2149 * Note that concurrent frees may occur while 2247 * Note that concurrent frees may occur while we hold the
2150 * we hold the list_lock. page->inuse here is 2248 * list_lock. page->inuse here is the upper limit.
2151 * the upper limit.
2152 */ 2249 */
2153 list_for_each_entry_safe(page, t, &n->partial, lru) { 2250 list_for_each_entry_safe(page, t, &n->partial, lru) {
2154 if (!page->inuse && slab_trylock(page)) { 2251 if (!page->inuse && slab_trylock(page)) {
@@ -2172,8 +2269,8 @@ int kmem_cache_shrink(struct kmem_cache *s)
2172 goto out; 2269 goto out;
2173 2270
2174 /* 2271 /*
2175 * Rebuild the partial list with the slabs filled up 2272 * Rebuild the partial list with the slabs filled up most
2176 * most first and the least used slabs at the end. 2273 * first and the least used slabs at the end.
2177 */ 2274 */
2178 for (i = s->objects - 1; i >= 0; i--) 2275 for (i = s->objects - 1; i >= 0; i--)
2179 list_splice(slabs_by_inuse + i, n->partial.prev); 2276 list_splice(slabs_by_inuse + i, n->partial.prev);
@@ -2189,7 +2286,6 @@ EXPORT_SYMBOL(kmem_cache_shrink);
2189 2286
2190/** 2287/**
2191 * krealloc - reallocate memory. The contents will remain unchanged. 2288 * krealloc - reallocate memory. The contents will remain unchanged.
2192 *
2193 * @p: object to reallocate memory for. 2289 * @p: object to reallocate memory for.
2194 * @new_size: how many bytes of memory are required. 2290 * @new_size: how many bytes of memory are required.
2195 * @flags: the type of memory to allocate. 2291 * @flags: the type of memory to allocate.
@@ -2201,9 +2297,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
2201 */ 2297 */
2202void *krealloc(const void *p, size_t new_size, gfp_t flags) 2298void *krealloc(const void *p, size_t new_size, gfp_t flags)
2203{ 2299{
2204 struct kmem_cache *new_cache;
2205 void *ret; 2300 void *ret;
2206 struct page *page; 2301 size_t ks;
2207 2302
2208 if (unlikely(!p)) 2303 if (unlikely(!p))
2209 return kmalloc(new_size, flags); 2304 return kmalloc(new_size, flags);
@@ -2213,19 +2308,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
2213 return NULL; 2308 return NULL;
2214 } 2309 }
2215 2310
2216 page = virt_to_head_page(p); 2311 ks = ksize(p);
2217 2312 if (ks >= new_size)
2218 new_cache = get_slab(new_size, flags);
2219
2220 /*
2221 * If new size fits in the current cache, bail out.
2222 */
2223 if (likely(page->slab == new_cache))
2224 return (void *)p; 2313 return (void *)p;
2225 2314
2226 ret = kmalloc(new_size, flags); 2315 ret = kmalloc(new_size, flags);
2227 if (ret) { 2316 if (ret) {
2228 memcpy(ret, p, min(new_size, ksize(p))); 2317 memcpy(ret, p, min(new_size, ks));
2229 kfree(p); 2318 kfree(p);
2230 } 2319 }
2231 return ret; 2320 return ret;
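A usage sketch of the new logic (the 128 byte backing cache is an assumption about the kmalloc size classes):

	char *buf = kmalloc(100, GFP_KERNEL);  /* ksize(buf) == 128 here       */
	buf = krealloc(buf, 120, GFP_KERNEL);  /* fits in place: same pointer  */
	buf = krealloc(buf, 200, GFP_KERNEL);  /* new object, 128 bytes copied,
	                                          old object freed             */

Comparing capacities via ksize() also lets a shrinking krealloc() reuse the existing object, which the old cache-pointer comparison only caught when both sizes mapped to the same kmalloc cache.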
@@ -2243,7 +2332,7 @@ void __init kmem_cache_init(void)
2243#ifdef CONFIG_NUMA 2332#ifdef CONFIG_NUMA
2244 /* 2333 /*
2245 * Must first have the slab cache available for the allocations of the 2334 * Must first have the slab cache available for the allocations of the
2246 * struct kmalloc_cache_node's. There is special bootstrap code in 2335 * struct kmem_cache_node's. There is special bootstrap code in
2247 * kmem_cache_open for slab_state == DOWN. 2336 * kmem_cache_open for slab_state == DOWN.
2248 */ 2337 */
2249 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2338 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
@@ -2280,7 +2369,7 @@ void __init kmem_cache_init(void)
2280 2369
2281 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 2370 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2282 " Processors=%d, Nodes=%d\n", 2371 " Processors=%d, Nodes=%d\n",
2283 KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES, 2372 KMALLOC_SHIFT_HIGH, cache_line_size(),
2284 slub_min_order, slub_max_order, slub_min_objects, 2373 slub_min_order, slub_max_order, slub_min_objects,
2285 nr_cpu_ids, nr_node_ids); 2374 nr_cpu_ids, nr_node_ids);
2286} 2375}
@@ -2415,8 +2504,8 @@ static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
2415} 2504}
2416 2505
2417/* 2506/*
2418 * Use the cpu notifier to insure that the slab are flushed 2507 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2419 * when necessary. 2508 * necessary.
2420 */ 2509 */
2421static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 2510static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2422 unsigned long action, void *hcpu) 2511 unsigned long action, void *hcpu)
@@ -2425,7 +2514,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2425 2514
2426 switch (action) { 2515 switch (action) {
2427 case CPU_UP_CANCELED: 2516 case CPU_UP_CANCELED:
2517 case CPU_UP_CANCELED_FROZEN:
2428 case CPU_DEAD: 2518 case CPU_DEAD:
2519 case CPU_DEAD_FROZEN:
2429 for_all_slabs(__flush_cpu_slab, cpu); 2520 for_all_slabs(__flush_cpu_slab, cpu);
2430 break; 2521 break;
2431 default: 2522 default:
@@ -2439,153 +2530,6 @@ static struct notifier_block __cpuinitdata slab_notifier =
2439 2530
2440#endif 2531#endif
2441 2532
2442#ifdef CONFIG_NUMA
2443
2444/*****************************************************************
2445 * Generic reaper used to support the page allocator
2446 * (the cpu slabs are reaped by a per slab workqueue).
2447 *
2448 * Maybe move this to the page allocator?
2449 ****************************************************************/
2450
2451static DEFINE_PER_CPU(unsigned long, reap_node);
2452
2453static void init_reap_node(int cpu)
2454{
2455 int node;
2456
2457 node = next_node(cpu_to_node(cpu), node_online_map);
2458 if (node == MAX_NUMNODES)
2459 node = first_node(node_online_map);
2460
2461 __get_cpu_var(reap_node) = node;
2462}
2463
2464static void next_reap_node(void)
2465{
2466 int node = __get_cpu_var(reap_node);
2467
2468 /*
2469 * Also drain per cpu pages on remote zones
2470 */
2471 if (node != numa_node_id())
2472 drain_node_pages(node);
2473
2474 node = next_node(node, node_online_map);
2475 if (unlikely(node >= MAX_NUMNODES))
2476 node = first_node(node_online_map);
2477 __get_cpu_var(reap_node) = node;
2478}
2479#else
2480#define init_reap_node(cpu) do { } while (0)
2481#define next_reap_node(void) do { } while (0)
2482#endif
2483
2484#define REAPTIMEOUT_CPUC (2*HZ)
2485
2486#ifdef CONFIG_SMP
2487static DEFINE_PER_CPU(struct delayed_work, reap_work);
2488
2489static void cache_reap(struct work_struct *unused)
2490{
2491 next_reap_node();
2492 refresh_cpu_vm_stats(smp_processor_id());
2493 schedule_delayed_work(&__get_cpu_var(reap_work),
2494 REAPTIMEOUT_CPUC);
2495}
2496
2497static void __devinit start_cpu_timer(int cpu)
2498{
2499 struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
2500
2501 /*
2502 * When this gets called from do_initcalls via cpucache_init(),
2503 * init_workqueues() has already run, so keventd will be setup
2504 * at that time.
2505 */
2506 if (keventd_up() && reap_work->work.func == NULL) {
2507 init_reap_node(cpu);
2508 INIT_DELAYED_WORK(reap_work, cache_reap);
2509 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
2510 }
2511}
2512
2513static int __init cpucache_init(void)
2514{
2515 int cpu;
2516
2517 /*
2518 * Register the timers that drain pcp pages and update vm statistics
2519 */
2520 for_each_online_cpu(cpu)
2521 start_cpu_timer(cpu);
2522 return 0;
2523}
2524__initcall(cpucache_init);
2525#endif
2526
2527#ifdef SLUB_RESILIENCY_TEST
2528static unsigned long validate_slab_cache(struct kmem_cache *s);
2529
2530static void resiliency_test(void)
2531{
2532 u8 *p;
2533
2534 printk(KERN_ERR "SLUB resiliency testing\n");
2535 printk(KERN_ERR "-----------------------\n");
2536 printk(KERN_ERR "A. Corruption after allocation\n");
2537
2538 p = kzalloc(16, GFP_KERNEL);
2539 p[16] = 0x12;
2540 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
2541 " 0x12->0x%p\n\n", p + 16);
2542
2543 validate_slab_cache(kmalloc_caches + 4);
2544
2545 /* Hmmm... The next two are dangerous */
2546 p = kzalloc(32, GFP_KERNEL);
2547 p[32 + sizeof(void *)] = 0x34;
2548 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
2549 " 0x34 -> -0x%p\n", p);
2550 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2551
2552 validate_slab_cache(kmalloc_caches + 5);
2553 p = kzalloc(64, GFP_KERNEL);
2554 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
2555 *p = 0x56;
2556 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
2557 p);
2558 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2559 validate_slab_cache(kmalloc_caches + 6);
2560
2561 printk(KERN_ERR "\nB. Corruption after free\n");
2562 p = kzalloc(128, GFP_KERNEL);
2563 kfree(p);
2564 *p = 0x78;
2565 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
2566 validate_slab_cache(kmalloc_caches + 7);
2567
2568 p = kzalloc(256, GFP_KERNEL);
2569 kfree(p);
2570 p[50] = 0x9a;
2571 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
2572 validate_slab_cache(kmalloc_caches + 8);
2573
2574 p = kzalloc(512, GFP_KERNEL);
2575 kfree(p);
2576 p[512] = 0xab;
2577 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
2578 validate_slab_cache(kmalloc_caches + 9);
2579}
2580#else
2581static void resiliency_test(void) {};
2582#endif
2583
2584/*
2585 * These are not as efficient as kmalloc for the non debug case.
2586 * We do not have the page struct available so we have to touch one
2587 * cacheline in struct kmem_cache to check slab flags.
2588 */
2589void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) 2533void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2590{ 2534{
2591 struct kmem_cache *s = get_slab(size, gfpflags); 2535 struct kmem_cache *s = get_slab(size, gfpflags);
@@ -2607,13 +2551,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2607 return slab_alloc(s, gfpflags, node, caller); 2551 return slab_alloc(s, gfpflags, node, caller);
2608} 2552}
2609 2553
2610#ifdef CONFIG_SYSFS 2554#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
2611
2612static int validate_slab(struct kmem_cache *s, struct page *page) 2555static int validate_slab(struct kmem_cache *s, struct page *page)
2613{ 2556{
2614 void *p; 2557 void *p;
2615 void *addr = page_address(page); 2558 void *addr = page_address(page);
2616 unsigned long map[BITS_TO_LONGS(s->objects)]; 2559 DECLARE_BITMAP(map, s->objects);
2617 2560
2618 if (!check_slab(s, page) || 2561 if (!check_slab(s, page) ||
2619 !on_freelist(s, page, NULL)) 2562 !on_freelist(s, page, NULL))
@@ -2622,14 +2565,14 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
2622 /* Now we know that a valid freelist exists */ 2565 /* Now we know that a valid freelist exists */
2623 bitmap_zero(map, s->objects); 2566 bitmap_zero(map, s->objects);
2624 2567
2625 for(p = page->freelist; p; p = get_freepointer(s, p)) { 2568 for_each_free_object(p, s, page->freelist) {
2626 set_bit((p - addr) / s->size, map); 2569 set_bit(slab_index(p, s, addr), map);
2627 if (!check_object(s, page, p, 0)) 2570 if (!check_object(s, page, p, 0))
2628 return 0; 2571 return 0;
2629 } 2572 }
2630 2573
2631 for(p = addr; p < addr + s->objects * s->size; p += s->size) 2574 for_each_object(p, s, addr)
2632 if (!test_bit((p - addr) / s->size, map)) 2575 if (!test_bit(slab_index(p, s, addr), map))
2633 if (!check_object(s, page, p, 1)) 2576 if (!check_object(s, page, p, 1))
2634 return 0; 2577 return 0;
2635 return 1; 2578 return 1;
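For reference, DECLARE_BITMAP(map, s->objects) expands to exactly the variable-length array the old code spelled out by hand:

	#define DECLARE_BITMAP(name, bits) \
		unsigned long name[BITS_TO_LONGS(bits)]

so the replacement changes notation, not behavior.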
@@ -2645,12 +2588,12 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
2645 s->name, page); 2588 s->name, page);
2646 2589
2647 if (s->flags & DEBUG_DEFAULT_FLAGS) { 2590 if (s->flags & DEBUG_DEFAULT_FLAGS) {
2648 if (!PageError(page)) 2591 if (!SlabDebug(page))
2649 printk(KERN_ERR "SLUB %s: PageError not set " 2592 printk(KERN_ERR "SLUB %s: SlabDebug not set "
2650 "on slab 0x%p\n", s->name, page); 2593 "on slab 0x%p\n", s->name, page);
2651 } else { 2594 } else {
2652 if (PageError(page)) 2595 if (SlabDebug(page))
2653 printk(KERN_ERR "SLUB %s: PageError set on " 2596 printk(KERN_ERR "SLUB %s: SlabDebug set on "
2654 "slab 0x%p\n", s->name, page); 2597 "slab 0x%p\n", s->name, page);
2655 } 2598 }
2656} 2599}
@@ -2702,14 +2645,76 @@ static unsigned long validate_slab_cache(struct kmem_cache *s)
2702 return count; 2645 return count;
2703} 2646}
2704 2647
2648#ifdef SLUB_RESILIENCY_TEST
2649static void resiliency_test(void)
2650{
2651 u8 *p;
2652
2653 printk(KERN_ERR "SLUB resiliency testing\n");
2654 printk(KERN_ERR "-----------------------\n");
2655 printk(KERN_ERR "A. Corruption after allocation\n");
2656
2657 p = kzalloc(16, GFP_KERNEL);
2658 p[16] = 0x12;
2659 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
2660 " 0x12->0x%p\n\n", p + 16);
2661
2662 validate_slab_cache(kmalloc_caches + 4);
2663
2664 /* Hmmm... The next two are dangerous */
2665 p = kzalloc(32, GFP_KERNEL);
2666 p[32 + sizeof(void *)] = 0x34;
2667 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
2668 " 0x34 -> -0x%p\n", p);
2669 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2670
2671 validate_slab_cache(kmalloc_caches + 5);
2672 p = kzalloc(64, GFP_KERNEL);
2673 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
2674 *p = 0x56;
2675 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
2676 p);
2677 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2678 validate_slab_cache(kmalloc_caches + 6);
2679
2680 printk(KERN_ERR "\nB. Corruption after free\n");
2681 p = kzalloc(128, GFP_KERNEL);
2682 kfree(p);
2683 *p = 0x78;
2684 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
2685 validate_slab_cache(kmalloc_caches + 7);
2686
2687 p = kzalloc(256, GFP_KERNEL);
2688 kfree(p);
2689 p[50] = 0x9a;
2690 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
2691 validate_slab_cache(kmalloc_caches + 8);
2692
2693 p = kzalloc(512, GFP_KERNEL);
2694 kfree(p);
2695 p[512] = 0xab;
2696 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
2697 validate_slab_cache(kmalloc_caches + 9);
2698}
2699#else
2700static void resiliency_test(void) {};
2701#endif
2702
2705/* 2703/*
2706 * Generate lists of locations where slabcache objects are allocated 2704 * Generate lists of code addresses where slabcache objects are allocated
2707 * and freed. 2705 * and freed.
2708 */ 2706 */
2709 2707
2710struct location { 2708struct location {
2711 unsigned long count; 2709 unsigned long count;
2712 void *addr; 2710 void *addr;
2711 long long sum_time;
2712 long min_time;
2713 long max_time;
2714 long min_pid;
2715 long max_pid;
2716 cpumask_t cpus;
2717 nodemask_t nodes;
2713}; 2718};
2714 2719
2715struct loc_track { 2720struct loc_track {
@@ -2750,11 +2755,12 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max)
2750} 2755}
2751 2756
2752static int add_location(struct loc_track *t, struct kmem_cache *s, 2757static int add_location(struct loc_track *t, struct kmem_cache *s,
2753 void *addr) 2758 const struct track *track)
2754{ 2759{
2755 long start, end, pos; 2760 long start, end, pos;
2756 struct location *l; 2761 struct location *l;
2757 void *caddr; 2762 void *caddr;
2763 unsigned long age = jiffies - track->when;
2758 2764
2759 start = -1; 2765 start = -1;
2760 end = t->count; 2766 end = t->count;
@@ -2770,19 +2776,36 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
2770 break; 2776 break;
2771 2777
2772 caddr = t->loc[pos].addr; 2778 caddr = t->loc[pos].addr;
2773 if (addr == caddr) { 2779 if (track->addr == caddr) {
2774 t->loc[pos].count++; 2780
2781 l = &t->loc[pos];
2782 l->count++;
2783 if (track->when) {
2784 l->sum_time += age;
2785 if (age < l->min_time)
2786 l->min_time = age;
2787 if (age > l->max_time)
2788 l->max_time = age;
2789
2790 if (track->pid < l->min_pid)
2791 l->min_pid = track->pid;
2792 if (track->pid > l->max_pid)
2793 l->max_pid = track->pid;
2794
2795 cpu_set(track->cpu, l->cpus);
2796 }
2797 node_set(page_to_nid(virt_to_page(track)), l->nodes);
2775 return 1; 2798 return 1;
2776 } 2799 }
2777 2800
2778 if (addr < caddr) 2801 if (track->addr < caddr)
2779 end = pos; 2802 end = pos;
2780 else 2803 else
2781 start = pos; 2804 start = pos;
2782 } 2805 }
2783 2806
2784 /* 2807 /*
2785 * Not found. Insert new tracking element 2808 * Not found. Insert new tracking element.
2786 */ 2809 */
2787 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max)) 2810 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
2788 return 0; 2811 return 0;
@@ -2793,7 +2816,16 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
2793 (t->count - pos) * sizeof(struct location)); 2816 (t->count - pos) * sizeof(struct location));
2794 t->count++; 2817 t->count++;
2795 l->count = 1; 2818 l->count = 1;
2796 l->addr = addr; 2819 l->addr = track->addr;
2820 l->sum_time = age;
2821 l->min_time = age;
2822 l->max_time = age;
2823 l->min_pid = track->pid;
2824 l->max_pid = track->pid;
2825 cpus_clear(l->cpus);
2826 cpu_set(track->cpu, l->cpus);
2827 nodes_clear(l->nodes);
2828 node_set(page_to_nid(virt_to_page(track)), l->nodes);
2797 return 1; 2829 return 1;
2798} 2830}
2799 2831
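Note the aggregation scheme: the tracking state per call site stays O(1). Each matching object folds age = jiffies - track->when into sum/min/max, bounds the pid range and sets bits in the cpu and node masks, so the report can later print min/average/max (average = sum_time / count) without keeping a sample per object.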
@@ -2801,19 +2833,16 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
2801 struct page *page, enum track_item alloc) 2833 struct page *page, enum track_item alloc)
2802{ 2834{
2803 void *addr = page_address(page); 2835 void *addr = page_address(page);
2804 unsigned long map[BITS_TO_LONGS(s->objects)]; 2836 DECLARE_BITMAP(map, s->objects);
2805 void *p; 2837 void *p;
2806 2838
2807 bitmap_zero(map, s->objects); 2839 bitmap_zero(map, s->objects);
2808 for (p = page->freelist; p; p = get_freepointer(s, p)) 2840 for_each_free_object(p, s, page->freelist)
2809 set_bit((p - addr) / s->size, map); 2841 set_bit(slab_index(p, s, addr), map);
2810
2811 for (p = addr; p < addr + s->objects * s->size; p += s->size)
2812 if (!test_bit((p - addr) / s->size, map)) {
2813 void *addr = get_track(s, p, alloc)->addr;
2814 2842
2815 add_location(t, s, addr); 2843 for_each_object(p, s, addr)
2816 } 2844 if (!test_bit(slab_index(p, s, addr), map))
2845 add_location(t, s, get_track(s, p, alloc));
2817} 2846}
2818 2847
2819static int list_locations(struct kmem_cache *s, char *buf, 2848static int list_locations(struct kmem_cache *s, char *buf,
@@ -2847,15 +2876,47 @@ static int list_locations(struct kmem_cache *s, char *buf,
2847 } 2876 }
2848 2877
2849 for (i = 0; i < t.count; i++) { 2878 for (i = 0; i < t.count; i++) {
2850 void *addr = t.loc[i].addr; 2879 struct location *l = &t.loc[i];
2851 2880
2852 if (n > PAGE_SIZE - 100) 2881 if (n > PAGE_SIZE - 100)
2853 break; 2882 break;
2854 n += sprintf(buf + n, "%7ld ", t.loc[i].count); 2883 n += sprintf(buf + n, "%7ld ", l->count);
2855 if (addr) 2884
2856 n += sprint_symbol(buf + n, (unsigned long)t.loc[i].addr); 2885 if (l->addr)
2886 n += sprint_symbol(buf + n, (unsigned long)l->addr);
2857 else 2887 else
2858 n += sprintf(buf + n, "<not-available>"); 2888 n += sprintf(buf + n, "<not-available>");
2889
2890 if (l->sum_time != l->min_time) {
2891 unsigned long remainder;
2892
2893 n += sprintf(buf + n, " age=%ld/%ld/%ld",
2894 l->min_time,
2895 div_long_long_rem(l->sum_time, l->count, &remainder),
2896 l->max_time);
2897 } else
2898 n += sprintf(buf + n, " age=%ld",
2899 l->min_time);
2900
2901 if (l->min_pid != l->max_pid)
2902 n += sprintf(buf + n, " pid=%ld-%ld",
2903 l->min_pid, l->max_pid);
2904 else
2905 n += sprintf(buf + n, " pid=%ld",
2906 l->min_pid);
2907
2908 if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) {
2909 n += sprintf(buf + n, " cpus=");
2910 n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
2911 l->cpus);
2912 }
2913
2914 if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) {
2915 n += sprintf(buf + n, " nodes=");
2916 n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
2917 l->nodes);
2918 }
2919
2859 n += sprintf(buf + n, "\n"); 2920 n += sprintf(buf + n, "\n");
2860 } 2921 }
2861 2922
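Assembled from the format strings above, a location line now reads like this hypothetical example (symbol and figures invented for illustration):

	   4382 __d_alloc+0x22/0x160 age=4/382/7890 pid=1-4821 cpus=0-3 nodes=0-1

where age is min/average/max in jiffies, the age and pid fields collapse to a single value when all samples agree, and the cpus=/nodes= fields are suppressed on single-cpu or single-node systems.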
@@ -3491,6 +3552,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3491 3552
3492static int __init slab_sysfs_init(void) 3553static int __init slab_sysfs_init(void)
3493{ 3554{
3555 struct list_head *h;
3494 int err; 3556 int err;
3495 3557
3496 err = subsystem_register(&slab_subsys); 3558 err = subsystem_register(&slab_subsys);
@@ -3499,7 +3561,15 @@ static int __init slab_sysfs_init(void)
3499 return -ENOSYS; 3561 return -ENOSYS;
3500 } 3562 }
3501 3563
3502 finish_bootstrap(); 3564 slab_state = SYSFS;
3565
3566 list_for_each(h, &slab_caches) {
3567 struct kmem_cache *s =
3568 container_of(h, struct kmem_cache, list);
3569
3570 err = sysfs_slab_add(s);
3571 BUG_ON(err);
3572 }
3503 3573
3504 while (alias_list) { 3574 while (alias_list) {
3505 struct saved_alias *al = alias_list; 3575 struct saved_alias *al = alias_list;
@@ -3515,6 +3585,4 @@ static int __init slab_sysfs_init(void)
3515} 3585}
3516 3586
3517__initcall(slab_sysfs_init); 3587__initcall(slab_sysfs_init);
3518#else
3519__initcall(finish_bootstrap);
3520#endif 3588#endif
diff --git a/mm/swap.c b/mm/swap.c
index 218c52a24a21..d3cb966fe992 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -488,7 +488,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
488 long *committed; 488 long *committed;
489 489
490 committed = &per_cpu(committed_space, (long)hcpu); 490 committed = &per_cpu(committed_space, (long)hcpu);
491 if (action == CPU_DEAD) { 491 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
492 atomic_add(*committed, &vm_committed_space); 492 atomic_add(*committed, &vm_committed_space);
493 *committed = 0; 493 *committed = 0;
494 __lru_add_drain((long)hcpu); 494 __lru_add_drain((long)hcpu);
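This and the following hunks extend every CPU-hotplug notifier to treat the new *_FROZEN action variants (emitted during suspend/resume) exactly like their normal counterparts. A schematic of the pattern, with the action values defined locally as placeholders rather than the kernel's real constants:

#include <stdio.h>

/* placeholder action codes; the real values live in the kernel headers */
enum { CPU_ONLINE, CPU_ONLINE_FROZEN, CPU_DEAD, CPU_DEAD_FROZEN };

static int notifier(int action, long cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:	/* same handling whether or not frozen */
		printf("cpu %ld up\n", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		printf("cpu %ld gone, draining per-cpu state\n", cpu);
		break;
	}
	return 0;
}

int main(void)
{
	notifier(CPU_ONLINE_FROZEN, 1);
	notifier(CPU_DEAD, 1);
	return 0;
}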
diff --git a/mm/truncate.c b/mm/truncate.c
index 0f4b6d18ab0e..4fbe1a2da5fb 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -12,6 +12,7 @@
12#include <linux/swap.h> 12#include <linux/swap.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/highmem.h>
15#include <linux/pagevec.h> 16#include <linux/pagevec.h>
16#include <linux/task_io_accounting_ops.h> 17#include <linux/task_io_accounting_ops.h>
17#include <linux/buffer_head.h> /* grr. try_to_release_page, 18#include <linux/buffer_head.h> /* grr. try_to_release_page,
@@ -46,7 +47,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
46 47
47static inline void truncate_partial_page(struct page *page, unsigned partial) 48static inline void truncate_partial_page(struct page *page, unsigned partial)
48{ 49{
49 memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial); 50 zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
50 if (PagePrivate(page)) 51 if (PagePrivate(page))
51 do_invalidatepage(page, partial); 52 do_invalidatepage(page, partial);
52} 53}
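truncate_partial_page() switches from memclear_highpage_flush() to the new zero_user_page() helper, which zeroes the tail of the page from offset partial onward. The effect, sketched with an ordinary buffer standing in for a highmem page (PAGE_CACHE_SIZE assumed to be 4096 here):

#include <stdio.h>
#include <string.h>

#define PAGE_CACHE_SIZE 4096

/* userspace stand-in for zero_user_page(page, offset, len, km_type) */
static void zero_tail(unsigned char *page, unsigned partial)
{
	memset(page + partial, 0, PAGE_CACHE_SIZE - partial);
}

int main(void)
{
	static unsigned char page[PAGE_CACHE_SIZE];

	memset(page, 0xff, sizeof(page));
	zero_tail(page, 100);	/* keep the first 100 bytes, clear the rest */
	printf("page[99]=%u page[100]=%u\n", page[99], page[100]);
	return 0;
}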
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1c8e75a1cfcd..1be5a6376ef0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1528,7 +1528,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
1528 pg_data_t *pgdat; 1528 pg_data_t *pgdat;
1529 cpumask_t mask; 1529 cpumask_t mask;
1530 1530
1531 if (action == CPU_ONLINE) { 1531 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1532 for_each_online_pgdat(pgdat) { 1532 for_each_online_pgdat(pgdat) {
1533 mask = node_to_cpumask(pgdat->node_id); 1533 mask = node_to_cpumask(pgdat->node_id);
1534 if (any_online_cpu(mask) != NR_CPUS) 1534 if (any_online_cpu(mask) != NR_CPUS)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6c488d6ac425..9832d9a41d8c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -281,6 +281,17 @@ EXPORT_SYMBOL(dec_zone_page_state);
281 281
282/* 282/*
283 * Update the zone counters for one cpu. 283 * Update the zone counters for one cpu.
284 *
285 * Note that refresh_cpu_vm_stats strives to only access
286 * node local memory. The per cpu pagesets on remote zones are placed
287 * in the memory local to the processor using that pageset. So the
288 * loop over all zones will access a series of cachelines local to
289 * the processor.
290 *
291 * The call to zone_page_state_add updates the cachelines with the
292 * statistics in the remote zone struct as well as the global cachelines
293 * with the global counters. These could cause remote node cache line
294 * bouncing and will have to be only done when necessary.
284 */ 295 */
285void refresh_cpu_vm_stats(int cpu) 296void refresh_cpu_vm_stats(int cpu)
286{ 297{
@@ -289,21 +300,54 @@ void refresh_cpu_vm_stats(int cpu)
289 unsigned long flags; 300 unsigned long flags;
290 301
291 for_each_zone(zone) { 302 for_each_zone(zone) {
292 struct per_cpu_pageset *pcp; 303 struct per_cpu_pageset *p;
293 304
294 if (!populated_zone(zone)) 305 if (!populated_zone(zone))
295 continue; 306 continue;
296 307
297 pcp = zone_pcp(zone, cpu); 308 p = zone_pcp(zone, cpu);
298 309
299 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 310 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
300 if (pcp->vm_stat_diff[i]) { 311 if (p->vm_stat_diff[i]) {
301 local_irq_save(flags); 312 local_irq_save(flags);
302 zone_page_state_add(pcp->vm_stat_diff[i], 313 zone_page_state_add(p->vm_stat_diff[i],
303 zone, i); 314 zone, i);
304 pcp->vm_stat_diff[i] = 0; 315 p->vm_stat_diff[i] = 0;
316#ifdef CONFIG_NUMA
317 /* 3 seconds idle till flush */
318 p->expire = 3;
319#endif
305 local_irq_restore(flags); 320 local_irq_restore(flags);
306 } 321 }
322#ifdef CONFIG_NUMA
323 /*
324 * Deal with draining the remote pageset of this
325 * processor
326 *
327 * Check if there are pages remaining in this pageset
328 * if not then there is nothing to expire.
329 */
330 if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
331 continue;
332
333 /*
334 * We never drain zones local to this processor.
335 */
336 if (zone_to_nid(zone) == numa_node_id()) {
337 p->expire = 0;
338 continue;
339 }
340
341 p->expire--;
342 if (p->expire)
343 continue;
344
345 if (p->pcp[0].count)
346 drain_zone_pages(zone, p->pcp + 0);
347
348 if (p->pcp[1].count)
349 drain_zone_pages(zone, p->pcp + 1);
350#endif
307 } 351 }
308} 352}
309 353
@@ -640,6 +684,24 @@ const struct seq_operations vmstat_op = {
640#endif /* CONFIG_PROC_FS */ 684#endif /* CONFIG_PROC_FS */
641 685
642#ifdef CONFIG_SMP 686#ifdef CONFIG_SMP
687static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
688int sysctl_stat_interval __read_mostly = HZ;
689
690static void vmstat_update(struct work_struct *w)
691{
692 refresh_cpu_vm_stats(smp_processor_id());
693 schedule_delayed_work(&__get_cpu_var(vmstat_work),
694 sysctl_stat_interval);
695}
696
697static void __devinit start_cpu_timer(int cpu)
698{
699 struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
700
701 INIT_DELAYED_WORK(vmstat_work, vmstat_update);
702 schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
703}
704
643/* 705/*
644 * Use the cpu notifier to insure that the thresholds are recalculated 706 * Use the cpu notifier to insure that the thresholds are recalculated
645 * when necessary. 707 * when necessary.
@@ -648,10 +710,24 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
648 unsigned long action, 710 unsigned long action,
649 void *hcpu) 711 void *hcpu)
650{ 712{
713 long cpu = (long)hcpu;
714
651 switch (action) { 715 switch (action) {
652 case CPU_UP_PREPARE: 716 case CPU_ONLINE:
653 case CPU_UP_CANCELED: 717 case CPU_ONLINE_FROZEN:
718 start_cpu_timer(cpu);
719 break;
720 case CPU_DOWN_PREPARE:
721 case CPU_DOWN_PREPARE_FROZEN:
722 cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
723 per_cpu(vmstat_work, cpu).work.func = NULL;
724 break;
725 case CPU_DOWN_FAILED:
726 case CPU_DOWN_FAILED_FROZEN:
727 start_cpu_timer(cpu);
728 break;
654 case CPU_DEAD: 729 case CPU_DEAD:
730 case CPU_DEAD_FROZEN:
655 refresh_zone_stat_thresholds(); 731 refresh_zone_stat_thresholds();
656 break; 732 break;
657 default: 733 default:
@@ -665,8 +741,13 @@ static struct notifier_block __cpuinitdata vmstat_notifier =
665 741
666int __init setup_vmstat(void) 742int __init setup_vmstat(void)
667{ 743{
744 int cpu;
745
668 refresh_zone_stat_thresholds(); 746 refresh_zone_stat_thresholds();
669 register_cpu_notifier(&vmstat_notifier); 747 register_cpu_notifier(&vmstat_notifier);
748
749 for_each_online_cpu(cpu)
750 start_cpu_timer(cpu);
670 return 0; 751 return 0;
671} 752}
672module_init(setup_vmstat) 753module_init(setup_vmstat)
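vmstat_update() reschedules itself every sysctl_stat_interval ticks, and start_cpu_timer() staggers the first run by HZ + cpu so the CPUs do not fold their counters in lockstep. A toy single-threaded event loop showing the self-rearming shape (the tick values come from the hunk; everything else is a stand-in for the kernel's workqueue machinery):

#include <stdio.h>

#define HZ 100
static int sysctl_stat_interval = HZ;

static long now, next_run;	/* one "cpu" worth of toy timer state */

static void vmstat_update(void)
{
	printf("tick %ld: folding per-cpu counters\n", now);
	next_run = now + sysctl_stat_interval;	/* rearm ourselves */
}

static void start_cpu_timer(int cpu)
{
	next_run = HZ + cpu;	/* stagger the first run across cpus */
}

int main(void)
{
	start_cpu_timer(0);
	for (now = 0; now <= 3 * HZ; now++)
		if (now == next_run)
			vmstat_update();
	return 0;
}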
diff --git a/net/core/dev.c b/net/core/dev.c
index 4317c1be4d3f..8301e2ac747f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3450,7 +3450,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
3450 unsigned int cpu, oldcpu = (unsigned long)ocpu; 3450 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3451 struct softnet_data *sd, *oldsd; 3451 struct softnet_data *sd, *oldsd;
3452 3452
3453 if (action != CPU_DEAD) 3453 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
3454 return NOTIFY_OK; 3454 return NOTIFY_OK;
3455 3455
3456 local_irq_disable(); 3456 local_irq_disable();
diff --git a/net/core/flow.c b/net/core/flow.c
index 5d25697920b1..051430545a05 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -338,7 +338,7 @@ static int flow_cache_cpu(struct notifier_block *nfb,
338 unsigned long action, 338 unsigned long action,
339 void *hcpu) 339 void *hcpu)
340{ 340{
341 if (action == CPU_DEAD) 341 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
342 __flow_cache_shrink((unsigned long)hcpu, 0); 342 __flow_cache_shrink((unsigned long)hcpu, 0);
343 return NOTIFY_OK; 343 return NOTIFY_OK;
344} 344}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9fbe87c93802..bfa910b6ad25 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1839,7 +1839,7 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que
1839} 1839}
1840 1840
1841/* 1841/*
1842 * The DECnet spec requires the the "routing layer" accepts packets which 1842 * The DECnet spec requires that the "routing layer" accepts packets which
1843 * are at least 230 bytes in size. This excludes any headers which the NSP 1843 * are at least 230 bytes in size. This excludes any headers which the NSP
1844 * layer might add, so we always assume that we'll be using the maximal 1844 * layer might add, so we always assume that we'll be using the maximal
1845 * length header on data packets. The variation in length is due to the 1845 * length header on data packets. The variation in length is due to the
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e62aee0ec4c5..c68196cc56ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -130,7 +130,7 @@ config IP_ROUTE_MULTIPATH_RR
130 tristate "MULTIPATH: round robin algorithm" 130 tristate "MULTIPATH: round robin algorithm"
131 depends on IP_ROUTE_MULTIPATH_CACHED 131 depends on IP_ROUTE_MULTIPATH_CACHED
132 help 132 help
133 Mulitpath routes are chosen according to Round Robin 133 Multipath routes are chosen according to Round Robin
134 134
135config IP_ROUTE_MULTIPATH_RANDOM 135config IP_ROUTE_MULTIPATH_RANDOM
136 tristate "MULTIPATH: random algorithm" 136 tristate "MULTIPATH: random algorithm"
@@ -651,7 +651,7 @@ config TCP_MD5SIG
651 select CRYPTO 651 select CRYPTO
652 select CRYPTO_MD5 652 select CRYPTO_MD5
653 ---help--- 653 ---help---
654 RFC2385 specifices a method of giving MD5 protection to TCP sessions. 654 RFC2385 specifies a method of giving MD5 protection to TCP sessions.
655 Its main (only?) use is to protect BGP sessions between core routers 655 Its main (only?) use is to protect BGP sessions between core routers
656 on the Internet. 656 on the Internet.
657 657
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e1f18489db1d..86a2b52aad38 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -629,7 +629,7 @@ doi_walk_return:
629 * @domain: the domain to add 629 * @domain: the domain to add
630 * 630 *
631 * Description: 631 * Description:
632 * Adds the @domain to the the DOI specified by @doi_def, this function 632 * Adds the @domain to the DOI specified by @doi_def, this function
633 * should only be called by external functions (i.e. NetLabel). This function 633 * should only be called by external functions (i.e. NetLabel). This function
634 * does allocate memory. Returns zero on success, negative values on failure. 634 * does allocate memory. Returns zero on success, negative values on failure.
635 * 635 *
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index b3050a6817e7..68fe1d4d0210 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2387,6 +2387,7 @@ void ip_vs_control_cleanup(void)
2387 EnterFunction(2); 2387 EnterFunction(2);
2388 ip_vs_trash_cleanup(); 2388 ip_vs_trash_cleanup();
2389 cancel_rearming_delayed_work(&defense_work); 2389 cancel_rearming_delayed_work(&defense_work);
2390 cancel_work_sync(&defense_work.work);
2390 ip_vs_kill_estimator(&ip_vs_stats); 2391 ip_vs_kill_estimator(&ip_vs_stats);
2391 unregister_sysctl_table(sysctl_header); 2392 unregister_sysctl_table(sysctl_header);
2392 proc_net_remove("ip_vs_stats"); 2393 proc_net_remove("ip_vs_stats");
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index ff366f7390d9..dd7c128f9db3 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -18,7 +18,7 @@
18 * The SED algorithm attempts to minimize each job's expected delay until 18 * The SED algorithm attempts to minimize each job's expected delay until
19 * completion. The expected delay that the job will experience is 19 * completion. The expected delay that the job will experience is
20 * (Ci + 1) / Ui if sent to the ith server, in which Ci is the number of 20 * (Ci + 1) / Ui if sent to the ith server, in which Ci is the number of
21 * jobs on the the ith server and Ui is the fixed service rate (weight) of 21 * jobs on the ith server and Ui is the fixed service rate (weight) of
22 * the ith server. The SED algorithm adopts a greedy policy that each does 22 * the ith server. The SED algorithm adopts a greedy policy that each does
23 * what is in its own best interest, i.e. to join the queue which would 23 * what is in its own best interest, i.e. to join the queue which would
24 * minimize its expected delay of completion. 24 * minimize its expected delay of completion.
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 113e0c4c8a92..66026df1cc76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -983,7 +983,7 @@ int udp_disconnect(struct sock *sk, int flags)
983} 983}
984 984
985/* return: 985/* return:
986 * 1 if the the UDP system should process it 986 * 1 if the UDP system should process it
987 * 0 if we should drop this packet 987 * 0 if we should drop this packet
988 * -1 if it should get processed by xfrm4_rcv_encap 988 * -1 if it should get processed by xfrm4_rcv_encap
989 */ 989 */
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index bbe99f842f9f..838b8ddee8c0 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -28,7 +28,7 @@ config IP6_NF_QUEUE
28 packets which enables users to receive the filtered packets 28 packets which enables users to receive the filtered packets
29 with QUEUE target using libipq. 29 with QUEUE target using libipq.
30 30
31 THis option enables the old IPv6-only "ip6_queue" implementation 31 This option enables the old IPv6-only "ip6_queue" implementation
32 which has been obsoleted by the new "nfnetlink_queue" code (see 32 which has been obsoleted by the new "nfnetlink_queue" code (see
33 CONFIG_NETFILTER_NETLINK_QUEUE). 33 CONFIG_NETFILTER_NETLINK_QUEUE).
34 34
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fb3faf72e850..b7333061016d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -556,6 +556,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
556 556
557 switch (action) { 557 switch (action) {
558 case CPU_UP_PREPARE: 558 case CPU_UP_PREPARE:
559 case CPU_UP_PREPARE_FROZEN:
559 if (!percpu_populate(iucv_irq_data, 560 if (!percpu_populate(iucv_irq_data,
560 sizeof(struct iucv_irq_data), 561 sizeof(struct iucv_irq_data),
561 GFP_KERNEL|GFP_DMA, cpu)) 562 GFP_KERNEL|GFP_DMA, cpu))
@@ -567,15 +568,20 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
567 } 568 }
568 break; 569 break;
569 case CPU_UP_CANCELED: 570 case CPU_UP_CANCELED:
571 case CPU_UP_CANCELED_FROZEN:
570 case CPU_DEAD: 572 case CPU_DEAD:
573 case CPU_DEAD_FROZEN:
571 percpu_depopulate(iucv_param, cpu); 574 percpu_depopulate(iucv_param, cpu);
572 percpu_depopulate(iucv_irq_data, cpu); 575 percpu_depopulate(iucv_irq_data, cpu);
573 break; 576 break;
574 case CPU_ONLINE: 577 case CPU_ONLINE:
578 case CPU_ONLINE_FROZEN:
575 case CPU_DOWN_FAILED: 579 case CPU_DOWN_FAILED:
580 case CPU_DOWN_FAILED_FROZEN:
576 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu); 581 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
577 break; 582 break;
578 case CPU_DOWN_PREPARE: 583 case CPU_DOWN_PREPARE:
584 case CPU_DOWN_PREPARE_FROZEN:
579 cpumask = iucv_buffer_cpumask; 585 cpumask = iucv_buffer_cpumask;
580 cpu_clear(cpu, cpumask); 586 cpu_clear(cpu, cpumask);
581 if (cpus_empty(cpumask)) 587 if (cpus_empty(cpumask))
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7d9fa38b6a7d..6b8a103cf9e6 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -324,7 +324,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
324 memset(&laddr, 0, sizeof(laddr)); 324 memset(&laddr, 0, sizeof(laddr));
325 memset(&daddr, 0, sizeof(daddr)); 325 memset(&daddr, 0, sizeof(daddr));
326 /* 326 /*
327 * FIXME: check if the the address is multicast, 327 * FIXME: check if the address is multicast,
328 * only SOCK_DGRAM can do this. 328 * only SOCK_DGRAM can do this.
329 */ 329 */
330 memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN); 330 memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index ea6211cade0a..a567dae8e5fd 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -197,7 +197,7 @@ config NF_CONNTRACK_PPTP
197 197
198 Please note that not all PPTP modes of operation are supported yet. 198 Please note that not all PPTP modes of operation are supported yet.
199 Specifically these limitations exist: 199 Specifically these limitations exist:
200 - Blindy assumes that control connections are always established 200 - Blindly assumes that control connections are always established
201 in PNS->PAC direction. This is a violation of RFC2637. 201 in PNS->PAC direction. This is a violation of RFC2637.
202 - Only supports a single call within each session 202 - Only supports a single call within each session
203 203
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index c31af29a4439..117cbfdb910c 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -177,7 +177,7 @@ void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
177 struct nf_conntrack_expect *i; 177 struct nf_conntrack_expect *i;
178 178
179 write_lock_bh(&nf_conntrack_lock); 179 write_lock_bh(&nf_conntrack_lock);
180 /* choose the the oldest expectation to evict */ 180 /* choose the oldest expectation to evict */
181 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { 181 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
182 if (expect_matches(i, exp) && del_timer(&i->timeout)) { 182 if (expect_matches(i, exp) && del_timer(&i->timeout)) {
183 nf_ct_unlink_expect(i); 183 nf_ct_unlink_expect(i);
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 83ef411772f4..77fb7b06a9c4 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This file is part of the SCTP kernel reference Implementation 4 * This file is part of the SCTP kernel reference Implementation
5 * 5 *
6 * This file contains the code relating the the chunk abstraction. 6 * This file contains the code relating the chunk abstraction.
7 * 7 *
8 * The SCTP reference implementation is free software; 8 * The SCTP reference implementation is free software;
9 * you can redistribute it and/or modify it under the terms of 9 * you can redistribute it and/or modify it under the terms of
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9f1a908776de..83a76ba9d7b3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2586,7 +2586,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
2586 * 2586 *
2587 * 7.1.2 SCTP_ASSOCINFO 2587 * 7.1.2 SCTP_ASSOCINFO
2588 * 2588 *
2589 * This option is used to tune the the maximum retransmission attempts 2589 * This option is used to tune the maximum retransmission attempts
2590 * of the association. 2590 * of the association.
2591 * Returns an error if the new association retransmission value is 2591 * Returns an error if the new association retransmission value is
2592 * greater than the sum of the retransmission value of the peer. 2592 * greater than the sum of the retransmission value of the peer.
@@ -4547,7 +4547,7 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
4547 * 4547 *
4548 * 7.1.2 SCTP_ASSOCINFO 4548 * 7.1.2 SCTP_ASSOCINFO
4549 * 4549 *
4550 * This option is used to tune the the maximum retransmission attempts 4550 * This option is used to tune the maximum retransmission attempts
4551 * of the association. 4551 * of the association.
4552 * Returns an error if the new association retransmission value is 4552 * Returns an error if the new association retransmission value is
4553 * greater than the sum of the retransmission value of the peer. 4553 * greater than the sum of the retransmission value of the peer.
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index db298b501c81..099a983797da 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -924,6 +924,7 @@ static inline int
924gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip) 924gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
925{ 925{
926 struct rsc *rsci; 926 struct rsc *rsci;
927 int rc;
927 928
928 if (rsip->major_status != GSS_S_COMPLETE) 929 if (rsip->major_status != GSS_S_COMPLETE)
929 return gss_write_null_verf(rqstp); 930 return gss_write_null_verf(rqstp);
@@ -932,7 +933,9 @@ gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
932 rsip->major_status = GSS_S_NO_CONTEXT; 933 rsip->major_status = GSS_S_NO_CONTEXT;
933 return gss_write_null_verf(rqstp); 934 return gss_write_null_verf(rqstp);
934 } 935 }
935 return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN); 936 rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
937 cache_put(&rsci->h, &rsc_cache);
938 return rc;
936} 939}
937 940
938/* 941/*
@@ -1089,6 +1092,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1089 } 1092 }
1090 goto complete; 1093 goto complete;
1091 case RPC_GSS_PROC_DESTROY: 1094 case RPC_GSS_PROC_DESTROY:
1095 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1096 goto auth_err;
1092 set_bit(CACHE_NEGATIVE, &rsci->h.flags); 1097 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1093 if (resv->iov_len + 4 > PAGE_SIZE) 1098 if (resv->iov_len + 4 > PAGE_SIZE)
1094 goto drop; 1099 goto drop;
@@ -1196,13 +1201,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1196 if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, 1201 if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
1197 integ_len)) 1202 integ_len))
1198 BUG(); 1203 BUG();
1199 if (resbuf->page_len == 0 1204 if (resbuf->tail[0].iov_base == NULL) {
1200 && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1201 < PAGE_SIZE) {
1202 BUG_ON(resbuf->tail[0].iov_len);
1203 /* Use head for everything */
1204 resv = &resbuf->head[0];
1205 } else if (resbuf->tail[0].iov_base == NULL) {
1206 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE) 1205 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1207 goto out_err; 1206 goto out_err;
1208 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1207 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
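gss_write_init_verf() previously returned straight out of gss_write_verf() and leaked the rsc cache entry it had looked up; the fix stores the return code and drops the reference with cache_put() before returning. The get/use/put discipline in miniature (the refcounted object here is invented for the sketch):

#include <stdio.h>

struct entry { int refs; };

static struct entry *lookup(struct entry *e) { e->refs++; return e; }
static void put(struct entry *e) { e->refs--; }
static int use(struct entry *e) { (void)e; return 0; }

static int handler(struct entry *table)
{
	struct entry *e = lookup(table);
	int rc;

	rc = use(e);	/* save the result... */
	put(e);		/* ...so the reference is dropped on every path */
	return rc;
}

int main(void)
{
	struct entry table = { 0 };

	handler(&table);
	printf("refs after handler: %d\n", table.refs);	/* 0, no leak */
	return 0;
}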
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index ad39b47e05bc..a2f1893bde53 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -845,6 +845,8 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
845 845
846int register_rpc_pipefs(void) 846int register_rpc_pipefs(void)
847{ 847{
848 int err;
849
848 rpc_inode_cachep = kmem_cache_create("rpc_inode_cache", 850 rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
849 sizeof(struct rpc_inode), 851 sizeof(struct rpc_inode),
850 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 852 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
@@ -852,7 +854,12 @@ int register_rpc_pipefs(void)
852 init_once, NULL); 854 init_once, NULL);
853 if (!rpc_inode_cachep) 855 if (!rpc_inode_cachep)
854 return -ENOMEM; 856 return -ENOMEM;
855 register_filesystem(&rpc_pipe_fs_type); 857 err = register_filesystem(&rpc_pipe_fs_type);
858 if (err) {
859 kmem_cache_destroy(rpc_inode_cachep);
860 return err;
861 }
862
856 return 0; 863 return 0;
857} 864}
858 865
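register_rpc_pipefs() now checks register_filesystem() and, on failure, tears down the inode cache it had just created instead of reporting success. The general unwind-in-reverse-order shape, with both init steps faked for the sketch:

#include <stdio.h>

static int step_a_done;

static int step_a(void) { step_a_done = 1; return 0; }
static void undo_a(void) { step_a_done = 0; }
static int step_b(int fail) { return fail ? -1 : 0; }

static int init(int fail_b)
{
	int err;

	err = step_a();		/* e.g. kmem_cache_create() */
	if (err)
		return err;

	err = step_b(fail_b);	/* e.g. register_filesystem() */
	if (err) {
		undo_a();	/* unwind what already succeeded */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("init ok: %d, state: %d\n", init(1), step_a_done);
	return 0;
}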
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 99014516b73c..b011eb625e49 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -736,6 +736,11 @@ static void rpc_async_schedule(struct work_struct *work)
736 __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 736 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
737} 737}
738 738
739struct rpc_buffer {
740 size_t len;
741 char data[];
742};
743
739/** 744/**
740 * rpc_malloc - allocate an RPC buffer 745 * rpc_malloc - allocate an RPC buffer
741 * @task: RPC task that will use this buffer 746 * @task: RPC task that will use this buffer
@@ -754,18 +759,22 @@ static void rpc_async_schedule(struct work_struct *work)
754 */ 759 */
755void *rpc_malloc(struct rpc_task *task, size_t size) 760void *rpc_malloc(struct rpc_task *task, size_t size)
756{ 761{
757 size_t *buf; 762 struct rpc_buffer *buf;
758 gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; 763 gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
759 764
760 size += sizeof(size_t); 765 size += sizeof(struct rpc_buffer);
761 if (size <= RPC_BUFFER_MAXSIZE) 766 if (size <= RPC_BUFFER_MAXSIZE)
762 buf = mempool_alloc(rpc_buffer_mempool, gfp); 767 buf = mempool_alloc(rpc_buffer_mempool, gfp);
763 else 768 else
764 buf = kmalloc(size, gfp); 769 buf = kmalloc(size, gfp);
765 *buf = size; 770
771 if (!buf)
772 return NULL;
773
774 buf->len = size;
766 dprintk("RPC: %5u allocated buffer of size %zu at %p\n", 775 dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
767 task->tk_pid, size, buf); 776 task->tk_pid, size, buf);
768 return ++buf; 777 return &buf->data;
769} 778}
770 779
771/** 780/**
@@ -775,15 +784,18 @@ void *rpc_malloc(struct rpc_task *task, size_t size)
775 */ 784 */
776void rpc_free(void *buffer) 785void rpc_free(void *buffer)
777{ 786{
778 size_t size, *buf = buffer; 787 size_t size;
788 struct rpc_buffer *buf;
779 789
780 if (!buffer) 790 if (!buffer)
781 return; 791 return;
782 size = *buf; 792
783 buf--; 793 buf = container_of(buffer, struct rpc_buffer, data);
794 size = buf->len;
784 795
785 dprintk("RPC: freeing buffer of size %zu at %p\n", 796 dprintk("RPC: freeing buffer of size %zu at %p\n",
786 size, buf); 797 size, buf);
798
787 if (size <= RPC_BUFFER_MAXSIZE) 799 if (size <= RPC_BUFFER_MAXSIZE)
788 mempool_free(buf, rpc_buffer_mempool); 800 mempool_free(buf, rpc_buffer_mempool);
789 else 801 else
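rpc_malloc() stops treating its hidden length word as a bare size_t and instead defines struct rpc_buffer with a flexible array member, handing out &buf->data and recovering the header in rpc_free() with container_of(). The same technique in plain userspace C:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer {
	size_t len;	/* total allocation, header included */
	char data[];	/* caller-visible payload */
};

static void *buf_alloc(size_t size)
{
	struct buffer *buf;

	size += sizeof(struct buffer);
	buf = malloc(size);
	if (!buf)
		return NULL;
	buf->len = size;
	return &buf->data;	/* hand out the payload, hide the header */
}

static void buf_free(void *p)
{
	struct buffer *buf;

	if (!p)
		return;
	buf = container_of(p, struct buffer, data);
	printf("freeing %zu bytes\n", buf->len);
	free(buf);
}

int main(void)
{
	void *p = buf_alloc(64);

	buf_free(p);
	return 0;
}

Compared with the old ++buf/--buf pointer games on a size_t *, the struct makes the header layout explicit and lets the compiler check the arithmetic.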
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 43ecf62f12ef..0d35bc796d00 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -146,9 +146,11 @@ init_sunrpc(void)
146 int err = register_rpc_pipefs(); 146 int err = register_rpc_pipefs();
147 if (err) 147 if (err)
148 goto out; 148 goto out;
149 err = rpc_init_mempool() != 0; 149 err = rpc_init_mempool();
150 if (err) 150 if (err) {
151 unregister_rpc_pipefs();
151 goto out; 152 goto out;
153 }
152#ifdef RPC_DEBUG 154#ifdef RPC_DEBUG
153 rpc_register_sysctl(); 155 rpc_register_sysctl();
154#endif 156#endif
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b7503c103ae8..e673ef993904 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -907,7 +907,7 @@ svc_process(struct svc_rqst *rqstp)
907 * better idea of reply size 907 * better idea of reply size
908 */ 908 */
909 if (procp->pc_xdrressize) 909 if (procp->pc_xdrressize)
910 svc_reserve(rqstp, procp->pc_xdrressize<<2); 910 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
911 911
912 /* Call the function that processes the request. */ 912 /* Call the function that processes the request. */
913 if (!versp->vs_dispatch) { 913 if (!versp->vs_dispatch) {
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index f5c3808bf85a..af7c5f05c6e1 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -64,7 +64,7 @@ int svc_set_client(struct svc_rqst *rqstp)
64} 64}
65 65
66/* A request, which was authenticated, has now executed. 66/* A request, which was authenticated, has now executed.
67 * Time to finalise the the credentials and verifier 67 * Time to finalise the credentials and verifier
68 * and release and resources 68 * and release and resources
69 */ 69 */
70int svc_authorise(struct svc_rqst *rqstp) 70int svc_authorise(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2bd23ea2aa8b..07dcd20cbee4 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp)
385{ 385{
386 struct ip_map *ipm; 386 struct ip_map *ipm;
387 struct svc_sock *svsk = rqstp->rq_sock; 387 struct svc_sock *svsk = rqstp->rq_sock;
388 spin_lock_bh(&svsk->sk_defer_lock); 388 spin_lock(&svsk->sk_lock);
389 ipm = svsk->sk_info_authunix; 389 ipm = svsk->sk_info_authunix;
390 if (ipm != NULL) { 390 if (ipm != NULL) {
391 if (!cache_valid(&ipm->h)) { 391 if (!cache_valid(&ipm->h)) {
@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp)
395 * same IP address. 395 * same IP address.
396 */ 396 */
397 svsk->sk_info_authunix = NULL; 397 svsk->sk_info_authunix = NULL;
398 spin_unlock_bh(&svsk->sk_defer_lock); 398 spin_unlock(&svsk->sk_lock);
399 cache_put(&ipm->h, &ip_map_cache); 399 cache_put(&ipm->h, &ip_map_cache);
400 return NULL; 400 return NULL;
401 } 401 }
402 cache_get(&ipm->h); 402 cache_get(&ipm->h);
403 } 403 }
404 spin_unlock_bh(&svsk->sk_defer_lock); 404 spin_unlock(&svsk->sk_lock);
405 return ipm; 405 return ipm;
406} 406}
407 407
@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
410{ 410{
411 struct svc_sock *svsk = rqstp->rq_sock; 411 struct svc_sock *svsk = rqstp->rq_sock;
412 412
413 spin_lock_bh(&svsk->sk_defer_lock); 413 spin_lock(&svsk->sk_lock);
414 if (svsk->sk_sock->type == SOCK_STREAM && 414 if (svsk->sk_sock->type == SOCK_STREAM &&
415 svsk->sk_info_authunix == NULL) { 415 svsk->sk_info_authunix == NULL) {
416 /* newly cached, keep the reference */ 416 /* newly cached, keep the reference */
417 svsk->sk_info_authunix = ipm; 417 svsk->sk_info_authunix = ipm;
418 ipm = NULL; 418 ipm = NULL;
419 } 419 }
420 spin_unlock_bh(&svsk->sk_defer_lock); 420 spin_unlock(&svsk->sk_lock);
421 if (ipm) 421 if (ipm)
422 cache_put(&ipm->h, &ip_map_cache); 422 cache_put(&ipm->h, &ip_map_cache);
423} 423}
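ip_map_cached_put() either transfers its ip_map reference into the socket's one-entry cache or, if the slot is already taken, drops it after unlocking; the rename to sk_lock also documents that the lock now covers sk_info_authunix. The hand-off-or-release pattern, with a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

struct obj { int refs; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *cached;	/* one-entry cache, guarded by lock */

static void put(struct obj *o) { o->refs--; }

static void cached_put(struct obj *o)
{
	pthread_mutex_lock(&lock);
	if (!cached) {
		cached = o;	/* the cache takes over the reference */
		o = NULL;
	}
	pthread_mutex_unlock(&lock);
	if (o)
		put(o);		/* slot was taken: drop ours outside the lock */
}

int main(void)
{
	struct obj a = { 1 }, b = { 1 };

	cached_put(&a);		/* stashed */
	cached_put(&b);		/* slot full, reference dropped */
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);
	return 0;
}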
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 22f61aee4824..5baf48de2558 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -53,7 +53,8 @@
53 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. 53 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
54 * when both need to be taken (rare), svc_serv->sv_lock is first. 54 * when both need to be taken (rare), svc_serv->sv_lock is first.
55 * BKL protects svc_serv->sv_nrthread. 55 * BKL protects svc_serv->sv_nrthread.
56 * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list 56 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
57 * and the ->sk_info_authunix cache.
57 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply. 58 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
58 * 59 *
59 * Some flags can be set to certain values at any time 60 * Some flags can be set to certain values at any time
@@ -787,15 +788,20 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
787 } 788 }
788 789
789 clear_bit(SK_DATA, &svsk->sk_flags); 790 clear_bit(SK_DATA, &svsk->sk_flags);
790 while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 791 skb = NULL;
791 0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 || 792 err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
792 (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { 793 0, 0, MSG_PEEK | MSG_DONTWAIT);
793 if (err == -EAGAIN) { 794 if (err >= 0)
794 svc_sock_received(svsk); 795 skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);
795 return err; 796
797 if (skb == NULL) {
798 if (err != -EAGAIN) {
799 /* possibly an icmp error */
800 dprintk("svc: recvfrom returned error %d\n", -err);
801 set_bit(SK_DATA, &svsk->sk_flags);
796 } 802 }
797 /* possibly an icmp error */ 803 svc_sock_received(svsk);
798 dprintk("svc: recvfrom returned error %d\n", -err); 804 return -EAGAIN;
799 } 805 }
800 rqstp->rq_addrlen = sizeof(rqstp->rq_addr); 806 rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
801 if (skb->tstamp.tv64 == 0) { 807 if (skb->tstamp.tv64 == 0) {
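svc_udp_recvfrom() replaces a hard-to-follow peek/receive loop with straight-line code: peek first, then consume, and handle a NULL skb uniformly, distinguishing EAGAIN from a real (possibly ICMP-induced) error. A userspace analog of peek-then-consume on a datagram socket:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	char buf[64];
	ssize_t n;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
		return 1;
	send(sv[0], "hello", 5, 0);

	/* peek without consuming, then do the real receive */
	n = recv(sv[1], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
	if (n >= 0)
		n = recv(sv[1], buf, sizeof(buf), MSG_DONTWAIT);

	if (n < 0) {
		if (errno != EAGAIN)
			perror("recv");	/* a real error, not just no data */
		return 1;
	}
	printf("got %zd bytes: %.*s\n", n, (int)n, buf);
	return 0;
}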
@@ -1633,7 +1639,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1633 svsk->sk_server = serv; 1639 svsk->sk_server = serv;
1634 atomic_set(&svsk->sk_inuse, 1); 1640 atomic_set(&svsk->sk_inuse, 1);
1635 svsk->sk_lastrecv = get_seconds(); 1641 svsk->sk_lastrecv = get_seconds();
1636 spin_lock_init(&svsk->sk_defer_lock); 1642 spin_lock_init(&svsk->sk_lock);
1637 INIT_LIST_HEAD(&svsk->sk_deferred); 1643 INIT_LIST_HEAD(&svsk->sk_deferred);
1638 INIT_LIST_HEAD(&svsk->sk_ready); 1644 INIT_LIST_HEAD(&svsk->sk_ready);
1639 mutex_init(&svsk->sk_mutex); 1645 mutex_init(&svsk->sk_mutex);
@@ -1857,9 +1863,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1857 dprintk("revisit queued\n"); 1863 dprintk("revisit queued\n");
1858 svsk = dr->svsk; 1864 svsk = dr->svsk;
1859 dr->svsk = NULL; 1865 dr->svsk = NULL;
1860 spin_lock_bh(&svsk->sk_defer_lock); 1866 spin_lock(&svsk->sk_lock);
1861 list_add(&dr->handle.recent, &svsk->sk_deferred); 1867 list_add(&dr->handle.recent, &svsk->sk_deferred);
1862 spin_unlock_bh(&svsk->sk_defer_lock); 1868 spin_unlock(&svsk->sk_lock);
1863 set_bit(SK_DEFERRED, &svsk->sk_flags); 1869 set_bit(SK_DEFERRED, &svsk->sk_flags);
1864 svc_sock_enqueue(svsk); 1870 svc_sock_enqueue(svsk);
1865 svc_sock_put(svsk); 1871 svc_sock_put(svsk);
@@ -1925,7 +1931,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1925 1931
1926 if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) 1932 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1927 return NULL; 1933 return NULL;
1928 spin_lock_bh(&svsk->sk_defer_lock); 1934 spin_lock(&svsk->sk_lock);
1929 clear_bit(SK_DEFERRED, &svsk->sk_flags); 1935 clear_bit(SK_DEFERRED, &svsk->sk_flags);
1930 if (!list_empty(&svsk->sk_deferred)) { 1936 if (!list_empty(&svsk->sk_deferred)) {
1931 dr = list_entry(svsk->sk_deferred.next, 1937 dr = list_entry(svsk->sk_deferred.next,
@@ -1934,6 +1940,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1934 list_del_init(&dr->handle.recent); 1940 list_del_init(&dr->handle.recent);
1935 set_bit(SK_DEFERRED, &svsk->sk_flags); 1941 set_bit(SK_DEFERRED, &svsk->sk_flags);
1936 } 1942 }
1937 spin_unlock_bh(&svsk->sk_defer_lock); 1943 spin_unlock(&svsk->sk_lock);
1938 return dr; 1944 return dr;
1939} 1945}
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index d6071cbf13d7..f4d2f68452ba 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -211,7 +211,7 @@ void find_export_symbols(char * filename)
211 * Document all external or internal functions in a file. 211 * Document all external or internal functions in a file.
212 * Call kernel-doc with following parameters: 212 * Call kernel-doc with following parameters:
213 * kernel-doc -docbook -nofunction function_name1 filename 213 * kernel-doc -docbook -nofunction function_name1 filename
214 * function names are obtained from all the the src files 214 * function names are obtained from all the src files
215 * by find_export_symbols. 215 * by find_export_symbols.
216 * intfunc uses -nofunction 216 * intfunc uses -nofunction
217 * extfunc uses -function 217 * extfunc uses -function
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index a325a0c890dc..e5bf649e516a 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -337,6 +337,7 @@ sub get_kernel_version() {
337 } 337 }
338 return $version; 338 return $version;
339} 339}
340my $kernelversion = get_kernel_version();
340 341
341# generate a sequence of code that will splice in highlighting information 342# generate a sequence of code that will splice in highlighting information
342# using the s// operator. 343# using the s// operator.
@@ -610,7 +611,7 @@ sub output_function_xml(%) {
610 print "<refmeta>\n"; 611 print "<refmeta>\n";
611 print " <refentrytitle><phrase>".$args{'function'}."</phrase></refentrytitle>\n"; 612 print " <refentrytitle><phrase>".$args{'function'}."</phrase></refentrytitle>\n";
612 print " <manvolnum>9</manvolnum>\n"; 613 print " <manvolnum>9</manvolnum>\n";
613 print " <refmiscinfo class=\"version\">" . get_kernel_version() . "</refmiscinfo>\n"; 614 print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
614 print "</refmeta>\n"; 615 print "</refmeta>\n";
615 print "<refnamediv>\n"; 616 print "<refnamediv>\n";
616 print " <refname>".$args{'function'}."</refname>\n"; 617 print " <refname>".$args{'function'}."</refname>\n";
@@ -687,7 +688,7 @@ sub output_struct_xml(%) {
687 print "<refmeta>\n"; 688 print "<refmeta>\n";
688 print " <refentrytitle><phrase>".$args{'type'}." ".$args{'struct'}."</phrase></refentrytitle>\n"; 689 print " <refentrytitle><phrase>".$args{'type'}." ".$args{'struct'}."</phrase></refentrytitle>\n";
689 print " <manvolnum>9</manvolnum>\n"; 690 print " <manvolnum>9</manvolnum>\n";
690 print " <refmiscinfo class=\"version\">" . get_kernel_version() . "</refmiscinfo>\n"; 691 print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
691 print "</refmeta>\n"; 692 print "</refmeta>\n";
692 print "<refnamediv>\n"; 693 print "<refnamediv>\n";
693 print " <refname>".$args{'type'}." ".$args{'struct'}."</refname>\n"; 694 print " <refname>".$args{'type'}." ".$args{'struct'}."</refname>\n";
@@ -772,7 +773,7 @@ sub output_enum_xml(%) {
772 print "<refmeta>\n"; 773 print "<refmeta>\n";
773 print " <refentrytitle><phrase>enum ".$args{'enum'}."</phrase></refentrytitle>\n"; 774 print " <refentrytitle><phrase>enum ".$args{'enum'}."</phrase></refentrytitle>\n";
774 print " <manvolnum>9</manvolnum>\n"; 775 print " <manvolnum>9</manvolnum>\n";
775 print " <refmiscinfo class=\"version\">" . get_kernel_version() . "</refmiscinfo>\n"; 776 print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
776 print "</refmeta>\n"; 777 print "</refmeta>\n";
777 print "<refnamediv>\n"; 778 print "<refnamediv>\n";
778 print " <refname>enum ".$args{'enum'}."</refname>\n"; 779 print " <refname>enum ".$args{'enum'}."</refname>\n";
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 480e18b00aa6..113dc77b9f60 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1343,6 +1343,7 @@ static void add_header(struct buffer *b, struct module *mod)
1343 buf_printf(b, "#ifdef CONFIG_MODULE_UNLOAD\n" 1343 buf_printf(b, "#ifdef CONFIG_MODULE_UNLOAD\n"
1344 " .exit = cleanup_module,\n" 1344 " .exit = cleanup_module,\n"
1345 "#endif\n"); 1345 "#endif\n");
1346 buf_printf(b, " .arch = MODULE_ARCH_INIT,\n");
1346 buf_printf(b, "};\n"); 1347 buf_printf(b, "};\n");
1347} 1348}
1348 1349
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 23b51047494e..b32a459c0683 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -137,7 +137,7 @@ config SECURITY_SELINUX_POLICYDB_VERSION_MAX
137 137
138 Examples: 138 Examples:
139 For the Fedora Core 3 or 4 Linux distributions, enable this option 139 For the Fedora Core 3 or 4 Linux distributions, enable this option
140 and set the value via the next option. For Fedore Core 5 and later, 140 and set the value via the next option. For Fedora Core 5 and later,
141 do not enable this option. 141 do not enable this option.
142 142
143 If you are unsure how to answer this question, answer N. 143 If you are unsure how to answer this question, answer N.
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index b2927523d79d..829ca38b595e 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -146,7 +146,7 @@ config SND_VERBOSE_PROCFS
146 default y 146 default y
147 help 147 help
148 Say Y here to include code for verbose procfs contents (provides 148 Say Y here to include code for verbose procfs contents (provides
149 usefull information to developers when a problem occurs). On the 149 useful information to developers when a problem occurs). On the
150 other side, it makes the ALSA subsystem larger. 150 other side, it makes the ALSA subsystem larger.
151 151
152config SND_VERBOSE_PRINTK 152config SND_VERBOSE_PRINTK
diff --git a/sound/oss/es1371.c b/sound/oss/es1371.c
index 974dd732b149..593a3aac12ce 100644
--- a/sound/oss/es1371.c
+++ b/sound/oss/es1371.c
@@ -100,7 +100,7 @@
100 * Tjeerd Mulder <tjeerd.mulder@fujitsu-siemens.com> 100 * Tjeerd Mulder <tjeerd.mulder@fujitsu-siemens.com>
101 * 05.01.2001 0.29 Hopefully updates will not be required anymore when Creative bumps 101 * 05.01.2001 0.29 Hopefully updates will not be required anymore when Creative bumps
102 * the CT5880 revision. 102 * the CT5880 revision.
103 * suggested by Stephan Müller <smueller@chronox.de> 103 * suggested by Stephan Müller <smueller@chronox.de>
104 * 31.01.2001 0.30 Register/Unregister gameport 104 * 31.01.2001 0.30 Register/Unregister gameport
105 * Fix SETTRIGGER non OSS API conformity 105 * Fix SETTRIGGER non OSS API conformity
106 * 14.07.2001 0.31 Add list of laptops needing amplifier control 106 * 14.07.2001 0.31 Add list of laptops needing amplifier control
diff --git a/sound/oss/pas2_pcm.c b/sound/oss/pas2_pcm.c
index 4af6aafa3d86..36c3ea62086b 100644
--- a/sound/oss/pas2_pcm.c
+++ b/sound/oss/pas2_pcm.c
@@ -85,7 +85,7 @@ static int pcm_set_speed(int arg)
85 * come from the SDK found on ftp.uwp.edu:/pub/msdos/proaudio/. 85 * come from the SDK found on ftp.uwp.edu:/pub/msdos/proaudio/.
86 * 86 *
87 * I cleared bit 5 of these values, since that bit controls the master 87 * I cleared bit 5 of these values, since that bit controls the master
88 * mute flag. (Olav Wölfelschneider) 88 * mute flag. (Olav Wölfelschneider)
89 * 89 *
90 */ 90 */
91#if !defined NO_AUTO_FILTER_SET 91#if !defined NO_AUTO_FILTER_SET
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index d98d311542ed..3bc1f6e9e4a3 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -18,7 +18,7 @@
18 * Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support 18 * Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support
19 * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support 19 * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
20 * Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support 20 * Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support
21 * Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support 21 * Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support
22 * Muli Ben-Yehuda <mulix@mulix.org> 22 * Muli Ben-Yehuda <mulix@mulix.org>
23 * 23 *
24 * 24 *
@@ -89,7 +89,7 @@
89 * use set_current_state, properly release resources on failure in 89 * use set_current_state, properly release resources on failure in
90 * trident_probe, get rid of check_region 90 * trident_probe, get rid of check_region
91 * v0.14.9c 91 * v0.14.9c
92 * August 10 2001 Peter Wächtler <pwaechtler@loewe-komp.de> 92 * August 10 2001 Peter Wächtler <pwaechtler@loewe-komp.de>
93 * added support for Tvia (formerly Integraphics/IGST) CyberPro5050 93 * added support for Tvia (formerly Integraphics/IGST) CyberPro5050
94 * this chip is often found in settop boxes (combined video+audio) 94 * this chip is often found in settop boxes (combined video+audio)
95 * v0.14.9b 95 * v0.14.9b
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index a9eec2a2357d..3bfb2102fc5d 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1083,7 +1083,7 @@ static void check_volume_resolution(struct snd_ac97 *ac97, int reg, unsigned cha
1083 unsigned short val; 1083 unsigned short val;
1084 snd_ac97_write(ac97, reg, 0x8080 | cbit[i] | (cbit[i] << 8)); 1084 snd_ac97_write(ac97, reg, 0x8080 | cbit[i] | (cbit[i] << 8));
1085 /* Do the read twice due to buffers on some ac97 codecs. 1085 /* Do the read twice due to buffers on some ac97 codecs.
1086 * e.g. The STAC9704 returns exactly what you wrote the the register 1086 * e.g. The STAC9704 returns exactly what you wrote to the register
1087 * if you read it immediately. This causes the detect routine to fail. 1087 * if you read it immediately. This causes the detect routine to fail.
1088 */ 1088 */
1089 val = snd_ac97_read(ac97, reg); 1089 val = snd_ac97_read(ac97, reg);
diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h
index e65d669af639..e47861ccd6e7 100644
--- a/sound/pci/ice1712/delta.h
+++ b/sound/pci/ice1712/delta.h
@@ -63,7 +63,7 @@ extern const struct snd_ice1712_card_info snd_ice1712_delta_cards[];
63 /* look to CS8414 datasheet */ 63 /* look to CS8414 datasheet */
64#define ICE1712_DELTA_SPDIF_OUT_STAT_CLOCK 0x04 64#define ICE1712_DELTA_SPDIF_OUT_STAT_CLOCK 0x04
65 /* S/PDIF output status clock */ 65 /* S/PDIF output status clock */
66 /* (writting on rising edge - 0->1) */ 66 /* (writing on rising edge - 0->1) */
67 /* all except Delta44 */ 67 /* all except Delta44 */
68 /* look to CS8404A datasheet */ 68 /* look to CS8404A datasheet */
69#define ICE1712_DELTA_SPDIF_OUT_STAT_DATA 0x08 69#define ICE1712_DELTA_SPDIF_OUT_STAT_DATA 0x08
@@ -100,7 +100,7 @@ extern const struct snd_ice1712_card_info snd_ice1712_delta_cards[];
100 /* AKM4524 serial data */ 100 /* AKM4524 serial data */
101#define ICE1712_DELTA_CODEC_SERIAL_CLOCK 0x20 101#define ICE1712_DELTA_CODEC_SERIAL_CLOCK 0x20
102 /* AKM4524 serial clock */ 102 /* AKM4524 serial clock */
103 /* (writting on rising edge - 0->1 */ 103 /* (writing on rising edge - 0->1 */
104#define ICE1712_DELTA_CODEC_CHIP_A 0x40 104#define ICE1712_DELTA_CODEC_CHIP_A 0x40
105#define ICE1712_DELTA_CODEC_CHIP_B 0x80 105#define ICE1712_DELTA_CODEC_CHIP_B 0x80
106 /* 1 - select chip A or B */ 106 /* 1 - select chip A or B */
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 21386da3bc86..ac007cec0879 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -472,7 +472,7 @@ static int snd_mixart_prepare(struct snd_pcm_substream *subs)
472 struct snd_mixart *chip = snd_pcm_substream_chip(subs); 472 struct snd_mixart *chip = snd_pcm_substream_chip(subs);
473 struct mixart_stream *stream = subs->runtime->private_data; 473 struct mixart_stream *stream = subs->runtime->private_data;
474 474
475 /* TODO de façon non bloquante, réappliquer les hw_params (rate, bits, codec) */ 475 /* TODO de façon non bloquante, réappliquer les hw_params (rate, bits, codec) */
476 476
477 snd_printdd("snd_mixart_prepare\n"); 477 snd_printdd("snd_mixart_prepare\n");
478 478
diff --git a/sound/soc/pxa/pxa2xx-ac97.h b/sound/soc/pxa/pxa2xx-ac97.h
index 4c4b882316ac..b8ccfee095c4 100644
--- a/sound/soc/pxa/pxa2xx-ac97.h
+++ b/sound/soc/pxa/pxa2xx-ac97.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/sound/arm/pxa2xx-ac97.h 2 * linux/sound/soc/pxa/pxa2xx-ac97.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/pxa/pxa2xx-i2s.h b/sound/soc/pxa/pxa2xx-i2s.h
index a2484f0881f1..4435bd9f884f 100644
--- a/sound/soc/pxa/pxa2xx-i2s.h
+++ b/sound/soc/pxa/pxa2xx-i2s.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/sound/arm/pxa2xx-i2s.h 2 * linux/sound/soc/pxa/pxa2xx-i2s.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 25a2a7333006..e07085a7cfc3 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -673,7 +673,7 @@ static s32 *dbri_cmdlock(struct snd_dbri * dbri, int len)
673} 673}
674 674
675/* 675/*
676 * Send prepared cmd string. It works by writting a JUMP cmd into 676 * Send prepared cmd string. It works by writing a JUMP cmd into
677 * the last WAIT cmd and force DBRI to reread the cmd. 677 * the last WAIT cmd and force DBRI to reread the cmd.
678 * The JUMP cmd points to the new cmd string. 678 * The JUMP cmd points to the new cmd string.
679 * It also releases the cmdlock spinlock. 679 * It also releases the cmdlock spinlock.