-rw-r--r--  Documentation/RCU/checklist.txt | 12
-rw-r--r--  Documentation/feature-removal-schedule.txt | 9
-rw-r--r--  Documentation/filesystems/squashfs.txt | 2
-rw-r--r--  Documentation/networking/ipv6.txt | 35
-rw-r--r--  Documentation/x86/earlyprintk.txt | 101
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/mm/init.c | 20
-rw-r--r--  arch/arm/kernel/setup.c | 13
-rw-r--r--  arch/arm/mach-at91/at91sam9263_devices.c | 105
-rw-r--r--  arch/arm/mach-at91/include/mach/board.h | 3
-rw-r--r--  arch/arm/mach-at91/pm.c | 1
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 2
-rw-r--r--  arch/arm/mm/abort-ev6.S | 3
-rw-r--r--  arch/arm/plat-s3c64xx/irq-eint.c | 2
-rw-r--r--  arch/avr32/Kconfig | 2
-rw-r--r--  arch/blackfin/Kconfig | 7
-rw-r--r--  arch/blackfin/Kconfig.debug | 6
-rw-r--r--  arch/blackfin/configs/BF518F-EZBRD_defconfig | 63
-rw-r--r--  arch/blackfin/configs/BF527-EZKIT_defconfig | 4
-rw-r--r--  arch/blackfin/configs/BF533-EZKIT_defconfig | 4
-rw-r--r--  arch/blackfin/configs/BF533-STAMP_defconfig | 4
-rw-r--r--  arch/blackfin/configs/BF537-STAMP_defconfig | 14
-rw-r--r--  arch/blackfin/configs/BF538-EZKIT_defconfig | 4
-rw-r--r--  arch/blackfin/configs/BF548-EZKIT_defconfig | 6
-rw-r--r--  arch/blackfin/configs/BF561-EZKIT_defconfig | 4
-rw-r--r--  arch/blackfin/configs/BlackStamp_defconfig | 4
-rw-r--r--  arch/blackfin/configs/CM-BF527_defconfig | 4
-rw-r--r--  arch/blackfin/configs/CM-BF548_defconfig | 6
-rw-r--r--  arch/blackfin/configs/IP0X_defconfig | 2
-rw-r--r--  arch/blackfin/configs/SRV1_defconfig | 4
-rw-r--r--  arch/blackfin/include/asm/Kbuild | 1
-rw-r--r--  arch/blackfin/include/asm/bfin_sport.h | 45
-rw-r--r--  arch/blackfin/include/asm/ipipe.h | 100
-rw-r--r--  arch/blackfin/include/asm/ipipe_base.h | 12
-rw-r--r--  arch/blackfin/include/asm/irq.h | 36
-rw-r--r--  arch/blackfin/include/asm/percpu.h | 10
-rw-r--r--  arch/blackfin/include/asm/thread_info.h | 2
-rw-r--r--  arch/blackfin/kernel/Makefile | 8
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c | 4
-rw-r--r--  arch/blackfin/kernel/ipipe.c | 176
-rw-r--r--  arch/blackfin/kernel/irqchip.c | 14
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c | 9
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 5
-rw-r--r--  arch/blackfin/kernel/setup.c | 10
-rw-r--r--  arch/blackfin/kernel/time.c | 5
-rw-r--r--  arch/blackfin/mach-bf518/boards/ezbrd.c | 33
-rw-r--r--  arch/blackfin/mach-bf518/include/mach/anomaly.h | 17
-rw-r--r--  arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h | 4
-rw-r--r--  arch/blackfin/mach-bf527/boards/cm_bf527.c | 26
-rw-r--r--  arch/blackfin/mach-bf527/boards/ezbrd.c | 24
-rw-r--r--  arch/blackfin/mach-bf527/include/mach/anomaly.h | 6
-rw-r--r--  arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h | 4
-rw-r--r--  arch/blackfin/mach-bf533/boards/Kconfig | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/Makefile | 1
-rw-r--r--  arch/blackfin/mach-bf533/boards/blackstamp.c | 24
-rw-r--r--  arch/blackfin/mach-bf533/boards/cm_bf533.c | 24
-rw-r--r--  arch/blackfin/mach-bf533/boards/generic_board.c | 126
-rw-r--r--  arch/blackfin/mach-bf533/boards/ip0x.c | 13
-rw-r--r--  arch/blackfin/mach-bf533/include/mach/anomaly.h | 7
-rw-r--r--  arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h | 2
-rw-r--r--  arch/blackfin/mach-bf537/boards/Kconfig | 5
-rw-r--r--  arch/blackfin/mach-bf537/boards/Makefile | 1
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537.c | 26
-rw-r--r--  arch/blackfin/mach-bf537/boards/generic_board.c | 745
-rw-r--r--  arch/blackfin/mach-bf537/boards/minotaur.c | 24
-rw-r--r--  arch/blackfin/mach-bf537/boards/pnav10.c | 24
-rw-r--r--  arch/blackfin/mach-bf537/boards/tcm_bf537.c | 24
-rw-r--r--  arch/blackfin/mach-bf537/include/mach/anomaly.h | 7
-rw-r--r--  arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h | 4
-rw-r--r--  arch/blackfin/mach-bf538/include/mach/anomaly.h | 6
-rw-r--r--  arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h | 4
-rw-r--r--  arch/blackfin/mach-bf548/include/mach/anomaly.h | 22
-rw-r--r--  arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h | 22
-rw-r--r--  arch/blackfin/mach-bf548/include/mach/irq.h | 8
-rw-r--r--  arch/blackfin/mach-bf561/boards/Kconfig | 5
-rw-r--r--  arch/blackfin/mach-bf561/boards/Makefile | 1
-rw-r--r--  arch/blackfin/mach-bf561/boards/cm_bf561.c | 15
-rw-r--r--  arch/blackfin/mach-bf561/boards/generic_board.c | 113
-rw-r--r--  arch/blackfin/mach-bf561/include/mach/anomaly.h | 7
-rw-r--r--  arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h | 2
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c | 9
-rw-r--r--  arch/blackfin/mach-common/cache.S | 22
-rw-r--r--  arch/blackfin/mach-common/clocks-init.c | 2
-rw-r--r--  arch/blackfin/mach-common/dpmc_modes.S | 24
-rw-r--r--  arch/blackfin/mach-common/entry.S | 61
-rw-r--r--  arch/blackfin/mach-common/interrupt.S | 12
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c | 126
-rw-r--r--  arch/blackfin/mach-common/smp.c | 6
-rw-r--r--  arch/blackfin/mm/init.c | 2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c | 7
-rw-r--r--  arch/m68knommu/platform/5206e/config.c | 1
-rw-r--r--  arch/m68knommu/platform/528x/config.c | 228
-rw-r--r--  arch/mips/include/asm/compat.h | 7
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc610.c | 4
-rw-r--r--  arch/s390/crypto/aes_s390.c | 2
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 1
-rw-r--r--  arch/x86/Kconfig | 12
-rw-r--r--  arch/x86/include/asm/apic.h | 2
-rw-r--r--  arch/x86/include/asm/apicdef.h | 1
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 53
-rw-r--r--  arch/x86/include/asm/efi.h | 2
-rw-r--r--  arch/x86/include/asm/entry_arch.h | 2
-rw-r--r--  arch/x86/include/asm/fixmap.h | 10
-rw-r--r--  arch/x86/include/asm/hardirq.h | 1
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 1
-rw-r--r--  arch/x86/include/asm/i387.h | 8
-rw-r--r--  arch/x86/include/asm/init.h | 18
-rw-r--r--  arch/x86/include/asm/io.h | 3
-rw-r--r--  arch/x86/include/asm/irq.h | 1
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 5
-rw-r--r--  arch/x86/include/asm/kexec.h | 13
-rw-r--r--  arch/x86/include/asm/linkage.h | 16
-rw-r--r--  arch/x86/include/asm/mce.h | 35
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 43
-rw-r--r--  arch/x86/include/asm/msr-index.h | 5
-rw-r--r--  arch/x86/include/asm/page_types.h | 6
-rw-r--r--  arch/x86/include/asm/pat.h | 5
-rw-r--r--  arch/x86/include/asm/percpu.h | 8
-rw-r--r--  arch/x86/include/asm/pgtable.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_32_types.h | 5
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 1
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 4
-rw-r--r--  arch/x86/include/asm/xen/page.h | 1
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 17
-rw-r--r--  arch/x86/kernel/apic/apic.c | 15
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 52
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 1
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 25
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 530
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 22
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 207
-rw-r--r--  arch/x86/kernel/cpu/mcheck/threshold.c | 29
-rw-r--r--  arch/x86/kernel/ds.c | 3
-rw-r--r--  arch/x86/kernel/efi.c | 7
-rw-r--r--  arch/x86/kernel/efi_64.c | 21
-rw-r--r--  arch/x86/kernel/entry_64.S | 2
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/irq.c | 34
-rw-r--r--  arch/x86/kernel/irq_32.c | 29
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 3
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 3
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 17
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 99
-rw-r--r--  arch/x86/kernel/mpparse.c | 25
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 24
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 189
-rw-r--r--  arch/x86/kernel/setup.c | 9
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 398
-rw-r--r--  arch/x86/kernel/smpboot.c | 78
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 2
-rw-r--r--  arch/x86/kernel/uv_time.c | 393
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 7
-rw-r--r--  arch/x86/lguest/boot.c | 21
-rw-r--r--  arch/x86/math-emu/fpu_aux.c | 31
-rw-r--r--  arch/x86/mm/highmem_32.c | 9
-rw-r--r--  arch/x86/mm/init.c | 344
-rw-r--r--  arch/x86/mm/init_32.c | 273
-rw-r--r--  arch/x86/mm/init_64.c | 352
-rw-r--r--  arch/x86/mm/ioremap.c | 35
-rw-r--r--  arch/x86/mm/kmmio.c | 164
-rw-r--r--  arch/x86/mm/memtest.c | 3
-rw-r--r--  arch/x86/mm/numa_32.c | 5
-rw-r--r--  arch/x86/mm/testmmiotrace.c | 70
-rw-r--r--  arch/x86/xen/enlighten.c | 10
-rw-r--r--  arch/x86/xen/mmu.c | 7
-rw-r--r--  arch/x86/xen/smp.c | 6
-rw-r--r--  arch/xtensa/Kconfig | 3
-rw-r--r--  arch/xtensa/kernel/setup.c | 2
-rw-r--r--  arch/xtensa/kernel/traps.c | 1
-rw-r--r--  arch/xtensa/mm/fault.c | 1
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 6
-rw-r--r--  block/blk-merge.c | 25
-rw-r--r--  block/blktrace.c | 2
-rw-r--r--  crypto/api.c | 15
-rw-r--r--  drivers/acpi/processor_perflib.c | 4
-rw-r--r--  drivers/ata/ahci.c | 24
-rw-r--r--  drivers/ata/libata-core.c | 14
-rw-r--r--  drivers/ata/libata-eh.c | 7
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/base/node.c | 2
-rw-r--r--  drivers/block/aoe/aoedev.c | 2
-rw-r--r--  drivers/block/cciss.c | 8
-rw-r--r--  drivers/block/loop.c | 3
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/char/agp/amd64-agp.c | 13
-rw-r--r--  drivers/char/agp/intel-agp.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq.c | 51
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r--  drivers/crypto/padlock-aes.c | 2
-rw-r--r--  drivers/crypto/padlock-sha.c | 4
-rw-r--r--  drivers/dca/dca-core.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 6
-rw-r--r--  drivers/dma/fsldma.c | 8
-rw-r--r--  drivers/dma/ioat.c | 2
-rw-r--r--  drivers/dma/ioat_dca.c | 26
-rw-r--r--  drivers/dma/ioat_dma.c | 39
-rw-r--r--  drivers/dma/ioatdma.h | 8
-rw-r--r--  drivers/dma/ioatdma_hw.h | 2
-rw-r--r--  drivers/dma/ioatdma_registers.h | 2
-rw-r--r--  drivers/dma/iop-adma.c | 18
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 18
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 2
-rw-r--r--  drivers/hwmon/lm85.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 4
-rw-r--r--  drivers/ide/Kconfig | 5
-rw-r--r--  drivers/ide/Makefile | 1
-rw-r--r--  drivers/ide/at91_ide.c | 467
-rw-r--r--  drivers/ide/ide-disk_proc.c | 2
-rw-r--r--  drivers/ide/ide-floppy_proc.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 3
-rw-r--r--  drivers/ide/ide-iops.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 7
-rw-r--r--  drivers/ide/ide-proc.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 2
-rw-r--r--  drivers/lguest/lguest_device.c | 6
-rw-r--r--  drivers/md/md.c | 30
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 15
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 3
-rw-r--r--  drivers/mtd/maps/physmap.c | 19
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/net/arm/Makefile | 2
-rw-r--r--  drivers/net/arm/etherh.c | 10
-rw-r--r--  drivers/net/arm/ks8695net.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/jme.c | 3
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 3
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 3
-rw-r--r--  drivers/net/smc911x.h | 12
-rw-r--r--  drivers/net/sungem.c | 2
-rw-r--r--  drivers/net/tg3.c | 3
-rw-r--r--  drivers/net/tokenring/tmspci.c | 18
-rw-r--r--  drivers/net/ucc_geth_mii.c | 4
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 17
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 32
-rw-r--r--  drivers/video/i810/i810_main.c | 5
-rw-r--r--  drivers/video/pxafb.c | 2
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 6
-rw-r--r--  drivers/watchdog/gef_wdt.c | 2
-rw-r--r--  drivers/watchdog/ks8695_wdt.c | 1
-rw-r--r--  drivers/watchdog/orion5x_wdt.c | 1
-rw-r--r--  drivers/watchdog/rc32434_wdt.c | 168
-rw-r--r--  fs/btrfs/ctree.c | 10
-rw-r--r--  fs/btrfs/disk-io.c | 4
-rw-r--r--  fs/btrfs/extent-tree.c | 4
-rw-r--r--  fs/btrfs/locking.c | 6
-rw-r--r--  fs/btrfs/locking.h | 2
-rw-r--r--  fs/devpts/inode.c | 5
-rw-r--r--  fs/ext4/ialloc.c | 8
-rw-r--r--  fs/squashfs/block.c | 13
-rw-r--r--  fs/squashfs/cache.c | 4
-rw-r--r--  fs/squashfs/inode.c | 6
-rw-r--r--  fs/squashfs/squashfs.h | 2
-rw-r--r--  fs/squashfs/super.c | 2
-rw-r--r--  include/linux/ata.h | 2
-rw-r--r--  include/linux/bootmem.h | 36
-rw-r--r--  include/linux/cpufreq.h | 1
-rw-r--r--  include/linux/dmaengine.h | 7
-rw-r--r--  include/linux/hdreg.h | 1
-rw-r--r--  include/linux/ide.h | 1
-rw-r--r--  include/linux/libata.h | 6
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/percpu.h | 108
-rw-r--r--  include/linux/rcuclassic.h | 6
-rw-r--r--  include/linux/rcupdate.h | 4
-rw-r--r--  include/linux/rcupreempt.h | 15
-rw-r--r--  include/linux/rcutree.h | 6
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/serio.h | 2
-rw-r--r--  include/linux/vmalloc.h | 4
-rw-r--r--  include/net/net_namespace.h | 27
-rw-r--r--  init/Kconfig | 30
-rw-r--r--  init/main.c | 3
-rw-r--r--  kernel/fork.c | 11
-rw-r--r--  kernel/module.c | 64
-rw-r--r--  kernel/rcuclassic.c | 4
-rw-r--r--  kernel/rcupdate.c | 12
-rw-r--r--  kernel/rcupreempt.c | 3
-rw-r--r--  kernel/rcutree.c | 4
-rw-r--r--  kernel/sched.c | 21
-rw-r--r--  kernel/softirq.c | 1
-rw-r--r--  kernel/stop_machine.c | 2
-rw-r--r--  kernel/sys.c | 31
-rw-r--r--  kernel/tsacct.c | 6
-rw-r--r--  kernel/user.c | 32
-rw-r--r--  lib/idr.c | 2
-rw-r--r--  mm/Makefile | 4
-rw-r--r--  mm/allocpercpu.c | 32
-rw-r--r--  mm/bootmem.c | 35
-rw-r--r--  mm/percpu.c | 1226
-rw-r--r--  mm/vmalloc.c | 94
-rw-r--r--  net/802/tr.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/core/dev.c | 61
-rw-r--r--  net/core/net-sysfs.c | 4
-rw-r--r--  net/core/net_namespace.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 53
-rw-r--r--  net/ipv6/af_inet6.c | 21
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/sched/act_police.c | 13
-rw-r--r--  net/sctp/protocol.c | 16
-rw-r--r--  net/sctp/sm_sideeffect.c | 54
-rw-r--r--  net/sctp/sm_statefuns.c | 16
-rw-r--r--  net/wireless/reg.c | 3
-rw-r--r--  security/smack/smack_lsm.c | 43
-rw-r--r--  security/smack/smackfs.c | 64
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 6
320 files changed, 6530 insertions, 3656 deletions
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 6e253407b3dc..accfe2f5247d 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -298,3 +298,15 @@ over a rather long period of time, but improvements are always welcome!
 
 	Note that, rcu_assign_pointer() and rcu_dereference() relate to
 	SRCU just as they do to other forms of RCU.
+
+15.	The whole point of call_rcu(), synchronize_rcu(), and friends
+	is to wait until all pre-existing readers have finished before
+	carrying out some otherwise-destructive operation.  It is
+	therefore critically important to -first- remove any path
+	that readers can follow that could be affected by the
+	destructive operation, and -only- -then- invoke call_rcu(),
+	synchronize_rcu(), or friends.
+
+	Because these primitives only wait for pre-existing readers,
+	it is the caller's responsibility to guarantee safety to
+	any subsequent readers.
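
The new rule above is the classic unpublish-then-reclaim idiom. A minimal C
sketch of the pattern, assuming a hypothetical global_ptr protected by RCU
(illustrative only, not part of this patch):

    struct foo {
        int data;
    };

    static struct foo *global_ptr;

    void foo_retire(void)
    {
        struct foo *old = global_ptr;

        /* -first- remove the path readers use to find the object */
        rcu_assign_pointer(global_ptr, NULL);
        /* wait until all pre-existing readers have finished */
        synchronize_rcu();
        /* -only- -then- carry out the destructive operation */
        kfree(old);
    }

Readers that start after the rcu_assign_pointer() see NULL and never touch
*old, which is what makes the kfree() safe.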
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 5ddbe350487a..20d3b94703a4 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -335,3 +335,12 @@ Why: In 2.6.18 the Secmark concept was introduced to replace the "compat_net"
 	Secmark, it is time to deprecate the older mechanism and start the
 	process of removing the old code.
 Who:	Paul Moore <paul.moore@hp.com>
+---------------------------
+
+What:	sysfs ui for changing p4-clockmod parameters
+When:	September 2009
+Why:	See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
+	e088e4c9cdb618675874becb91b2fd581ee707e6.
+	Removal is subject to fixing any remaining bugs in ACPI which may
+	cause the thermal throttling not to happen at the right time.
+Who:	Dave Jones <davej@redhat.com>, Matthew Garrett <mjg@redhat.com>
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index 3e79e4a7a392..b324c033035a 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -22,7 +22,7 @@ Squashfs filesystem features versus Cramfs:
 
 			Squashfs		Cramfs
 
-Max filesystem size:	2^64			16 MiB
+Max filesystem size:	2^64			256 MiB
 Max file size:		~ 2 TiB			16 MiB
 Max files:		unlimited		unlimited
 Max directories:	unlimited		unlimited
diff --git a/Documentation/networking/ipv6.txt b/Documentation/networking/ipv6.txt
new file mode 100644
index 000000000000..268e5c103dd8
--- /dev/null
+++ b/Documentation/networking/ipv6.txt
@@ -0,0 +1,35 @@
+
+Options for the ipv6 module are supplied as parameters at load time.
+
+Module options may be given as command line arguments to the insmod
+or modprobe command, but are usually specified in either the
+/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
+distro-specific configuration file.
+
+The available ipv6 module parameters are listed below.  If a parameter
+is not specified the default value is used.
+
+The parameters are as follows:
+
+disable
+
+	Specifies whether to load the IPv6 module, but disable all
+	its functionality.  This might be used when another module
+	has a dependency on the IPv6 module being loaded, but no
+	IPv6 addresses or operations are desired.
+
+	The possible values and their effects are:
+
+	0
+		IPv6 is enabled.
+
+		This is the default value.
+
+	1
+		IPv6 is disabled.
+
+		No IPv6 addresses will be added to interfaces, and
+		it will not be possible to open an IPv6 socket.
+
+		A reboot is required to enable IPv6.
+
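
One observable consequence of disable=1 described above is that
socket(AF_INET6, ...) fails. A small userspace probe (illustrative only,
not part of the patch) makes the behaviour easy to verify:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0) {
            /* expected when the ipv6 module was loaded with disable=1 */
            printf("IPv6 socket unavailable: %s\n", strerror(errno));
            return 1;
        }
        printf("IPv6 socket created OK\n");
        close(fd);
        return 0;
    }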
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
new file mode 100644
index 000000000000..607b1a016064
--- /dev/null
+++ b/Documentation/x86/earlyprintk.txt
@@ -0,0 +1,101 @@
+
+Mini-HOWTO for using the earlyprintk=dbgp boot option with a
+USB2 Debug port key and a debug cable, on x86 systems.
+
+You need two computers, the 'USB debug key' special gadget and
+and two USB cables, connected like this:
+
+  [host/target] <-------> [USB debug key] <-------> [client/console]
+
+1. There are three specific hardware requirements:
+
+ a.) Host/target system needs to have USB debug port capability.
+
+ You can check this capability by looking at a 'Debug port' bit in
+ the lspci -vvv output:
+
+ # lspci -vvv
+ ...
+ 00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
+         Subsystem: Lenovo ThinkPad T61
+         Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
+         Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
+         Latency: 0
+         Interrupt: pin D routed to IRQ 19
+         Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
+         Capabilities: [50] Power Management version 2
+                 Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
+                 Status: D0 PME-Enable- DSel=0 DScale=0 PME+
+         Capabilities: [58] Debug port: BAR=1 offset=00a0
+                            ^^^^^^^^^^^ <==================== [ HERE ]
+         Kernel driver in use: ehci_hcd
+         Kernel modules: ehci-hcd
+ ...
+
+( If your system does not list a debug port capability then you probably
+  wont be able to use the USB debug key. )
+
+ b.) You also need a Netchip USB debug cable/key:
+
+        http://www.plxtech.com/products/NET2000/NET20DC/default.asp
+
+     This is a small blue plastic connector with two USB connections,
+     it draws power from its USB connections.
+
+ c.) Thirdly, you need a second client/console system with a regular USB port.
+
+2. Software requirements:
+
+ a.) On the host/target system:
+
+    You need to enable the following kernel config option:
+
+      CONFIG_EARLY_PRINTK_DBGP=y
+
+    And you need to add the boot command line: "earlyprintk=dbgp".
+    (If you are using Grub, append it to the 'kernel' line in
+     /etc/grub.conf)
+
+    NOTE: normally earlyprintk console gets turned off once the
+    regular console is alive - use "earlyprintk=dbgp,keep" to keep
+    this channel open beyond early bootup. This can be useful for
+    debugging crashes under Xorg, etc.
+
+ b.) On the client/console system:
+
+    You should enable the following kernel config option:
+
+      CONFIG_USB_SERIAL_DEBUG=y
+
+    On the next bootup with the modified kernel you should
+    get a /dev/ttyUSBx device(s).
+
+    Now this channel of kernel messages is ready to be used: start
+    your favorite terminal emulator (minicom, etc.) and set
+    it up to use /dev/ttyUSB0 - or use a raw 'cat /dev/ttyUSBx' to
+    see the raw output.
+
+ c.) On Nvidia Southbridge based systems: the kernel will try to probe
+     and find out which port has debug device connected.
+
+3. Testing that it works fine:
+
+   You can test the output by using earlyprintk=dbgp,keep and provoking
+   kernel messages on the host/target system. You can provoke a harmless
+   kernel message by for example doing:
+
+     echo h > /proc/sysrq-trigger
+
+   On the host/target system you should see this help line in "dmesg" output:
+
+     SysRq : HELP : loglevel(0-9) reBoot Crashdump terminate-all-tasks(E) memory-full-oom-kill(F) kill-all-tasks(I) saK show-backtrace-all-active-cpus(L) show-memory-usage(M) nice-all-RT-tasks(N) powerOff show-registers(P) show-all-timers(Q) unRaw Sync show-task-states(T) Unmount show-blocked-tasks(W) dump-ftrace-buffer(Z)
+
+   On the client/console system do:
+
+     cat /dev/ttyUSB0
+
+   And you should see the help line above displayed shortly after you've
+   provoked it on the host system.
+
+If it does not work then please ask about it on the linux-kernel@vger.kernel.org
+mailing list or contact the x86 maintainers.
diff --git a/Makefile b/Makefile
index 27fb890a2bff..c40d83aedebe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 5d7a16eab312..af71d38c8e41 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -189,9 +189,21 @@ callback_init(void * kernel_end)
 
 	if (alpha_using_srm) {
 		static struct vm_struct console_remap_vm;
-		unsigned long vaddr = VMALLOC_START;
+		unsigned long nr_pages = 0;
+		unsigned long vaddr;
 		unsigned long i, j;
 
+		/* calculate needed size */
+		for (i = 0; i < crb->map_entries; ++i)
+			nr_pages += crb->map[i].count;
+
+		/* register the vm area */
+		console_remap_vm.flags = VM_ALLOC;
+		console_remap_vm.size = nr_pages << PAGE_SHIFT;
+		vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+		vaddr = (unsigned long)console_remap_vm.addr;
+
 		/* Set up the third level PTEs and update the virtual
 		   addresses of the CRB entries.  */
 		for (i = 0; i < crb->map_entries; ++i) {
@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
 			vaddr += PAGE_SIZE;
 		}
 	}
-
-		/* Let vmalloc know that we've allocated some space.  */
-		console_remap_vm.flags = VM_ALLOC;
-		console_remap_vm.addr = (void *) VMALLOC_START;
-		console_remap_vm.size = vaddr - VMALLOC_START;
-		vmlist = &console_remap_vm;
 	}
 
 	callback_init_done = 1;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7049815d66d5..68d6494c0389 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -233,12 +233,13 @@ static void __init cacheid_init(void)
 	unsigned int cachetype = read_cpuid_cachetype();
 	unsigned int arch = cpu_architecture();
 
-	if (arch >= CPU_ARCH_ARMv7) {
-		cacheid = CACHEID_VIPT_NONALIASING;
-		if ((cachetype & (3 << 14)) == 1 << 14)
-			cacheid |= CACHEID_ASID_TAGGED;
-	} else if (arch >= CPU_ARCH_ARMv6) {
-		if (cachetype & (1 << 23))
+	if (arch >= CPU_ARCH_ARMv6) {
+		if ((cachetype & (7 << 29)) == 4 << 29) {
+			/* ARMv7 register format */
+			cacheid = CACHEID_VIPT_NONALIASING;
+			if ((cachetype & (3 << 14)) == 1 << 14)
+				cacheid |= CACHEID_ASID_TAGGED;
+		} else if (cachetype & (1 << 23))
 			cacheid = CACHEID_VIPT_ALIASING;
 		else
 			cacheid = CACHEID_VIPT_NONALIASING;
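
For reference, the bit tests in the new code read two fields of the ARM cache
type register: bits [31:29] hold the register format (0b100 means the ARMv7
layout) and, in that layout, bits [15:14] encode the L1 instruction-cache
policy, where 0b01 is ASID-tagged VIVT. A standalone C sketch of the same
decoding (helper names are illustrative, not from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool ctr_is_armv7_format(uint32_t cachetype)
    {
        /* bits [31:29]: cache type register format, 0b100 = ARMv7 */
        return (cachetype & (7u << 29)) == (4u << 29);
    }

    static bool ctr_l1i_asid_tagged(uint32_t cachetype)
    {
        /* ARMv7 bits [15:14]: L1 I-cache policy, 0b01 = ASID-tagged VIVT */
        return (cachetype & (3u << 14)) == (1u << 14);
    }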
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 134af97ff340..b7f233242315 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -347,6 +347,111 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
 void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
 #endif
 
+/* --------------------------------------------------------------------
+ *  Compact Flash (PCMCIA or IDE)
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE) || \
+	defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE)
+
+static struct at91_cf_data cf0_data;
+
+static struct resource cf0_resources[] = {
+	[0] = {
+		.start	= AT91_CHIPSELECT_4,
+		.end	= AT91_CHIPSELECT_4 + SZ_256M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
+	}
+};
+
+static struct platform_device cf0_device = {
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &cf0_data,
+	},
+	.resource	= cf0_resources,
+	.num_resources	= ARRAY_SIZE(cf0_resources),
+};
+
+static struct at91_cf_data cf1_data;
+
+static struct resource cf1_resources[] = {
+	[0] = {
+		.start	= AT91_CHIPSELECT_5,
+		.end	= AT91_CHIPSELECT_5 + SZ_256M - 1,
+		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
+	}
+};
+
+static struct platform_device cf1_device = {
+	.id		= 1,
+	.dev		= {
+		.platform_data	= &cf1_data,
+	},
+	.resource	= cf1_resources,
+	.num_resources	= ARRAY_SIZE(cf1_resources),
+};
+
+void __init at91_add_device_cf(struct at91_cf_data *data)
+{
+	unsigned long ebi0_csa;
+	struct platform_device *pdev;
+
+	if (!data)
+		return;
+
+	/*
+	 * assign CS4 or CS5 to SMC with Compact Flash logic support,
+	 * we assume SMC timings are configured by board code,
+	 * except True IDE where timings are controlled by driver
+	 */
+	ebi0_csa = at91_sys_read(AT91_MATRIX_EBI0CSA);
+	switch (data->chipselect) {
+	case 4:
+		at91_set_A_periph(AT91_PIN_PD6, 0);  /* EBI0_NCS4/CFCS0 */
+		ebi0_csa |= AT91_MATRIX_EBI0_CS4A_SMC_CF1;
+		cf0_data = *data;
+		pdev = &cf0_device;
+		break;
+	case 5:
+		at91_set_A_periph(AT91_PIN_PD7, 0);  /* EBI0_NCS5/CFCS1 */
+		ebi0_csa |= AT91_MATRIX_EBI0_CS5A_SMC_CF2;
+		cf1_data = *data;
+		pdev = &cf1_device;
+		break;
+	default:
+		printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n",
+		       data->chipselect);
+		return;
+	}
+	at91_sys_write(AT91_MATRIX_EBI0CSA, ebi0_csa);
+
+	if (data->det_pin) {
+		at91_set_gpio_input(data->det_pin, 1);
+		at91_set_deglitch(data->det_pin, 1);
+	}
+
+	if (data->irq_pin) {
+		at91_set_gpio_input(data->irq_pin, 1);
+		at91_set_deglitch(data->irq_pin, 1);
+	}
+
+	if (data->vcc_pin)
+		/* initially off */
+		at91_set_gpio_output(data->vcc_pin, 0);
+
+	/* enable EBI controlled pins */
+	at91_set_A_periph(AT91_PIN_PD5, 1);  /* NWAIT */
+	at91_set_A_periph(AT91_PIN_PD8, 0);  /* CFCE1 */
+	at91_set_A_periph(AT91_PIN_PD9, 0);  /* CFCE2 */
+	at91_set_A_periph(AT91_PIN_PD14, 0); /* CFNRW */
+
+	pdev->name = (data->flags & AT91_CF_TRUE_IDE) ? "at91_ide" : "at91_cf";
+	platform_device_register(pdev);
+}
+#else
+void __init at91_add_device_cf(struct at91_cf_data *data) {}
+#endif
 
 /* --------------------------------------------------------------------
  *  NAND / SmartMedia
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 0b3ae21b4565..793fe7b25f36 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -56,6 +56,9 @@ struct at91_cf_data {
 	u8	vcc_pin;		/* power switching */
 	u8	rst_pin;		/* card reset */
 	u8	chipselect;		/* EBI Chip Select number */
+	u8	flags;
+#define AT91_CF_TRUE_IDE	0x01
+#define AT91_IDE_SWAP_A0_A2	0x02
 };
 extern void __init at91_add_device_cf(struct at91_cf_data *data);
 
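
Together the two hunks above give board files a single registration entry
point. A hypothetical board fragment might use it like this (the pin choices
and CompactFlash wiring are assumptions for illustration, not from this patch):

    /* board setup: True IDE CompactFlash on EBI0 chip select 4 */
    static struct at91_cf_data __initdata board_cf_data = {
        .chipselect = 4,               /* routed to EBI0_NCS4/CFCS0 */
        .det_pin    = AT91_PIN_PC10,   /* assumed card-detect GPIO */
        .vcc_pin    = AT91_PIN_PC11,   /* assumed power-switch GPIO */
        .flags      = AT91_CF_TRUE_IDE,
    };

    static void __init board_add_cf(void)
    {
        /* registers cf0_device; AT91_CF_TRUE_IDE makes its name
         * "at91_ide" so the new at91_ide driver binds to it */
        at91_add_device_cf(&board_cf_data);
    }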
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 9bb4f043aa22..7ac812dc055a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -332,7 +332,6 @@ static int at91_pm_enter(suspend_state_t state)
 			at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
 
 error:
-	sdram_selfrefresh_disable();
 	target_state = PM_SUSPEND_ON;
 	at91_irq_resume();
 	at91_gpio_resume();
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index f6a13451d1fd..6031e179926b 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -81,7 +81,7 @@ static inline void __init ldp_init_smc911x(void)
 	}
 
 	ldp_smc911x_resources[0].start = cs_mem_base + 0x0;
-	ldp_smc911x_resources[0].end   = cs_mem_base + 0xf;
+	ldp_smc911x_resources[0].end   = cs_mem_base + 0xff;
 	udelay(100);
 
 	eth_gpio = LDP_SMC911X_GPIO;
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8a7f65ba14b7..94077fbd96b7 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
 	clrex
 #else
-	strex	r0, r1, [sp]			@ Clear the exclusive monitor
+	sub	r1, sp, #4			@ Get unused stack location
+	strex	r0, r1, [r1]			@ Clear the exclusive monitor
 #endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
diff --git a/arch/arm/plat-s3c64xx/irq-eint.c b/arch/arm/plat-s3c64xx/irq-eint.c
index 1f7cc0067f5c..ebb305ce7689 100644
--- a/arch/arm/plat-s3c64xx/irq-eint.c
+++ b/arch/arm/plat-s3c64xx/irq-eint.c
@@ -55,7 +55,7 @@ static void s3c_irq_eint_unmask(unsigned int irq)
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= eint_irq_to_bit(irq);
+	mask &= ~eint_irq_to_bit(irq);
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
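
The one-liner above fixes an inverted bit operation: in this controller the
EINT0MASK register masks an external interrupt when its bit is set, so
unmasking must clear the bit rather than set it. Shown as a matched pair
(a sketch using the same register accessors as the driver):

    static void eint_mask_bit(u32 bit)
    {
        u32 mask = __raw_readl(S3C64XX_EINT0MASK);
        mask |= bit;            /* set bit -> interrupt masked */
        __raw_writel(mask, S3C64XX_EINT0MASK);
    }

    static void eint_unmask_bit(u32 bit)
    {
        u32 mask = __raw_readl(S3C64XX_EINT0MASK);
        mask &= ~bit;           /* clear bit -> interrupt delivered */
        __raw_writel(mask, S3C64XX_EINT0MASK);
    }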
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index b189680d18b0..05fe3053dcae 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool n
 
 config ARCH_HAVE_MEMORY_PRESENT
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8f1f97d56e1e..0c1f86e3e44a 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1129,6 +1129,7 @@ endchoice
 
 config PM_WAKEUP_BY_GPIO
 	bool "Allow Wakeup from Standby by GPIO"
+	depends on PM && !BF54x
 
 config PM_WAKEUP_GPIO_NUMBER
 	int "GPIO number"
@@ -1168,6 +1169,12 @@ config PM_BFIN_WAKE_GP
 	default n
 	help
 	  Enable General-Purpose Wake-Up (Voltage Regulator Power-Up)
+	  (all processors, except ADSP-BF549). This option sets
+	  the general-purpose wake-up enable (GPWE) control bit to enable
+	  wake-up upon detection of an active low signal on the /GPW (PH7) pin.
+	  On ADSP-BF549 this option enables the the same functionality on the
+	  /MRXON pin also PH7.
+
 endmenu
 
 menu "CPU Frequency scaling"
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 5f981d9ca625..79e7e63ab709 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -21,12 +21,6 @@ config DEBUG_STACK_USAGE
 config HAVE_ARCH_KGDB
 	def_bool y
 
-config KGDB_TESTCASE
-	tristate "KGDB: for test case in expect"
-	default n
-	help
-	  This is a kgdb test case for automated testing.
-
 config DEBUG_VERBOSE
 	bool "Verbose fault messages"
 	default y
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index 4fdb9e04759f..281f4b60e603 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc2
-# Fri Jan 9 17:58:41 2009
+# Linux kernel version: 2.6.28
+# Fri Feb 20 10:01:44 2009
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -133,10 +133,15 @@ CONFIG_BF518=y
 # CONFIG_BF538 is not set
 # CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
 # CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
 # CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
 # CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
 # CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
 # CONFIG_BF561 is not set
 CONFIG_BF_REV_MIN=0
 CONFIG_BF_REV_MAX=2
@@ -426,7 +431,17 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
+CONFIG_NET_DSA=y
+# CONFIG_NET_DSA_TAG_DSA is not set
+# CONFIG_NET_DSA_TAG_EDSA is not set
+# CONFIG_NET_DSA_TAG_TRAILER is not set
+CONFIG_NET_DSA_TAG_STPID=y
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_NET_DSA_KSZ8893M=y
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -529,6 +544,8 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
 #
 # Self-contained MTD device drivers
 #
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
@@ -561,7 +578,9 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
 # CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -607,6 +626,7 @@ CONFIG_BFIN_RX_DESC_NUM=20
 # CONFIG_SMC91X is not set
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -764,7 +784,23 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-# CONFIG_SPI is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
 CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
 # CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
@@ -788,8 +824,10 @@ CONFIG_BFIN_WDT=y
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
 # CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -861,10 +899,18 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
 #
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
@@ -1062,12 +1108,20 @@ CONFIG_DEBUG_INFO=y
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FAULT_INJECTION is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
 # CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 # CONFIG_DEBUG_STACKOVERFLOW is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_KGDB_TESTCASE is not set
 CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
 # CONFIG_DEBUG_HWERR is not set
@@ -1100,6 +1154,7 @@ CONFIG_CRYPTO=y
 #
 # CONFIG_CRYPTO_FIPS is not set
 # CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 833128b39724..a50050f17706 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -327,8 +327,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 334c94b51c40..0a2a00d63887 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -290,8 +290,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 9d733436e300..eb027587a355 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -290,8 +290,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 4fb4108d3103..9e62b9f40eb1 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -298,8 +298,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
@@ -568,15 +568,7 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
 # CONFIG_MTD_DOC2000 is not set
 # CONFIG_MTD_DOC2001 is not set
 # CONFIG_MTD_DOC2001PLUS is not set
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-# CONFIG_MTD_NAND_BFIN is not set
-CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-CONFIG_MTD_NAND_PLATFORM=m
+# CONFIG_MTD_NAND is not set
 # CONFIG_MTD_ONENAND is not set
 
 #
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index cb32f5624a1b..dd6ad6be1c87 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -306,8 +306,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 0f8697618aa5..6bc2fb1b2a70 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -361,8 +361,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
@@ -680,7 +680,7 @@ CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 042c7adfccfa..69714fb3e608 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -329,8 +329,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 3a20e281d23c..017c6ea071b5 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -288,8 +288,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index 865ed85a5760..d880ef786770 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -332,8 +332,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 # CONFIG_MPU is not set
 
 #
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index efe9741b1f14..f410430b4e3d 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -336,8 +336,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
@@ -595,7 +595,7 @@ CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index eae83b5de92f..7db93874c987 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -612,7 +612,7 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
 CONFIG_SCSI=y
 # CONFIG_SCSI_TGT is not set
 # CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
 
 #
 # SCSI support type (disk, tape, CD-ROM)
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index fa580affc9d6..a46529c6ade3 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -282,8 +282,8 @@ CONFIG_BFIN_ICACHE=y
 CONFIG_BFIN_DCACHE=y
 # CONFIG_BFIN_DCACHE_BANKA is not set
 # CONFIG_BFIN_ICACHE_LOCK is not set
-# CONFIG_BFIN_WB is not set
-CONFIG_BFIN_WT=y
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
 CONFIG_L1_MAX_PIECE=16
 
 #
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 606ecfdcc962..09c31418cc08 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
+unifdef-y += bfin_sport.h
 unifdef-y += fixed_code.h
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index fe88a2c19213..65a651db5b07 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -1,30 +1,9 @@
 /*
- * File:         include/asm-blackfin/bfin_sport.h
- * Based on:
- * Author:       Roy Huang (roy.huang@analog.com)
+ * bfin_sport.h - userspace header for bfin sport driver
  *
- * Created:      Thu Aug. 24 2006
- * Description:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef __BFIN_SPORT_H__
@@ -42,11 +21,10 @@
 #define NORM_FORMAT	0x0
 #define ALAW_FORMAT	0x2
 #define ULAW_FORMAT	0x3
-struct sport_register;
 
 /* Function driver which use sport must initialize the structure */
 struct sport_config {
-	/*TDM (multichannels), I2S or other mode */
+	/* TDM (multichannels), I2S or other mode */
 	unsigned int mode:3;
 
 	/* if TDM mode is selected, channels must be set */
@@ -72,12 +50,18 @@ struct sport_config {
 	int serial_clk;
 	int fsync_clk;
 
-	unsigned int data_format:2;	/*Normal, u-law or a-law */
+	unsigned int data_format:2;	/* Normal, u-law or a-law */
 
 	int word_len;		/* How length of the word in bits, 3-32 bits */
 	int dma_enabled;
 };
 
+/* Userspace interface */
+#define SPORT_IOC_MAGIC		'P'
+#define SPORT_IOC_CONFIG	_IOWR('P', 0x01, struct sport_config)
+
+#ifdef __KERNEL__
+
 struct sport_register {
 	unsigned short tcr1;
 	unsigned short reserved0;
@@ -117,9 +101,6 @@ struct sport_register {
 	unsigned long mrcs3;
 };
 
-#define SPORT_IOC_MAGIC		'P'
-#define SPORT_IOC_CONFIG	_IOWR('P', 0x01, struct sport_config)
-
 struct sport_dev {
 	struct cdev cdev;	/* Char device structure */
 
@@ -149,6 +130,8 @@ struct sport_dev {
 	struct sport_config config;
 };
 
+#endif
+
 #define SPORT_TCR1	0
 #define SPORT_TCR2	1
 #define SPORT_TCLKDIV	2
@@ -169,4 +152,4 @@ struct sport_dev {
 #define SPORT_MRCS2	22
 #define SPORT_MRCS3	23
 
-#endif  /*__BFIN_SPORT_H__*/
+#endif
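
With SPORT_IOC_CONFIG now defined outside #ifdef __KERNEL__, userspace can
configure the driver through the exported struct sport_config. A minimal
sketch (the /dev/sport0 node name and the clock values are assumptions, not
defined by this header):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/bfin_sport.h>

    int main(void)
    {
        struct sport_config cfg;
        int fd = open("/dev/sport0", O_RDWR);   /* assumed device node */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.data_format = NORM_FORMAT;  /* linear data, no companding */
        cfg.word_len    = 16;           /* 16-bit words */
        cfg.serial_clk  = 2000000;      /* illustrative clock rates */
        cfg.fsync_clk   = 8000;
        cfg.dma_enabled = 1;

        if (ioctl(fd, SPORT_IOC_CONFIG, &cfg) < 0)
            perror("SPORT_IOC_CONFIG");

        close(fd);
        return 0;
    }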
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 76f53d8b9a0d..343b56361ec9 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,9 +35,9 @@
 #include <asm/atomic.h>
 #include <asm/traps.h>
 
-#define IPIPE_ARCH_STRING     "1.8-00"
+#define IPIPE_ARCH_STRING     "1.9-00"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    8
+#define IPIPE_MINOR_NUMBER    9
 #define IPIPE_PATCH_NUMBER    0
 
 #ifdef CONFIG_SMP
@@ -83,9 +83,9 @@ struct ipipe_sysinfo {
 		"%2 = CYCLES2\n"				\
 		"CC = %2 == %0\n"				\
 		"if ! CC jump 1b\n"				\
-		: "=r" (((unsigned long *)&t)[1]),		\
-		  "=r" (((unsigned long *)&t)[0]),		\
-		  "=r" (__cy2)					\
+		: "=d,a" (((unsigned long *)&t)[1]),		\
+		  "=d,a" (((unsigned long *)&t)[0]),		\
+		  "=d,a" (__cy2)				\
 		: /*no input*/ : "CC");				\
 	t;							\
 	})
@@ -118,35 +118,40 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 
 #define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))
 
-#define __ipipe_lock_root()					\
-	set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline int __ipipe_check_tickdev(const char *devname)
+{
+	return 1;
+}
 
-#define __ipipe_unlock_root()					\
-	clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+static inline void __ipipe_lock_root(void)
+{
+	set_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+static inline void __ipipe_unlock_root(void)
+{
+	clear_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
+}
 
 void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline(syncmask)				\
-	do {							\
-		struct ipipe_domain *ipd = ipipe_current_domain; \
-		if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
-			__ipipe_sync_stage(syncmask);		\
-	} while (0)
+#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
+void ___ipipe_sync_pipeline(unsigned long syncmask);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
 int __ipipe_get_irq_priority(unsigned irq);
 
-int __ipipe_get_irqthread_priority(unsigned irq);
-
 void __ipipe_stall_root_raw(void);
 
 void __ipipe_unstall_root_raw(void);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
+asmlinkage void __ipipe_call_irqtail(unsigned long addr);
+
 DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
 extern unsigned long __ipipe_core_clock;
@@ -162,42 +167,25 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 
 #define __ipipe_run_irqtail()  /* Must be a macro */		\
 	do {							\
-		asmlinkage void __ipipe_call_irqtail(void);	\
 		unsigned long __pending;			\
 		CSYNC();					\
 		__pending = bfin_read_IPEND();			\
 		if (__pending & 0x8000) {			\
 			__pending &= ~0x8010;			\
 			if (__pending && (__pending & (__pending - 1)) == 0) \
-				__ipipe_call_irqtail();		\
+				__ipipe_call_irqtail(__ipipe_irq_tail_hook); \
 		}						\
 	} while (0)
 
 #define __ipipe_run_isr(ipd, irq)				\
 	do {							\
 		if (ipd == ipipe_root_domain) {			\
-			/*					\
-			 * Note: the I-pipe implements a threaded interrupt model on \
-			 * this arch for Linux external IRQs. The interrupt handler we \
-			 * call here only wakes up the associated IRQ thread. \
-			 */					\
-			if (ipipe_virtual_irq_p(irq)) {		\
-				/* No irqtail here; virtual interrupts have no effect \
-				   on IPEND so there is no need for processing \
-				   deferral. */			\
-				local_irq_enable_nohead(ipd);	\
+			local_irq_enable_hw();			\
+			if (ipipe_virtual_irq_p(irq))		\
 				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-				local_irq_disable_nohead(ipd);	\
-			} else					\
-				/*				\
-				 * No need to run the irqtail here either; \
-				 * we can't be preempted by hw IRQs, so \
-				 * non-Linux IRQs cannot stack over the short \
-				 * thread wakeup code. Which in turn means \
-				 * that no irqtail condition could be pending \
-				 * for domains above Linux in the pipeline. \
-				 */				\
+			else					\
 				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+			local_irq_disable_hw();			\
 		} else {					\
 			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
 			local_irq_enable_nohead(ipd);		\
@@ -217,42 +205,24 @@ void ipipe_init_irq_threads(void);
 
 int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
-#define IS_SYSIRQ(irq)		((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
-#define IS_GPIOIRQ(irq)		((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
-
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#define IRQ_SYSTMR		IRQ_CORETMR
+#define IRQ_PRIOTMR		IRQ_CORETMR
+#else
 #define IRQ_SYSTMR		IRQ_TIMER0
 #define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
214#endif
225 215
226#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533) 216#ifdef CONFIG_BF561
227#define PRIO_GPIODEMUX(irq) CONFIG_PFA
228#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
229#define PRIO_GPIODEMUX(irq) CONFIG_IRQ_PROG_INTA
230#elif defined(CONFIG_BF52x)
231#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
232 (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
233 (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
234 -1)
235#elif defined(CONFIG_BF561)
236#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
237 (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
238 (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
239 -1)
240#define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val) 217#define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val)
241#define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val) 218#define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val)
242#define bfin_write_TIMER_STATUS(val) bfin_write_TMRS8_STATUS(val) 219#define bfin_write_TIMER_STATUS(val) bfin_write_TMRS8_STATUS(val)
243#define bfin_read_TIMER_STATUS() bfin_read_TMRS8_STATUS() 220#define bfin_read_TIMER_STATUS() bfin_read_TMRS8_STATUS()
244#elif defined(CONFIG_BF54x) 221#elif defined(CONFIG_BF54x)
245#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
246 (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
247 (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
248 (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
249 -1)
250#define bfin_write_TIMER_DISABLE(val) bfin_write_TIMER_DISABLE0(val) 222#define bfin_write_TIMER_DISABLE(val) bfin_write_TIMER_DISABLE0(val)
251#define bfin_write_TIMER_ENABLE(val) bfin_write_TIMER_ENABLE0(val) 223#define bfin_write_TIMER_ENABLE(val) bfin_write_TIMER_ENABLE0(val)
252#define bfin_write_TIMER_STATUS(val) bfin_write_TIMER_STATUS0(val) 224#define bfin_write_TIMER_STATUS(val) bfin_write_TIMER_STATUS0(val)
253#define bfin_read_TIMER_STATUS(val) bfin_read_TIMER_STATUS0(val) 225#define bfin_read_TIMER_STATUS(val) bfin_read_TIMER_STATUS0(val)
254#else
255# error "no PRIO_GPIODEMUX() for this part"
256#endif 226#endif
257 227
258#define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0) 228#define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0)
@@ -275,4 +245,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
275 245
276#endif /* !CONFIG_IPIPE */ 246#endif /* !CONFIG_IPIPE */
277 247
248#define ipipe_update_tick_evtdev(evtdev) do { } while (0)
249
278#endif /* !__ASM_BLACKFIN_IPIPE_H */ 250#endif /* !__ASM_BLACKFIN_IPIPE_H */
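The ipipe.h hunks above turn the root-domain lock helpers from macros over a global domain flag into inline functions over a per-cpu status bit (IPIPE_SYNCDEFER_FLAG): instead of locking the whole pipeline, the root stage merely defers IRQ playback while the bit is set. A minimal sketch of the resulting pattern; lock_root, unlock_root and root_status are illustrative stand-ins, not the real symbols:

	#include <linux/bitops.h>

	#define SYNCDEFER_FLAG 15		/* mirrors IPIPE_SYNCDEFER_FLAG */

	static unsigned long root_status;	/* stand-in for ipipe_root_cpudom_var(status) */

	static inline void lock_root(void)
	{
		/* IRQs hitting the root stage are still logged, but their
		 * playback is deferred until the bit is cleared again. */
		set_bit(SYNCDEFER_FLAG, &root_status);
	}

	static inline void unlock_root(void)
	{
		clear_bit(SYNCDEFER_FLAG, &root_status);
	}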
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index cb1025aeabcf..3e8acbd1a3be 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -1,5 +1,5 @@
1/* -*- linux-c -*- 1/* -*- linux-c -*-
2 * include/asm-blackfin/_baseipipe.h 2 * include/asm-blackfin/ipipe_base.h
3 * 3 *
4 * Copyright (C) 2007 Philippe Gerum. 4 * Copyright (C) 2007 Philippe Gerum.
5 * 5 *
@@ -27,8 +27,9 @@
27#define IPIPE_NR_XIRQS NR_IRQS 27#define IPIPE_NR_XIRQS NR_IRQS
28#define IPIPE_IRQ_ISHIFT 5 /* 2^5 for 32bits arch. */ 28#define IPIPE_IRQ_ISHIFT 5 /* 2^5 for 32bits arch. */
29 29
30/* Blackfin-specific, global domain flags */ 30/* Blackfin-specific, per-cpu pipeline status */
31#define IPIPE_ROOTLOCK_FLAG 1 /* Lock pipeline for root */ 31#define IPIPE_SYNCDEFER_FLAG 15
 32#define IPIPE_SYNCDEFER_MASK (1L << IPIPE_SYNCDEFER_FLAG)
32 33
33 /* Blackfin traps -- i.e. exception vector numbers */ 34 /* Blackfin traps -- i.e. exception vector numbers */
34#define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */ 35#define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */
@@ -48,11 +49,6 @@
48 49
49#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
50 51
51#include <linux/bitops.h>
52
53extern int test_bit(int nr, const void *addr);
54
55
56extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */ 52extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
57 53
58static inline void __ipipe_stall_root(void) 54static inline void __ipipe_stall_root(void)
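The SYNCDEFER mask is derived from its flag number in the usual flag/mask idiom (the definition above shifts by the flag number, so word-style tests and bit-style tests agree). A small illustration with stand-in names:

	#define SYNCDEFER_FLAG 15
	#define SYNCDEFER_MASK (1L << SYNCDEFER_FLAG)

	/* (status & SYNCDEFER_MASK) != 0 is equivalent to
	 * test_bit(SYNCDEFER_FLAG, &status) on a single word. */
	static inline int syncdefer_set(unsigned long status)
	{
		return (status & SYNCDEFER_MASK) != 0;
	}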
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 3d977909ce7d..7645e85a5f6f 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -61,20 +61,38 @@ void __ipipe_restore_root(unsigned long flags);
61#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags)) 61#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
62#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x) 62#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
63 63
64#define local_save_flags(x) \ 64#define local_save_flags(x) \
65 do { \ 65 do { \
66 (x) = __ipipe_test_root() ? \ 66 (x) = __ipipe_test_root() ? \
67 __all_masked_irq_flags : bfin_irq_flags; \ 67 __all_masked_irq_flags : bfin_irq_flags; \
68 barrier(); \
68 } while (0) 69 } while (0)
69 70
70#define local_irq_save(x) \ 71#define local_irq_save(x) \
71 do { \ 72 do { \
72 (x) = __ipipe_test_and_stall_root(); \ 73 (x) = __ipipe_test_and_stall_root() ? \
74 __all_masked_irq_flags : bfin_irq_flags; \
75 barrier(); \
76 } while (0)
77
78static inline void local_irq_restore(unsigned long x)
79{
80 barrier();
81 __ipipe_restore_root(x == __all_masked_irq_flags);
82}
83
84#define local_irq_disable() \
85 do { \
86 __ipipe_stall_root(); \
87 barrier(); \
73 } while (0) 88 } while (0)
74 89
75#define local_irq_restore(x) __ipipe_restore_root(x) 90static inline void local_irq_enable(void)
76#define local_irq_disable() __ipipe_stall_root() 91{
77#define local_irq_enable() __ipipe_unstall_root() 92 barrier();
93 __ipipe_unstall_root();
94}
95
78#define irqs_disabled() __ipipe_test_root() 96#define irqs_disabled() __ipipe_test_root()
79 97
80#define local_save_flags_hw(x) \ 98#define local_save_flags_hw(x) \
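With this change local_irq_save() no longer hands back a raw stall bit but one of the two canonical flag words (__all_masked_irq_flags when the root stage was already stalled, bfin_irq_flags otherwise), and local_irq_restore() turns that back into a boolean for __ipipe_restore_root(); the added barrier() calls keep the compiler from hoisting memory accesses out of the critical section. A hedged usage sketch, kernel context assumed and example_critical_section purely illustrative:

	static void example_critical_section(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* stall the root stage; barrier() */
		/* ... touch data shared with root-domain IRQ handlers ... */
		local_irq_restore(flags);	/* barrier(); unstalls only if
						 * IRQs were enabled on entry */
	}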
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
index 797c0c165069..c94c7bc88c71 100644
--- a/arch/blackfin/include/asm/percpu.h
+++ b/arch/blackfin/include/asm/percpu.h
@@ -3,14 +3,4 @@
3 3
4#include <asm-generic/percpu.h> 4#include <asm-generic/percpu.h>
5 5
6#ifdef CONFIG_MODULES
7#define PERCPU_MODULE_RESERVE 8192
8#else
9#define PERCPU_MODULE_RESERVE 0
10#endif
11
12#define PERCPU_ENOUGH_ROOM \
13 (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
14 PERCPU_MODULE_RESERVE)
15
16#endif /* __ARCH_BLACKFIN_PERCPU__ */ 6#endif /* __ARCH_BLACKFIN_PERCPU__ */
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index e721ce55956c..2920087516f2 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
122#define TIF_MEMDIE 4 122#define TIF_MEMDIE 4
123#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 123#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
124#define TIF_FREEZE 6 /* is freezing for suspend */ 124#define TIF_FREEZE 6 /* is freezing for suspend */
125#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
125 126
126/* as above, but as bit values */ 127/* as above, but as bit values */
127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 128#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -130,6 +131,7 @@ static inline struct thread_info *current_thread_info(void)
130#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 131#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
131#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 132#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
132#define _TIF_FREEZE (1<<TIF_FREEZE) 133#define _TIF_FREEZE (1<<TIF_FREEZE)
134#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
133 135
134#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 136#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
135 137
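TIF_IRQ_SYNC marks a pending pipeline-synchronization request on the current task; like the other TIF_ bits it gets a matching _TIF_ mask for word-wide tests. It is consumed with the generic thread-flag helpers, roughly like this sketch (kernel context assumed):

	if (test_thread_flag(TIF_IRQ_SYNC))
		clear_thread_flag(TIF_IRQ_SYNC);	/* as __ipipe_sync_root() does below */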
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 4a92a86824b7..fd4d4328a0f2 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,13 +15,15 @@ else
15 obj-y += time.o 15 obj-y += time.o
16endif 16endif
17 17
18CFLAGS_kgdb_test.o := -mlong-calls -O0
19
20obj-$(CONFIG_IPIPE) += ipipe.o 18obj-$(CONFIG_IPIPE) += ipipe.o
21obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o 19obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
22obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o 20obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
23obj-$(CONFIG_CPLB_INFO) += cplbinfo.o 21obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
24obj-$(CONFIG_MODULES) += module.o 22obj-$(CONFIG_MODULES) += module.o
25obj-$(CONFIG_KGDB) += kgdb.o 23obj-$(CONFIG_KGDB) += kgdb.o
26obj-$(CONFIG_KGDB_TESTCASE) += kgdb_test.o 24obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
27obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 25obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
26
27# the kgdb test puts code into L2 and without linker
28# relaxation, we need to force long calls to/from it
29CFLAGS_kgdb_test.o := -mlong-calls -O0
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 0e28f7595733..d6c067782e63 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -53,9 +53,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
53 53
54 i_d = i_i = 0; 54 i_d = i_i = 0;
55 55
56#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
56 /* Set up the zero page. */ 57 /* Set up the zero page. */
57 d_tbl[i_d].addr = 0; 58 d_tbl[i_d].addr = 0;
58 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; 59 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
60 i_tbl[i_i].addr = 0;
61 i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
62#endif
59 63
60 /* Cover kernel memory with 4M pages. */ 64 /* Cover kernel memory with 4M pages. */
61 addr = 0; 65 addr = 0;
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 339be5a3ae6a..a5de8d45424c 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -35,14 +35,8 @@
35#include <asm/atomic.h> 35#include <asm/atomic.h>
36#include <asm/io.h> 36#include <asm/io.h>
37 37
38static int create_irq_threads;
39
40DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs); 38DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
41 39
42static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
43
44static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
45
46asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); 40asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
47 41
48static void __ipipe_no_irqtail(void); 42static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
93 */ 87 */
94void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs) 88void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
95{ 89{
90 struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
96 struct ipipe_domain *this_domain, *next_domain; 91 struct ipipe_domain *this_domain, *next_domain;
97 struct list_head *head, *pos; 92 struct list_head *head, *pos;
98 int m_ack, s = -1; 93 int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
104 * interrupt. 99 * interrupt.
105 */ 100 */
106 m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); 101 m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
107
108 this_domain = ipipe_current_domain; 102 this_domain = ipipe_current_domain;
109 103
110 if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) 104 if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
114 next_domain = list_entry(head, struct ipipe_domain, p_link); 108 next_domain = list_entry(head, struct ipipe_domain, p_link);
115 if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) { 109 if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
116 if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) 110 if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
117 next_domain->irqs[irq].acknowledge(irq, irq_desc + irq); 111 next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
118 if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)) 112 if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
119 s = __test_and_set_bit(IPIPE_STALL_FLAG, 113 s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
120 &ipipe_root_cpudom_var(status));
121 __ipipe_dispatch_wired(next_domain, irq); 114 __ipipe_dispatch_wired(next_domain, irq);
122 goto finalize; 115 goto out;
123 return;
124 } 116 }
125 } 117 }
126 118
127 /* Ack the interrupt. */ 119 /* Ack the interrupt. */
128 120
129 pos = head; 121 pos = head;
130
131 while (pos != &__ipipe_pipeline) { 122 while (pos != &__ipipe_pipeline) {
132 next_domain = list_entry(pos, struct ipipe_domain, p_link); 123 next_domain = list_entry(pos, struct ipipe_domain, p_link);
133 /*
134 * For each domain handling the incoming IRQ, mark it
135 * as pending in its log.
136 */
137 if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) { 124 if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
138 /*
139 * Domains that handle this IRQ are polled for
140 * acknowledging it by decreasing priority
141 * order. The interrupt must be made pending
142 * _first_ in the domain's status flags before
143 * the PIC is unlocked.
144 */
145 __ipipe_set_irq_pending(next_domain, irq); 125 __ipipe_set_irq_pending(next_domain, irq);
146
147 if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) { 126 if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
148 next_domain->irqs[irq].acknowledge(irq, irq_desc + irq); 127 next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
149 m_ack = 1; 128 m_ack = 1;
150 } 129 }
151 } 130 }
152
153 /*
154 * If the domain does not want the IRQ to be passed
155 * down the interrupt pipe, exit the loop now.
156 */
157 if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control)) 131 if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
158 break; 132 break;
159
160 pos = next_domain->p_link.next; 133 pos = next_domain->p_link.next;
161 } 134 }
162 135
@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
166 * immediately to the current domain if the interrupt has been 139 * immediately to the current domain if the interrupt has been
167 * marked as 'sticky'. This search does not go beyond the 140 * marked as 'sticky'. This search does not go beyond the
168 * current domain in the pipeline. We also enforce the 141 * current domain in the pipeline. We also enforce the
169 * additional root stage lock (blackfin-specific). */ 142 * additional root stage lock (blackfin-specific).
143 */
144 if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
145 s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
170 146
171 if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)) 147 /*
172 s = __test_and_set_bit(IPIPE_STALL_FLAG, 148 * If the interrupt preempted the head domain, then do not
173 &ipipe_root_cpudom_var(status)); 149 * even try to walk the pipeline, unless an interrupt is
174finalize: 150 * pending for it.
151 */
152 if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
153 ipipe_head_cpudom_var(irqpend_himask) == 0)
154 goto out;
175 155
176 __ipipe_walk_pipeline(head); 156 __ipipe_walk_pipeline(head);
177 157out:
178 if (!s) 158 if (!s)
179 __clear_bit(IPIPE_STALL_FLAG, 159 __clear_bit(IPIPE_STALL_FLAG, &p->status);
180 &ipipe_root_cpudom_var(status));
181} 160}
182 161
183int __ipipe_check_root(void) 162int __ipipe_check_root(void)
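The rewritten __ipipe_handle_irq() uses a conditional stall: s starts at -1, is overwritten with the previous STALL bit only when SYNCDEFER is set, and the epilogue clears the stall bit only when this function was the one to set it (s == 0). The pattern in isolation, with hypothetical names:

	int s = -1;			/* -1: we never touched the bit */
	if (defer_needed)
		s = __test_and_set_bit(STALL_FLAG, &status);	/* 0 or 1 */
	/* ... mark the IRQ pending and walk the pipeline ... */
	if (!s)				/* clear only if we set it ourselves */
		__clear_bit(STALL_FLAG, &status);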
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)
187 166
188void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) 167void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
189{ 168{
190 struct irq_desc *desc = irq_desc + irq; 169 struct irq_desc *desc = irq_to_desc(irq);
191 int prio = desc->ic_prio; 170 int prio = desc->ic_prio;
192 171
193 desc->depth = 0; 172 desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
199 178
200void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) 179void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
201{ 180{
202 struct irq_desc *desc = irq_desc + irq; 181 struct irq_desc *desc = irq_to_desc(irq);
203 int prio = desc->ic_prio; 182 int prio = desc->ic_prio;
204 183
205 if (ipd != &ipipe_root && 184 if (ipd != &ipipe_root &&
@@ -236,15 +215,18 @@ int __ipipe_syscall_root(struct pt_regs *regs)
236{ 215{
237 unsigned long flags; 216 unsigned long flags;
238 217
239 /* We need to run the IRQ tail hook whenever we don't 218 /*
219 * We need to run the IRQ tail hook whenever we don't
240 * propagate a syscall to higher domains, because we know that 220 * propagate a syscall to higher domains, because we know that
241 * important operations might be pending there (e.g. Xenomai 221 * important operations might be pending there (e.g. Xenomai
242 * deferred rescheduling). */ 222 * deferred rescheduling).
223 */
243 224
244 if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) { 225 if (regs->orig_p0 < NR_syscalls) {
245 void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook; 226 void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
246 hook(); 227 hook();
247 return 0; 228 if ((current->flags & PF_EVNOTIFY) == 0)
229 return 0;
248 } 230 }
249 231
250 /* 232 /*
@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
312{ 294{
313 unsigned long flags; 295 unsigned long flags;
314 296
297#ifdef CONFIG_IPIPE_DEBUG
315 if (irq >= IPIPE_NR_IRQS || 298 if (irq >= IPIPE_NR_IRQS ||
316 (ipipe_virtual_irq_p(irq) 299 (ipipe_virtual_irq_p(irq)
317 && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map))) 300 && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
318 return -EINVAL; 301 return -EINVAL;
302#endif
319 303
320 local_irq_save_hw(flags); 304 local_irq_save_hw(flags);
321
322 __ipipe_handle_irq(irq, NULL); 305 __ipipe_handle_irq(irq, NULL);
323
324 local_irq_restore_hw(flags); 306 local_irq_restore_hw(flags);
325 307
326 return 1; 308 return 1;
327} 309}
328 310
329/* Move Linux IRQ to threads. */ 311asmlinkage void __ipipe_sync_root(void)
330
331static int do_irqd(void *__desc)
332{ 312{
333 struct irq_desc *desc = __desc; 313 unsigned long flags;
334 unsigned irq = desc - irq_desc;
335 int thrprio = desc->thr_prio;
336 int thrmask = 1 << thrprio;
337 int cpu = smp_processor_id();
338 cpumask_t cpumask;
339
340 sigfillset(&current->blocked);
341 current->flags |= PF_NOFREEZE;
342 cpumask = cpumask_of_cpu(cpu);
343 set_cpus_allowed(current, cpumask);
344 ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
345
346 while (!kthread_should_stop()) {
347 local_irq_disable();
348 if (!(desc->status & IRQ_SCHEDULED)) {
349 set_current_state(TASK_INTERRUPTIBLE);
350resched:
351 local_irq_enable();
352 schedule();
353 local_irq_disable();
354 }
355 __set_current_state(TASK_RUNNING);
356 /*
357 * If higher priority interrupt servers are ready to
358 * run, reschedule immediately. We need this for the
359 * GPIO demux IRQ handler to unmask the interrupt line
360 * _last_, after all GPIO IRQs have run.
361 */
362 if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
363 goto resched;
364 if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
365 per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
366 desc->status &= ~IRQ_SCHEDULED;
367 desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
368 local_irq_enable();
369 }
370 __set_current_state(TASK_RUNNING);
371 return 0;
372}
373 314
374static void kick_irqd(unsigned irq, void *cookie) 315 BUG_ON(irqs_disabled());
375{
376 struct irq_desc *desc = irq_desc + irq;
377 int thrprio = desc->thr_prio;
378 int thrmask = 1 << thrprio;
379 int cpu = smp_processor_id();
380
381 if (!(desc->status & IRQ_SCHEDULED)) {
382 desc->status |= IRQ_SCHEDULED;
383 per_cpu(pending_irqthread_mask, cpu) |= thrmask;
384 ++per_cpu(pending_irq_count[thrprio], cpu);
385 wake_up_process(desc->thread);
386 }
387}
388 316
389int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc) 317 local_irq_save_hw(flags);
390{
391 if (desc->thread || !create_irq_threads)
392 return 0;
393
394 desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
395 if (desc->thread == NULL) {
396 printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
397 return -ENOMEM;
398 }
399 318
400 wake_up_process(desc->thread); 319 clear_thread_flag(TIF_IRQ_SYNC);
401 320
402 desc->thr_handler = ipipe_root_domain->irqs[irq].handler; 321 if (ipipe_root_cpudom_var(irqpend_himask) != 0)
403 ipipe_root_domain->irqs[irq].handler = &kick_irqd; 322 __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
404 323
405 return 0; 324 local_irq_restore_hw(flags);
406} 325}
407 326
408void __init ipipe_init_irq_threads(void) 327void ___ipipe_sync_pipeline(unsigned long syncmask)
409{ 328{
410 unsigned irq; 329 struct ipipe_domain *ipd = ipipe_current_domain;
411 struct irq_desc *desc;
412
413 create_irq_threads = 1;
414 330
415 for (irq = 0; irq < NR_IRQS; irq++) { 331 if (ipd == ipipe_root_domain) {
416 desc = irq_desc + irq; 332 if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
417 if (desc->action != NULL || 333 return;
418 (desc->status & IRQ_NOREQUEST) != 0)
419 ipipe_start_irq_thread(irq, desc);
420 } 334 }
335
336 __ipipe_sync_stage(syncmask);
421} 337}
422 338
423EXPORT_SYMBOL(show_stack); 339EXPORT_SYMBOL(show_stack);
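___ipipe_sync_pipeline() is where the deferral pays off: playback of logged root-stage IRQs is skipped while SYNCDEFER is set, so __ipipe_lock_root()/__ipipe_unlock_root() bracket a region in which interrupts simply accumulate. A sketch of the control flow; every helper name here is a hypothetical stand-in for the real tests above:

	void sync_pipeline(unsigned long syncmask)
	{
		if (current_domain_is_root() && syncdefer_is_set())
			return;				/* root stage is "locked" */
		play_back_pending_irqs(syncmask);	/* i.e. __ipipe_sync_stage() */
	}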
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 23e9aa080710..1ab5b532ec72 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -149,11 +149,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
149#endif 149#endif
150 generic_handle_irq(irq); 150 generic_handle_irq(irq);
151 151
152#ifndef CONFIG_IPIPE /* Useless and bugous over the I-pipe: IRQs are threaded. */ 152#ifndef CONFIG_IPIPE
153 /* If we're the only interrupt running (ignoring IRQ15 which is for 153 /*
154 syscalls), lower our priority to IRQ14 so that softirqs run at 154 * If we're the only interrupt running (ignoring IRQ15 which
155 that level. If there's another, lower-level interrupt, irq_exit 155 * is for syscalls), lower our priority to IRQ14 so that
156 will defer softirqs to that. */ 156 * softirqs run at that level. If there's another,
157 * lower-level interrupt, irq_exit will defer softirqs to
158 * that. If the interrupt pipeline is enabled, we are already
159 * running at IRQ14 priority, so we don't need this code.
160 */
157 CSYNC(); 161 CSYNC();
158 pending = bfin_read_IPEND() & ~0x8000; 162 pending = bfin_read_IPEND() & ~0x8000;
159 other_ints = pending & (pending - 1); 163 other_ints = pending & (pending - 1);
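The other_ints test relies on the classic power-of-two check: x & (x - 1) clears the lowest set bit, so the result is zero exactly when at most one IPEND bit is set, i.e. when ours is the only interrupt running. A standalone illustration:

	#include <assert.h>

	/* Zero result <=> x has at most one bit set. */
	static int at_most_one_bit(unsigned int x)
	{
		return (x & (x - 1)) == 0;
	}

	int main(void)
	{
		assert(at_most_one_bit(0x2000));		/* only one IRQ pending */
		assert(!at_most_one_bit(0x2000 | 0x20));	/* a second IRQ pending */
		return 0;
	}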
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 3dba9c17304a..dbcf3e45cb0b 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -20,6 +20,7 @@
20static char cmdline[256]; 20static char cmdline[256];
21static unsigned long len; 21static unsigned long len;
22 22
23#ifndef CONFIG_SMP
23static int num1 __attribute__((l1_data)); 24static int num1 __attribute__((l1_data));
24 25
25void kgdb_l1_test(void) __attribute__((l1_text)); 26void kgdb_l1_test(void) __attribute__((l1_text));
@@ -32,6 +33,8 @@ void kgdb_l1_test(void)
32 printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1); 33 printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
33 return ; 34 return ;
34} 35}
36#endif
37
35#if L2_LENGTH 38#if L2_LENGTH
36 39
37static int num2 __attribute__((l2)); 40static int num2 __attribute__((l2));
@@ -59,10 +62,12 @@ int kgdb_test(char *name, int len, int count, int z)
59static int test_proc_output(char *buf) 62static int test_proc_output(char *buf)
60{ 63{
61 kgdb_test("hello world!", 12, 0x55, 0x10); 64 kgdb_test("hello world!", 12, 0x55, 0x10);
65#ifndef CONFIG_SMP
62 kgdb_l1_test(); 66 kgdb_l1_test();
63 #if L2_LENGTH 67#endif
68#if L2_LENGTH
64 kgdb_l2_test(); 69 kgdb_l2_test();
65 #endif 70#endif
66 71
67 return 0; 72 return 0;
68} 73}
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 594e325b40e4..d76618db50df 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -45,6 +45,7 @@
45#include <asm/asm-offsets.h> 45#include <asm/asm-offsets.h>
46#include <asm/dma.h> 46#include <asm/dma.h>
47#include <asm/fixed_code.h> 47#include <asm/fixed_code.h>
48#include <asm/cacheflush.h>
48#include <asm/mem_map.h> 49#include <asm/mem_map.h>
49 50
50#define TEXT_OFFSET 0 51#define TEXT_OFFSET 0
@@ -240,7 +241,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
240 241
241 } else if (addr >= FIXED_CODE_START 242 } else if (addr >= FIXED_CODE_START
242 && addr + sizeof(tmp) <= FIXED_CODE_END) { 243 && addr + sizeof(tmp) <= FIXED_CODE_END) {
243 memcpy(&tmp, (const void *)(addr), sizeof(tmp)); 244 copy_from_user_page(0, 0, 0, &tmp, (const void *)(addr), sizeof(tmp));
244 copied = sizeof(tmp); 245 copied = sizeof(tmp);
245 246
246 } else 247 } else
@@ -320,7 +321,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
320 321
321 } else if (addr >= FIXED_CODE_START 322 } else if (addr >= FIXED_CODE_START
322 && addr + sizeof(data) <= FIXED_CODE_END) { 323 && addr + sizeof(data) <= FIXED_CODE_END) {
323 memcpy((void *)(addr), &data, sizeof(data)); 324 copy_to_user_page(0, 0, 0, (void *)(addr), &data, sizeof(data));
324 copied = sizeof(data); 325 copied = sizeof(data);
325 326
326 } else 327 } else
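The FIXED_CODE area is executed by user space, so patching it with a bare memcpy() updates memory (and the data cache) but can leave a stale instruction cache behind; routing the access through copy_to_user_page()/copy_from_user_page() lets the architecture flush the affected range. Conceptually, the write path now behaves like this sketch (not the real implementation; kernel context assumed):

	#include <linux/string.h>
	#include <asm/cacheflush.h>

	static void patch_fixed_code(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);				/* data side */
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);	/* instruction side */
	}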
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index e5c116230800..a58687bdee6a 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -889,6 +889,10 @@ void __init setup_arch(char **cmdline_p)
889 CPU, bfin_revid()); 889 CPU, bfin_revid());
890 } 890 }
891 891
892 /* We can't run on BF548-0.1 due to ANOMALY 05000448 */
893 if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
894 panic("You can't run on this processor due to 05000448\n");
895
892 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); 896 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
893 897
894 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", 898 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
@@ -1141,12 +1145,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1141 icache_size = 0; 1145 icache_size = 0;
1142 1146
1143 seq_printf(m, "cache size\t: %d KB(L1 icache) " 1147 seq_printf(m, "cache size\t: %d KB(L1 icache) "
1144 "%d KB(L1 dcache-%s) %d KB(L2 cache)\n", 1148 "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
1145 icache_size, dcache_size, 1149 icache_size, dcache_size,
1146#if defined CONFIG_BFIN_WB 1150#if defined CONFIG_BFIN_WB
1147 "wb" 1151 "-wb"
1148#elif defined CONFIG_BFIN_WT 1152#elif defined CONFIG_BFIN_WT
1149 "wt" 1153 "-wt"
1150#endif 1154#endif
1151 "", 0); 1155 "", 0);
1152 1156
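The seq_printf() fix moves the "-" into the conditional token, so a kernel built with neither CONFIG_BFIN_WB nor CONFIG_BFIN_WT prints "L1 dcache" rather than a dangling "L1 dcache-". The effect in miniature:

	#include <stdio.h>

	int main(void)
	{
		printf("%d KB(L1 dcache%s)\n", 16, "-wb");	/* 16 KB(L1 dcache-wb) */
		printf("%d KB(L1 dcache%s)\n", 16, "");		/* 16 KB(L1 dcache)    */
		return 0;
	}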
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 172b4c588467..1bbacfbd4c5d 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -134,7 +134,10 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
134 134
135 write_seqlock(&xtime_lock); 135 write_seqlock(&xtime_lock);
136#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE) 136#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
137/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */ 137 /*
138 * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
139 * enabled.
140 */
138 if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) { 141 if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
139#endif 142#endif
140 do_timer(1); 143 do_timer(1);
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 0e175342112e..41f2eacfef20 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -113,7 +113,6 @@ static struct platform_device bfin_mac_device = {
113 .name = "bfin_mac", 113 .name = "bfin_mac",
114 .dev.platform_data = &bfin_mii_bus, 114 .dev.platform_data = &bfin_mii_bus,
115}; 115};
116#endif
117 116
118#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE) 117#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
119static struct dsa_platform_data ksz8893m_switch_data = { 118static struct dsa_platform_data ksz8893m_switch_data = {
@@ -132,6 +131,7 @@ static struct platform_device ksz8893m_switch_device = {
132 .dev.platform_data = &ksz8893m_switch_data, 131 .dev.platform_data = &ksz8893m_switch_data,
133}; 132};
134#endif 133#endif
134#endif
135 135
136#if defined(CONFIG_MTD_M25P80) \ 136#if defined(CONFIG_MTD_M25P80) \
137 || defined(CONFIG_MTD_M25P80_MODULE) 137 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -171,6 +171,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
171}; 171};
172#endif 172#endif
173 173
174#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
174#if defined(CONFIG_NET_DSA_KSZ8893M) \ 175#if defined(CONFIG_NET_DSA_KSZ8893M) \
175 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE) 176 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
176/* SPI SWITCH CHIP */ 177/* SPI SWITCH CHIP */
@@ -179,10 +180,11 @@ static struct bfin5xx_spi_chip spi_switch_info = {
179 .bits_per_word = 8, 180 .bits_per_word = 8,
180}; 181};
181#endif 182#endif
183#endif
182 184
183#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 185#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
184static struct bfin5xx_spi_chip spi_mmc_chip_info = { 186static struct bfin5xx_spi_chip mmc_spi_chip_info = {
185 .enable_dma = 1, 187 .enable_dma = 0,
186 .bits_per_word = 8, 188 .bits_per_word = 8,
187}; 189};
188#endif 190#endif
@@ -259,6 +261,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
259 }, 261 },
260#endif 262#endif
261 263
264#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
262#if defined(CONFIG_NET_DSA_KSZ8893M) \ 265#if defined(CONFIG_NET_DSA_KSZ8893M) \
263 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE) 266 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
264 { 267 {
@@ -271,24 +274,15 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
271 .mode = SPI_MODE_3, 274 .mode = SPI_MODE_3,
272 }, 275 },
273#endif 276#endif
277#endif
274 278
275#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 279#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
276 { 280 {
277 .modalias = "spi_mmc_dummy", 281 .modalias = "mmc_spi",
278 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 282 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
279 .bus_num = 0, 283 .bus_num = 0,
280 .chip_select = 0, 284 .chip_select = 5,
281 .platform_data = NULL, 285 .controller_data = &mmc_spi_chip_info,
282 .controller_data = &spi_mmc_chip_info,
283 .mode = SPI_MODE_3,
284 },
285 {
286 .modalias = "spi_mmc",
287 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
288 .bus_num = 0,
289 .chip_select = CONFIG_SPI_MMC_CS_CHAN,
290 .platform_data = NULL,
291 .controller_data = &spi_mmc_chip_info,
292 .mode = SPI_MODE_3, 286 .mode = SPI_MODE_3,
293 }, 287 },
294#endif 288#endif
@@ -630,11 +624,10 @@ static struct platform_device *stamp_devices[] __initdata = {
630#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 624#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
631 &bfin_mii_bus, 625 &bfin_mii_bus,
632 &bfin_mac_device, 626 &bfin_mac_device,
633#endif
634
635#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE) 627#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
636 &ksz8893m_switch_device, 628 &ksz8893m_switch_device,
637#endif 629#endif
630#endif
638 631
639#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 632#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
640 &bfin_spi0_device, 633 &bfin_spi0_device,
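This board (and the cm_bf527, bf527 ezbrd, blackstamp, cm_bf533 and ip0x boards below) drops the out-of-tree spi_mmc driver, which registered an extra dummy board-info entry, in favor of the mainline mmc_spi host driver: a single entry, DMA disabled in the controller data, and a fixed chip select. The resulting pattern as a sketch; values mirror the bf518 board above, and bfin5xx_spi_chip comes from the Blackfin SPI header:

	#include <linux/spi/spi.h>

	static struct bfin5xx_spi_chip mmc_spi_chip_info = {
		.enable_dma	= 0,	/* the mmc_spi host driver works in PIO mode */
		.bits_per_word	= 8,
	};

	static struct spi_board_info mmc_spi_board_info[] __initdata = {
		{
			.modalias	= "mmc_spi",	/* binds drivers/mmc/host/mmc_spi.c */
			.max_speed_hz	= 25000000,	/* max SPI clock (SCK) in Hz */
			.bus_num	= 0,
			.chip_select	= 5,
			.controller_data = &mmc_spi_chip_info,
			.mode		= SPI_MODE_3,
		},
	};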
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index e5b4bef0edae..c847bb101076 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -2,12 +2,12 @@
2 * File: include/asm-blackfin/mach-bf518/anomaly.h 2 * File: include/asm-blackfin/mach-bf518/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
 9/* This file should be up to date with: 9/* This file should be up to date with:
10 * - ???? 10 * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
11 */ 11 */
12 12
13#ifndef _MACH_ANOMALY_H_ 13#ifndef _MACH_ANOMALY_H_
@@ -19,6 +19,8 @@
19#define ANOMALY_05000122 (1) 19#define ANOMALY_05000122 (1)
20/* False Hardware Error from an Access in the Shadow of a Conditional Branch */ 20/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
21#define ANOMALY_05000245 (1) 21#define ANOMALY_05000245 (1)
22/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
23#define ANOMALY_05000254 (1)
22/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ 24/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
23#define ANOMALY_05000265 (1) 25#define ANOMALY_05000265 (1)
24/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ 26/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
@@ -53,6 +55,12 @@
53#define ANOMALY_05000443 (1) 55#define ANOMALY_05000443 (1)
54/* Incorrect L1 Instruction Bank B Memory Map Location */ 56/* Incorrect L1 Instruction Bank B Memory Map Location */
55#define ANOMALY_05000444 (1) 57#define ANOMALY_05000444 (1)
58/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
59#define ANOMALY_05000452 (1)
60/* PWM_TRIPB Signal Not Available on PG10 */
61#define ANOMALY_05000453 (1)
62/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
63#define ANOMALY_05000455 (1)
56 64
57/* Anomalies that don't exist on this proc */ 65/* Anomalies that don't exist on this proc */
58#define ANOMALY_05000125 (0) 66#define ANOMALY_05000125 (0)
@@ -65,15 +73,20 @@
65#define ANOMALY_05000263 (0) 73#define ANOMALY_05000263 (0)
66#define ANOMALY_05000266 (0) 74#define ANOMALY_05000266 (0)
67#define ANOMALY_05000273 (0) 75#define ANOMALY_05000273 (0)
76#define ANOMALY_05000278 (0)
68#define ANOMALY_05000285 (0) 77#define ANOMALY_05000285 (0)
78#define ANOMALY_05000305 (0)
69#define ANOMALY_05000307 (0) 79#define ANOMALY_05000307 (0)
70#define ANOMALY_05000311 (0) 80#define ANOMALY_05000311 (0)
71#define ANOMALY_05000312 (0) 81#define ANOMALY_05000312 (0)
72#define ANOMALY_05000323 (0) 82#define ANOMALY_05000323 (0)
73#define ANOMALY_05000353 (0) 83#define ANOMALY_05000353 (0)
74#define ANOMALY_05000363 (0) 84#define ANOMALY_05000363 (0)
85#define ANOMALY_05000380 (0)
75#define ANOMALY_05000386 (0) 86#define ANOMALY_05000386 (0)
76#define ANOMALY_05000412 (0) 87#define ANOMALY_05000412 (0)
77#define ANOMALY_05000432 (0) 88#define ANOMALY_05000432 (0)
89#define ANOMALY_05000447 (0)
90#define ANOMALY_05000448 (0)
78 91
79#endif 92#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
index b50a63b975a2..e21c1c3e4ec7 100644
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
144 CH_UART0_TX, 144 CH_UART0_TX,
145 CH_UART0_RX, 145 CH_UART0_RX,
146#endif 146#endif
147#ifdef CONFIG_BFIN_UART0_CTSRTS 147#ifdef CONFIG_SERIAL_BFIN_CTSRTS
148 CONFIG_UART0_CTS_PIN, 148 CONFIG_UART0_CTS_PIN,
149 CONFIG_UART0_RTS_PIN, 149 CONFIG_UART0_RTS_PIN,
150#endif 150#endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
158 CH_UART1_TX, 158 CH_UART1_TX,
159 CH_UART1_RX, 159 CH_UART1_RX,
160#endif 160#endif
161#ifdef CONFIG_BFIN_UART1_CTSRTS 161#ifdef CONFIG_SERIAL_BFIN_CTSRTS
162 CONFIG_UART1_CTS_PIN, 162 CONFIG_UART1_CTS_PIN,
163 CONFIG_UART1_RTS_PIN, 163 CONFIG_UART1_RTS_PIN,
164#endif 164#endif
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 856c097b5317..48e69eecdba4 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -487,9 +487,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
487}; 487};
488#endif 488#endif
489 489
490#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 490#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
491static struct bfin5xx_spi_chip spi_mmc_chip_info = { 491static struct bfin5xx_spi_chip mmc_spi_chip_info = {
492 .enable_dma = 1, 492 .enable_dma = 0,
493 .bits_per_word = 8, 493 .bits_per_word = 8,
494}; 494};
495#endif 495#endif
@@ -585,23 +585,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
585 .controller_data = &ad9960_spi_chip_info, 585 .controller_data = &ad9960_spi_chip_info,
586 }, 586 },
587#endif 587#endif
588#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 588#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
589 { 589 {
590 .modalias = "spi_mmc_dummy", 590 .modalias = "mmc_spi",
591 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 591 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
592 .bus_num = 0,
593 .chip_select = 0,
594 .platform_data = NULL,
595 .controller_data = &spi_mmc_chip_info,
596 .mode = SPI_MODE_3,
597 },
598 {
599 .modalias = "spi_mmc",
600 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
601 .bus_num = 0, 592 .bus_num = 0,
602 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 593 .chip_select = 5,
603 .platform_data = NULL, 594 .controller_data = &mmc_spi_chip_info,
604 .controller_data = &spi_mmc_chip_info,
605 .mode = SPI_MODE_3, 595 .mode = SPI_MODE_3,
606 }, 596 },
607#endif 597#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 83606fcdde27..7fe480e4ebe8 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -256,9 +256,9 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
256}; 256};
257#endif 257#endif
258 258
259#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 259#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
260static struct bfin5xx_spi_chip spi_mmc_chip_info = { 260static struct bfin5xx_spi_chip mmc_spi_chip_info = {
261 .enable_dma = 1, 261 .enable_dma = 0,
262 .bits_per_word = 8, 262 .bits_per_word = 8,
263}; 263};
264#endif 264#endif
@@ -366,23 +366,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
366 }, 366 },
367#endif 367#endif
368 368
369#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 369#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
370 { 370 {
371 .modalias = "spi_mmc_dummy", 371 .modalias = "mmc_spi",
372 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 372 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
373 .bus_num = 0, 373 .bus_num = 0,
374 .chip_select = 0, 374 .chip_select = 5,
375 .platform_data = NULL, 375 .controller_data = &mmc_spi_chip_info,
376 .controller_data = &spi_mmc_chip_info,
377 .mode = SPI_MODE_3,
378 },
379 {
380 .modalias = "spi_mmc",
381 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
382 .bus_num = 0,
383 .chip_select = CONFIG_SPI_MMC_CS_CHAN,
384 .platform_data = NULL,
385 .controller_data = &spi_mmc_chip_info,
386 .mode = SPI_MODE_3, 376 .mode = SPI_MODE_3,
387 }, 377 },
388#endif 378#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 035e8d835058..df6808d8a6ef 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -2,7 +2,7 @@
2 * File: include/asm-blackfin/mach-bf527/anomaly.h 2 * File: include/asm-blackfin/mach-bf527/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
@@ -167,12 +167,16 @@
167#define ANOMALY_05000263 (0) 167#define ANOMALY_05000263 (0)
168#define ANOMALY_05000266 (0) 168#define ANOMALY_05000266 (0)
169#define ANOMALY_05000273 (0) 169#define ANOMALY_05000273 (0)
170#define ANOMALY_05000278 (0)
170#define ANOMALY_05000285 (0) 171#define ANOMALY_05000285 (0)
172#define ANOMALY_05000305 (0)
171#define ANOMALY_05000307 (0) 173#define ANOMALY_05000307 (0)
172#define ANOMALY_05000311 (0) 174#define ANOMALY_05000311 (0)
173#define ANOMALY_05000312 (0) 175#define ANOMALY_05000312 (0)
174#define ANOMALY_05000323 (0) 176#define ANOMALY_05000323 (0)
175#define ANOMALY_05000363 (0) 177#define ANOMALY_05000363 (0)
176#define ANOMALY_05000412 (0) 178#define ANOMALY_05000412 (0)
179#define ANOMALY_05000447 (0)
180#define ANOMALY_05000448 (0)
177 181
178#endif 182#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
index 75722d6008b0..e8c41fd842b5 100644
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
144 CH_UART0_TX, 144 CH_UART0_TX,
145 CH_UART0_RX, 145 CH_UART0_RX,
146#endif 146#endif
147#ifdef CONFIG_BFIN_UART0_CTSRTS 147#ifdef CONFIG_SERIAL_BFIN_CTSRTS
148 CONFIG_UART0_CTS_PIN, 148 CONFIG_UART0_CTS_PIN,
149 CONFIG_UART0_RTS_PIN, 149 CONFIG_UART0_RTS_PIN,
150#endif 150#endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
158 CH_UART1_TX, 158 CH_UART1_TX,
159 CH_UART1_RX, 159 CH_UART1_RX,
160#endif 160#endif
161#ifdef CONFIG_BFIN_UART1_CTSRTS 161#ifdef CONFIG_SERIAL_BFIN_CTSRTS
162 CONFIG_UART1_CTS_PIN, 162 CONFIG_UART1_CTS_PIN,
163 CONFIG_UART1_RTS_PIN, 163 CONFIG_UART1_RTS_PIN,
164#endif 164#endif
diff --git a/arch/blackfin/mach-bf533/boards/Kconfig b/arch/blackfin/mach-bf533/boards/Kconfig
index 308c98dc5aba..8d8b3e7321e6 100644
--- a/arch/blackfin/mach-bf533/boards/Kconfig
+++ b/arch/blackfin/mach-bf533/boards/Kconfig
@@ -38,9 +38,4 @@ config BFIN532_IP0X
38 help 38 help
39 Core support for IP04/IP04 open hardware IP-PBX. 39 Core support for IP04/IP04 open hardware IP-PBX.
40 40
41config GENERIC_BF533_BOARD
42 bool "Generic"
43 help
44 Generic or Custom board support.
45
46endchoice 41endchoice
diff --git a/arch/blackfin/mach-bf533/boards/Makefile b/arch/blackfin/mach-bf533/boards/Makefile
index 9afbe72b484f..ff1e832f80d2 100644
--- a/arch/blackfin/mach-bf533/boards/Makefile
+++ b/arch/blackfin/mach-bf533/boards/Makefile
@@ -2,7 +2,6 @@
2# arch/blackfin/mach-bf533/boards/Makefile 2# arch/blackfin/mach-bf533/boards/Makefile
3# 3#
4 4
5obj-$(CONFIG_GENERIC_BF533_BOARD) += generic_board.o
6obj-$(CONFIG_BFIN533_STAMP) += stamp.o 5obj-$(CONFIG_BFIN533_STAMP) += stamp.o
7obj-$(CONFIG_BFIN532_IP0X) += ip0x.o 6obj-$(CONFIG_BFIN532_IP0X) += ip0x.o
8obj-$(CONFIG_BFIN533_EZKIT) += ezkit.o 7obj-$(CONFIG_BFIN533_EZKIT) += ezkit.o
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 015c18f85e7f..0765872a8ada 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -101,9 +101,9 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
101}; 101};
102#endif 102#endif
103 103
104#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 104#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
105static struct bfin5xx_spi_chip spi_mmc_chip_info = { 105static struct bfin5xx_spi_chip mmc_spi_chip_info = {
106 .enable_dma = 1, 106 .enable_dma = 0,
107 .bits_per_word = 8, 107 .bits_per_word = 8,
108}; 108};
109#endif 109#endif
@@ -129,23 +129,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
129 }, 129 },
130#endif 130#endif
131 131
132#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 132#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
133 {
134 .modalias = "spi_mmc_dummy",
135 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
136 .bus_num = 0,
137 .chip_select = 0,
138 .platform_data = NULL,
139 .controller_data = &spi_mmc_chip_info,
140 .mode = SPI_MODE_3,
141 },
142 { 133 {
143 .modalias = "spi_mmc", 134 .modalias = "mmc_spi",
144 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 135 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
145 .bus_num = 0, 136 .bus_num = 0,
146 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 137 .chip_select = 5,
147 .platform_data = NULL, 138 .controller_data = &mmc_spi_chip_info,
148 .controller_data = &spi_mmc_chip_info,
149 .mode = SPI_MODE_3, 139 .mode = SPI_MODE_3,
150 }, 140 },
151#endif 141#endif
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index e7061c7e8c42..e8974878d8c2 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -96,9 +96,9 @@ static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
96}; 96};
97#endif 97#endif
98 98
99#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 99#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
100static struct bfin5xx_spi_chip spi_mmc_chip_info = { 100static struct bfin5xx_spi_chip mmc_spi_chip_info = {
101 .enable_dma = 1, 101 .enable_dma = 0,
102 .bits_per_word = 8, 102 .bits_per_word = 8,
103}; 103};
104#endif 104#endif
@@ -138,23 +138,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
138 }, 138 },
139#endif 139#endif
140 140
141#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 141#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
142 {
143 .modalias = "spi_mmc_dummy",
144 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
145 .bus_num = 0,
146 .chip_select = 0,
147 .platform_data = NULL,
148 .controller_data = &spi_mmc_chip_info,
149 .mode = SPI_MODE_3,
150 },
151 { 142 {
152 .modalias = "spi_mmc", 143 .modalias = "mmc_spi",
153 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 144 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
154 .bus_num = 0, 145 .bus_num = 0,
155 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 146 .chip_select = 5,
156 .platform_data = NULL, 147 .controller_data = &mmc_spi_chip_info,
157 .controller_data = &spi_mmc_chip_info,
158 .mode = SPI_MODE_3, 148 .mode = SPI_MODE_3,
159 }, 149 },
160#endif 150#endif
diff --git a/arch/blackfin/mach-bf533/boards/generic_board.c b/arch/blackfin/mach-bf533/boards/generic_board.c
deleted file mode 100644
index 986eeec53b1f..000000000000
--- a/arch/blackfin/mach-bf533/boards/generic_board.c
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * File: arch/blackfin/mach-bf533/generic_board.c
3 * Based on: arch/blackfin/mach-bf533/ezkit.c
4 * Author: Aidan Williams <aidan@nicta.com.au>
5 *
6 * Created: 2005
7 * Description:
8 *
9 * Modified:
10 * Copyright 2005 National ICT Australia (NICTA)
11 * Copyright 2004-2006 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see the file COPYING, or write
27 * to the Free Software Foundation, Inc.,
28 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 */
30
31#include <linux/device.h>
32#include <linux/platform_device.h>
33#include <linux/irq.h>
34
35/*
36 * Name the Board for the /proc/cpuinfo
37 */
38const char bfin_board_name[] = "UNKNOWN BOARD";
39
40#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
41static struct platform_device rtc_device = {
42 .name = "rtc-bfin",
43 .id = -1,
44};
45#endif
46
47/*
48 * Driver needs to know address, irq and flag pin.
49 */
50#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
51static struct resource smc91x_resources[] = {
52 {
53 .start = 0x20300300,
54 .end = 0x20300300 + 16,
55 .flags = IORESOURCE_MEM,
56 }, {
57 .start = IRQ_PROG_INTB,
58 .end = IRQ_PROG_INTB,
59 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
60 }, {
61 .start = IRQ_PF7,
62 .end = IRQ_PF7,
63 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
64 },
65};
66
67static struct platform_device smc91x_device = {
68 .name = "smc91x",
69 .id = 0,
70 .num_resources = ARRAY_SIZE(smc91x_resources),
71 .resource = smc91x_resources,
72};
73#endif
74
75#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
76#ifdef CONFIG_BFIN_SIR0
77static struct resource bfin_sir0_resources[] = {
78 {
79 .start = 0xFFC00400,
80 .end = 0xFFC004FF,
81 .flags = IORESOURCE_MEM,
82 },
83 {
84 .start = IRQ_UART0_RX,
85 .end = IRQ_UART0_RX+1,
86 .flags = IORESOURCE_IRQ,
87 },
88 {
89 .start = CH_UART0_RX,
90 .end = CH_UART0_RX+1,
91 .flags = IORESOURCE_DMA,
92 },
93};
94
95static struct platform_device bfin_sir0_device = {
96 .name = "bfin_sir",
97 .id = 0,
98 .num_resources = ARRAY_SIZE(bfin_sir0_resources),
99 .resource = bfin_sir0_resources,
100};
101#endif
102#endif
103
104static struct platform_device *generic_board_devices[] __initdata = {
105#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
106 &rtc_device,
107#endif
108
109#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
110 &smc91x_device,
111#endif
112
113#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
114#ifdef CONFIG_BFIN_SIR0
115 &bfin_sir0_device,
116#endif
117#endif
118};
119
120static int __init generic_board_init(void)
121{
122 printk(KERN_INFO "%s(): registering device resources\n", __func__);
123 return platform_add_devices(generic_board_devices, ARRAY_SIZE(generic_board_devices));
124}
125
126arch_initcall(generic_board_init);
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index e30b1b7d1442..f19b63378b12 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -127,8 +127,8 @@ static struct platform_device dm9000_device2 = {
127#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 127#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
128/* all SPI peripherals info goes here */ 128/* all SPI peripherals info goes here */
129 129
130#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 130#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
131static struct bfin5xx_spi_chip spi_mmc_chip_info = { 131static struct bfin5xx_spi_chip mmc_spi_chip_info = {
132/* 132/*
133 * CPOL (Clock Polarity) 133 * CPOL (Clock Polarity)
134 * 0 - Active high SCK 134 * 0 - Active high SCK
@@ -152,14 +152,13 @@ static struct bfin5xx_spi_chip spi_mmc_chip_info = {
152/* Notice: for blackfin, the speed_hz is the value of register 152/* Notice: for blackfin, the speed_hz is the value of register
153 * SPI_BAUD, not the real baudrate */ 153 * SPI_BAUD, not the real baudrate */
154static struct spi_board_info bfin_spi_board_info[] __initdata = { 154static struct spi_board_info bfin_spi_board_info[] __initdata = {
155#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 155#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
156 { 156 {
157 .modalias = "spi_mmc", 157 .modalias = "mmc_spi",
158 .max_speed_hz = 2, 158 .max_speed_hz = 2,
159 .bus_num = 1, 159 .bus_num = 1,
160 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 160 .chip_select = 5,
161 .platform_data = NULL, 161 .controller_data = &mmc_spi_chip_info,
162 .controller_data = &spi_mmc_chip_info,
163 }, 162 },
164#endif 163#endif
165}; 164};
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 0d3a03429fb9..1cf893e2e55b 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -2,7 +2,7 @@
2 * File: include/asm-blackfin/mach-bf533/anomaly.h 2 * File: include/asm-blackfin/mach-bf533/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
@@ -160,7 +160,7 @@
160#define ANOMALY_05000301 (__SILICON_REVISION__ < 6) 160#define ANOMALY_05000301 (__SILICON_REVISION__ < 6)
161/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */ 161/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
162#define ANOMALY_05000302 (__SILICON_REVISION__ < 5) 162#define ANOMALY_05000302 (__SILICON_REVISION__ < 5)
163/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */ 163/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
164#define ANOMALY_05000305 (__SILICON_REVISION__ < 5) 164#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
165/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */ 165/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
166#define ANOMALY_05000306 (__SILICON_REVISION__ < 5) 166#define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
@@ -278,9 +278,12 @@
278#define ANOMALY_05000266 (0) 278#define ANOMALY_05000266 (0)
279#define ANOMALY_05000323 (0) 279#define ANOMALY_05000323 (0)
280#define ANOMALY_05000353 (1) 280#define ANOMALY_05000353 (1)
281#define ANOMALY_05000380 (0)
281#define ANOMALY_05000386 (1) 282#define ANOMALY_05000386 (1)
282#define ANOMALY_05000412 (0) 283#define ANOMALY_05000412 (0)
283#define ANOMALY_05000432 (0) 284#define ANOMALY_05000432 (0)
284#define ANOMALY_05000435 (0) 285#define ANOMALY_05000435 (0)
286#define ANOMALY_05000447 (0)
287#define ANOMALY_05000448 (0)
285 288
286#endif 289#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
index f3d9e495230c..5f517f53b0fd 100644
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
@@ -134,7 +134,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
134 CH_UART_TX, 134 CH_UART_TX,
135 CH_UART_RX, 135 CH_UART_RX,
136#endif 136#endif
137#ifdef CONFIG_BFIN_UART0_CTSRTS 137#ifdef CONFIG_SERIAL_BFIN_CTSRTS
138 CONFIG_UART0_CTS_PIN, 138 CONFIG_UART0_CTS_PIN,
139 CONFIG_UART0_RTS_PIN, 139 CONFIG_UART0_RTS_PIN,
140#endif 140#endif
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 42a57b0acb29..77c59da87e85 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -33,9 +33,4 @@ config CAMSIG_MINOTAUR
33 help 33 help
34 Board supply package for CSP Minotaur 34 Board supply package for CSP Minotaur
35 35
36config GENERIC_BF537_BOARD
37 bool "Generic"
38 help
39 Generic or Custom board support.
40
41endchoice 36endchoice
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index 7168cc14afd8..68b98a7af6a6 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -2,7 +2,6 @@
2# arch/blackfin/mach-bf537/boards/Makefile 2# arch/blackfin/mach-bf537/boards/Makefile
3# 3#
4 4
5obj-$(CONFIG_GENERIC_BF537_BOARD) += generic_board.o
6obj-$(CONFIG_BFIN537_STAMP) += stamp.o 5obj-$(CONFIG_BFIN537_STAMP) += stamp.o
7obj-$(CONFIG_BFIN537_BLUETECHNIX_CM) += cm_bf537.o 6obj-$(CONFIG_BFIN537_BLUETECHNIX_CM) += cm_bf537.o
8obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o 7obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 9cd8fb2a30d3..41c75b9bfac0 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -108,9 +108,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
108}; 108};
109#endif 109#endif
110 110
111#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 111#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
112static struct bfin5xx_spi_chip spi_mmc_chip_info = { 112static struct bfin5xx_spi_chip mmc_spi_chip_info = {
113 .enable_dma = 1, 113 .enable_dma = 0,
114 .bits_per_word = 8, 114 .bits_per_word = 8,
115}; 115};
116#endif 116#endif
@@ -160,23 +160,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
160 }, 160 },
161#endif 161#endif
162 162
163#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 163#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
164 {
165 .modalias = "spi_mmc_dummy",
166 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
167 .bus_num = 0,
168 .chip_select = 7,
169 .platform_data = NULL,
170 .controller_data = &spi_mmc_chip_info,
171 .mode = SPI_MODE_3,
172 },
173 { 164 {
174 .modalias = "spi_mmc", 165 .modalias = "mmc_spi",
175 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 166 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
176 .bus_num = 0, 167 .bus_num = 0,
177 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 168 .chip_select = 1,
178 .platform_data = NULL, 169 .controller_data = &mmc_spi_chip_info,
179 .controller_data = &spi_mmc_chip_info,
180 .mode = SPI_MODE_3, 170 .mode = SPI_MODE_3,
181 }, 171 },
182#endif 172#endif
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
deleted file mode 100644
index da710fdc4569..000000000000
--- a/arch/blackfin/mach-bf537/boards/generic_board.c
+++ /dev/null
@@ -1,745 +0,0 @@
1/*
2 * File: arch/blackfin/mach-bf537/boards/generic_board.c
3 * Based on: arch/blackfin/mach-bf533/boards/ezkit.c
4 * Author: Aidan Williams <aidan@nicta.com.au>
5 *
6 * Created:
7 * Description:
8 *
9 * Modified:
10 * Copyright 2005 National ICT Australia (NICTA)
11 * Copyright 2004-2008 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see the file COPYING, or write
27 * to the Free Software Foundation, Inc.,
28 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 */
30
31#include <linux/device.h>
32#include <linux/etherdevice.h>
33#include <linux/platform_device.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h>
36#include <linux/spi/spi.h>
37#include <linux/spi/flash.h>
38#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
39#include <linux/usb/isp1362.h>
40#endif
41#include <linux/irq.h>
42#include <linux/interrupt.h>
43#include <linux/usb/sl811.h>
44#include <asm/dma.h>
45#include <asm/bfin5xx_spi.h>
46#include <asm/reboot.h>
47#include <asm/portmux.h>
48#include <linux/spi/ad7877.h>
49
50/*
51 * Name the Board for the /proc/cpuinfo
52 */
53const char bfin_board_name[] = "UNKNOWN BOARD";
54
55/*
56 * Driver needs to know address, irq and flag pin.
57 */
58
59#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
60#include <linux/usb/isp1760.h>
61static struct resource bfin_isp1760_resources[] = {
62 [0] = {
63 .start = 0x203C0000,
64 .end = 0x203C0000 + 0x000fffff,
65 .flags = IORESOURCE_MEM,
66 },
67 [1] = {
68 .start = IRQ_PF7,
69 .end = IRQ_PF7,
70 .flags = IORESOURCE_IRQ,
71 },
72};
73
74static struct isp1760_platform_data isp1760_priv = {
75 .is_isp1761 = 0,
76 .port1_disable = 0,
77 .bus_width_16 = 1,
78 .port1_otg = 0,
79 .analog_oc = 0,
80 .dack_polarity_high = 0,
81 .dreq_polarity_high = 0,
82};
83
84static struct platform_device bfin_isp1760_device = {
85 .name = "isp1760-hcd",
86 .id = 0,
87 .dev = {
88 .platform_data = &isp1760_priv,
89 },
90 .num_resources = ARRAY_SIZE(bfin_isp1760_resources),
91 .resource = bfin_isp1760_resources,
92};
93#endif
94
95#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
96static struct resource bfin_pcmcia_cf_resources[] = {
97 {
98 .start = 0x20310000, /* IO PORT */
99 .end = 0x20312000,
100 .flags = IORESOURCE_MEM,
101 }, {
102 .start = 0x20311000, /* Attribute Memory */
103 .end = 0x20311FFF,
104 .flags = IORESOURCE_MEM,
105 }, {
106 .start = IRQ_PF4,
107 .end = IRQ_PF4,
108 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
109 }, {
110 .start = 6, /* Card Detect PF6 */
111 .end = 6,
112 .flags = IORESOURCE_IRQ,
113 },
114};
115
116static struct platform_device bfin_pcmcia_cf_device = {
117 .name = "bfin_cf_pcmcia",
118 .id = -1,
119 .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
120 .resource = bfin_pcmcia_cf_resources,
121};
122#endif
123
124#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
125static struct platform_device rtc_device = {
126 .name = "rtc-bfin",
127 .id = -1,
128};
129#endif
130
131#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
132static struct resource smc91x_resources[] = {
133 {
134 .name = "smc91x-regs",
135 .start = 0x20300300,
136 .end = 0x20300300 + 16,
137 .flags = IORESOURCE_MEM,
138 }, {
139
140 .start = IRQ_PF7,
141 .end = IRQ_PF7,
142 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
143 },
144};
145static struct platform_device smc91x_device = {
146 .name = "smc91x",
147 .id = 0,
148 .num_resources = ARRAY_SIZE(smc91x_resources),
149 .resource = smc91x_resources,
150};
151#endif
152
153#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
154static struct resource dm9000_resources[] = {
155 [0] = {
156 .start = 0x203FB800,
157 .end = 0x203FB800 + 1,
158 .flags = IORESOURCE_MEM,
159 },
160 [1] = {
161 .start = 0x203FB800 + 4,
162 .end = 0x203FB800 + 5,
163 .flags = IORESOURCE_MEM,
164 },
165 [2] = {
166 .start = IRQ_PF9,
167 .end = IRQ_PF9,
168 .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
169 },
170};
171
172static struct platform_device dm9000_device = {
173 .name = "dm9000",
174 .id = -1,
175 .num_resources = ARRAY_SIZE(dm9000_resources),
176 .resource = dm9000_resources,
177};
178#endif
179
180#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
181static struct resource sl811_hcd_resources[] = {
182 {
183 .start = 0x20340000,
184 .end = 0x20340000,
185 .flags = IORESOURCE_MEM,
186 }, {
187 .start = 0x20340004,
188 .end = 0x20340004,
189 .flags = IORESOURCE_MEM,
190 }, {
191 .start = CONFIG_USB_SL811_BFIN_IRQ,
192 .end = CONFIG_USB_SL811_BFIN_IRQ,
193 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
194 },
195};
196
197#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
198void sl811_port_power(struct device *dev, int is_on)
199{
200 gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
201 gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
202
203}
204#endif
205
206static struct sl811_platform_data sl811_priv = {
207 .potpg = 10,
208 .power = 250, /* == 500mA */
209#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
210 .port_power = &sl811_port_power,
211#endif
212};
213
214static struct platform_device sl811_hcd_device = {
215 .name = "sl811-hcd",
216 .id = 0,
217 .dev = {
218 .platform_data = &sl811_priv,
219 },
220 .num_resources = ARRAY_SIZE(sl811_hcd_resources),
221 .resource = sl811_hcd_resources,
222};
223#endif
224
225#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
226static struct resource isp1362_hcd_resources[] = {
227 {
228 .start = 0x20360000,
229 .end = 0x20360000,
230 .flags = IORESOURCE_MEM,
231 }, {
232 .start = 0x20360004,
233 .end = 0x20360004,
234 .flags = IORESOURCE_MEM,
235 }, {
236 .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
237 .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
238 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
239 },
240};
241
242static struct isp1362_platform_data isp1362_priv = {
243 .sel15Kres = 1,
244 .clknotstop = 0,
245 .oc_enable = 0,
246 .int_act_high = 0,
247 .int_edge_triggered = 0,
248 .remote_wakeup_connected = 0,
249 .no_power_switching = 1,
250 .power_switching_mode = 0,
251};
252
253static struct platform_device isp1362_hcd_device = {
254 .name = "isp1362-hcd",
255 .id = 0,
256 .dev = {
257 .platform_data = &isp1362_priv,
258 },
259 .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
260 .resource = isp1362_hcd_resources,
261};
262#endif
263
264#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
265static struct platform_device bfin_mii_bus = {
266 .name = "bfin_mii_bus",
267};
268
269static struct platform_device bfin_mac_device = {
270 .name = "bfin_mac",
271 .dev.platform_data = &bfin_mii_bus,
272};
273#endif
274
275#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
276static struct resource net2272_bfin_resources[] = {
277 {
278 .start = 0x20300000,
279 .end = 0x20300000 + 0x100,
280 .flags = IORESOURCE_MEM,
281 }, {
282 .start = IRQ_PF7,
283 .end = IRQ_PF7,
284 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
285 },
286};
287
288static struct platform_device net2272_bfin_device = {
289 .name = "net2272",
290 .id = -1,
291 .num_resources = ARRAY_SIZE(net2272_bfin_resources),
292 .resource = net2272_bfin_resources,
293};
294#endif
295
296#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
297/* all SPI peripherals info goes here */
298
299#if defined(CONFIG_MTD_M25P80) \
300 || defined(CONFIG_MTD_M25P80_MODULE)
301static struct mtd_partition bfin_spi_flash_partitions[] = {
302 {
303 .name = "bootloader(spi)",
304 .size = 0x00020000,
305 .offset = 0,
306 .mask_flags = MTD_CAP_ROM
307 }, {
308 .name = "linux kernel(spi)",
309 .size = 0xe0000,
310 .offset = 0x20000
311 }, {
312 .name = "file system(spi)",
313 .size = 0x700000,
314 .offset = 0x00100000,
315 }
316};
317
318static struct flash_platform_data bfin_spi_flash_data = {
319 .name = "m25p80",
320 .parts = bfin_spi_flash_partitions,
321 .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
322 .type = "m25p64",
323};
324
325/* SPI flash chip (m25p64) */
326static struct bfin5xx_spi_chip spi_flash_chip_info = {
327 .enable_dma = 0, /* use dma transfer with this chip*/
328 .bits_per_word = 8,
329};
330#endif
331
332#if defined(CONFIG_SPI_ADC_BF533) \
333 || defined(CONFIG_SPI_ADC_BF533_MODULE)
334/* SPI ADC chip */
335static struct bfin5xx_spi_chip spi_adc_chip_info = {
336 .enable_dma = 1, /* use dma transfer with this chip*/
337 .bits_per_word = 16,
338};
339#endif
340
341#if defined(CONFIG_SND_BLACKFIN_AD1836) \
342 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
343static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
344 .enable_dma = 0,
345 .bits_per_word = 16,
346};
347#endif
348
349#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
350static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
351 .enable_dma = 0,
352 .bits_per_word = 16,
353};
354#endif
355
356#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
357static struct bfin5xx_spi_chip spi_mmc_chip_info = {
358 .enable_dma = 1,
359 .bits_per_word = 8,
360};
361#endif
362
363#if defined(CONFIG_PBX)
364static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
365 .ctl_reg = 0x4, /* send zero */
366 .enable_dma = 0,
367 .bits_per_word = 8,
368 .cs_change_per_word = 1,
369};
370#endif
371
372#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
373static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
374 .enable_dma = 0,
375 .bits_per_word = 16,
376};
377
378static const struct ad7877_platform_data bfin_ad7877_ts_info = {
379 .model = 7877,
380 .vref_delay_usecs = 50, /* internal, no capacitor */
381 .x_plate_ohms = 419,
382 .y_plate_ohms = 486,
383 .pressure_max = 1000,
384 .pressure_min = 0,
385 .stopacq_polarity = 1,
386 .first_conversion_delay = 3,
387 .acquisition_time = 1,
388 .averaging = 1,
389 .pen_down_acc_interval = 1,
390};
391#endif
392
393static struct spi_board_info bfin_spi_board_info[] __initdata = {
394#if defined(CONFIG_MTD_M25P80) \
395 || defined(CONFIG_MTD_M25P80_MODULE)
396 {
397 /* the modalias must be the same as spi device driver name */
398 .modalias = "m25p80", /* Name of spi_driver for this device */
399 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
400 .bus_num = 0, /* Framework bus number */
401 .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
402 .platform_data = &bfin_spi_flash_data,
403 .controller_data = &spi_flash_chip_info,
404 .mode = SPI_MODE_3,
405 },
406#endif
407
408#if defined(CONFIG_SPI_ADC_BF533) \
409 || defined(CONFIG_SPI_ADC_BF533_MODULE)
410 {
411 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
412 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
413 .bus_num = 0, /* Framework bus number */
414 .chip_select = 1, /* Framework chip select. */
415 .platform_data = NULL, /* No spi_driver specific config */
416 .controller_data = &spi_adc_chip_info,
417 },
418#endif
419
420#if defined(CONFIG_SND_BLACKFIN_AD1836) \
421 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
422 {
423 .modalias = "ad1836-spi",
424 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
425 .bus_num = 0,
426 .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT,
427 .controller_data = &ad1836_spi_chip_info,
428 },
429#endif
430#if defined(CONFIG_AD9960) || defined(CONFIG_AD9960_MODULE)
431 {
432 .modalias = "ad9960-spi",
433 .max_speed_hz = 10000000, /* max spi clock (SCK) speed in HZ */
434 .bus_num = 0,
435 .chip_select = 1,
436 .controller_data = &ad9960_spi_chip_info,
437 },
438#endif
439#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
440 {
441 .modalias = "spi_mmc_dummy",
442 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
443 .bus_num = 0,
444 .chip_select = 0,
445 .platform_data = NULL,
446 .controller_data = &spi_mmc_chip_info,
447 .mode = SPI_MODE_3,
448 },
449 {
450 .modalias = "spi_mmc",
451 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
452 .bus_num = 0,
453 .chip_select = CONFIG_SPI_MMC_CS_CHAN,
454 .platform_data = NULL,
455 .controller_data = &spi_mmc_chip_info,
456 .mode = SPI_MODE_3,
457 },
458#endif
459#if defined(CONFIG_PBX)
460 {
461 .modalias = "fxs-spi",
462 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
463 .bus_num = 0,
464 .chip_select = 8 - CONFIG_J11_JUMPER,
465 .controller_data = &spi_si3xxx_chip_info,
466 .mode = SPI_MODE_3,
467 },
468 {
469 .modalias = "fxo-spi",
470 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
471 .bus_num = 0,
472 .chip_select = 8 - CONFIG_J19_JUMPER,
473 .controller_data = &spi_si3xxx_chip_info,
474 .mode = SPI_MODE_3,
475 },
476#endif
477#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
478 {
479 .modalias = "ad7877",
480 .platform_data = &bfin_ad7877_ts_info,
481 .irq = IRQ_PF6,
482 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
483 .bus_num = 0,
484 .chip_select = 1,
485 .controller_data = &spi_ad7877_chip_info,
486 },
487#endif
488};
489
490/* SPI controller data */
491static struct bfin5xx_spi_master bfin_spi0_info = {
492 .num_chipselect = 8,
493 .enable_dma = 1, /* master has the ability to do dma transfer */
494 .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
495};
496
497/* SPI (0) */
498static struct resource bfin_spi0_resource[] = {
499 [0] = {
500 .start = SPI0_REGBASE,
501 .end = SPI0_REGBASE + 0xFF,
502 .flags = IORESOURCE_MEM,
503 },
504 [1] = {
505 .start = CH_SPI,
506 .end = CH_SPI,
507 .flags = IORESOURCE_IRQ,
508 },
509};
510
511static struct platform_device bfin_spi0_device = {
512 .name = "bfin-spi",
513 .id = 0, /* Bus number */
514 .num_resources = ARRAY_SIZE(bfin_spi0_resource),
515 .resource = bfin_spi0_resource,
516 .dev = {
517 .platform_data = &bfin_spi0_info, /* Passed to driver */
518 },
519};
520#endif /* spi master and devices */
521
522#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
523static struct platform_device bfin_fb_device = {
524 .name = "bf537-lq035",
525};
526#endif
527
528#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
529static struct platform_device bfin_fb_adv7393_device = {
530 .name = "bfin-adv7393",
531};
532#endif
533
534#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
535static struct resource bfin_uart_resources[] = {
536 {
537 .start = 0xFFC00400,
538 .end = 0xFFC004FF,
539 .flags = IORESOURCE_MEM,
540 }, {
541 .start = 0xFFC02000,
542 .end = 0xFFC020FF,
543 .flags = IORESOURCE_MEM,
544 },
545};
546
547static struct platform_device bfin_uart_device = {
548 .name = "bfin-uart",
549 .id = 1,
550 .num_resources = ARRAY_SIZE(bfin_uart_resources),
551 .resource = bfin_uart_resources,
552};
553#endif
554
555#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
556#ifdef CONFIG_BFIN_SIR0
557static struct resource bfin_sir0_resources[] = {
558 {
559 .start = 0xFFC00400,
560 .end = 0xFFC004FF,
561 .flags = IORESOURCE_MEM,
562 },
563 {
564 .start = IRQ_UART0_RX,
565 .end = IRQ_UART0_RX+1,
566 .flags = IORESOURCE_IRQ,
567 },
568 {
569 .start = CH_UART0_RX,
570 .end = CH_UART0_RX+1,
571 .flags = IORESOURCE_DMA,
572 },
573};
574
575static struct platform_device bfin_sir0_device = {
576 .name = "bfin_sir",
577 .id = 0,
578 .num_resources = ARRAY_SIZE(bfin_sir0_resources),
579 .resource = bfin_sir0_resources,
580};
581#endif
582#ifdef CONFIG_BFIN_SIR1
583static struct resource bfin_sir1_resources[] = {
584 {
585 .start = 0xFFC02000,
586 .end = 0xFFC020FF,
587 .flags = IORESOURCE_MEM,
588 },
589 {
590 .start = IRQ_UART1_RX,
591 .end = IRQ_UART1_RX+1,
592 .flags = IORESOURCE_IRQ,
593 },
594 {
595 .start = CH_UART1_RX,
596 .end = CH_UART1_RX+1,
597 .flags = IORESOURCE_DMA,
598 },
599};
600
601static struct platform_device bfin_sir1_device = {
602 .name = "bfin_sir",
603 .id = 1,
604 .num_resources = ARRAY_SIZE(bfin_sir1_resources),
605 .resource = bfin_sir1_resources,
606};
607#endif
608#endif
609
610#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
611static struct resource bfin_twi0_resource[] = {
612 [0] = {
613 .start = TWI0_REGBASE,
614 .end = TWI0_REGBASE + 0xFF,
615 .flags = IORESOURCE_MEM,
616 },
617 [1] = {
618 .start = IRQ_TWI,
619 .end = IRQ_TWI,
620 .flags = IORESOURCE_IRQ,
621 },
622};
623
624static struct platform_device i2c_bfin_twi_device = {
625 .name = "i2c-bfin-twi",
626 .id = 0,
627 .num_resources = ARRAY_SIZE(bfin_twi0_resource),
628 .resource = bfin_twi0_resource,
629};
630#endif
631
632#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
633static struct platform_device bfin_sport0_uart_device = {
634 .name = "bfin-sport-uart",
635 .id = 0,
636};
637
638static struct platform_device bfin_sport1_uart_device = {
639 .name = "bfin-sport-uart",
640 .id = 1,
641};
642#endif
643
644static struct platform_device *stamp_devices[] __initdata = {
645#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
646 &bfin_pcmcia_cf_device,
647#endif
648
649#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
650 &rtc_device,
651#endif
652
653#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
654 &sl811_hcd_device,
655#endif
656
657#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
658 &isp1362_hcd_device,
659#endif
660
661#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
662 &smc91x_device,
663#endif
664
665#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
666 &dm9000_device,
667#endif
668
669#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
670 &bfin_mii_bus,
671 &bfin_mac_device,
672#endif
673
674#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
675 &net2272_bfin_device,
676#endif
677
678#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
679 &bfin_isp1760_device,
680#endif
681
682#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
683 &bfin_spi0_device,
684#endif
685
686#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
687 &bfin_fb_device,
688#endif
689
690#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
691 &bfin_fb_adv7393_device,
692#endif
693
694#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
695 &bfin_uart_device,
696#endif
697
698#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
699#ifdef CONFIG_BFIN_SIR0
700 &bfin_sir0_device,
701#endif
702#ifdef CONFIG_BFIN_SIR1
703 &bfin_sir1_device,
704#endif
705#endif
706
707#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
708 &i2c_bfin_twi_device,
709#endif
710
711#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
712 &bfin_sport0_uart_device,
713 &bfin_sport1_uart_device,
714#endif
715};
716
717static int __init generic_init(void)
718{
719 printk(KERN_INFO "%s(): registering device resources\n", __func__);
720 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
721#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
722 spi_register_board_info(bfin_spi_board_info,
723 ARRAY_SIZE(bfin_spi_board_info));
724#endif
725
726 return 0;
727}
728
729arch_initcall(generic_init);
730
731void native_machine_restart(char *cmd)
732{
733 /* workaround reboot hang when booting from SPI */
734 if ((bfin_read_SYSCR() & 0x7) == 0x3)
735 bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
736}
737
738#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
739void bfin_get_ether_addr(char *addr)
740{
741 random_ether_addr(addr);
742 printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
743}
744EXPORT_SYMBOL(bfin_get_ether_addr);
745#endif
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index db7d3a385e4b..3c159819e555 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -134,9 +134,9 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
134}; 134};
135#endif 135#endif
136 136
137#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 137#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
138static struct bfin5xx_spi_chip spi_mmc_chip_info = { 138static struct bfin5xx_spi_chip mmc_spi_chip_info = {
139 .enable_dma = 1, 139 .enable_dma = 0,
140 .bits_per_word = 8, 140 .bits_per_word = 8,
141}; 141};
142#endif 142#endif
@@ -156,23 +156,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
156 }, 156 },
157#endif 157#endif
158 158
159#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 159#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
160 { 160 {
161 .modalias = "spi_mmc_dummy", 161 .modalias = "mmc_spi",
162 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 162 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
163 .bus_num = 0, 163 .bus_num = 0,
164 .chip_select = 0, 164 .chip_select = 5,
165 .platform_data = NULL, 165 .controller_data = &mmc_spi_chip_info,
166 .controller_data = &spi_mmc_chip_info,
167 .mode = SPI_MODE_3,
168 },
169 {
170 .modalias = "spi_mmc",
171 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
172 .bus_num = 0,
173 .chip_select = CONFIG_SPI_MMC_CS_CHAN,
174 .platform_data = NULL,
175 .controller_data = &spi_mmc_chip_info,
176 .mode = SPI_MODE_3, 166 .mode = SPI_MODE_3,
177 }, 167 },
178#endif 168#endif
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 590eb3a139b7..4e1de1e53f89 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -289,9 +289,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
289}; 289};
290#endif 290#endif
291 291
292#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 292#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
293static struct bfin5xx_spi_chip spi_mmc_chip_info = { 293static struct bfin5xx_spi_chip mmc_spi_chip_info = {
294 .enable_dma = 1, 294 .enable_dma = 0,
295 .bits_per_word = 8, 295 .bits_per_word = 8,
296}; 296};
297#endif 297#endif
@@ -364,23 +364,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
364 .controller_data = &ad9960_spi_chip_info, 364 .controller_data = &ad9960_spi_chip_info,
365 }, 365 },
366#endif 366#endif
367#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 367#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
368 {
369 .modalias = "spi_mmc_dummy",
370 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
371 .bus_num = 0,
372 .chip_select = 7,
373 .platform_data = NULL,
374 .controller_data = &spi_mmc_chip_info,
375 .mode = SPI_MODE_3,
376 },
377 { 368 {
378 .modalias = "spi_mmc", 369 .modalias = "mmc_spi",
379 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 370 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
380 .bus_num = 0, 371 .bus_num = 0,
381 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 372 .chip_select = 5,
382 .platform_data = NULL, 373 .controller_data = &mmc_spi_chip_info,
383 .controller_data = &spi_mmc_chip_info,
384 .mode = SPI_MODE_3, 374 .mode = SPI_MODE_3,
385 }, 375 },
386#endif 376#endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 3f4f203a06ec..53ad10f3cd76 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -108,9 +108,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
108}; 108};
109#endif 109#endif
110 110
111#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 111#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
112static struct bfin5xx_spi_chip spi_mmc_chip_info = { 112static struct bfin5xx_spi_chip mmc_spi_chip_info = {
113 .enable_dma = 1, 113 .enable_dma = 0,
114 .bits_per_word = 8, 114 .bits_per_word = 8,
115}; 115};
116#endif 116#endif
@@ -160,23 +160,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
160 }, 160 },
161#endif 161#endif
162 162
163#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 163#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
164 {
165 .modalias = "spi_mmc_dummy",
166 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
167 .bus_num = 0,
168 .chip_select = 7,
169 .platform_data = NULL,
170 .controller_data = &spi_mmc_chip_info,
171 .mode = SPI_MODE_3,
172 },
173 { 164 {
174 .modalias = "spi_mmc", 165 .modalias = "mmc_spi",
175 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 166 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
176 .bus_num = 0, 167 .bus_num = 0,
177 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 168 .chip_select = 5,
178 .platform_data = NULL, 169 .controller_data = &mmc_spi_chip_info,
179 .controller_data = &spi_mmc_chip_info,
180 .mode = SPI_MODE_3, 170 .mode = SPI_MODE_3,
181 }, 171 },
182#endif 172#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 9cb39121d1cb..1bfd80c26c90 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -2,7 +2,7 @@
2 * File: include/asm-blackfin/mach-bf537/anomaly.h 2 * File: include/asm-blackfin/mach-bf537/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
@@ -110,7 +110,7 @@
110#define ANOMALY_05000301 (1) 110#define ANOMALY_05000301 (1)
111/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */ 111/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
112#define ANOMALY_05000304 (__SILICON_REVISION__ < 3) 112#define ANOMALY_05000304 (__SILICON_REVISION__ < 3)
113/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */ 113/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
114#define ANOMALY_05000305 (__SILICON_REVISION__ < 3) 114#define ANOMALY_05000305 (__SILICON_REVISION__ < 3)
115/* SCKELOW Bit Does Not Maintain State Through Hibernate */ 115/* SCKELOW Bit Does Not Maintain State Through Hibernate */
116#define ANOMALY_05000307 (__SILICON_REVISION__ < 3) 116#define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
@@ -168,9 +168,12 @@
168#define ANOMALY_05000323 (0) 168#define ANOMALY_05000323 (0)
169#define ANOMALY_05000353 (1) 169#define ANOMALY_05000353 (1)
170#define ANOMALY_05000363 (0) 170#define ANOMALY_05000363 (0)
171#define ANOMALY_05000380 (0)
171#define ANOMALY_05000386 (1) 172#define ANOMALY_05000386 (1)
172#define ANOMALY_05000412 (0) 173#define ANOMALY_05000412 (0)
173#define ANOMALY_05000432 (0) 174#define ANOMALY_05000432 (0)
174#define ANOMALY_05000435 (0) 175#define ANOMALY_05000435 (0)
176#define ANOMALY_05000447 (0)
177#define ANOMALY_05000448 (0)
175 178
176#endif 179#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
index b3f87e1d16a2..9e34700844a2 100644
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
144 CH_UART0_TX, 144 CH_UART0_TX,
145 CH_UART0_RX, 145 CH_UART0_RX,
146#endif 146#endif
147#ifdef CONFIG_BFIN_UART0_CTSRTS 147#ifdef CONFIG_SERIAL_BFIN_CTSRTS
148 CONFIG_UART0_CTS_PIN, 148 CONFIG_UART0_CTS_PIN,
149 CONFIG_UART0_RTS_PIN, 149 CONFIG_UART0_RTS_PIN,
150#endif 150#endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
158 CH_UART1_TX, 158 CH_UART1_TX,
159 CH_UART1_RX, 159 CH_UART1_RX,
160#endif 160#endif
161#ifdef CONFIG_BFIN_UART1_CTSRTS 161#ifdef CONFIG_SERIAL_BFIN_CTSRTS
162 CONFIG_UART1_CTS_PIN, 162 CONFIG_UART1_CTS_PIN,
163 CONFIG_UART1_RTS_PIN, 163 CONFIG_UART1_RTS_PIN,
164#endif 164#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index e130b4f8a05d..3a5699827363 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -2,7 +2,7 @@
2 * File: include/asm-blackfin/mach-bf538/anomaly.h 2 * File: include/asm-blackfin/mach-bf538/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
@@ -120,13 +120,17 @@
120#define ANOMALY_05000198 (0) 120#define ANOMALY_05000198 (0)
121#define ANOMALY_05000230 (0) 121#define ANOMALY_05000230 (0)
122#define ANOMALY_05000263 (0) 122#define ANOMALY_05000263 (0)
123#define ANOMALY_05000305 (0)
123#define ANOMALY_05000311 (0) 124#define ANOMALY_05000311 (0)
124#define ANOMALY_05000323 (0) 125#define ANOMALY_05000323 (0)
125#define ANOMALY_05000353 (1) 126#define ANOMALY_05000353 (1)
126#define ANOMALY_05000363 (0) 127#define ANOMALY_05000363 (0)
128#define ANOMALY_05000380 (0)
127#define ANOMALY_05000386 (1) 129#define ANOMALY_05000386 (1)
128#define ANOMALY_05000412 (0) 130#define ANOMALY_05000412 (0)
129#define ANOMALY_05000432 (0) 131#define ANOMALY_05000432 (0)
130#define ANOMALY_05000435 (0) 132#define ANOMALY_05000435 (0)
133#define ANOMALY_05000447 (0)
134#define ANOMALY_05000448 (0)
131 135
132#endif 136#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
index 40503b6b89a3..3c2811ebecdd 100644
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -144,7 +144,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
144 CH_UART0_TX, 144 CH_UART0_TX,
145 CH_UART0_RX, 145 CH_UART0_RX,
146#endif 146#endif
147#ifdef CONFIG_BFIN_UART0_CTSRTS 147#ifdef CONFIG_SERIAL_BFIN_CTSRTS
148 CONFIG_UART0_CTS_PIN, 148 CONFIG_UART0_CTS_PIN,
149 CONFIG_UART0_RTS_PIN, 149 CONFIG_UART0_RTS_PIN,
150#endif 150#endif
@@ -158,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
158 CH_UART1_TX, 158 CH_UART1_TX,
159 CH_UART1_RX, 159 CH_UART1_RX,
160#endif 160#endif
161#ifdef CONFIG_BFIN_UART1_CTSRTS 161#ifdef CONFIG_SERIAL_BFIN_CTSRTS
162 CONFIG_UART1_CTS_PIN, 162 CONFIG_UART1_CTS_PIN,
163 CONFIG_UART1_RTS_PIN, 163 CONFIG_UART1_RTS_PIN,
164#endif 164#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 23d03c52f4b4..882e40ccf0d1 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -2,12 +2,12 @@
2 * File: include/asm-blackfin/mach-bf548/anomaly.h 2 * File: include/asm-blackfin/mach-bf548/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file should be up to date with: 9/* This file should be up to date with:
10 * - Revision G, 08/07/2008; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List 10 * - Revision H, 01/16/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
11 */ 11 */
12 12
13#ifndef _MACH_ANOMALY_H_ 13#ifndef _MACH_ANOMALY_H_
@@ -91,8 +91,6 @@
91#define ANOMALY_05000371 (__SILICON_REVISION__ < 2) 91#define ANOMALY_05000371 (__SILICON_REVISION__ < 2)
92/* USB DP/DM Data Pins May Lose State When Entering Hibernate */ 92/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
93#define ANOMALY_05000372 (__SILICON_REVISION__ < 1) 93#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
94/* Mobile DDR Operation Not Functional */
95#define ANOMALY_05000377 (1)
96/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */ 94/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */
97#define ANOMALY_05000378 (__SILICON_REVISION__ < 2) 95#define ANOMALY_05000378 (__SILICON_REVISION__ < 2)
98/* 16-Bit NAND FLASH Boot Mode Is Not Functional */ 96/* 16-Bit NAND FLASH Boot Mode Is Not Functional */
@@ -157,8 +155,22 @@
157#define ANOMALY_05000429 (__SILICON_REVISION__ < 2) 155#define ANOMALY_05000429 (__SILICON_REVISION__ < 2)
158/* Software System Reset Corrupts PLL_LOCKCNT Register */ 156/* Software System Reset Corrupts PLL_LOCKCNT Register */
159#define ANOMALY_05000430 (__SILICON_REVISION__ >= 2) 157#define ANOMALY_05000430 (__SILICON_REVISION__ >= 2)
158/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
159#define ANOMALY_05000431 (__SILICON_REVISION__ < 3)
160/* OTP Write Accesses Not Supported */
161#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
160/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 162/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
161#define ANOMALY_05000443 (1) 163#define ANOMALY_05000443 (1)
164/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */
165#define ANOMALY_05000446 (1)
166/* UART IrDA Receiver Fails on Extended Bit Pulses */
167#define ANOMALY_05000447 (1)
168/* DDR Clock Duty Cycle Spec Violation (tCH, tCL) */
169#define ANOMALY_05000448 (__SILICON_REVISION__ == 1)
170/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
171#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
172/* USB DMA Mode 1 Short Packet Data Corruption */
173#define ANOMALY_05000450 (1)
162 174
163/* Anomalies that don't exist on this proc */ 175/* Anomalies that don't exist on this proc */
164#define ANOMALY_05000125 (0) 176#define ANOMALY_05000125 (0)
@@ -171,6 +183,8 @@
171#define ANOMALY_05000263 (0) 183#define ANOMALY_05000263 (0)
172#define ANOMALY_05000266 (0) 184#define ANOMALY_05000266 (0)
173#define ANOMALY_05000273 (0) 185#define ANOMALY_05000273 (0)
186#define ANOMALY_05000278 (0)
187#define ANOMALY_05000305 (0)
174#define ANOMALY_05000307 (0) 188#define ANOMALY_05000307 (0)
175#define ANOMALY_05000311 (0) 189#define ANOMALY_05000311 (0)
176#define ANOMALY_05000323 (0) 190#define ANOMALY_05000323 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
index e4cf35e7ab9f..c05e79cba257 100644
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
@@ -63,7 +63,7 @@
63#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v) 63#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
64#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF) 64#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
65 65
66#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) 66#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART2_CTSRTS)
67# define CONFIG_SERIAL_BFIN_CTSRTS 67# define CONFIG_SERIAL_BFIN_CTSRTS
68 68
69# ifndef CONFIG_UART0_CTS_PIN 69# ifndef CONFIG_UART0_CTS_PIN
@@ -74,12 +74,12 @@
74# define CONFIG_UART0_RTS_PIN -1 74# define CONFIG_UART0_RTS_PIN -1
75# endif 75# endif
76 76
77# ifndef CONFIG_UART1_CTS_PIN 77# ifndef CONFIG_UART2_CTS_PIN
78# define CONFIG_UART1_CTS_PIN -1 78# define CONFIG_UART2_CTS_PIN -1
79# endif 79# endif
80 80
81# ifndef CONFIG_UART1_RTS_PIN 81# ifndef CONFIG_UART2_RTS_PIN
82# define CONFIG_UART1_RTS_PIN -1 82# define CONFIG_UART2_RTS_PIN -1
83# endif 83# endif
84#endif 84#endif
85 85
@@ -130,7 +130,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
130 CH_UART0_TX, 130 CH_UART0_TX,
131 CH_UART0_RX, 131 CH_UART0_RX,
132#endif 132#endif
133#ifdef CONFIG_BFIN_UART0_CTSRTS 133#ifdef CONFIG_SERIAL_BFIN_CTSRTS
134 CONFIG_UART0_CTS_PIN, 134 CONFIG_UART0_CTS_PIN,
135 CONFIG_UART0_RTS_PIN, 135 CONFIG_UART0_RTS_PIN,
136#endif 136#endif
@@ -144,6 +144,10 @@ struct bfin_serial_res bfin_serial_resource[] = {
144 CH_UART1_TX, 144 CH_UART1_TX,
145 CH_UART1_RX, 145 CH_UART1_RX,
146#endif 146#endif
147#ifdef CONFIG_SERIAL_BFIN_CTSRTS
148 0,
149 0,
150#endif
147 }, 151 },
148#endif 152#endif
149#ifdef CONFIG_SERIAL_BFIN_UART2 153#ifdef CONFIG_SERIAL_BFIN_UART2
@@ -154,7 +158,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
154 CH_UART2_TX, 158 CH_UART2_TX,
155 CH_UART2_RX, 159 CH_UART2_RX,
156#endif 160#endif
157#ifdef CONFIG_BFIN_UART2_CTSRTS 161#ifdef CONFIG_SERIAL_BFIN_CTSRTS
158 CONFIG_UART2_CTS_PIN, 162 CONFIG_UART2_CTS_PIN,
159 CONFIG_UART2_RTS_PIN, 163 CONFIG_UART2_RTS_PIN,
160#endif 164#endif
@@ -168,6 +172,10 @@ struct bfin_serial_res bfin_serial_resource[] = {
168 CH_UART3_TX, 172 CH_UART3_TX,
169 CH_UART3_RX, 173 CH_UART3_RX,
170#endif 174#endif
175#ifdef CONFIG_SERIAL_BFIN_CTSRTS
176 0,
177 0,
178#endif
171 }, 179 },
172#endif 180#endif
173}; 181};
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 60299a71e090..f194625f6821 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -123,8 +123,8 @@ Events (highest priority) EMU 0
123#define IRQ_MXVR_ERROR BFIN_IRQ(51) /* MXVR Status (Error) Interrupt */ 123#define IRQ_MXVR_ERROR BFIN_IRQ(51) /* MXVR Status (Error) Interrupt */
124#define IRQ_MXVR_MSG BFIN_IRQ(52) /* MXVR Message Interrupt */ 124#define IRQ_MXVR_MSG BFIN_IRQ(52) /* MXVR Message Interrupt */
125#define IRQ_MXVR_PKT BFIN_IRQ(53) /* MXVR Packet Interrupt */ 125#define IRQ_MXVR_PKT BFIN_IRQ(53) /* MXVR Packet Interrupt */
126#define IRQ_EPP1_ERROR BFIN_IRQ(54) /* EPPI1 Error Interrupt */ 126#define IRQ_EPPI1_ERROR BFIN_IRQ(54) /* EPPI1 Error Interrupt */
127#define IRQ_EPP2_ERROR BFIN_IRQ(55) /* EPPI2 Error Interrupt */ 127#define IRQ_EPPI2_ERROR BFIN_IRQ(55) /* EPPI2 Error Interrupt */
128#define IRQ_UART3_ERROR BFIN_IRQ(56) /* UART3 Status (Error) Interrupt */ 128#define IRQ_UART3_ERROR BFIN_IRQ(56) /* UART3 Status (Error) Interrupt */
129#define IRQ_HOST_ERROR BFIN_IRQ(57) /* HOST Status (Error) Interrupt */ 129#define IRQ_HOST_ERROR BFIN_IRQ(57) /* HOST Status (Error) Interrupt */
130#define IRQ_PIXC_ERROR BFIN_IRQ(59) /* PIXC Status (Error) Interrupt */ 130#define IRQ_PIXC_ERROR BFIN_IRQ(59) /* PIXC Status (Error) Interrupt */
@@ -361,8 +361,8 @@ Events (highest priority) EMU 0
361#define IRQ_UART2_ERR IRQ_UART2_ERROR 361#define IRQ_UART2_ERR IRQ_UART2_ERROR
362#define IRQ_CAN0_ERR IRQ_CAN0_ERROR 362#define IRQ_CAN0_ERR IRQ_CAN0_ERROR
363#define IRQ_MXVR_ERR IRQ_MXVR_ERROR 363#define IRQ_MXVR_ERR IRQ_MXVR_ERROR
364#define IRQ_EPP1_ERR IRQ_EPP1_ERROR 364#define IRQ_EPPI1_ERR IRQ_EPPI1_ERROR
365#define IRQ_EPP2_ERR IRQ_EPP2_ERROR 365#define IRQ_EPPI2_ERR IRQ_EPPI2_ERROR
366#define IRQ_UART3_ERR IRQ_UART3_ERROR 366#define IRQ_UART3_ERR IRQ_UART3_ERROR
367#define IRQ_HOST_ERR IRQ_HOST_ERROR 367#define IRQ_HOST_ERR IRQ_HOST_ERROR
368#define IRQ_PIXC_ERR IRQ_PIXC_ERROR 368#define IRQ_PIXC_ERR IRQ_PIXC_ERROR
diff --git a/arch/blackfin/mach-bf561/boards/Kconfig b/arch/blackfin/mach-bf561/boards/Kconfig
index e41a67b1fb53..e4bc6d7c5a6a 100644
--- a/arch/blackfin/mach-bf561/boards/Kconfig
+++ b/arch/blackfin/mach-bf561/boards/Kconfig
@@ -19,9 +19,4 @@ config BFIN561_BLUETECHNIX_CM
19 help 19 help
20 CM-BF561 support for EVAL- and DEV-Board. 20 CM-BF561 support for EVAL- and DEV-Board.
21 21
22config GENERIC_BF561_BOARD
23 bool "Generic"
24 help
25 Generic or Custom board support.
26
27endchoice 22endchoice
diff --git a/arch/blackfin/mach-bf561/boards/Makefile b/arch/blackfin/mach-bf561/boards/Makefile
index 04add010b568..3a152559e957 100644
--- a/arch/blackfin/mach-bf561/boards/Makefile
+++ b/arch/blackfin/mach-bf561/boards/Makefile
@@ -2,7 +2,6 @@
2# arch/blackfin/mach-bf561/boards/Makefile 2# arch/blackfin/mach-bf561/boards/Makefile
3# 3#
4 4
5obj-$(CONFIG_GENERIC_BF561_BOARD) += generic_board.o
6obj-$(CONFIG_BFIN561_BLUETECHNIX_CM) += cm_bf561.o 5obj-$(CONFIG_BFIN561_BLUETECHNIX_CM) += cm_bf561.o
7obj-$(CONFIG_BFIN561_EZKIT) += ezkit.o 6obj-$(CONFIG_BFIN561_EZKIT) += ezkit.o
8obj-$(CONFIG_BFIN561_TEPLA) += tepla.o 7obj-$(CONFIG_BFIN561_TEPLA) += tepla.o
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 6880d1ebfe60..f623c6b0719f 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -105,9 +105,9 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
105}; 105};
106#endif 106#endif
107 107
108#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 108#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
109static struct bfin5xx_spi_chip spi_mmc_chip_info = { 109static struct bfin5xx_spi_chip mmc_spi_chip_info = {
110 .enable_dma = 1, 110 .enable_dma = 0,
111 .bits_per_word = 8, 111 .bits_per_word = 8,
112}; 112};
113#endif 113#endif
@@ -155,14 +155,13 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
155 .controller_data = &ad9960_spi_chip_info, 155 .controller_data = &ad9960_spi_chip_info,
156 }, 156 },
157#endif 157#endif
158#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE) 158#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
159 { 159 {
160 .modalias = "spi_mmc", 160 .modalias = "mmc_spi",
161 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ 161 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
162 .bus_num = 0, 162 .bus_num = 0,
163 .chip_select = CONFIG_SPI_MMC_CS_CHAN, 163 .chip_select = 5,
164 .platform_data = NULL, 164 .controller_data = &mmc_spi_chip_info,
165 .controller_data = &spi_mmc_chip_info,
166 .mode = SPI_MODE_3, 165 .mode = SPI_MODE_3,
167 }, 166 },
168#endif 167#endif
diff --git a/arch/blackfin/mach-bf561/boards/generic_board.c b/arch/blackfin/mach-bf561/boards/generic_board.c
deleted file mode 100644
index 0ba366a0e696..000000000000
--- a/arch/blackfin/mach-bf561/boards/generic_board.c
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * File: arch/blackfin/mach-bf561/generic_board.c
3 * Based on: arch/blackfin/mach-bf533/ezkit.c
4 * Author: Aidan Williams <aidan@nicta.com.au>
5 *
6 * Created:
7 * Description:
8 *
9 * Modified:
10 * Copyright 2005 National ICT Australia (NICTA)
11 * Copyright 2004-2006 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see the file COPYING, or write
27 * to the Free Software Foundation, Inc.,
28 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 */
30
31#include <linux/device.h>
32#include <linux/platform_device.h>
33#include <linux/irq.h>
34
35const char bfin_board_name[] = "UNKNOWN BOARD";
36
37/*
38 * Driver needs to know address, irq and flag pin.
39 */
40#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
41static struct resource smc91x_resources[] = {
42 {
43 .start = 0x2C010300,
44 .end = 0x2C010300 + 16,
45 .flags = IORESOURCE_MEM,
46 }, {
47 .start = IRQ_PROG_INTB,
48 .end = IRQ_PROG_INTB,
49 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
50 }, {
51 .start = IRQ_PF9,
52 .end = IRQ_PF9,
53 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
54 },
55};
56
57static struct platform_device smc91x_device = {
58 .name = "smc91x",
59 .id = 0,
60 .num_resources = ARRAY_SIZE(smc91x_resources),
61 .resource = smc91x_resources,
62};
63#endif
64
65#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
66#ifdef CONFIG_BFIN_SIR0
67static struct resource bfin_sir0_resources[] = {
68 {
69 .start = 0xFFC00400,
70 .end = 0xFFC004FF,
71 .flags = IORESOURCE_MEM,
72 },
73 {
74 .start = IRQ_UART0_RX,
75 .end = IRQ_UART0_RX+1,
76 .flags = IORESOURCE_IRQ,
77 },
78 {
79 .start = CH_UART0_RX,
80 .end = CH_UART0_RX+1,
81 .flags = IORESOURCE_DMA,
82 },
83};
84
85static struct platform_device bfin_sir0_device = {
86 .name = "bfin_sir",
87 .id = 0,
88 .num_resources = ARRAY_SIZE(bfin_sir0_resources),
89 .resource = bfin_sir0_resources,
90};
91#endif
92#endif
93
94static struct platform_device *generic_board_devices[] __initdata = {
95#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
96 &smc91x_device,
97#endif
98
99#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
100#ifdef CONFIG_BFIN_SIR0
101 &bfin_sir0_device,
102#endif
103#endif
104};
105
106static int __init generic_board_init(void)
107{
108 printk(KERN_INFO "%s(): registering device resources\n", __func__);
109 return platform_add_devices(generic_board_devices,
110 ARRAY_SIZE(generic_board_devices));
111}
112
113arch_initcall(generic_board_init);
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 1a9e17562821..d0b0b3506440 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -2,7 +2,7 @@
2 * File: include/asm-blackfin/mach-bf561/anomaly.h 2 * File: include/asm-blackfin/mach-bf561/anomaly.h
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * Bugs: Enter bugs at http://blackfin.uclinux.org/
4 * 4 *
5 * Copyright (C) 2004-2008 Analog Devices Inc. 5 * Copyright (C) 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
@@ -224,7 +224,7 @@
224#define ANOMALY_05000301 (1) 224#define ANOMALY_05000301 (1)
225/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */ 225/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
226#define ANOMALY_05000302 (1) 226#define ANOMALY_05000302 (1)
227/* New Feature: Additional Hysteresis on SPORT Input Pins (Not Available On Older Silicon) */ 227/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
228#define ANOMALY_05000305 (__SILICON_REVISION__ < 5) 228#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
229/* SCKELOW Bit Does Not Maintain State Through Hibernate */ 229/* SCKELOW Bit Does Not Maintain State Through Hibernate */
230#define ANOMALY_05000307 (__SILICON_REVISION__ < 5) 230#define ANOMALY_05000307 (__SILICON_REVISION__ < 5)
@@ -283,8 +283,11 @@
283#define ANOMALY_05000273 (0) 283#define ANOMALY_05000273 (0)
284#define ANOMALY_05000311 (0) 284#define ANOMALY_05000311 (0)
285#define ANOMALY_05000353 (1) 285#define ANOMALY_05000353 (1)
286#define ANOMALY_05000380 (0)
286#define ANOMALY_05000386 (1) 287#define ANOMALY_05000386 (1)
287#define ANOMALY_05000432 (0) 288#define ANOMALY_05000432 (0)
288#define ANOMALY_05000435 (0) 289#define ANOMALY_05000435 (0)
290#define ANOMALY_05000447 (0)
291#define ANOMALY_05000448 (0)
289 292
290#endif 293#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
index 043bfcf26c52..ca8c5f645209 100644
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
@@ -134,7 +134,7 @@ struct bfin_serial_res bfin_serial_resource[] = {
134 CH_UART_TX, 134 CH_UART_TX,
135 CH_UART_RX, 135 CH_UART_RX,
136#endif 136#endif
137#ifdef CONFIG_BFIN_UART0_CTSRTS 137#ifdef CONFIG_SERIAL_BFIN_CTSRTS
138 CONFIG_UART0_CTS_PIN, 138 CONFIG_UART0_CTS_PIN,
139 CONFIG_UART0_RTS_PIN, 139 CONFIG_UART0_RTS_PIN,
140#endif 140#endif
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 98133b968f7b..80d39b2f9db2 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -62,3 +62,12 @@
62#if (CONFIG_BOOT_LOAD & 0x3) 62#if (CONFIG_BOOT_LOAD & 0x3)
63# error "The kernel load address must be 4 byte aligned" 63# error "The kernel load address must be 4 byte aligned"
64#endif 64#endif
65
66/* The entire kernel must be able to make a 24-bit pcrel call to the start of L1 */
67#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
68# error "The kernel load address is too high; keep it below 10meg for safety"
69#endif
70
71#if ANOMALY_05000448
72# error You are using a part with anomaly 05000448; this issue causes random memory read/write failures - that means random crashes.
73#endif
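The "10meg" figure in the new arch_checks.c test falls straight out of the arithmetic. A worked check, assuming the typical BF5xx value L1_CODE_START = 0xFFA00000 (the real value comes from the part's memory map):

/* (0xFFFFFFFF - 0xFFA00000 + 1)  = 0x00600000  -> 6 MiB wrap-around gap
 *                                                 from L1 back to address 0
 * 0x00600000 + CONFIG_BOOT_LOAD <= 0x01000000  -> 16 MiB pcrel reach
 * =>  CONFIG_BOOT_LOAD          <= 0x00A00000  -> 10 MiB, hence "10meg"
 */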
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 3c98dacbf289..aa0648c6a9fe 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -66,11 +66,33 @@
66 66
67/* Invalidate all instruction cache lines associated with this memory area */ 67/* Invalidate all instruction cache lines associated with this memory area */
68ENTRY(_blackfin_icache_flush_range) 68ENTRY(_blackfin_icache_flush_range)
69/*
70 * Workaround to avoid loading a wrong instruction after invalidating the
71 * icache when the following sequence is met:
72 *
73 * 1) One instruction address is cached in the instruction cache.
74 * 2) This instruction in SDRAM is changed.
75 * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
76 * 4) This instruction is executed again, but the old one is loaded.
77 */
78 P0 = R0;
79 IFLUSH[P0];
69 do_flush IFLUSH, , nop 80 do_flush IFLUSH, , nop
70ENDPROC(_blackfin_icache_flush_range) 81ENDPROC(_blackfin_icache_flush_range)
71 82
72/* Flush all cache lines associated with this area of memory. */ 83/* Flush all cache lines associated with this area of memory. */
73ENTRY(_blackfin_icache_dcache_flush_range) 84ENTRY(_blackfin_icache_dcache_flush_range)
85/*
86 * Workaround to avoid loading a wrong instruction after invalidating the
87 * icache when the following sequence is met:
88 *
89 * 1) One instruction address is cached in the instruction cache.
90 * 2) This instruction in SDRAM is changed.
91 * 3) IFLUSH[P0] is executed only once in blackfin_icache_dcache_flush_range().
92 * 4) This instruction is executed again, but the old one is loaded.
93 */
94 P0 = R0;
95 IFLUSH[P0];
74 do_flush FLUSH, IFLUSH 96 do_flush FLUSH, IFLUSH
75ENDPROC(_blackfin_icache_dcache_flush_range) 97ENDPROC(_blackfin_icache_dcache_flush_range)
76 98
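
At the C level, the routines being patched exist to make freshly written code safe to execute; the added IFLUSH[P0] of the start address works around the prefetch race spelled out in the comment. A hedged usage sketch, with the flush routine's C signature assumed from its callers:

    #include <string.h>

    extern void blackfin_icache_flush_range(unsigned long start,
                                            unsigned long end);

    /* Copy fresh code into SDRAM, invalidate any stale icache lines that
     * cover it, then jump to it; without the flush, the CPU can execute
     * the old cached bytes (step 4 of the sequence above). */
    static void load_and_run(void *dst, const void *src, unsigned long len)
    {
            memcpy(dst, src, len);                       /* change SDRAM */
            blackfin_icache_flush_range((unsigned long)dst,
                                        (unsigned long)dst + len);
            ((void (*)(void))dst)();                     /* fetch new code */
    }
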
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 9dddb6f8cc85..35393651359b 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -17,7 +17,7 @@
17#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */ 17#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
18#define PLL_CTL_VAL \ 18#define PLL_CTL_VAL \
19 (((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \ 19 (((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
20 (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0)) 20 (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
21 21
22__attribute__((l1_text)) 22__attribute__((l1_text))
23static void do_sync(void) 23static void do_sync(void)
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 4da50bcd9300..8009a512fb11 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -376,10 +376,22 @@ ENTRY(_do_hibernate)
376#endif 376#endif
377 377
378#ifdef PINT0_ASSIGN 378#ifdef PINT0_ASSIGN
379 PM_SYS_PUSH(PINT0_MASK_SET)
380 PM_SYS_PUSH(PINT1_MASK_SET)
381 PM_SYS_PUSH(PINT2_MASK_SET)
382 PM_SYS_PUSH(PINT3_MASK_SET)
379 PM_SYS_PUSH(PINT0_ASSIGN) 383 PM_SYS_PUSH(PINT0_ASSIGN)
380 PM_SYS_PUSH(PINT1_ASSIGN) 384 PM_SYS_PUSH(PINT1_ASSIGN)
381 PM_SYS_PUSH(PINT2_ASSIGN) 385 PM_SYS_PUSH(PINT2_ASSIGN)
382 PM_SYS_PUSH(PINT3_ASSIGN) 386 PM_SYS_PUSH(PINT3_ASSIGN)
387 PM_SYS_PUSH(PINT0_INVERT_SET)
388 PM_SYS_PUSH(PINT1_INVERT_SET)
389 PM_SYS_PUSH(PINT2_INVERT_SET)
390 PM_SYS_PUSH(PINT3_INVERT_SET)
391 PM_SYS_PUSH(PINT0_EDGE_SET)
392 PM_SYS_PUSH(PINT1_EDGE_SET)
393 PM_SYS_PUSH(PINT2_EDGE_SET)
394 PM_SYS_PUSH(PINT3_EDGE_SET)
383#endif 395#endif
384 396
385 PM_SYS_PUSH(EBIU_AMBCTL0) 397 PM_SYS_PUSH(EBIU_AMBCTL0)
@@ -714,10 +726,22 @@ ENTRY(_do_hibernate)
714 PM_SYS_POP(EBIU_AMBCTL0) 726 PM_SYS_POP(EBIU_AMBCTL0)
715 727
716#ifdef PINT0_ASSIGN 728#ifdef PINT0_ASSIGN
729 PM_SYS_POP(PINT3_EDGE_SET)
730 PM_SYS_POP(PINT2_EDGE_SET)
731 PM_SYS_POP(PINT1_EDGE_SET)
732 PM_SYS_POP(PINT0_EDGE_SET)
733 PM_SYS_POP(PINT3_INVERT_SET)
734 PM_SYS_POP(PINT2_INVERT_SET)
735 PM_SYS_POP(PINT1_INVERT_SET)
736 PM_SYS_POP(PINT0_INVERT_SET)
717 PM_SYS_POP(PINT3_ASSIGN) 737 PM_SYS_POP(PINT3_ASSIGN)
718 PM_SYS_POP(PINT2_ASSIGN) 738 PM_SYS_POP(PINT2_ASSIGN)
719 PM_SYS_POP(PINT1_ASSIGN) 739 PM_SYS_POP(PINT1_ASSIGN)
720 PM_SYS_POP(PINT0_ASSIGN) 740 PM_SYS_POP(PINT0_ASSIGN)
741 PM_SYS_POP(PINT3_MASK_SET)
742 PM_SYS_POP(PINT2_MASK_SET)
743 PM_SYS_POP(PINT1_MASK_SET)
744 PM_SYS_POP(PINT0_MASK_SET)
721#endif 745#endif
722 746
723#ifdef SICA_IWR1 747#ifdef SICA_IWR1
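
The save area used by PM_SYS_PUSH/PM_SYS_POP is a stack, which is why the restore hunk lists the newly added registers in exactly the reverse order of the save hunk (EDGE_SET, then INVERT_SET, ASSIGN, MASK_SET). A small sketch of that LIFO invariant; pm_push/pm_pop, read_reg/write_reg, and the register names are illustrative stand-ins for the assembly macros:

    extern unsigned long read_reg(unsigned long addr);          /* stand-in */
    extern void write_reg(unsigned long addr, unsigned long v); /* stand-in */

    static unsigned long pm_save[64];               /* LIFO save area */
    static unsigned int pm_sp;

    static void pm_push(unsigned long v) { pm_save[pm_sp++] = v; }
    static unsigned long pm_pop(void)    { return pm_save[--pm_sp]; }

    static void suspend_regs(void)        /* push: MASK, then ASSIGN */
    {
            pm_push(read_reg(PINT0_MASK_SET));
            pm_push(read_reg(PINT0_ASSIGN));
    }

    static void resume_regs(void)         /* pop in the reverse order */
    {
            write_reg(PINT0_ASSIGN, pm_pop());
            write_reg(PINT0_MASK_SET, pm_pop());
    }

Popping in any other order hands each register someone else's saved value.
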
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 88de053bbe8e..21e65a339a22 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -600,6 +600,19 @@ ENTRY(_system_call)
600 p2 = [p2]; 600 p2 = [p2];
601 601
602 [p2+(TASK_THREAD+THREAD_KSP)] = sp; 602 [p2+(TASK_THREAD+THREAD_KSP)] = sp;
603#ifdef CONFIG_IPIPE
604 r0 = sp;
605 SP += -12;
606 call ___ipipe_syscall_root;
607 SP += 12;
608 cc = r0 == 1;
609 if cc jump .Lsyscall_really_exit;
610 cc = r0 == -1;
611 if cc jump .Lresume_userspace;
612 r3 = [sp + PT_R3];
613 r4 = [sp + PT_R4];
614 p0 = [sp + PT_ORIG_P0];
615#endif /* CONFIG_IPIPE */
603 616
604 /* Check the System Call */ 617 /* Check the System Call */
605 r7 = __NR_syscall; 618 r7 = __NR_syscall;
@@ -654,6 +667,17 @@ ENTRY(_system_call)
654 r7 = r7 & r4; 667 r7 = r7 & r4;
655 668
656.Lsyscall_resched: 669.Lsyscall_resched:
670#ifdef CONFIG_IPIPE
671 cc = BITTST(r7, TIF_IRQ_SYNC);
672 if !cc jump .Lsyscall_no_irqsync;
673 [--sp] = reti;
674 r0 = [sp++];
675 SP += -12;
676 call ___ipipe_sync_root;
677 SP += 12;
678 jump .Lresume_userspace_1;
679.Lsyscall_no_irqsync:
680#endif
657 cc = BITTST(r7, TIF_NEED_RESCHED); 681 cc = BITTST(r7, TIF_NEED_RESCHED);
658 if !cc jump .Lsyscall_sigpending; 682 if !cc jump .Lsyscall_sigpending;
659 683
@@ -685,6 +709,10 @@ ENTRY(_system_call)
685.Lsyscall_really_exit: 709.Lsyscall_really_exit:
686 r5 = [sp + PT_RESERVED]; 710 r5 = [sp + PT_RESERVED];
687 rets = r5; 711 rets = r5;
712#ifdef CONFIG_IPIPE
713 [--sp] = reti;
714 r5 = [sp++];
715#endif /* CONFIG_IPIPE */
688 rts; 716 rts;
689ENDPROC(_system_call) 717ENDPROC(_system_call)
690 718
@@ -771,6 +799,15 @@ _new_old_task:
771ENDPROC(_resume) 799ENDPROC(_resume)
772 800
773ENTRY(_ret_from_exception) 801ENTRY(_ret_from_exception)
802#ifdef CONFIG_IPIPE
803 [--sp] = rets;
804 SP += -12;
805 call ___ipipe_check_root
806 SP += 12
807 rets = [sp++];
808 cc = r0 == 0;
809 if cc jump 4f; /* not on behalf of Linux, get out */
810#endif /* CONFIG_IPIPE */
774 p2.l = lo(IPEND); 811 p2.l = lo(IPEND);
775 p2.h = hi(IPEND); 812 p2.h = hi(IPEND);
776 813
@@ -827,6 +864,28 @@ ENTRY(_ret_from_exception)
827 rts; 864 rts;
828ENDPROC(_ret_from_exception) 865ENDPROC(_ret_from_exception)
829 866
867#ifdef CONFIG_IPIPE
868
869_sync_root_irqs:
870 [--sp] = reti; /* Re-enable interrupts */
871 r0 = [sp++];
872 jump.l ___ipipe_sync_root
873
874_resume_kernel_from_int:
875 r0.l = _sync_root_irqs
876 r0.h = _sync_root_irqs
877 [--sp] = rets;
878 [--sp] = ( r7:4, p5:3 );
879 SP += -12;
880 call ___ipipe_call_irqtail
881 SP += 12;
882 ( r7:4, p5:3 ) = [sp++];
883 rets = [sp++];
884 rts
885#else
886#define _resume_kernel_from_int 2f
887#endif
888
830ENTRY(_return_from_int) 889ENTRY(_return_from_int)
831 /* If someone else already raised IRQ 15, do nothing. */ 890 /* If someone else already raised IRQ 15, do nothing. */
832 csync; 891 csync;
@@ -848,7 +907,7 @@ ENTRY(_return_from_int)
848 r1 = r0 - r1; 907 r1 = r0 - r1;
849 r2 = r0 & r1; 908 r2 = r0 & r1;
850 cc = r2 == 0; 909 cc = r2 == 0;
851 if !cc jump 2f; 910 if !cc jump _resume_kernel_from_int;
852 911
853 /* Lower the interrupt level to 15. */ 912 /* Lower the interrupt level to 15. */
854 p0.l = lo(EVT15); 913 p0.l = lo(EVT15);
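
The I-pipe additions to the syscall path follow a three-way convention on ___ipipe_syscall_root()'s return value, inferred here from the branch targets: 1 means another domain fully handled the syscall (really exit), -1 means take the resume-userspace path for pending work, and anything else falls through to the normal Linux syscall. A rough C rendering of that gate; the C signature and resume_userspace() helper are assumptions standing in for the assembly labels:

    struct pt_regs;                                /* opaque kernel type */
    extern int __ipipe_syscall_root(struct pt_regs *regs);
    extern void resume_userspace(struct pt_regs *regs);   /* stand-in */

    /* Returns nonzero when the syscall should not run under Linux. */
    static int ipipe_syscall_gate(struct pt_regs *regs)
    {
            switch (__ipipe_syscall_root(regs)) {
            case 1:
                    return 1;              /* .Lsyscall_really_exit     */
            case -1:
                    resume_userspace(regs); /* .Lresume_userspace       */
                    return 1;
            default:
                    return 0;              /* run the normal syscall    */
            }
    }
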
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 43c4eb9acb65..0069c2dd4625 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -235,6 +235,7 @@ ENDPROC(_evt_system_call)
235 235
236#ifdef CONFIG_IPIPE 236#ifdef CONFIG_IPIPE
237ENTRY(___ipipe_call_irqtail) 237ENTRY(___ipipe_call_irqtail)
238 p0 = r0;
238 r0.l = 1f; 239 r0.l = 1f;
239 r0.h = 1f; 240 r0.h = 1f;
240 reti = r0; 241 reti = r0;
@@ -242,9 +243,6 @@ ENTRY(___ipipe_call_irqtail)
2421: 2431:
243 [--sp] = rets; 244 [--sp] = rets;
244 [--sp] = ( r7:4, p5:3 ); 245 [--sp] = ( r7:4, p5:3 );
245 p0.l = ___ipipe_irq_tail_hook;
246 p0.h = ___ipipe_irq_tail_hook;
247 p0 = [p0];
248 sp += -12; 246 sp += -12;
249 call (p0); 247 call (p0);
250 sp += 12; 248 sp += 12;
@@ -259,7 +257,7 @@ ENTRY(___ipipe_call_irqtail)
259 p0.h = hi(EVT14); 257 p0.h = hi(EVT14);
260 [p0] = r0; 258 [p0] = r0;
261 csync; 259 csync;
262 r0 = 0x401f; 260 r0 = 0x401f (z);
263 sti r0; 261 sti r0;
264 raise 14; 262 raise 14;
265 [--sp] = reti; /* IRQs on. */ 263 [--sp] = reti; /* IRQs on. */
@@ -277,11 +275,7 @@ ENTRY(___ipipe_call_irqtail)
277 p0.h = _bfin_irq_flags; 275 p0.h = _bfin_irq_flags;
278 r0 = [p0]; 276 r0 = [p0];
279 sti r0; 277 sti r0;
280#if 0 /* FIXME: this actually raises scheduling latencies */
281 /* Reenable interrupts */
282 [--sp] = reti;
283 r0 = [sp++];
284#endif
285 rts; 278 rts;
286ENDPROC(___ipipe_call_irqtail) 279ENDPROC(___ipipe_call_irqtail)
280
287#endif /* CONFIG_IPIPE */ 281#endif /* CONFIG_IPIPE */
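
The ___ipipe_call_irqtail hunk is a calling-convention cleanup: instead of re-reading the global ___ipipe_irq_tail_hook inside the routine, the hook address now arrives in R0 and is moved into P0 for the indirect call, and the disabled reti sequence at the end is dropped since it raised scheduling latencies. In C terms (signatures assumed):

    typedef void (*irqtail_hook_t)(void);

    /* Old shape: fetch the hook from a global on every call. */
    extern irqtail_hook_t __ipipe_irq_tail_hook;
    static void call_irqtail_old(void) { __ipipe_irq_tail_hook(); }

    /* New shape: the caller passes the hook in (R0 -> p0 in the asm). */
    static void call_irqtail_new(irqtail_hook_t hook) { hook(); }
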
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 202494568c6c..a7d7b2dd4059 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -161,11 +161,15 @@ static void bfin_core_unmask_irq(unsigned int irq)
161 161
162static void bfin_internal_mask_irq(unsigned int irq) 162static void bfin_internal_mask_irq(unsigned int irq)
163{ 163{
164 unsigned long flags;
165
164#ifdef CONFIG_BF53x 166#ifdef CONFIG_BF53x
167 local_irq_save_hw(flags);
165 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & 168 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
166 ~(1 << SIC_SYSIRQ(irq))); 169 ~(1 << SIC_SYSIRQ(irq)));
167#else 170#else
168 unsigned mask_bank, mask_bit; 171 unsigned mask_bank, mask_bit;
172 local_irq_save_hw(flags);
169 mask_bank = SIC_SYSIRQ(irq) / 32; 173 mask_bank = SIC_SYSIRQ(irq) / 32;
170 mask_bit = SIC_SYSIRQ(irq) % 32; 174 mask_bit = SIC_SYSIRQ(irq) % 32;
171 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & 175 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
@@ -175,15 +179,20 @@ static void bfin_internal_mask_irq(unsigned int irq)
175 ~(1 << mask_bit)); 179 ~(1 << mask_bit));
176#endif 180#endif
177#endif 181#endif
182 local_irq_restore_hw(flags);
178} 183}
179 184
180static void bfin_internal_unmask_irq(unsigned int irq) 185static void bfin_internal_unmask_irq(unsigned int irq)
181{ 186{
187 unsigned long flags;
188
182#ifdef CONFIG_BF53x 189#ifdef CONFIG_BF53x
190 local_irq_save_hw(flags);
183 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 191 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
184 (1 << SIC_SYSIRQ(irq))); 192 (1 << SIC_SYSIRQ(irq)));
185#else 193#else
186 unsigned mask_bank, mask_bit; 194 unsigned mask_bank, mask_bit;
195 local_irq_save_hw(flags);
187 mask_bank = SIC_SYSIRQ(irq) / 32; 196 mask_bank = SIC_SYSIRQ(irq) / 32;
188 mask_bit = SIC_SYSIRQ(irq) % 32; 197 mask_bit = SIC_SYSIRQ(irq) % 32;
189 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | 198 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
@@ -193,6 +202,7 @@ static void bfin_internal_unmask_irq(unsigned int irq)
193 (1 << mask_bit)); 202 (1 << mask_bit));
194#endif 203#endif
195#endif 204#endif
205 local_irq_restore_hw(flags);
196} 206}
197 207
198#ifdef CONFIG_PM 208#ifdef CONFIG_PM
@@ -390,7 +400,7 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
390static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) 400static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
391{ 401{
392#ifdef CONFIG_IPIPE 402#ifdef CONFIG_IPIPE
393 _set_irq_handler(irq, handle_edge_irq); 403 _set_irq_handler(irq, handle_level_irq);
394#else 404#else
395 struct irq_desc *desc = irq_desc + irq; 405 struct irq_desc *desc = irq_desc + irq;
396 /* May not call generic set_irq_handler() due to spinlock 406 /* May not call generic set_irq_handler() due to spinlock
@@ -1055,13 +1065,18 @@ int __init init_arch_irq(void)
1055#endif 1065#endif
1056 default: 1066 default:
1057#ifdef CONFIG_IPIPE 1067#ifdef CONFIG_IPIPE
1058 /* 1068 /*
1059 * We want internal interrupt sources to be masked, because 1069 * We want internal interrupt sources to be
1060 * ISRs may trigger interrupts recursively (e.g. DMA), but 1070 * masked, because ISRs may trigger interrupts
1061 * interrupts are _not_ masked at CPU level. So let's handle 1071 * recursively (e.g. DMA), but interrupts are
1062 * them as level interrupts. 1072 * _not_ masked at CPU level. So let's handle
1063 */ 1073 * most of them as level interrupts, except
1064 set_irq_handler(irq, handle_level_irq); 1074 * the timer interrupt which is special.
1075 */
1076 if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
1077 set_irq_handler(irq, handle_simple_irq);
1078 else
1079 set_irq_handler(irq, handle_level_irq);
1065#else /* !CONFIG_IPIPE */ 1080#else /* !CONFIG_IPIPE */
1066 set_irq_handler(irq, handle_simple_irq); 1081 set_irq_handler(irq, handle_simple_irq);
1067#endif /* !CONFIG_IPIPE */ 1082#endif /* !CONFIG_IPIPE */
@@ -1123,9 +1138,8 @@ int __init init_arch_irq(void)
1123 1138
1124#ifdef CONFIG_IPIPE 1139#ifdef CONFIG_IPIPE
1125 for (irq = 0; irq < NR_IRQS; irq++) { 1140 for (irq = 0; irq < NR_IRQS; irq++) {
1126 struct irq_desc *desc = irq_desc + irq; 1141 struct irq_desc *desc = irq_to_desc(irq);
1127 desc->ic_prio = __ipipe_get_irq_priority(irq); 1142 desc->ic_prio = __ipipe_get_irq_priority(irq);
1128 desc->thr_prio = __ipipe_get_irqthread_priority(irq);
1129 } 1143 }
1130#endif /* CONFIG_IPIPE */ 1144#endif /* CONFIG_IPIPE */
1131 1145
@@ -1208,76 +1222,21 @@ int __ipipe_get_irq_priority(unsigned irq)
1208 return IVG15; 1222 return IVG15;
1209} 1223}
1210 1224
1211int __ipipe_get_irqthread_priority(unsigned irq)
1212{
1213 int ient, prio;
1214 int demux_irq;
1215
1216 /* The returned priority value is rescaled to [0..IVG13+1]
1217 * with 0 being the lowest effective priority level. */
1218
1219 if (irq <= IRQ_CORETMR)
1220 return IVG13 - irq + 1;
1221
1222 /* GPIO IRQs are given the priority of the demux
1223 * interrupt. */
1224 if (IS_GPIOIRQ(irq)) {
1225#if defined(CONFIG_BF54x)
1226 u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
1227 demux_irq = (bank == 0 ? IRQ_PINT0 :
1228 bank == 1 ? IRQ_PINT1 :
1229 bank == 2 ? IRQ_PINT2 :
1230 IRQ_PINT3);
1231#elif defined(CONFIG_BF561)
1232 demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
1233 irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
1234 IRQ_PROG0_INTA);
1235#elif defined(CONFIG_BF52x)
1236 demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
1237 irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
1238 IRQ_PORTF_INTA);
1239#else
1240 demux_irq = irq;
1241#endif
1242 return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
1243 }
1244
1245 /* The GPIO demux interrupt is given a lower priority
1246 * than the GPIO IRQs, so that its threaded handler
1247 * unmasks the interrupt line after the decoded IRQs
1248 * have been processed. */
1249 prio = PRIO_GPIODEMUX(irq);
1250 /* demux irq? */
1251 if (prio != -1)
1252 return IVG13 - prio;
1253
1254 for (ient = 0; ient < NR_PERI_INTS; ient++) {
1255 struct ivgx *ivg = ivg_table + ient;
1256 if (ivg->irqno == irq) {
1257 for (prio = 0; prio <= IVG13-IVG7; prio++) {
1258 if (ivg7_13[prio].ifirst <= ivg &&
1259 ivg7_13[prio].istop > ivg)
1260 return IVG7 - prio;
1261 }
1262 }
1263 }
1264
1265 return 0;
1266}
1267
1268/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */ 1225/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1269#ifdef CONFIG_DO_IRQ_L1 1226#ifdef CONFIG_DO_IRQ_L1
1270__attribute__((l1_text)) 1227__attribute__((l1_text))
1271#endif 1228#endif
1272asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs) 1229asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1273{ 1230{
1231 struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1232 struct ipipe_domain *this_domain = ipipe_current_domain;
1274 struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop; 1233 struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1275 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; 1234 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1276 int irq; 1235 int irq, s;
1277 1236
1278 if (likely(vec == EVT_IVTMR_P)) { 1237 if (likely(vec == EVT_IVTMR_P)) {
1279 irq = IRQ_CORETMR; 1238 irq = IRQ_CORETMR;
1280 goto handle_irq; 1239 goto core_tick;
1281 } 1240 }
1282 1241
1283 SSYNC(); 1242 SSYNC();
@@ -1319,24 +1278,39 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1319 irq = ivg->irqno; 1278 irq = ivg->irqno;
1320 1279
1321 if (irq == IRQ_SYSTMR) { 1280 if (irq == IRQ_SYSTMR) {
1281#ifdef CONFIG_GENERIC_CLOCKEVENTS
1282core_tick:
1283#else
1322 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ 1284 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1285#endif
1323 /* This is basically what we need from the register frame. */ 1286 /* This is basically what we need from the register frame. */
1324 __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend; 1287 __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1325 __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc; 1288 __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1326 if (!ipipe_root_domain_p) 1289 if (this_domain != ipipe_root_domain)
1327 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1328 else
1329 __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10; 1290 __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1291 else
1292 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1330 } 1293 }
1331 1294
1332handle_irq: 1295#ifndef CONFIG_GENERIC_CLOCKEVENTS
1296core_tick:
1297#endif
1298 if (this_domain == ipipe_root_domain) {
1299 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1300 barrier();
1301 }
1333 1302
1334 ipipe_trace_irq_entry(irq); 1303 ipipe_trace_irq_entry(irq);
1335 __ipipe_handle_irq(irq, regs); 1304 __ipipe_handle_irq(irq, regs);
1336 ipipe_trace_irq_exit(irq); 1305 ipipe_trace_irq_exit(irq);
1337 1306
1338 if (ipipe_root_domain_p) 1307 if (this_domain == ipipe_root_domain) {
1339 return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)); 1308 set_thread_flag(TIF_IRQ_SYNC);
1309 if (!s) {
1310 __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1311 return !test_bit(IPIPE_STALL_FLAG, &p->status);
1312 }
1313 }
1340 1314
1341 return 0; 1315 return 0;
1342} 1316}
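
The local_irq_save_hw() brackets added to the mask/unmask paths close a read-modify-write race: the SIC_IMASK read and write are not atomic as a pair, and under I-pipe a higher-priority domain's interrupt (which plain local_irq_save() does not block) can land between them and have its own mask update overwritten. A sketch of the protected critical section, mirroring the BF53x branch of the patch:

    static void mask_irq_safely(unsigned int irq)
    {
            unsigned long flags;
            unsigned int sic;

            local_irq_save_hw(flags);       /* really disable hw IRQs */
            sic = bfin_read_SIC_IMASK();    /* read ...               */
            sic &= ~(1 << SIC_SYSIRQ(irq)); /* ... modify ...         */
            bfin_write_SIC_IMASK(sic);      /* ... write, race-free   */
            local_irq_restore_hw(flags);
    }
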
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 77c992847094..93eab6146079 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -158,10 +158,14 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
158 kfree(msg); 158 kfree(msg);
159 break; 159 break;
160 case BFIN_IPI_CALL_FUNC: 160 case BFIN_IPI_CALL_FUNC:
161 spin_unlock(&msg_queue->lock);
161 ipi_call_function(cpu, msg); 162 ipi_call_function(cpu, msg);
163 spin_lock(&msg_queue->lock);
162 break; 164 break;
163 case BFIN_IPI_CPU_STOP: 165 case BFIN_IPI_CPU_STOP:
166 spin_unlock(&msg_queue->lock);
164 ipi_cpu_stop(cpu); 167 ipi_cpu_stop(cpu);
168 spin_lock(&msg_queue->lock);
165 kfree(msg); 169 kfree(msg);
166 break; 170 break;
167 default: 171 default:
@@ -457,7 +461,7 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
457 smp_flush_data.start = start; 461 smp_flush_data.start = start;
458 smp_flush_data.end = end; 462 smp_flush_data.end = end;
459 463
460 if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1)) 464 if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
461 printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n"); 465 printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
462} 466}
463EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); 467EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
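
Two independent fixes land in smp.c: ipi_handler() now drops the message-queue lock around ipi_call_function() and ipi_cpu_stop(), since that lock only guards the list and holding it across long-running or lock-taking callbacks invites deadlock; and smp_icache_flush_range_others() stops waiting for completion (wait argument 1 becomes 0). A sketch of the lock-dropping pattern, with illustrative types:

    struct ipi_msg { struct ipi_msg *next; void (*func)(void *); void *arg; };
    struct ipi_queue { spinlock_t lock; struct ipi_msg *head; };

    static void drain(struct ipi_queue *q)
    {
            struct ipi_msg *msg;

            spin_lock(&q->lock);
            while ((msg = q->head) != NULL) {
                    q->head = msg->next;     /* list ops: lock held    */
                    spin_unlock(&q->lock);   /* callback: lock dropped */
                    msg->func(msg->arg);
                    spin_lock(&q->lock);
            }
            spin_unlock(&q->lock);
    }
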
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index d0532b72bba5..9c3629b9a689 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -104,7 +104,7 @@ void __init paging_init(void)
104 } 104 }
105} 105}
106 106
107asmlinkage void init_pda(void) 107asmlinkage void __init init_pda(void)
108{ 108{
109 unsigned int cpu = raw_smp_processor_id(); 109 unsigned int cpu = raw_smp_processor_id();
110 110
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index e626e50a938a..060df4aa9916 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -135,11 +135,10 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
135 if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) 135 if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
136 pci_addr = IS_PIC_SOFT(pcibus_info) ? 136 pci_addr = IS_PIC_SOFT(pcibus_info) ?
137 PHYS_TO_DMA(paddr) : 137 PHYS_TO_DMA(paddr) :
138 PHYS_TO_TIODMA(paddr) | dma_attributes; 138 PHYS_TO_TIODMA(paddr);
139 else 139 else
140 pci_addr = IS_PIC_SOFT(pcibus_info) ? 140 pci_addr = paddr;
141 paddr : 141 pci_addr |= dma_attributes;
142 paddr | dma_attributes;
143 142
144 /* Handle Bus mode */ 143 /* Handle Bus mode */
145 if (IS_PCIX(pcibus_info)) 144 if (IS_PCIX(pcibus_info))
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index d01a5d2b7557..db902540bf2c 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,6 +17,7 @@
17#include <asm/coldfire.h> 17#include <asm/coldfire.h>
18#include <asm/mcfsim.h> 18#include <asm/mcfsim.h>
19#include <asm/mcfdma.h> 19#include <asm/mcfdma.h>
20#include <asm/mcfuart.h>
20 21
21/***************************************************************************/ 22/***************************************************************************/
22 23
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index dfdb5c2ed8e6..44baeb225dc7 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -24,7 +24,6 @@
24#include <asm/coldfire.h> 24#include <asm/coldfire.h>
25#include <asm/mcfsim.h> 25#include <asm/mcfsim.h>
26#include <asm/mcfuart.h> 26#include <asm/mcfuart.h>
27#include <asm/mcfqspi.h>
28 27
29#ifdef CONFIG_MTD_PARTITIONS 28#ifdef CONFIG_MTD_PARTITIONS
30#include <linux/mtd/partitions.h> 29#include <linux/mtd/partitions.h>
@@ -33,233 +32,6 @@
33/***************************************************************************/ 32/***************************************************************************/
34 33
35void coldfire_reset(void); 34void coldfire_reset(void);
36static void coldfire_qspi_cs_control(u8 cs, u8 command);
37
38/***************************************************************************/
39
40#if defined(CONFIG_SPI)
41
42#if defined(CONFIG_WILDFIRE)
43#define SPI_NUM_CHIPSELECTS 0x02
44#define SPI_PAR_VAL 0x07 /* Enable DIN, DOUT, CLK */
45#define SPI_CS_MASK 0x18
46
47#define FLASH_BLOCKSIZE (1024*64)
48#define FLASH_NUMBLOCKS 16
49#define FLASH_TYPE "m25p80"
50
51#define M25P80_CS 0
52#define MMC_CS 1
53
54#ifdef CONFIG_MTD_PARTITIONS
55static struct mtd_partition stm25p_partitions[] = {
56 /* sflash */
57 [0] = {
58 .name = "stm25p80",
59 .offset = 0x00000000,
60 .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
61 .mask_flags = 0
62 }
63};
64
65#endif
66
67#elif defined(CONFIG_WILDFIREMOD)
68
69#define SPI_NUM_CHIPSELECTS 0x08
70#define SPI_PAR_VAL 0x07 /* Enable DIN, DOUT, CLK */
71#define SPI_CS_MASK 0x78
72
73#define FLASH_BLOCKSIZE (1024*64)
74#define FLASH_NUMBLOCKS 64
75#define FLASH_TYPE "m25p32"
 76/* Reserve 1M for the kernel partition */
77#define FLASH_KERNEL_SIZE (1024 * 1024)
78
79#define M25P80_CS 5
80#define MMC_CS 6
81
82#ifdef CONFIG_MTD_PARTITIONS
83static struct mtd_partition stm25p_partitions[] = {
84 /* sflash */
85 [0] = {
86 .name = "kernel",
87 .offset = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
88 .size = FLASH_KERNEL_SIZE,
89 .mask_flags = 0
90 },
91 [1] = {
92 .name = "image",
93 .offset = 0x00000000,
94 .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
95 .mask_flags = 0
96 },
97 [2] = {
98 .name = "all",
99 .offset = 0x00000000,
100 .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
101 .mask_flags = 0
102 }
103};
104#endif
105
106#else
107#define SPI_NUM_CHIPSELECTS 0x04
108#define SPI_PAR_VAL 0x7F /* Enable DIN, DOUT, CLK, CS0 - CS4 */
109#endif
110
111#ifdef MMC_CS
112static struct coldfire_spi_chip flash_chip_info = {
113 .mode = SPI_MODE_0,
114 .bits_per_word = 16,
115 .del_cs_to_clk = 17,
116 .del_after_trans = 1,
117 .void_write_data = 0
118};
119
120static struct coldfire_spi_chip mmc_chip_info = {
121 .mode = SPI_MODE_0,
122 .bits_per_word = 16,
123 .del_cs_to_clk = 17,
124 .del_after_trans = 1,
125 .void_write_data = 0xFFFF
126};
127#endif
128
129#ifdef M25P80_CS
130static struct flash_platform_data stm25p80_platform_data = {
131 .name = "ST M25P80 SPI Flash chip",
132#ifdef CONFIG_MTD_PARTITIONS
133 .parts = stm25p_partitions,
134 .nr_parts = sizeof(stm25p_partitions) / sizeof(*stm25p_partitions),
135#endif
136 .type = FLASH_TYPE
137};
138#endif
139
140static struct spi_board_info spi_board_info[] __initdata = {
141#ifdef M25P80_CS
142 {
143 .modalias = "m25p80",
144 .max_speed_hz = 16000000,
145 .bus_num = 1,
146 .chip_select = M25P80_CS,
147 .platform_data = &stm25p80_platform_data,
148 .controller_data = &flash_chip_info
149 },
150#endif
151#ifdef MMC_CS
152 {
153 .modalias = "mmc_spi",
154 .max_speed_hz = 16000000,
155 .bus_num = 1,
156 .chip_select = MMC_CS,
157 .controller_data = &mmc_chip_info
158 }
159#endif
160};
161
162static struct coldfire_spi_master coldfire_master_info = {
163 .bus_num = 1,
164 .num_chipselect = SPI_NUM_CHIPSELECTS,
165 .irq_source = MCF5282_QSPI_IRQ_SOURCE,
166 .irq_vector = MCF5282_QSPI_IRQ_VECTOR,
167 .irq_mask = ((0x01 << MCF5282_QSPI_IRQ_SOURCE) | 0x01),
168 .irq_lp = 0x2B, /* Level 5 and Priority 3 */
169 .par_val = SPI_PAR_VAL,
170 .cs_control = coldfire_qspi_cs_control,
171};
172
173static struct resource coldfire_spi_resources[] = {
174 [0] = {
175 .name = "qspi-par",
176 .start = MCF5282_QSPI_PAR,
177 .end = MCF5282_QSPI_PAR,
178 .flags = IORESOURCE_MEM
179 },
180
181 [1] = {
182 .name = "qspi-module",
183 .start = MCF5282_QSPI_QMR,
184 .end = MCF5282_QSPI_QMR + 0x18,
185 .flags = IORESOURCE_MEM
186 },
187
188 [2] = {
189 .name = "qspi-int-level",
190 .start = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
191 .end = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
192 .flags = IORESOURCE_MEM
193 },
194
195 [3] = {
196 .name = "qspi-int-mask",
197 .start = MCF5282_INTC0 + MCFINTC_IMRL,
198 .end = MCF5282_INTC0 + MCFINTC_IMRL,
199 .flags = IORESOURCE_MEM
200 }
201};
202
203static struct platform_device coldfire_spi = {
204 .name = "spi_coldfire",
205 .id = -1,
206 .resource = coldfire_spi_resources,
207 .num_resources = ARRAY_SIZE(coldfire_spi_resources),
208 .dev = {
209 .platform_data = &coldfire_master_info,
210 }
211};
212
213static void coldfire_qspi_cs_control(u8 cs, u8 command)
214{
215 u8 cs_bit = ((0x01 << cs) << 3) & SPI_CS_MASK;
216
217#if defined(CONFIG_WILDFIRE)
218 u8 cs_mask = ~(((0x01 << cs) << 3) & SPI_CS_MASK);
219#endif
220#if defined(CONFIG_WILDFIREMOD)
221 u8 cs_mask = (cs << 3) & SPI_CS_MASK;
222#endif
223
224 /*
225 * Don't do anything if the chip select is not
226 * one of the port qs pins.
227 */
228 if (command & QSPI_CS_INIT) {
229#if defined(CONFIG_WILDFIRE)
230 MCF5282_GPIO_DDRQS |= cs_bit;
231 MCF5282_GPIO_PQSPAR &= ~cs_bit;
232#endif
233
234#if defined(CONFIG_WILDFIREMOD)
235 MCF5282_GPIO_DDRQS |= SPI_CS_MASK;
236 MCF5282_GPIO_PQSPAR &= ~SPI_CS_MASK;
237#endif
238 }
239
240 if (command & QSPI_CS_ASSERT) {
241 MCF5282_GPIO_PORTQS &= ~SPI_CS_MASK;
242 MCF5282_GPIO_PORTQS |= cs_mask;
243 } else if (command & QSPI_CS_DROP) {
244 MCF5282_GPIO_PORTQS |= SPI_CS_MASK;
245 }
246}
247
248static int __init spi_dev_init(void)
249{
250 int retval;
251
252 retval = platform_device_register(&coldfire_spi);
253 if (retval < 0)
254 return retval;
255
256 if (ARRAY_SIZE(spi_board_info))
257 retval = spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
258
259 return retval;
260}
261
262#endif /* CONFIG_SPI */
263 35
264/***************************************************************************/ 36/***************************************************************************/
265 37
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index ac5d541368e9..6c5b40905dd6 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -3,6 +3,8 @@
3/* 3/*
4 * Architecture specific compatibility types 4 * Architecture specific compatibility types
5 */ 5 */
6#include <linux/seccomp.h>
7#include <linux/thread_info.h>
6#include <linux/types.h> 8#include <linux/types.h>
7#include <asm/page.h> 9#include <asm/page.h>
8#include <asm/ptrace.h> 10#include <asm/ptrace.h>
@@ -218,4 +220,9 @@ struct compat_shmid64_ds {
218 compat_ulong_t __unused2; 220 compat_ulong_t __unused2;
219}; 221};
220 222
223static inline int is_compat_task(void)
224{
225 return test_thread_flag(TIF_32BIT);
226}
227
221#endif /* _ASM_COMPAT_H */ 228#endif /* _ASM_COMPAT_H */
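
is_compat_task() gives 64-bit MIPS kernel code a one-line test for a 32-bit caller via TIF_32BIT, which the new seccomp/thread_info includes make self-contained. A hedged usage sketch; the struct choice is illustrative:

    /* Pick the user-visible layout that matches the caller's ABI. */
    static size_t user_stat_size(void)
    {
            return is_compat_task() ? sizeof(struct compat_stat)
                                    : sizeof(struct stat);
    }
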
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index fb371f5ce132..d6b772ba3b8f 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -142,6 +142,10 @@ static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
142{ 142{
143 unsigned int val; 143 unsigned int val;
144 144
145 /* Do not do the fixup on other platforms! */
146 if (!machine_is(gef_sbc610))
147 return;
148
145 printk(KERN_INFO "Running NEC uPD720101 Fixup\n"); 149 printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
146 150
147 /* Ensure ports 1, 2, 3, 4 & 5 are enabled */ 151 /* Ensure ports 1, 2, 3, 4 & 5 are enabled */
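
The machine_is() guard matters because PCI fixups bind to vendor/device IDs, not to boards: in a multiplatform kernel the same fixup would otherwise run on any machine carrying the matching NEC part. The general shape of a guarded fixup, with an illustrative board name:

    static void __init my_board_usb_fixup(struct pci_dev *pdev)
    {
            if (!machine_is(my_board))  /* skip on every other platform */
                    return;
            /* ... board-specific configuration writes ... */
    }
    DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
                            my_board_usb_fixup);
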
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c42cd898f68b..6118890c946d 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -556,7 +556,7 @@ static void __exit aes_s390_fini(void)
556module_init(aes_s390_init); 556module_init(aes_s390_init);
557module_exit(aes_s390_fini); 557module_exit(aes_s390_fini);
558 558
559MODULE_ALIAS("aes"); 559MODULE_ALIAS("aes-all");
560 560
561MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); 561MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
562MODULE_LICENSE("GPL"); 562MODULE_LICENSE("GPL");
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 72da416f6162..15b6d450fbf0 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -22,6 +22,7 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
24#include <linux/spi/spi_gpio.h> 24#include <linux/spi/spi_gpio.h>
25#include <media/soc_camera.h>
25#include <media/soc_camera_platform.h> 26#include <media/soc_camera_platform.h>
26#include <media/sh_mobile_ceu.h> 27#include <media/sh_mobile_ceu.h>
27#include <video/sh_mobile_lcdc.h> 28#include <video/sh_mobile_lcdc.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 469f3450bf81..87717f3687d2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,9 @@ config ARCH_HAS_CACHE_LINE_SIZE
138config HAVE_SETUP_PER_CPU_AREA 138config HAVE_SETUP_PER_CPU_AREA
139 def_bool y 139 def_bool y
140 140
141config HAVE_DYNAMIC_PER_CPU_AREA
142 def_bool y
143
141config HAVE_CPUMASK_OF_CPU_MAP 144config HAVE_CPUMASK_OF_CPU_MAP
142 def_bool X86_64_SMP 145 def_bool X86_64_SMP
143 146
@@ -780,6 +783,11 @@ config X86_MCE_AMD
780 Additional support for AMD specific MCE features such as 783 Additional support for AMD specific MCE features such as
781 the DRAM Error Threshold. 784 the DRAM Error Threshold.
782 785
786config X86_MCE_THRESHOLD
787 depends on X86_MCE_AMD || X86_MCE_INTEL
788 bool
789 default y
790
783config X86_MCE_NONFATAL 791config X86_MCE_NONFATAL
784 tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4" 792 tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
785 depends on X86_32 && X86_MCE 793 depends on X86_32 && X86_MCE
@@ -1125,7 +1133,7 @@ config NODES_SHIFT
1125 Specify the maximum number of NUMA Nodes available on the target 1133 Specify the maximum number of NUMA Nodes available on the target
1126 system. Increases memory reserved to accommodate various tables. 1134 system. Increases memory reserved to accommodate various tables.
1127 1135
1128config HAVE_ARCH_BOOTMEM_NODE 1136config HAVE_ARCH_BOOTMEM
1129 def_bool y 1137 def_bool y
1130 depends on X86_32 && NUMA 1138 depends on X86_32 && NUMA
1131 1139
@@ -1423,7 +1431,7 @@ config CRASH_DUMP
1423config KEXEC_JUMP 1431config KEXEC_JUMP
1424 bool "kexec jump (EXPERIMENTAL)" 1432 bool "kexec jump (EXPERIMENTAL)"
1425 depends on EXPERIMENTAL 1433 depends on EXPERIMENTAL
1426 depends on KEXEC && HIBERNATION && X86_32 1434 depends on KEXEC && HIBERNATION
1427 ---help--- 1435 ---help---
1428 Jump between original kernel and kexec'ed kernel and invoke 1436 Jump between original kernel and kexec'ed kernel and invoke
1429 code in physical address mode via KEXEC 1437 code in physical address mode via KEXEC
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4ef949c1972e..394d177d721b 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -379,6 +379,7 @@ static inline u32 safe_apic_wait_icr_idle(void)
379 379
380static inline void ack_APIC_irq(void) 380static inline void ack_APIC_irq(void)
381{ 381{
382#ifdef CONFIG_X86_LOCAL_APIC
382 /* 383 /*
383 * ack_APIC_irq() actually gets compiled as a single instruction 384 * ack_APIC_irq() actually gets compiled as a single instruction
384 * ... yummie. 385 * ... yummie.
@@ -386,6 +387,7 @@ static inline void ack_APIC_irq(void)
386 387
387 /* Docs say use 0 for future compatibility */ 388 /* Docs say use 0 for future compatibility */
388 apic_write(APIC_EOI, 0); 389 apic_write(APIC_EOI, 0);
390#endif
389} 391}
390 392
391static inline unsigned default_get_apic_id(unsigned long x) 393static inline unsigned default_get_apic_id(unsigned long x)
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 63134e31e8b9..bc9514fb3b13 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -53,6 +53,7 @@
53#define APIC_ESR_SENDILL 0x00020 53#define APIC_ESR_SENDILL 0x00020
54#define APIC_ESR_RECVILL 0x00040 54#define APIC_ESR_RECVILL 0x00040
55#define APIC_ESR_ILLREGA 0x00080 55#define APIC_ESR_ILLREGA 0x00080
56#define APIC_LVTCMCI 0x2f0
56#define APIC_ICR 0x300 57#define APIC_ICR 0x300
57#define APIC_DEST_SELF 0x40000 58#define APIC_DEST_SELF 0x40000
58#define APIC_DEST_ALLINC 0x80000 59#define APIC_DEST_ALLINC 0x80000
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 2f8466540fb5..5b301b7ff5f4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,24 +5,43 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6 6
7/* Caches aren't brain-dead on the intel. */ 7/* Caches aren't brain-dead on the intel. */
8#define flush_cache_all() do { } while (0) 8static inline void flush_cache_all(void) { }
9#define flush_cache_mm(mm) do { } while (0) 9static inline void flush_cache_mm(struct mm_struct *mm) { }
10#define flush_cache_dup_mm(mm) do { } while (0) 10static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
11#define flush_cache_range(vma, start, end) do { } while (0) 11static inline void flush_cache_range(struct vm_area_struct *vma,
12#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 12 unsigned long start, unsigned long end) { }
13#define flush_dcache_page(page) do { } while (0) 13static inline void flush_cache_page(struct vm_area_struct *vma,
14#define flush_dcache_mmap_lock(mapping) do { } while (0) 14 unsigned long vmaddr, unsigned long pfn) { }
15#define flush_dcache_mmap_unlock(mapping) do { } while (0) 15static inline void flush_dcache_page(struct page *page) { }
16#define flush_icache_range(start, end) do { } while (0) 16static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
17#define flush_icache_page(vma, pg) do { } while (0) 17static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
18#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) 18static inline void flush_icache_range(unsigned long start,
19#define flush_cache_vmap(start, end) do { } while (0) 19 unsigned long end) { }
20#define flush_cache_vunmap(start, end) do { } while (0) 20static inline void flush_icache_page(struct vm_area_struct *vma,
21 struct page *page) { }
22static inline void flush_icache_user_range(struct vm_area_struct *vma,
23 struct page *page,
24 unsigned long addr,
25 unsigned long len) { }
26static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
27static inline void flush_cache_vunmap(unsigned long start,
28 unsigned long end) { }
21 29
22#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 30static inline void copy_to_user_page(struct vm_area_struct *vma,
23 memcpy((dst), (src), (len)) 31 struct page *page, unsigned long vaddr,
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 32 void *dst, const void *src,
25 memcpy((dst), (src), (len)) 33 unsigned long len)
34{
35 memcpy(dst, src, len);
36}
37
38static inline void copy_from_user_page(struct vm_area_struct *vma,
39 struct page *page, unsigned long vaddr,
40 void *dst, const void *src,
41 unsigned long len)
42{
43 memcpy(dst, src, len);
44}
26 45
27#define PG_non_WB PG_arch_1 46#define PG_non_WB PG_arch_1
28PAGEFLAG(NonWB, non_WB) 47PAGEFLAG(NonWB, non_WB)
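
Converting the no-op cache-flush macros to empty static inlines costs nothing at run time but restores type checking and argument evaluation; a do { } while (0) macro silently swallows any argument. An illustration of what the conversion buys (names here are illustrative, not the header's):

    struct mm_struct;

    #define flush_cache_mm_old(mm) do { } while (0)    /* checks nothing */
    static inline void flush_cache_mm_new(struct mm_struct *mm) { }

    void example(struct mm_struct *mm)
    {
            flush_cache_mm_old(42);  /* compiles silently, hides the bug */
            flush_cache_mm_new(mm);  /* ok; passing 42 here would warn   */
                                     /* about int used as a pointer     */
    }
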
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index ca5ffb2856b6..edc90f23e708 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -37,8 +37,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
37 37
38#else /* !CONFIG_X86_32 */ 38#else /* !CONFIG_X86_32 */
39 39
40#define MAX_EFI_IO_PAGES 100
41
42extern u64 efi_call0(void *fp); 40extern u64 efi_call0(void *fp);
43extern u64 efi_call1(void *fp, u64 arg1); 41extern u64 efi_call1(void *fp, u64 arg1);
44extern u64 efi_call2(void *fp, u64 arg1, u64 arg2); 42extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 854d538ae857..c2e6bedaf258 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -33,6 +33,8 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
33 smp_invalidate_interrupt) 33 smp_invalidate_interrupt)
34#endif 34#endif
35 35
36BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
37
36/* 38/*
37 * every pentium local APIC has two 'local interrupts', with a 39 * every pentium local APIC has two 'local interrupts', with a
38 * soft-definable vector attached to both interrupts, one of 40 * soft-definable vector attached to both interrupts, one of
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index dca8f03da5b2..63a79c77d220 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -24,9 +24,6 @@
24#include <asm/kmap_types.h> 24#include <asm/kmap_types.h>
25#else 25#else
26#include <asm/vsyscall.h> 26#include <asm/vsyscall.h>
27#ifdef CONFIG_EFI
28#include <asm/efi.h>
29#endif
30#endif 27#endif
31 28
32/* 29/*
@@ -92,13 +89,6 @@ enum fixed_addresses {
92 FIX_IO_APIC_BASE_0, 89 FIX_IO_APIC_BASE_0,
93 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 90 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
94#endif 91#endif
95#ifdef CONFIG_X86_64
96#ifdef CONFIG_EFI
97 FIX_EFI_IO_MAP_LAST_PAGE,
98 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
99 + MAX_EFI_IO_PAGES - 1,
100#endif
101#endif
102#ifdef CONFIG_X86_VISWS_APIC 92#ifdef CONFIG_X86_VISWS_APIC
103 FIX_CO_CPU, /* Cobalt timer */ 93 FIX_CO_CPU, /* Cobalt timer */
104 FIX_CO_APIC, /* Cobalt APIC Redirection Table */ 94 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 176f058e7159..039db6aa8e02 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -12,6 +12,7 @@ typedef struct {
12 unsigned int apic_timer_irqs; /* arch dependent */ 12 unsigned int apic_timer_irqs; /* arch dependent */
13 unsigned int irq_spurious_count; 13 unsigned int irq_spurious_count;
14#endif 14#endif
15 unsigned int generic_irqs; /* arch dependent */
15#ifdef CONFIG_SMP 16#ifdef CONFIG_SMP
16 unsigned int irq_resched_count; 17 unsigned int irq_resched_count;
17 unsigned int irq_call_count; 18 unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 370e1c83bb49..b762ea49bd70 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -27,6 +27,7 @@
27 27
28/* Interrupt handlers registered during init_IRQ */ 28/* Interrupt handlers registered during init_IRQ */
29extern void apic_timer_interrupt(void); 29extern void apic_timer_interrupt(void);
30extern void generic_interrupt(void);
30extern void error_interrupt(void); 31extern void error_interrupt(void);
31extern void spurious_interrupt(void); 32extern void spurious_interrupt(void);
32extern void thermal_interrupt(void); 33extern void thermal_interrupt(void);
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 48f0004db8c9..71c9e5183982 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -172,7 +172,13 @@ static inline void __save_init_fpu(struct task_struct *tsk)
172 172
173#else /* CONFIG_X86_32 */ 173#else /* CONFIG_X86_32 */
174 174
175extern void finit(void); 175#ifdef CONFIG_MATH_EMULATION
176extern void finit_task(struct task_struct *tsk);
177#else
178static inline void finit_task(struct task_struct *tsk)
179{
180}
181#endif
176 182
177static inline void tolerant_fwait(void) 183static inline void tolerant_fwait(void)
178{ 184{
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
new file mode 100644
index 000000000000..36fb1a6a5109
--- /dev/null
+++ b/arch/x86/include/asm/init.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_X86_INIT_32_H
2#define _ASM_X86_INIT_32_H
3
4#ifdef CONFIG_X86_32
5extern void __init early_ioremap_page_table_range_init(void);
6#endif
7
8extern unsigned long __init
9kernel_physical_mapping_init(unsigned long start,
10 unsigned long end,
11 unsigned long page_size_mask);
12
13
14extern unsigned long __initdata e820_table_start;
15extern unsigned long __meminitdata e820_table_end;
16extern unsigned long __meminitdata e820_table_top;
17
18#endif /* _ASM_X86_INIT_32_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 683d0b4c00fc..e5383e3d2f8c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -172,8 +172,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
172 172
173extern void iounmap(volatile void __iomem *addr); 173extern void iounmap(volatile void __iomem *addr);
174 174
175extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
176
177 175
178#ifdef CONFIG_X86_32 176#ifdef CONFIG_X86_32
179# include "io_32.h" 177# include "io_32.h"
@@ -198,7 +196,6 @@ extern void early_ioremap_reset(void);
198extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); 196extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
199extern void __iomem *early_memremap(unsigned long offset, unsigned long size); 197extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
200extern void early_iounmap(void __iomem *addr, unsigned long size); 198extern void early_iounmap(void __iomem *addr, unsigned long size);
201extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
202 199
203#define IO_SPACE_LIMIT 0xffff 200#define IO_SPACE_LIMIT 0xffff
204 201
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 107eb2196691..f38481bcd455 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,6 +36,7 @@ static inline int irq_canonicalize(int irq)
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37#endif 37#endif
38 38
39extern void (*generic_interrupt_extension)(void);
39extern void init_IRQ(void); 40extern void init_IRQ(void);
40extern void native_init_IRQ(void); 41extern void native_init_IRQ(void);
41extern bool handle_irq(unsigned irq, struct pt_regs *regs); 42extern bool handle_irq(unsigned irq, struct pt_regs *regs);
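
generic_interrupt_extension is a single platform hook behind the new GENERIC_INTERRUPT_VECTOR (see the irq_vectors.h hunk below): the common generic_interrupt stub counts the event in irq_stat.generic_irqs and forwards to whatever the platform installed. A hedged wiring sketch, with an illustrative handler name:

    extern void (*generic_interrupt_extension)(void);

    static void my_platform_event(void)
    {
            /* acknowledge and service the platform-specific source */
    }

    static void __init platform_irq_init(void)
    {
            generic_interrupt_extension = my_platform_event;
    }
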
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 8a285f356f8a..3cbd79bbb47c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -112,6 +112,11 @@
112#define LOCAL_PERF_VECTOR 0xee 112#define LOCAL_PERF_VECTOR 0xee
113 113
114/* 114/*
115 * Generic system vector for platform specific use
116 */
117#define GENERIC_INTERRUPT_VECTOR 0xed
118
119/*
115 * First APIC vector available to drivers: (vectors 0x30-0xee) we 120 * First APIC vector available to drivers: (vectors 0x30-0xee) we
116 * start at 0x31(0x41) to spread out vectors evenly between priority 121 * start at 0x31(0x41) to spread out vectors evenly between priority
117 * levels. (0x80 is the syscall vector) 122 * levels. (0x80 is the syscall vector)
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 0ceb6d19ed30..317ff1703d0b 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -9,13 +9,13 @@
9# define PAGES_NR 4 9# define PAGES_NR 4
10#else 10#else
11# define PA_CONTROL_PAGE 0 11# define PA_CONTROL_PAGE 0
12# define PA_TABLE_PAGE 1 12# define VA_CONTROL_PAGE 1
13# define PAGES_NR 2 13# define PA_TABLE_PAGE 2
14# define PA_SWAP_PAGE 3
15# define PAGES_NR 4
14#endif 16#endif
15 17
16#ifdef CONFIG_X86_32
17# define KEXEC_CONTROL_CODE_MAX_SIZE 2048 18# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
18#endif
19 19
20#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
21 21
@@ -136,10 +136,11 @@ relocate_kernel(unsigned long indirection_page,
136 unsigned int has_pae, 136 unsigned int has_pae,
137 unsigned int preserve_context); 137 unsigned int preserve_context);
138#else 138#else
139NORET_TYPE void 139unsigned long
140relocate_kernel(unsigned long indirection_page, 140relocate_kernel(unsigned long indirection_page,
141 unsigned long page_list, 141 unsigned long page_list,
142 unsigned long start_address) ATTRIB_NORET; 142 unsigned long start_address,
143 unsigned int preserve_context);
143#endif 144#endif
144 145
145#define ARCH_HAS_KIMAGE_ARCH 146#define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 9320e2a8a26a..a0d70b46c27c 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -4,11 +4,6 @@
4#undef notrace 4#undef notrace
5#define notrace __attribute__((no_instrument_function)) 5#define notrace __attribute__((no_instrument_function))
6 6
7#ifdef CONFIG_X86_64
8#define __ALIGN .p2align 4,,15
9#define __ALIGN_STR ".p2align 4,,15"
10#endif
11
12#ifdef CONFIG_X86_32 7#ifdef CONFIG_X86_32
13#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) 8#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
14/* 9/*
@@ -50,16 +45,25 @@
50 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ 45 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
51 "g" (arg4), "g" (arg5), "g" (arg6)) 46 "g" (arg4), "g" (arg5), "g" (arg6))
52 47
53#endif 48#endif /* CONFIG_X86_32 */
49
50#ifdef __ASSEMBLY__
54 51
55#define GLOBAL(name) \ 52#define GLOBAL(name) \
56 .globl name; \ 53 .globl name; \
57 name: 54 name:
58 55
56#ifdef CONFIG_X86_64
57#define __ALIGN .p2align 4,,15
58#define __ALIGN_STR ".p2align 4,,15"
59#endif
60
59#ifdef CONFIG_X86_ALIGNMENT_16 61#ifdef CONFIG_X86_ALIGNMENT_16
60#define __ALIGN .align 16,0x90 62#define __ALIGN .align 16,0x90
61#define __ALIGN_STR ".align 16,0x90" 63#define __ALIGN_STR ".align 16,0x90"
62#endif 64#endif
63 65
66#endif /* __ASSEMBLY__ */
67
64#endif /* _ASM_X86_LINKAGE_H */ 68#endif /* _ASM_X86_LINKAGE_H */
65 69
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 32c6e17b960b..563933e06a35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -11,6 +11,8 @@
11 */ 11 */
12 12
13#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ 13#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
15#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
14 16
15#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ 17#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
16#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ 18#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
90 92
91#include <asm/atomic.h> 93#include <asm/atomic.h>
92 94
95void mce_setup(struct mce *m);
93void mce_log(struct mce *m); 96void mce_log(struct mce *m);
94DECLARE_PER_CPU(struct sys_device, device_mce); 97DECLARE_PER_CPU(struct sys_device, device_mce);
95extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 98extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
96 99
100/*
101 * Supporting more than 128 banks would require escaping the predefined
102 * Linux-defined extended banks first.
103 */
104#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
105
97#ifdef CONFIG_X86_MCE_INTEL 106#ifdef CONFIG_X86_MCE_INTEL
98void mce_intel_feature_init(struct cpuinfo_x86 *c); 107void mce_intel_feature_init(struct cpuinfo_x86 *c);
108void cmci_clear(void);
109void cmci_reenable(void);
110void cmci_rediscover(int dying);
111void cmci_recheck(void);
99#else 112#else
100static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } 113static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
114static inline void cmci_clear(void) {}
115static inline void cmci_reenable(void) {}
116static inline void cmci_rediscover(int dying) {}
117static inline void cmci_recheck(void) {}
101#endif 118#endif
102 119
103#ifdef CONFIG_X86_MCE_AMD 120#ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
106static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } 123static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
107#endif 124#endif
108 125
109void mce_log_therm_throt_event(unsigned int cpu, __u64 status); 126extern int mce_available(struct cpuinfo_x86 *c);
127
128void mce_log_therm_throt_event(__u64 status);
110 129
111extern atomic_t mce_entry; 130extern atomic_t mce_entry;
112 131
113extern void do_machine_check(struct pt_regs *, long); 132extern void do_machine_check(struct pt_regs *, long);
133
134typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
135DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
136
137enum mcp_flags {
138 MCP_TIMESTAMP = (1 << 0), /* log time stamp */
139 MCP_UC = (1 << 1), /* log uncorrected errors */
140};
141extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
142
114extern int mce_notify_user(void); 143extern int mce_notify_user(void);
115 144
116#endif /* !CONFIG_X86_32 */ 145#endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
120#else 149#else
121#define mcheck_init(c) do { } while (0) 150#define mcheck_init(c) do { } while (0)
122#endif 151#endif
123extern void stop_mce(void); 152
124extern void restart_mce(void); 153extern void (*mce_threshold_vector)(void);
125 154
126#endif /* __KERNEL__ */ 155#endif /* __KERNEL__ */
127#endif /* _ASM_X86_MCE_H */ 156#endif /* _ASM_X86_MCE_H */
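
machine_check_poll() is the new polling entry point for corrected errors. A typical timer-driven call, inferred from the flag and per-CPU bitmap declarations above, polls only the banks this CPU owns and leaves uncorrected errors (MCP_UC unset) to the exception handler:

    /* Hedged usage sketch; mce_poll_banks is the per-CPU ownership map. */
    machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
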
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 105fb90a0635..ede6998bd92c 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -91,46 +91,9 @@ static inline int pfn_valid(int pfn)
91#endif /* CONFIG_DISCONTIGMEM */ 91#endif /* CONFIG_DISCONTIGMEM */
92 92
93#ifdef CONFIG_NEED_MULTIPLE_NODES 93#ifdef CONFIG_NEED_MULTIPLE_NODES
94 94/* always use node 0 for bootmem on this numa platform */
95/* 95#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
96 * Following are macros that are specific to this numa platform. 96 (NODE_DATA(0)->bdata)
97 */
98#define reserve_bootmem(addr, size, flags) \
99 reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
100#define alloc_bootmem(x) \
101 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
102#define alloc_bootmem_nopanic(x) \
103 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
104 __pa(MAX_DMA_ADDRESS))
105#define alloc_bootmem_low(x) \
106 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
107#define alloc_bootmem_pages(x) \
108 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
109#define alloc_bootmem_pages_nopanic(x) \
110 __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
111 __pa(MAX_DMA_ADDRESS))
112#define alloc_bootmem_low_pages(x) \
113 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
114#define alloc_bootmem_node(pgdat, x) \
115({ \
116 struct pglist_data __maybe_unused \
117 *__alloc_bootmem_node__pgdat = (pgdat); \
118 __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
119 __pa(MAX_DMA_ADDRESS)); \
120})
121#define alloc_bootmem_pages_node(pgdat, x) \
122({ \
123 struct pglist_data __maybe_unused \
124 *__alloc_bootmem_node__pgdat = (pgdat); \
125 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
126 __pa(MAX_DMA_ADDRESS)); \
127})
128#define alloc_bootmem_low_pages_node(pgdat, x) \
129({ \
130 struct pglist_data __maybe_unused \
131 *__alloc_bootmem_node__pgdat = (pgdat); \
132 __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
133})
134#endif /* CONFIG_NEED_MULTIPLE_NODES */ 97#endif /* CONFIG_NEED_MULTIPLE_NODES */
135 98
136#endif /* _ASM_X86_MMZONE_32_H */ 99#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 358acc59ae04..2dbd2314139e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -77,6 +77,11 @@
77#define MSR_IA32_MC0_ADDR 0x00000402 77#define MSR_IA32_MC0_ADDR 0x00000402
78#define MSR_IA32_MC0_MISC 0x00000403 78#define MSR_IA32_MC0_MISC 0x00000403
79 79
 80/* These are consecutive and not in the normal 4-register MCE bank block */
81#define MSR_IA32_MC0_CTL2 0x00000280
82#define CMCI_EN (1ULL << 30)
83#define CMCI_THRESHOLD_MASK 0xffffULL
84
80#define MSR_P6_PERFCTR0 0x000000c1 85#define MSR_P6_PERFCTR0 0x000000c1
81#define MSR_P6_PERFCTR1 0x000000c2 86#define MSR_P6_PERFCTR1 0x000000c2
82#define MSR_P6_EVNTSEL0 0x00000186 87#define MSR_P6_EVNTSEL0 0x00000186
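
The new MC0_CTL2 definitions sit outside the usual four-register-per-bank MCE block but are themselves consecutive, so bank i's control register is MSR_IA32_MC0_CTL2 + i. A hedged sketch of enabling CMCI on one bank with them; bank is the bank index in scope:

    u64 val;

    rdmsrl(MSR_IA32_MC0_CTL2 + bank, val);
    val &= ~CMCI_THRESHOLD_MASK;
    val |= CMCI_EN | 1;           /* interrupt after 1 corrected error */
    wrmsrl(MSR_IA32_MC0_CTL2 + bank, val);
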
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 2d625da6603c..826ad37006ab 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -40,14 +40,8 @@
40 40
41#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
42 42
43struct pgprot;
44
45extern int page_is_ram(unsigned long pagenr); 43extern int page_is_ram(unsigned long pagenr);
46extern int devmem_is_allowed(unsigned long pagenr); 44extern int devmem_is_allowed(unsigned long pagenr);
47extern void map_devmem(unsigned long pfn, unsigned long size,
48 struct pgprot vma_prot);
49extern void unmap_devmem(unsigned long pfn, unsigned long size,
50 struct pgprot vma_prot);
51 45
52extern unsigned long max_low_pfn_mapped; 46extern unsigned long max_low_pfn_mapped;
53extern unsigned long max_pfn_mapped; 47extern unsigned long max_pfn_mapped;
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b0e70056838e..2cd07b9422f4 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -2,6 +2,7 @@
2#define _ASM_X86_PAT_H 2#define _ASM_X86_PAT_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable_types.h>
5 6
6#ifdef CONFIG_X86_PAT 7#ifdef CONFIG_X86_PAT
7extern int pat_enabled; 8extern int pat_enabled;
@@ -17,5 +18,9 @@ extern int free_memtype(u64 start, u64 end);
17 18
18extern int kernel_map_sync_memtype(u64 base, unsigned long size, 19extern int kernel_map_sync_memtype(u64 base, unsigned long size,
19 unsigned long flag); 20 unsigned long flag);
21extern void map_devmem(unsigned long pfn, unsigned long size,
22 struct pgprot vma_prot);
23extern void unmap_devmem(unsigned long pfn, unsigned long size,
24 struct pgprot vma_prot);
20 25
21#endif /* _ASM_X86_PAT_H */ 26#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..8f1d2fbec1d4 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,6 +43,14 @@
43#else /* ...!ASSEMBLY */ 43#else /* ...!ASSEMBLY */
44 44
45#include <linux/stringify.h> 45#include <linux/stringify.h>
46#include <asm/sections.h>
47
48#define __addr_to_pcpu_ptr(addr) \
49 (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
50 + (unsigned long)__per_cpu_start)
51#define __pcpu_ptr_to_addr(ptr) \
52 (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
53 - (unsigned long)__per_cpu_start)
46 54
47#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
48#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x 56#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
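
__addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() rebase between a real address inside the dynamically allocated percpu chunk and the .data.percpu-relative "pointer" that per-CPU accessors expect; by construction they are inverses. A round-trip sketch (the input address is hypothetical):

    void *addr = chunk_addr;                 /* hypothetical chunk address */
    void *ptr  = __addr_to_pcpu_ptr(addr);   /* section-relative pointer   */

    BUG_ON(__pcpu_ptr_to_addr(ptr) != addr); /* identity round-trip        */
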
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1c097a3a6669..d0812e155f1d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -288,6 +288,8 @@ static inline int is_new_memtype_allowed(unsigned long flags,
288 return 1; 288 return 1;
289} 289}
290 290
291pmd_t *populate_extra_pmd(unsigned long vaddr);
292pte_t *populate_extra_pte(unsigned long vaddr);
291#endif /* __ASSEMBLY__ */ 293#endif /* __ASSEMBLY__ */
292 294
293#ifdef CONFIG_X86_32 295#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index bd8df3b2fe04..2733fad45f98 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -25,6 +25,11 @@
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET	(8 * 1024 * 1024)
+
+#ifndef __ASSEMBLER__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
 #define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4d258ad76a0f..b8238dc8786d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,7 @@ typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
 extern int nx_enabled;
+extern void set_nx(void);
 
 #define pgprot_writecombine	pgprot_writecombine
 extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 777327ef05c1..9f4dfba33b28 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -199,6 +199,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define SCIR_CPU_ACTIVITY	0x02	/* not idle */
 #define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */
 
+/* Loop through all installed blades */
+#define for_each_possible_blade(bid)		\
+	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
+
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
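
A minimal sketch of how the new iterator is meant to be used; uv_num_possible_blades() is stubbed out here, and init_blade_state() is a hypothetical per-blade setup function:

#include <stdio.h>

static int uv_num_possible_blades(void) { return 4; }	/* stub */

/* same shape as the macro added above */
#define for_each_possible_blade(bid) \
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

static void init_blade_state(int bid)	/* hypothetical */
{
	printf("setting up blade %d\n", bid);
}

int main(void)
{
	int bid;

	for_each_possible_blade(bid)
		init_blade_state(bid);
	return 0;
}
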
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 4bd990ee43df..1a918dde46b5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -164,6 +164,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
+unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 95f216bbfaf1..339ce35648e6 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -111,7 +111,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-$(CONFIG_X86_UV)		+= tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o
+        obj-$(CONFIG_X86_UV)		+= tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6907b8e85d52..4c80f1557433 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than an unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
 					    (unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f9cecdfd05c5..30909a258d0f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/mtrr.h>
 #include <asm/smp.h>
+#include <asm/mce.h>
 
 unsigned int num_processors;
 
@@ -842,6 +843,14 @@ void clear_local_APIC(void)
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
 	}
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6) {
+		v = apic_read(APIC_LVTCMCI);
+		if (!(v & APIC_LVT_MASKED))
+			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+	}
+#endif
+
 	/*
 	 * Clean APIC state for other OSs:
 	 */
@@ -1241,6 +1250,12 @@ void __cpuinit setup_local_APIC(void)
 	apic_write(APIC_LVT1, value);
 
 	preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+	/* Recheck CMCI information after local APIC is up on CPU #0 */
+	if (smp_processor_id() == 0)
+		cmci_recheck();
+#endif
 }
 
 void __cpuinit end_local_APIC_setup(void)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 25423a5b80ed..f47df59016c5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -141,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	}
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	/* calling is from identify_secondary_cpu() ? */
+	if (c->cpu_index == boot_cpu_id)
+		return;
+
+	/*
+	 * Certain Athlons might work (for various values of 'work') in SMP
+	 * but they are not certified as MP capable.
+	 */
+	/* Athlon 660/661 is valid. */
+	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+	    (c->x86_mask == 1)))
+		goto valid_k7;
+
+	/* Duron 670 is valid */
+	if ((c->x86_model == 7) && (c->x86_mask == 0))
+		goto valid_k7;
+
+	/*
+	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
+	 * bit. It's worth noting that the A5 stepping (662) of some
+	 * Athlon XP's have the MP bit set.
+	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+	 * more.
+	 */
+	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+	     (c->x86_model > 7))
+		if (cpu_has_mp)
+			goto valid_k7;
+
+	/* If we get here, not a certified SMP capable AMD system. */
+
+	/*
+	 * Don't taint if we are running SMP kernel on a single non-MP
+	 * approved Athlon
+	 */
+	WARN_ONCE(1, "WARNING: This combination of AMD"
+		" processors is not suitable for SMP.\n");
+	if (!test_taint(TAINT_UNSAFE_SMP))
+		add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+	;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -175,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 	}
 
 	set_cpu_cap(c, X86_FEATURE_K7);
+
+	amd_k7_smp_check(c);
 }
 #endif
 
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c3..22590cf688ae 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (!data)
 		return -ENOMEM;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(drv_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index b585e04cbc9e..3178c3acd97e 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -277,7 +277,6 @@ static struct cpufreq_driver p4clockmod_driver = {
 	.name		= "p4-clockmod",
 	.owner		= THIS_MODULE,
 	.attr		= p4clockmod_attr,
-	.hide_interface	= 1,
 };
 
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 25c559ba8d54..191117f1ad51 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -13,6 +13,7 @@
 #include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/topology.h>
@@ -110,6 +111,28 @@ static void __cpuinit trap_init_f00f_bug(void)
 }
 #endif
 
+static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	/* calling is from identify_secondary_cpu() ? */
+	if (c->cpu_index == boot_cpu_id)
+		return;
+
+	/*
+	 * Mask B, Pentium, but not Pentium MMX
+	 */
+	if (c->x86 == 5 &&
+	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
+	    c->x86_model <= 3) {
+		/*
+		 * Remember we have B step Pentia with bugs
+		 */
+		WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
+			" with B stepping processors.\n");
+	}
+#endif
+}
+
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
@@ -186,6 +209,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
 	numaq_tsc_disable();
 #endif
+
+	intel_smp_check(c);
 }
 #else
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index d7d2323bbb69..b2f89829bbe8 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
 obj-$(CONFIG_X86_MCE_INTEL)	+= mce_intel_64.o
 obj-$(CONFIG_X86_MCE_AMD)	+= mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL)	+= non-fatal.o
+obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index dfaebce3633e..3552119b091d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
 	}
 }
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 static int __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index fe79985ce0f2..ca14604611ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -3,6 +3,8 @@
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
+ * Copyright 2008 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -24,6 +26,9 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -32,7 +37,6 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -47,7 +51,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
+static u64 *bank;
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
 
 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 
+/* MCA banks polled by the periodic polling timer for corrected events */
+DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
+	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
+};
+
+/* Do initial initialization of a struct mce */
+void mce_setup(struct mce *m)
+{
+	memset(m, 0, sizeof(struct mce));
+	m->cpu = smp_processor_id();
+	rdtscll(m->tsc);
+}
+
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
 		print_symbol("{%s}", m->ip);
 		printk("\n");
 	}
-	printk(KERN_EMERG "TSC %Lx ", m->tsc);
+	printk(KERN_EMERG "TSC %llx ", m->tsc);
 	if (m->addr)
-		printk("ADDR %Lx ", m->addr);
+		printk("ADDR %llx ", m->addr);
 	if (m->misc)
-		printk("MISC %Lx ", m->misc);
+		printk("MISC %llx ", m->misc);
 	printk("\n");
 	printk(KERN_EMERG "This is not a software problem!\n");
 	printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
 	panic(msg);
 }
 
-static int mce_available(struct cpuinfo_x86 *c)
+int mce_available(struct cpuinfo_x86 *c)
 {
+	if (mce_dont_init)
+		return 0;
 	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 }
 
 /*
- * The actual machine check handler
+ * Poll for corrected events or events that happened before reset.
+ * Those are just logged through /dev/mcelog.
+ *
+ * This is executed in standard interrupt context.
+ */
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+{
+	struct mce m;
+	int i;
+
+	mce_setup(&m);
+
+	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+	for (i = 0; i < banks; i++) {
+		if (!bank[i] || !test_bit(i, *b))
+			continue;
+
+		m.misc = 0;
+		m.addr = 0;
+		m.bank = i;
+		m.tsc = 0;
+
+		barrier();
+		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
+		if (!(m.status & MCI_STATUS_VAL))
+			continue;
+
+		/*
+		 * Uncorrected events are handled by the exception handler
+		 * when it is enabled. But when the exception is disabled log
+		 * everything.
+		 *
+		 * TBD do the same check for MCI_STATUS_EN here?
+		 */
+		if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
+			continue;
+
+		if (m.status & MCI_STATUS_MISCV)
+			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
+		if (m.status & MCI_STATUS_ADDRV)
+			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
+
+		if (!(flags & MCP_TIMESTAMP))
+			m.tsc = 0;
+		/*
+		 * Don't get the IP here because it's unlikely to
+		 * have anything to do with the actual error location.
+		 */
+
+		mce_log(&m);
+		add_taint(TAINT_MACHINE_CHECK);
+
+		/*
+		 * Clear state for this bank.
+		 */
+		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
+
+	/*
+	 * Don't clear MCG_STATUS here because it's only defined for
+	 * exceptions.
+	 */
+}
+
+/*
+ * The actual machine check handler. This only handles real
+ * exceptions when something got corrupted coming in through int 18.
+ *
+ * This is executed in NMI context not subject to normal locking rules. This
+ * implies that most kernel services cannot be safely used. Don't even
+ * think about putting a printk in there!
  */
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
@@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	 * error.
 	 */
 	int kill_it = 0;
+	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 
 	atomic_inc(&mce_entry);
 
-	if ((regs
-	     && notify_die(DIE_NMI, "machine check", regs, error_code,
+	if (notify_die(DIE_NMI, "machine check", regs, error_code,
 			   18, SIGKILL) == NOTIFY_STOP)
-	    || !banks)
+		goto out2;
+	if (!banks)
 		goto out2;
 
-	memset(&m, 0, sizeof(struct mce));
-	m.cpu = smp_processor_id();
+	mce_setup(&m);
+
 	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
 	/* if the restart IP is not valid, we're done for */
 	if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	barrier();
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS && !bank[i])
+		__clear_bit(i, toclear);
+		if (!bank[i])
 			continue;
 
 		m.misc = 0;
 		m.addr = 0;
 		m.bank = i;
-		m.tsc = 0;
 
 		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
 		if ((m.status & MCI_STATUS_VAL) == 0)
 			continue;
 
+		/*
+		 * Non uncorrected errors are handled by machine_check_poll
+		 * Leave them alone.
+		 */
+		if ((m.status & MCI_STATUS_UC) == 0)
+			continue;
+
+		/*
+		 * Set taint even when machine check was not enabled.
+		 */
+		add_taint(TAINT_MACHINE_CHECK);
+
+		__set_bit(i, toclear);
+
 		if (m.status & MCI_STATUS_EN) {
 			/* if PCC was set, there's no way out */
 			no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 				no_way_out = 1;
 				kill_it = 1;
 			}
+		} else {
+			/*
+			 * Machine check event was not enabled. Clear, but
+			 * ignore.
+			 */
+			continue;
 		}
 
 		if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
 
 		mce_get_rip(&m, regs);
-		if (error_code >= 0)
-			rdtscll(m.tsc);
-		if (error_code != -2)
-			mce_log(&m);
+		mce_log(&m);
 
 		/* Did this bank cause the exception? */
 		/* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			panicm = m;
 			panicm_found = 1;
 		}
-
-		add_taint(TAINT_MACHINE_CHECK);
 	}
 
-	/* Never do anything final in the polling timer */
-	if (!regs)
-		goto out;
-
 	/* If we didn't find an uncorrectable error, pick
 	   the last one (shouldn't happen, just being safe). */
 	if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	/* notify userspace ASAP */
 	set_thread_flag(TIF_MCE_NOTIFY);
 
- out:
 	/* the last thing we do is clear state */
-	for (i = 0; i < banks; i++)
-		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	for (i = 0; i < banks; i++) {
+		if (test_bit(i, toclear))
+			wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
 	wrmsrl(MSR_IA32_MCG_STATUS, 0);
  out2:
 	atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  * and historically has been the register value of the
  * MSR_IA32_THERMAL_STATUS (Intel) msr.
  */
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
+void mce_log_therm_throt_event(__u64 status)
 {
 	struct mce m;
 
-	memset(&m, 0, sizeof(m));
-	m.cpu = cpu;
+	mce_setup(&m);
 	m.bank = MCE_THERMAL_BANK;
 	m.status = status;
-	rdtscll(m.tsc);
 	mce_log(&m);
 }
 #endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static int next_interval; /* in jiffies */
-static void mcheck_timer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
+static void mcheck_timer(unsigned long);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mcheck_check_cpu(void *info)
+static void mcheck_timer(unsigned long data)
 {
-	if (mce_available(&current_cpu_data))
-		do_machine_check(NULL, 0);
-}
+	struct timer_list *t = &per_cpu(mce_timer, data);
 
-static void mcheck_timer(struct work_struct *work)
-{
-	on_each_cpu(mcheck_check_cpu, NULL, 1);
+	WARN_ON(smp_processor_id() != data);
+
+	if (mce_available(&current_cpu_data))
+		machine_check_poll(MCP_TIMESTAMP,
+				&__get_cpu_var(mce_poll_banks));
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work)
 			(int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	schedule_delayed_work(&mcheck_work, next_interval);
+	t->expires = jiffies + next_interval;
+	add_timer(t);
+}
+
+static void mce_do_trigger(struct work_struct *work)
+{
+	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 }
 
+static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
 /*
- * This is only called from process context. This is where we do
- * anything we need to alert userspace about new MCEs. This is called
- * directly from the poller and also from entry.S and idle, thanks to
- * TIF_MCE_NOTIFY.
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
  */
 int mce_notify_user(void)
 {
+	/* Not more than two messages every minute */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
 	clear_thread_flag(TIF_MCE_NOTIFY);
 	if (test_and_clear_bit(0, &notify_user)) {
-		static unsigned long last_print;
-		unsigned long now = jiffies;
-
 		wake_up_interruptible(&mce_wait);
-		if (trigger[0])
-			call_usermodehelper(trigger, trigger_argv, NULL,
-						UMH_NO_WAIT);
 
-		if (time_after_eq(now, last_print + (check_interval*HZ))) {
-			last_print = now;
+		/*
+		 * There is no risk of missing notifications because
+		 * work_pending is always cleared before the function is
+		 * executed.
+		 */
+		if (trigger[0] && !work_pending(&mce_trigger_work))
+			schedule_work(&mce_trigger_work);
+
+		if (__ratelimit(&ratelimit))
 			printk(KERN_INFO "Machine check events logged\n");
-		}
 
 		return 1;
 	}
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
 
 static __init int periodic_mcheck_init(void)
 {
-	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
 	idle_notifier_register(&mce_idle_notifier);
 	return 0;
 }
 __initcall(periodic_mcheck_init);
 
-
 /*
  * Initialize Machine Checks for a CPU.
  */
-static void mce_init(void *dummy)
+static int mce_cap_init(void)
 {
 	u64 cap;
-	int i;
+	unsigned b;
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	banks = cap & 0xff;
-	if (banks > MCE_EXTENDED_BANK) {
-		banks = MCE_EXTENDED_BANK;
-		printk(KERN_INFO "MCE: warning: using only %d banks\n",
-		       MCE_EXTENDED_BANK);
+	b = cap & 0xff;
+	if (b > MAX_NR_BANKS) {
+		printk(KERN_WARNING
+		       "MCE: Using only %u machine check banks out of %u\n",
+			MAX_NR_BANKS, b);
+		b = MAX_NR_BANKS;
 	}
 
+	/* Don't support asymmetric configurations today */
+	WARN_ON(banks != 0 && b != banks);
+	banks = b;
+	if (!bank) {
+		bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
+		if (!bank)
+			return -ENOMEM;
+		memset(bank, 0xff, banks * sizeof(u64));
+	}
+
 	/* Use accurate RIP reporting if available. */
 	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
 		rip_msr = MSR_IA32_MCG_EIP;
 
-	/* Log the machine checks left over from the previous reset.
-	   This also clears all registers */
-	do_machine_check(NULL, mce_bootlog ? -1 : -2);
+	return 0;
+}
+
+static void mce_init(void *dummy)
+{
+	u64 cap;
+	int i;
+	mce_banks_t all_banks;
+
+	/*
+	 * Log the machine checks left over from the previous reset.
+	 */
+	bitmap_fill(all_banks, MAX_NR_BANKS);
+	machine_check_poll(MCP_UC, &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (cap & MCG_CTL_P)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS)
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
-		else
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
-
+		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
 		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
 	}
 }
 
 /* Add per CPU specific workarounds here */
-static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
-		if(c->x86 == 15)
+		if (c->x86 == 15 && banks > 4)
 			/* disable GART TBL walk error reporting, which trips off
 			   incorrectly with the IOMMU & 3ware & Cerberus. */
-			clear_bit(10, &bank[4]);
+			clear_bit(10, (unsigned long *)&bank[4]);
 		if(c->x86 <= 17 && mce_bootlog < 0)
 			/* Lots of broken BIOS around that don't clear them
 			   by default and leave crap in there. Don't log. */
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 	}
 }
 
+static void mce_init_timer(void)
+{
+	struct timer_list *t = &__get_cpu_var(mce_timer);
+
+	/* data race harmless because everyone sets to the same value */
+	if (!next_interval)
+		next_interval = check_interval * HZ;
+	if (!next_interval)
+		return;
+	setup_timer(t, mcheck_timer, smp_processor_id());
+	t->expires = round_jiffies(jiffies + next_interval);
+	add_timer(t);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	mce_cpu_quirks(c);
+	if (!mce_available(c))
+		return;
 
-	if (mce_dont_init ||
-	    !mce_available(c))
-		return;
+	if (mce_cap_init() < 0) {
+		mce_dont_init = 1;
+		return;
+	}
+	mce_cpu_quirks(c);
 
 	mce_init(NULL);
 	mce_cpu_features(c);
+	mce_init_timer();
 }
 
 /*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 {
 	unsigned long *cpu_tsc;
 	static DEFINE_MUTEX(mce_read_mutex);
-	unsigned next;
+	unsigned prev, next;
 	char __user *buf = ubuf;
 	int i, err;
 
@@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	}
 
 	err = 0;
-	for (i = 0; i < next; i++) {
-		unsigned long start = jiffies;
-
-		while (!mcelog.entry[i].finished) {
-			if (time_after_eq(jiffies, start + 2)) {
-				memset(mcelog.entry + i,0, sizeof(struct mce));
-				goto timeout;
+	prev = 0;
+	do {
+		for (i = prev; i < next; i++) {
+			unsigned long start = jiffies;
+
+			while (!mcelog.entry[i].finished) {
+				if (time_after_eq(jiffies, start + 2)) {
+					memset(mcelog.entry + i, 0,
+					       sizeof(struct mce));
+					goto timeout;
+				}
+				cpu_relax();
 			}
-			cpu_relax();
+			smp_rmb();
+			err |= copy_to_user(buf, mcelog.entry + i,
+					    sizeof(struct mce));
+			buf += sizeof(struct mce);
+timeout:
+			;
 		}
-		smp_rmb();
-		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
-		buf += sizeof(struct mce);
- timeout:
-		;
-	}
 
-	memset(mcelog.entry, 0, next * sizeof(struct mce));
-	mcelog.next = 0;
+		memset(mcelog.entry + prev, 0,
+		       (next - prev) * sizeof(struct mce));
+		prev = next;
+		next = cmpxchg(&mcelog.next, prev, 0);
+	} while (next != prev);
 
 	synchronize_sched();
 
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
 	&mce_chrdev_ops,
 };
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 /*
  * Old style boot options parsing. Only for compatibility.
  */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
 	return 1;
 }
 
-/* mce=off disables machine check. Note you can re-enable it later
-   using sysfs.
+/* mce=off disables machine check.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
    mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable);
  * Sysfs support
  */
 
+/*
+ * Disable machine checks on suspend and shutdown. We can't really handle
+ * them later.
+ */
+static int mce_disable(void)
+{
+	int i;
+
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+	return 0;
+}
+
+static int mce_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return mce_disable();
+}
+
+static int mce_shutdown(struct sys_device *dev)
+{
+	return mce_disable();
+}
+
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
    CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
 	return 0;
 }
 
+static void mce_cpu_restart(void *data)
+{
+	del_timer_sync(&__get_cpu_var(mce_timer));
+	if (mce_available(&current_cpu_data))
+		mce_init(NULL);
+	mce_init_timer();
+}
+
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	if (next_interval)
-		cancel_delayed_work(&mcheck_work);
-	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
+	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 static struct sysdev_class mce_sysclass = {
+	.suspend = mce_suspend,
+	.shutdown = mce_shutdown,
 	.resume = mce_resume,
 	.name = "machinecheck",
 };
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
 	} \
 	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/*
- * TBD should generate these dynamically based on number of available banks.
- * Have only 6 contol banks in /sysfs until then.
- */
-ACCESSOR(bank0ctl,bank[0],mce_restart())
-ACCESSOR(bank1ctl,bank[1],mce_restart())
-ACCESSOR(bank2ctl,bank[2],mce_restart())
-ACCESSOR(bank3ctl,bank[3],mce_restart())
-ACCESSOR(bank4ctl,bank[4],mce_restart())
-ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute *bank_attrs;
+
+static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			 char *buf)
+{
+	u64 b = bank[attr - bank_attrs];
+	return sprintf(buf, "%llx\n", b);
+}
+
+static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			const char *buf, size_t siz)
+{
+	char *end;
+	u64 new = simple_strtoull(buf, &end, 0);
+	if (end == buf)
+		return -EINVAL;
+	bank[attr - bank_attrs] = new;
+	mce_restart();
+	return end-buf;
+}
 
 static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
 			    char *buf)
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
 ACCESSOR(check_interval,check_interval,mce_restart())
 static struct sysdev_attribute *mce_attributes[] = {
-	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
 	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
 	NULL
 };
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 		if (err)
 			goto error;
 	}
+	for (i = 0; i < banks; i++) {
+		err = sysdev_create_file(&per_cpu(device_mce, cpu),
+					 &bank_attrs[i]);
+		if (err)
+			goto error2;
+	}
 	cpu_set(cpu, mce_device_initialized);
 
 	return 0;
+error2:
+	while (--i >= 0) {
+		sysdev_remove_file(&per_cpu(device_mce, cpu),
+				   &bank_attrs[i]);
+	}
 error:
-	while (i--) {
+	while (--i >= 0) {
 		sysdev_remove_file(&per_cpu(device_mce,cpu),
 				   mce_attributes[i]);
 	}
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 	for (i = 0; mce_attributes[i]; i++)
 		sysdev_remove_file(&per_cpu(device_mce,cpu),
 			mce_attributes[i]);
+	for (i = 0; i < banks; i++)
+		sysdev_remove_file(&per_cpu(device_mce, cpu),
+			&bank_attrs[i]);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
 	cpu_clear(cpu, mce_device_initialized);
 }
 
+/* Make sure there are no machine checks on offlined CPUs. */
+static void mce_disable_cpu(void *h)
+{
+	int i;
+	unsigned long action = *(unsigned long *)h;
+
+	if (!mce_available(&current_cpu_data))
+		return;
+	if (!(action & CPU_TASKS_FROZEN))
+		cmci_clear();
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+}
+
+static void mce_reenable_cpu(void *h)
+{
+	int i;
+	unsigned long action = *(unsigned long *)h;
+
+	if (!mce_available(&current_cpu_data))
+		return;
+	if (!(action & CPU_TASKS_FROZEN))
+		cmci_reenable();
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
+}
+
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
+	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 			threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		del_timer_sync(t);
+		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		t->expires = round_jiffies(jiffies + next_interval);
+		add_timer_on(t, cpu);
+		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+		break;
+	case CPU_POST_DEAD:
+		/* intentionally ignoring frozen here */
+		cmci_rediscover(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
 	.notifier_call = mce_cpu_callback,
 };
 
+static __init int mce_init_banks(void)
+{
+	int i;
+
+	bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
+				GFP_KERNEL);
+	if (!bank_attrs)
+		return -ENOMEM;
+
+	for (i = 0; i < banks; i++) {
+		struct sysdev_attribute *a = &bank_attrs[i];
+		a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
+		if (!a->attr.name)
+			goto nomem;
+		a->attr.mode = 0644;
+		a->show = show_bank;
+		a->store = set_bank;
+	}
+	return 0;
+
+nomem:
+	while (--i >= 0)
+		kfree(bank_attrs[i].attr.name);
+	kfree(bank_attrs);
+	bank_attrs = NULL;
+	return -ENOMEM;
+}
+
 static __init int mce_init_device(void)
 {
 	int err;
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void)
 
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
+
+	err = mce_init_banks();
+	if (err)
+		return err;
+
 	err = sysdev_class_register(&mce_sysclass);
 	if (err)
 		return err;
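
The reworked mce_read() above drains the log with a cmpxchg loop, so records appended while the copy is in flight are picked up on the next pass instead of being lost. A freestanding sketch of that pattern, using C11 atomics as a stand-in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint log_next;	/* stands in for mcelog.next */

static void consume(unsigned int i) { printf("entry %u\n", i); }

static void drain(void)
{
	unsigned int prev = 0;
	unsigned int next = atomic_load(&log_next);

	do {
		for (unsigned int i = prev; i < next; i++)
			consume(i);
		prev = next;
		/* reset the index to 0 only if nothing new was appended
		 * meanwhile; on failure 'next' is reloaded with the
		 * current count and the loop runs again from 'prev' */
		next = prev;
		atomic_compare_exchange_strong(&log_next, &next, 0);
	} while (next != prev);
}

int main(void)
{
	atomic_store(&log_next, 3);	/* pretend three records exist */
	drain();
	return 0;
}
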
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 9817506dd469..c5a32f92d07e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = {
 
 static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
 
+static void amd_threshold_interrupt(void);
+
 /*
  * CPU Initialization
  */
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			tr.reset = 0;
 			tr.old_limit = 0;
 			threshold_restart_bank(&tr);
+
+			mce_threshold_vector = amd_threshold_interrupt;
 		}
 	}
 }
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
  * the interrupt goes off when error_count reaches threshold_limit.
  * the handler will simply log mcelog w/ software defined bank number.
  */
-asmlinkage void mce_threshold_interrupt(void)
+static void amd_threshold_interrupt(void)
 {
 	unsigned int bank, block;
 	struct mce m;
 	u32 low = 0, high = 0, address = 0;
 
-	ack_APIC_irq();
-	exit_idle();
-	irq_enter();
-
-	memset(&m, 0, sizeof(m));
-	rdtscll(m.tsc);
-	m.cpu = smp_processor_id();
+	mce_setup(&m);
 
 	/* assume first bank caused it */
 	for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void)
 
 			/* Log the machine check that caused the threshold
 			   event. */
-			do_machine_check(NULL, 0);
+			machine_check_poll(MCP_TIMESTAMP,
+					   &__get_cpu_var(mce_poll_banks));
 
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
@@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void)
 					       + bank * NR_BLOCKS
 					       + block;
 				mce_log(&m);
-				goto out;
+				return;
 			}
 		}
 	}
-out:
-	inc_irq_stat(irq_threshold_count);
-	irq_exit();
 }
 
 /*
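
With this change the AMD handler no longer owns the interrupt entry itself; it just installs itself in mce_threshold_vector, and the shared entry point (the new threshold.c further down) does the APIC and irq bookkeeping around an indirect call. A stripped-down userspace sketch of that dispatch shape:

#include <stdio.h>

static void default_threshold_interrupt(void)
{
	printf("unexpected threshold interrupt\n");
}

/* set at feature-init time by the vendor code */
static void (*threshold_vector)(void) = default_threshold_interrupt;

static void amd_threshold_interrupt(void)
{
	printf("AMD threshold event\n");
}

/* the common entry point: bookkeeping, then the indirect call */
static void mce_threshold_interrupt(void)
{
	threshold_vector();
}

int main(void)
{
	mce_threshold_interrupt();			/* default path */
	threshold_vector = amd_threshold_interrupt;	/* feature init */
	mce_threshold_interrupt();
	return 0;
}
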
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index aa5e287c98e0..aaa7d9730938 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -1,6 +1,8 @@
 /*
  * Intel specific MCE features.
  * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -13,6 +15,7 @@
 #include <asm/hw_irq.h>
 #include <asm/idle.h>
 #include <asm/therm_throt.h>
+#include <asm/apic.h>
 
 asmlinkage void smp_thermal_interrupt(void)
 {
@@ -25,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 	if (therm_throt_process(msr_val & 1))
-		mce_log_therm_throt_event(smp_processor_id(), msr_val);
+		mce_log_therm_throt_event(msr_val);
 
 	inc_irq_stat(irq_thermal_count);
 	irq_exit();
@@ -85,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
+/*
+ * Support for Intel Corrected Machine Check Interrupts. This allows
+ * the CPU to raise an interrupt when a corrected machine check happened.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
+
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
+{
+	u64 cap;
+
+	/*
+	 * Vendor check is not strictly needed, but the initial
+	 * initialization is vendor keyed and this
+	 * makes sure none of the backdoors are entered otherwise.
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return 0;
+	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+		return 0;
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+	return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	mce_notify_user();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+	if (*hdr == 0)
+		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+	*hdr = 1;
+	printk(KERN_CONT " %s:%d", type, num);
+}
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	int hdr = 0;
+	int i;
+
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		u64 val;
+
+		if (test_bit(i, owned))
+			continue;
+
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Already owned by someone else? */
+		if (val & CMCI_EN) {
+			if (test_and_clear_bit(i, owned) || boot)
+				print_update("SHD", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			continue;
+		}
+
+		val |= CMCI_EN | CMCI_THRESHOLD;
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Did the enable bit stick? -- the bank supports CMCI */
+		if (val & CMCI_EN) {
+			if (!test_and_set_bit(i, owned) || boot)
+				print_update("CMCI", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+		} else {
+			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+		}
+	}
+	spin_unlock(&cmci_discover_lock);
+	if (hdr)
+		printk(KERN_CONT "\n");
+}
+
+/*
+ * Just in case we missed an event during initialization check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+	unsigned long flags;
+	int banks;
+
+	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+		return;
+	local_irq_save(flags);
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+	int i;
+	int banks;
+	u64 val;
+
+	if (!cmci_supported(&banks))
+		return;
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+			continue;
+		/* Disable CMCI */
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		__clear_bit(i, __get_cpu_var(mce_banks_owned));
+	}
+	spin_unlock(&cmci_discover_lock);
+}
+
+/*
+ * After a CPU went down, cycle through all the others and rediscover.
+ * Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+	int banks;
+	int cpu;
+	cpumask_var_t old;
+
+	if (!cmci_supported(&banks))
+		return;
+	if (!alloc_cpumask_var(&old, GFP_KERNEL))
+		return;
+	cpumask_copy(old, &current->cpus_allowed);
+
+	for_each_online_cpu (cpu) {
+		if (cpu == dying)
+			continue;
+		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+			continue;
+		/* Recheck banks in case CPUs don't all have the same */
+		if (cmci_supported(&banks))
+			cmci_discover(banks, 0);
+	}
+
+	set_cpus_allowed_ptr(current, old);
+	free_cpumask_var(old);
+}
+
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+	int banks;
+	if (cmci_supported(&banks))
+		cmci_discover(banks, 0);
+}
+
+static __cpuinit void intel_init_cmci(void)
+{
+	int banks;
+
+	if (!cmci_supported(&banks))
+		return;
+
+	mce_threshold_vector = intel_threshold_interrupt;
+	cmci_discover(banks, 1);
+	/*
+	 * For CPU #0 this runs with still disabled APIC, but that's
+	 * ok because only the vector is set up. We still do another
+	 * check for the banks later for CPU #0 just to make sure
+	 * to not miss any events.
+	 */
+	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+	cmci_recheck();
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
+	intel_init_cmci();
 }
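
cmci_discover() claims a bank by setting CMCI_EN and immediately reading the MSR back: if the bit was already set another CPU owns the (shared) bank, and if the write does not stick the bank has no CMCI support. A small register-model sketch of that probe, with a plain variable standing in for the per-bank MCi_CTL2 MSR and CMCI_EN assumed to be the enable bit (bit 30):

#include <stdio.h>

#define CMCI_EN (1ULL << 30)

enum probe { OWNED_ELSEWHERE, CLAIMED, UNSUPPORTED };

/* 'msr' models one bank's CTL2 register; supports_cmci models
 * hardware that silently drops writes to the enable bit */
static enum probe probe_bank(unsigned long long *msr, int supports_cmci)
{
	if (*msr & CMCI_EN)		/* someone else enabled it first */
		return OWNED_ELSEWHERE;

	*msr |= CMCI_EN;		/* try to claim it */
	if (!supports_cmci)
		*msr &= ~CMCI_EN;	/* the write did not stick */

	return (*msr & CMCI_EN) ? CLAIMED : UNSUPPORTED;
}

int main(void)
{
	unsigned long long free_bank = 0, shared = CMCI_EN, old_bank = 0;

	printf("%d %d %d\n",
	       probe_bank(&free_bank, 1),	/* CLAIMED */
	       probe_bank(&shared, 1),		/* OWNED_ELSEWHERE */
	       probe_bank(&old_bank, 0));	/* UNSUPPORTED */
	return 0;
}
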
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
new file mode 100644
index 000000000000..23ee9e730f78
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -0,0 +1,29 @@
+/*
+ * Common corrected MCE threshold handler code:
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_vectors.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/mce.h>
+
+static void default_threshold_interrupt(void)
+{
+	printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
+	       THRESHOLD_APIC_VECTOR);
+}
+
+void (*mce_threshold_vector)(void) = default_threshold_interrupt;
+
+asmlinkage void mce_threshold_interrupt(void)
+{
+	exit_idle();
+	irq_enter();
+	inc_irq_stat(irq_threshold_count);
+	mce_threshold_vector();
+	irq_exit();
+	/* Ack only at the end to avoid potential reentry */
+	ack_APIC_irq();
+}
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 169a120587be..87b67e3a765a 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -729,7 +729,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 
 	spin_unlock_irqrestore(&ds_lock, irq);
 
-	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
 	ds_resume_pebs(tracer);
 
 	return tracer;
@@ -1029,5 +1029,4 @@ void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
 
 void ds_exit_thread(struct task_struct *tsk)
 {
-	WARN_ON(tsk->thread.ds_ctx);
 }
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index b205272ad394..1736acc4d7aa 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -469,7 +469,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages;
+	u64 end, systab, addr, npages, end_pfn;
 	void *p, *va;
 
 	efi.systab = NULL;
@@ -481,7 +481,10 @@ void __init efi_enter_virtual_mode(void)
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
 
-		if (PFN_UP(end) <= max_low_pfn_mapped)
+		end_pfn = PFN_UP(end);
+		if (end_pfn <= max_low_pfn_mapped
+		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
 		else
 			va = efi_ioremap(md->phys_addr, size);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index a4ee29127fdf..22c3b7828c50 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -100,24 +100,11 @@ void __init efi_call_phys_epilog(void)
100 100
101void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size) 101void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
102{ 102{
103 static unsigned pages_mapped __initdata; 103 unsigned long last_map_pfn;
104 unsigned i, pages;
105 unsigned long offset;
106 104
107 pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr); 105 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
108 offset = phys_addr & ~PAGE_MASK; 106 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
109 phys_addr &= PAGE_MASK;
110
111 if (pages_mapped + pages > MAX_EFI_IO_PAGES)
112 return NULL; 107 return NULL;
113 108
114 for (i = 0; i < pages; i++) { 109 return (void __iomem *)__va(phys_addr);
115 __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
116 phys_addr, PAGE_KERNEL);
117 phys_addr += PAGE_SIZE;
118 pages_mapped++;
119 }
120
121 return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
122 (pages_mapped - pages)) + offset;
123} 110}
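
With init_memory_mapping() doing the work, efi_ioremap() no longer consumes MAX_EFI_IO_PAGES fixmap slots; it extends the direct mapping and returns a linear-map address, or NULL when the range could not be fully covered. A caller sketch (hedged; md stands for any EFI memory descriptor):

	void __iomem *va;

	va = efi_ioremap(md->phys_addr, md->num_pages << EFI_PAGE_SHIFT);
	if (!va)
		pr_err("EFI: cannot map region at 0x%llx\n", md->phys_addr);
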
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 83d1836b9467..7ba4621c0dfa 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -984,6 +984,8 @@ apicinterrupt UV_BAU_MESSAGE \
984#endif 984#endif
985apicinterrupt LOCAL_TIMER_VECTOR \ 985apicinterrupt LOCAL_TIMER_VECTOR \
986 apic_timer_interrupt smp_apic_timer_interrupt 986 apic_timer_interrupt smp_apic_timer_interrupt
987apicinterrupt GENERIC_INTERRUPT_VECTOR \
988 generic_interrupt smp_generic_interrupt
987 989
988#ifdef CONFIG_SMP 990#ifdef CONFIG_SMP
989apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \ 991apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b0f61f0dcd0a..f2f8540a7f3d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
136#ifdef CONFIG_X86_32 136#ifdef CONFIG_X86_32
137 if (!HAVE_HWFP) { 137 if (!HAVE_HWFP) {
138 memset(tsk->thread.xstate, 0, xstate_size); 138 memset(tsk->thread.xstate, 0, xstate_size);
139 finit(); 139 finit_task(tsk);
140 set_stopped_child_used_math(tsk); 140 set_stopped_child_used_math(tsk);
141 return 0; 141 return 0;
142 } 142 }
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f13ca1650aaf..b864341dcc45 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -15,6 +15,9 @@
15 15
16atomic_t irq_err_count; 16atomic_t irq_err_count;
17 17
18/* Function pointer for generic interrupt vector handling */
19void (*generic_interrupt_extension)(void) = NULL;
20
18/* 21/*
19 * 'what should we do if we get a hw irq event on an illegal vector'. 22 * 'what should we do if we get a hw irq event on an illegal vector'.
20 * each architecture has to answer this themselves. 23 * each architecture has to answer this themselves.
@@ -56,6 +59,12 @@ static int show_other_interrupts(struct seq_file *p)
56 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); 59 seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
57 seq_printf(p, " Local timer interrupts\n"); 60 seq_printf(p, " Local timer interrupts\n");
58#endif 61#endif
62 if (generic_interrupt_extension) {
63 seq_printf(p, "PLT: ");
64 for_each_online_cpu(j)
65 seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
66 seq_printf(p, " Platform interrupts\n");
67 }
59#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
60 seq_printf(p, "RES: "); 69 seq_printf(p, "RES: ");
61 for_each_online_cpu(j) 70 for_each_online_cpu(j)
@@ -163,6 +172,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
163#ifdef CONFIG_X86_LOCAL_APIC 172#ifdef CONFIG_X86_LOCAL_APIC
164 sum += irq_stats(cpu)->apic_timer_irqs; 173 sum += irq_stats(cpu)->apic_timer_irqs;
165#endif 174#endif
175 if (generic_interrupt_extension)
176 sum += irq_stats(cpu)->generic_irqs;
166#ifdef CONFIG_SMP 177#ifdef CONFIG_SMP
167 sum += irq_stats(cpu)->irq_resched_count; 178 sum += irq_stats(cpu)->irq_resched_count;
168 sum += irq_stats(cpu)->irq_call_count; 179 sum += irq_stats(cpu)->irq_call_count;
@@ -226,4 +237,27 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
226 return 1; 237 return 1;
227} 238}
228 239
240/*
241 * Handler for GENERIC_INTERRUPT_VECTOR.
242 */
243void smp_generic_interrupt(struct pt_regs *regs)
244{
245 struct pt_regs *old_regs = set_irq_regs(regs);
246
247 ack_APIC_irq();
248
249 exit_idle();
250
251 irq_enter();
252
253 inc_irq_stat(generic_irqs);
254
255 if (generic_interrupt_extension)
256 generic_interrupt_extension();
257
258 irq_exit();
259
260 set_irq_regs(old_regs);
261}
262
229EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); 263EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
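
generic_interrupt_extension is deliberately a bare function pointer rather than a request_irq() registration: exactly one platform owner installs it at init time, and the accounting happens in smp_generic_interrupt() above. A minimal client sketch (uv_rtc_interrupt() is a hypothetical name; the SGI UV RTC code later in this patch is the real first user of the vector):

static void uv_rtc_interrupt(void)
{
	/* handle the platform event that raised GENERIC_INTERRUPT_VECTOR */
}

static int __init uv_rtc_hook_vector(void)
{
	if (generic_interrupt_extension)
		return -EBUSY;	/* some other platform code owns the hook */

	generic_interrupt_extension = uv_rtc_interrupt;
	return 0;
}
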
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9dc6b2b24275..3b09634a5153 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -16,6 +16,7 @@
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/percpu.h>
19 20
20#include <asm/apic.h> 21#include <asm/apic.h>
21 22
@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { }
55union irq_ctx { 56union irq_ctx {
56 struct thread_info tinfo; 57 struct thread_info tinfo;
57 u32 stack[THREAD_SIZE/sizeof(u32)]; 58 u32 stack[THREAD_SIZE/sizeof(u32)];
58}; 59} __attribute__((aligned(PAGE_SIZE)));
59 60
60static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; 61static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
61static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; 62static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
62 63
63static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; 64static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
64static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; 65static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
65 66
66static void call_on_stack(void *func, void *stack) 67static void call_on_stack(void *func, void *stack)
67{ 68{
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
81 u32 *isp, arg1, arg2; 82 u32 *isp, arg1, arg2;
82 83
83 curctx = (union irq_ctx *) current_thread_info(); 84 curctx = (union irq_ctx *) current_thread_info();
84 irqctx = hardirq_ctx[smp_processor_id()]; 85 irqctx = __get_cpu_var(hardirq_ctx);
85 86
86 /* 87 /*
87 * this is where we switch to the IRQ stack. However, if we are 88 * this is where we switch to the IRQ stack. However, if we are
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu)
125{ 126{
126 union irq_ctx *irqctx; 127 union irq_ctx *irqctx;
127 128
128 if (hardirq_ctx[cpu]) 129 if (per_cpu(hardirq_ctx, cpu))
129 return; 130 return;
130 131
131 irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; 132 irqctx = &per_cpu(hardirq_stack, cpu);
132 irqctx->tinfo.task = NULL; 133 irqctx->tinfo.task = NULL;
133 irqctx->tinfo.exec_domain = NULL; 134 irqctx->tinfo.exec_domain = NULL;
134 irqctx->tinfo.cpu = cpu; 135 irqctx->tinfo.cpu = cpu;
135 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; 136 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
136 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 137 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
137 138
138 hardirq_ctx[cpu] = irqctx; 139 per_cpu(hardirq_ctx, cpu) = irqctx;
139 140
140 irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; 141 irqctx = &per_cpu(softirq_stack, cpu);
141 irqctx->tinfo.task = NULL; 142 irqctx->tinfo.task = NULL;
142 irqctx->tinfo.exec_domain = NULL; 143 irqctx->tinfo.exec_domain = NULL;
143 irqctx->tinfo.cpu = cpu; 144 irqctx->tinfo.cpu = cpu;
144 irqctx->tinfo.preempt_count = 0; 145 irqctx->tinfo.preempt_count = 0;
145 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 146 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
146 147
147 softirq_ctx[cpu] = irqctx; 148 per_cpu(softirq_ctx, cpu) = irqctx;
148 149
149 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", 150 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
150 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); 151 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
151} 152}
152 153
153void irq_ctx_exit(int cpu) 154void irq_ctx_exit(int cpu)
154{ 155{
155 hardirq_ctx[cpu] = NULL; 156 per_cpu(hardirq_ctx, cpu) = NULL;
156} 157}
157 158
158asmlinkage void do_softirq(void) 159asmlinkage void do_softirq(void)
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void)
169 170
170 if (local_softirq_pending()) { 171 if (local_softirq_pending()) {
171 curctx = current_thread_info(); 172 curctx = current_thread_info();
172 irqctx = softirq_ctx[smp_processor_id()]; 173 irqctx = __get_cpu_var(softirq_ctx);
173 irqctx->tinfo.task = curctx->task; 174 irqctx->tinfo.task = curctx->task;
174 irqctx->tinfo.previous_esp = current_stack_pointer; 175 irqctx->tinfo.previous_esp = current_stack_pointer;
175 176
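
The conversion above is the standard recipe for retiring NR_CPUS-sized arrays: DEFINE_PER_CPU replaces the static array, per_cpu(var, cpu) replaces var[cpu] for remote access, and __get_cpu_var(var) replaces var[smp_processor_id()] locally. Schematically (a sketch with a hypothetical struct foo, not code from the patch):

struct foo;

/* before: static array indexed by cpu number */
static struct foo *foo_ctx_old[NR_CPUS] __read_mostly;

/* after: true per-cpu variable */
static DEFINE_PER_CPU(struct foo *, foo_ctx);

static struct foo *local_ctx(void)
{
	return __get_cpu_var(foo_ctx);	/* was foo_ctx_old[smp_processor_id()] */
}

static struct foo *remote_ctx(int cpu)
{
	return per_cpu(foo_ctx, cpu);	/* was foo_ctx_old[cpu] */
}
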
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 50b8c3a3006c..bc1326105448 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -175,6 +175,9 @@ void __init native_init_IRQ(void)
175 /* self generated IPI for local APIC timer */ 175 /* self generated IPI for local APIC timer */
176 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); 176 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
177 177
178 /* generic IPI for platform specific use */
179 alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
180
178 /* IPI vectors for APIC spurious and error interrupts */ 181 /* IPI vectors for APIC spurious and error interrupts */
179 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 182 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
180 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 183 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index da481a1e3f30..c7a49e0ffbfb 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -147,6 +147,9 @@ static void __init apic_intr_init(void)
147 /* self generated IPI for local APIC timer */ 147 /* self generated IPI for local APIC timer */
148 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); 148 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
149 149
150 /* generic IPI for platform specific use */
151 alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
152
150 /* IPI vectors for APIC spurious and error interrupts */ 153 /* IPI vectors for APIC spurious and error interrupts */
151 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 154 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
152 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 155 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index f5fc8c781a62..e7368c1da01d 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -14,12 +14,12 @@
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/suspend.h> 15#include <linux/suspend.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/io.h>
17 18
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
21#include <asm/mmu_context.h> 22#include <asm/mmu_context.h>
22#include <asm/io.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <asm/cpufeature.h> 24#include <asm/cpufeature.h>
25#include <asm/desc.h> 25#include <asm/desc.h>
@@ -63,7 +63,7 @@ static void load_segments(void)
63 "\tmovl %%eax,%%fs\n" 63 "\tmovl %%eax,%%fs\n"
64 "\tmovl %%eax,%%gs\n" 64 "\tmovl %%eax,%%gs\n"
65 "\tmovl %%eax,%%ss\n" 65 "\tmovl %%eax,%%ss\n"
66 ::: "eax", "memory"); 66 : : : "eax", "memory");
67#undef STR 67#undef STR
68#undef __STR 68#undef __STR
69} 69}
@@ -205,7 +205,8 @@ void machine_kexec(struct kimage *image)
205 205
206 if (image->preserve_context) { 206 if (image->preserve_context) {
207#ifdef CONFIG_X86_IO_APIC 207#ifdef CONFIG_X86_IO_APIC
208 /* We need to put APICs in legacy mode so that we can 208 /*
209 * We need to put APICs in legacy mode so that we can
209 * get timer interrupts in second kernel. kexec/kdump 210 * get timer interrupts in second kernel. kexec/kdump
210 * paths already have calls to disable_IO_APIC() in 211 * paths already have calls to disable_IO_APIC() in
211 * one form or other. kexec jump path also need 212 * one form or other. kexec jump path also need
@@ -227,7 +228,8 @@ void machine_kexec(struct kimage *image)
227 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) 228 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
228 << PAGE_SHIFT); 229 << PAGE_SHIFT);
229 230
230 /* The segment registers are funny things, they have both a 231 /*
232 * The segment registers are funny things, they have both a
231 * visible and an invisible part. Whenever the visible part is 233 * visible and an invisible part. Whenever the visible part is
232 * set to a specific selector, the invisible part is loaded 234 * set to a specific selector, the invisible part is loaded
233 * from a table in memory. At no other time is the 235 * from a table in memory. At no other time is the
@@ -237,11 +239,12 @@ void machine_kexec(struct kimage *image)
237 * segments, before I zap the gdt with an invalid value. 239 * segments, before I zap the gdt with an invalid value.
238 */ 240 */
239 load_segments(); 241 load_segments();
240 /* The gdt & idt are now invalid. 242 /*
243 * The gdt & idt are now invalid.
241 * If you want to load them you must set up your own idt & gdt. 244 * If you want to load them you must set up your own idt & gdt.
242 */ 245 */
243 set_gdt(phys_to_virt(0),0); 246 set_gdt(phys_to_virt(0), 0);
244 set_idt(phys_to_virt(0),0); 247 set_idt(phys_to_virt(0), 0);
245 248
246 /* now call it */ 249 /* now call it */
247 image->start = relocate_kernel_ptr((unsigned long)image->head, 250 image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 6993d51b7fd8..89cea4d44679 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -12,11 +12,47 @@
12#include <linux/reboot.h> 12#include <linux/reboot.h>
13#include <linux/numa.h> 13#include <linux/numa.h>
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/io.h>
16#include <linux/suspend.h>
15 17
16#include <asm/pgtable.h> 18#include <asm/pgtable.h>
17#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
18#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
19#include <asm/io.h> 21
22static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
23 unsigned long addr)
24{
25 pud_t *pud;
26 pmd_t *pmd;
27 struct page *page;
28 int result = -ENOMEM;
29
30 addr &= PMD_MASK;
31 pgd += pgd_index(addr);
32 if (!pgd_present(*pgd)) {
33 page = kimage_alloc_control_pages(image, 0);
34 if (!page)
35 goto out;
36 pud = (pud_t *)page_address(page);
37 memset(pud, 0, PAGE_SIZE);
38 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
39 }
40 pud = pud_offset(pgd, addr);
41 if (!pud_present(*pud)) {
42 page = kimage_alloc_control_pages(image, 0);
43 if (!page)
44 goto out;
45 pmd = (pmd_t *)page_address(page);
46 memset(pmd, 0, PAGE_SIZE);
47 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
48 }
49 pmd = pmd_offset(pud, addr);
50 if (!pmd_present(*pmd))
51 set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
52 result = 0;
53out:
54 return result;
55}
20 56
21static void init_level2_page(pmd_t *level2p, unsigned long addr) 57static void init_level2_page(pmd_t *level2p, unsigned long addr)
22{ 58{
@@ -83,9 +119,8 @@ static int init_level4_page(struct kimage *image, pgd_t *level4p,
83 } 119 }
84 level3p = (pud_t *)page_address(page); 120 level3p = (pud_t *)page_address(page);
85 result = init_level3_page(image, level3p, addr, last_addr); 121 result = init_level3_page(image, level3p, addr, last_addr);
86 if (result) { 122 if (result)
87 goto out; 123 goto out;
88 }
89 set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE)); 124 set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
90 addr += PGDIR_SIZE; 125 addr += PGDIR_SIZE;
91 } 126 }
@@ -156,6 +191,13 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
156 result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); 191 result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
157 if (result) 192 if (result)
158 return result; 193 return result;
194 /*
195 * image->start may be outside 0 ~ max_pfn, for example when
196 * jumping back to the original kernel from the kexeced kernel
197 */
198 result = init_one_level2_page(image, level4p, image->start);
199 if (result)
200 return result;
159 return init_transition_pgtable(image, level4p); 201 return init_transition_pgtable(image, level4p);
160} 202}
161 203
@@ -229,20 +271,45 @@ void machine_kexec(struct kimage *image)
229{ 271{
230 unsigned long page_list[PAGES_NR]; 272 unsigned long page_list[PAGES_NR];
231 void *control_page; 273 void *control_page;
274 int save_ftrace_enabled;
232 275
233 tracer_disable(); 276#ifdef CONFIG_KEXEC_JUMP
277 if (kexec_image->preserve_context)
278 save_processor_state();
279#endif
280
281 save_ftrace_enabled = __ftrace_enabled_save();
234 282
235 /* Interrupts aren't acceptable while we reboot */ 283 /* Interrupts aren't acceptable while we reboot */
236 local_irq_disable(); 284 local_irq_disable();
237 285
286 if (image->preserve_context) {
287#ifdef CONFIG_X86_IO_APIC
288 /*
289 * We need to put APICs in legacy mode so that we can
290 * get timer interrupts in the second kernel. kexec/kdump
291 * paths already have calls to disable_IO_APIC() in
292 * one form or another. The kexec jump path also needs
293 * one.
294 */
295 disable_IO_APIC();
296#endif
297 }
298
238 control_page = page_address(image->control_code_page) + PAGE_SIZE; 299 control_page = page_address(image->control_code_page) + PAGE_SIZE;
239 memcpy(control_page, relocate_kernel, PAGE_SIZE); 300 memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
240 301
241 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); 302 page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
303 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
242 page_list[PA_TABLE_PAGE] = 304 page_list[PA_TABLE_PAGE] =
243 (unsigned long)__pa(page_address(image->control_code_page)); 305 (unsigned long)__pa(page_address(image->control_code_page));
244 306
245 /* The segment registers are funny things, they have both a 307 if (image->type == KEXEC_TYPE_DEFAULT)
308 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
309 << PAGE_SHIFT);
310
311 /*
312 * The segment registers are funny things, they have both a
246 * visible and an invisible part. Whenever the visible part is 313 * visible and an invisible part. Whenever the visible part is
247 * set to a specific selector, the invisible part is loaded 314 * set to a specific selector, the invisible part is loaded
248 * from a table in memory. At no other time is the 315 * from a table in memory. At no other time is the
@@ -252,15 +319,25 @@ void machine_kexec(struct kimage *image)
252 * segments, before I zap the gdt with an invalid value. 319 * segments, before I zap the gdt with an invalid value.
253 */ 320 */
254 load_segments(); 321 load_segments();
255 /* The gdt & idt are now invalid. 322 /*
323 * The gdt & idt are now invalid.
256 * If you want to load them you must set up your own idt & gdt. 324 * If you want to load them you must set up your own idt & gdt.
257 */ 325 */
258 set_gdt(phys_to_virt(0),0); 326 set_gdt(phys_to_virt(0), 0);
259 set_idt(phys_to_virt(0),0); 327 set_idt(phys_to_virt(0), 0);
260 328
261 /* now call it */ 329 /* now call it */
262 relocate_kernel((unsigned long)image->head, (unsigned long)page_list, 330 image->start = relocate_kernel((unsigned long)image->head,
263 image->start); 331 (unsigned long)page_list,
332 image->start,
333 image->preserve_context);
334
335#ifdef CONFIG_KEXEC_JUMP
336 if (kexec_image->preserve_context)
337 restore_processor_state();
338#endif
339
340 __ftrace_enabled_restore(save_ftrace_enabled);
264} 341}
265 342
266void arch_crash_save_vmcoreinfo(void) 343void arch_crash_save_vmcoreinfo(void)
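
Taken together, the 64-bit path now mirrors the 32-bit preserve-context flow: processor state is saved up front, relocate_kernel() returns the peer kernel's re-entry point, and state is restored if the peer jumps back. Condensed to its skeleton (a sketch of the sequence in this patch, with page_list setup and the IO-APIC handling elided):

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context)
		save_processor_state();		/* as on the suspend path */
#endif
	local_irq_disable();
	/* ... fill page_list[], copy relocate_kernel to the control page ... */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);
	/* execution resumes here only if the peer kernel jumped back */
#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context)
		restore_processor_state();
#endif
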
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 37cb1bda1baf..e8192401da47 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -558,6 +558,19 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
558 558
559static struct mpf_intel *mpf_found; 559static struct mpf_intel *mpf_found;
560 560
561static unsigned long __init get_mpc_size(unsigned long physptr)
562{
563 struct mpc_table *mpc;
564 unsigned long size;
565
566 mpc = early_ioremap(physptr, PAGE_SIZE);
567 size = mpc->length;
568 early_iounmap(mpc, PAGE_SIZE);
569 apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
570
571 return size;
572}
573
561/* 574/*
562 * Scan the memory blocks for an SMP configuration block. 575 * Scan the memory blocks for an SMP configuration block.
563 */ 576 */
@@ -611,12 +624,16 @@ static void __init __get_smp_config(unsigned int early)
611 construct_default_ISA_mptable(mpf->feature1); 624 construct_default_ISA_mptable(mpf->feature1);
612 625
613 } else if (mpf->physptr) { 626 } else if (mpf->physptr) {
627 struct mpc_table *mpc;
628 unsigned long size;
614 629
630 size = get_mpc_size(mpf->physptr);
631 mpc = early_ioremap(mpf->physptr, size);
615 /* 632 /*
616 * Read the physical hardware table. Anything here will 633 * Read the physical hardware table. Anything here will
617 * override the defaults. 634 * override the defaults.
618 */ 635 */
619 if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) { 636 if (!smp_read_mpc(mpc, early)) {
620#ifdef CONFIG_X86_LOCAL_APIC 637#ifdef CONFIG_X86_LOCAL_APIC
621 smp_found_config = 0; 638 smp_found_config = 0;
622#endif 639#endif
@@ -624,8 +641,10 @@ static void __init __get_smp_config(unsigned int early)
624 "BIOS bug, MP table errors detected!...\n"); 641 "BIOS bug, MP table errors detected!...\n");
625 printk(KERN_ERR "... disabling SMP support. " 642 printk(KERN_ERR "... disabling SMP support. "
626 "(tell your hw vendor)\n"); 643 "(tell your hw vendor)\n");
644 early_iounmap(mpc, size);
627 return; 645 return;
628 } 646 }
647 early_iounmap(mpc, size);
629 648
630 if (early) 649 if (early)
631 return; 650 return;
@@ -697,10 +716,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
697 716
698 if (!reserve) 717 if (!reserve)
699 return 1; 718 return 1;
700 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, 719 reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
701 BOOTMEM_DEFAULT); 720 BOOTMEM_DEFAULT);
702 if (mpf->physptr) { 721 if (mpf->physptr) {
703 unsigned long size = PAGE_SIZE; 722 unsigned long size = get_mpc_size(mpf->physptr);
704#ifdef CONFIG_X86_32 723#ifdef CONFIG_X86_32
705 /* 724 /*
706 * We cannot access the MPC table to compute 725
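
get_mpc_size() is an instance of a common early-boot idiom: map just enough of a structure to read its length field, unmap, then remap at the real size. Generalized (a sketch; struct table_header is hypothetical):

struct table_header {
	unsigned int length;	/* total table size in bytes */
};

static void * __init map_whole_table(unsigned long phys)
{
	struct table_header *h;
	unsigned long size;

	h = early_ioremap(phys, sizeof(*h));	/* peek at the header only */
	size = h->length;
	early_iounmap(h, sizeof(*h));

	/* the caller must early_iounmap() this mapping with the same size */
	return early_ioremap(phys, size);
}
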
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1cc18d439bbb..2aef36d8aca2 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -216,6 +216,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
216 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), 216 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
217 }, 217 },
218 }, 218 },
219 { /* Handle problems with rebooting on Dell XPS710 */
220 .callback = set_bios_reboot,
221 .ident = "Dell XPS710",
222 .matches = {
223 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
224 DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
225 },
226 },
219 { } 227 { }
220}; 228};
221 229
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 2064d0aa8d28..41235531b11c 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -17,7 +17,8 @@
17 17
18#define PTR(x) (x << 2) 18#define PTR(x) (x << 2)
19 19
20/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE 20/*
21 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
21 * ~ control_page + PAGE_SIZE are used as data storage and stack for 22 * ~ control_page + PAGE_SIZE are used as data storage and stack for
22 * jumping back 23 * jumping back
23 */ 24 */
@@ -76,8 +77,10 @@ relocate_kernel:
76 movl %eax, CP_PA_SWAP_PAGE(%edi) 77 movl %eax, CP_PA_SWAP_PAGE(%edi)
77 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) 78 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
78 79
79 /* get physical address of control page now */ 80 /*
80 /* this is impossible after page table switch */ 81 * get physical address of control page now
82 * this is impossible after page table switch
83 */
81 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi 84 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
82 85
83 /* switch to new set of page tables */ 86 /* switch to new set of page tables */
@@ -97,7 +100,8 @@ identity_mapped:
97 /* store the start address on the stack */ 100 /* store the start address on the stack */
98 pushl %edx 101 pushl %edx
99 102
100 /* Set cr0 to a known state: 103 /*
104 * Set cr0 to a known state:
101 * - Paging disabled 105 * - Paging disabled
102 * - Alignment check disabled 106 * - Alignment check disabled
103 * - Write protect disabled 107 * - Write protect disabled
@@ -113,7 +117,8 @@ identity_mapped:
113 /* clear cr4 if applicable */ 117 /* clear cr4 if applicable */
114 testl %ecx, %ecx 118 testl %ecx, %ecx
115 jz 1f 119 jz 1f
116 /* Set cr4 to a known state: 120 /*
121 * Set cr4 to a known state:
117 * Setting everything to zero seems safe. 122 * Setting everything to zero seems safe.
118 */ 123 */
119 xorl %eax, %eax 124 xorl %eax, %eax
@@ -132,15 +137,18 @@ identity_mapped:
132 call swap_pages 137 call swap_pages
133 addl $8, %esp 138 addl $8, %esp
134 139
135 /* To be certain of avoiding problems with self-modifying code 140 /*
141 * To be certain of avoiding problems with self-modifying code
136 * I need to execute a serializing instruction here. 142 * I need to execute a serializing instruction here.
137 * So I flush the TLB, it's handy, and not processor dependent. 143 * So I flush the TLB, it's handy, and not processor dependent.
138 */ 144 */
139 xorl %eax, %eax 145 xorl %eax, %eax
140 movl %eax, %cr3 146 movl %eax, %cr3
141 147
142 /* set all of the registers to known values */ 148 /*
143 /* leave %esp alone */ 149 * set all of the registers to known values
150 * leave %esp alone
151 */
144 152
145 testl %esi, %esi 153 testl %esi, %esi
146 jnz 1f 154 jnz 1f
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index d32cfb27a479..4de8f5b3d476 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -19,29 +19,77 @@
19#define PTR(x) (x << 3) 19#define PTR(x) (x << 3)
20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
21 21
22/*
23 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
25 * jumping back
26 */
27#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
28
29/* Minimal CPU state */
30#define RSP DATA(0x0)
31#define CR0 DATA(0x8)
32#define CR3 DATA(0x10)
33#define CR4 DATA(0x18)
34
35/* other data */
36#define CP_PA_TABLE_PAGE DATA(0x20)
37#define CP_PA_SWAP_PAGE DATA(0x28)
38#define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
39
22 .text 40 .text
23 .align PAGE_SIZE 41 .align PAGE_SIZE
24 .code64 42 .code64
25 .globl relocate_kernel 43 .globl relocate_kernel
26relocate_kernel: 44relocate_kernel:
27 /* %rdi indirection_page 45 /*
46 * %rdi indirection_page
28 * %rsi page_list 47 * %rsi page_list
29 * %rdx start address 48 * %rdx start address
49 * %rcx preserve_context
30 */ 50 */
31 51
52 /* Save the CPU context, used for jumping back */
53 pushq %rbx
54 pushq %rbp
55 pushq %r12
56 pushq %r13
57 pushq %r14
58 pushq %r15
59 pushf
60
61 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11
62 movq %rsp, RSP(%r11)
63 movq %cr0, %rax
64 movq %rax, CR0(%r11)
65 movq %cr3, %rax
66 movq %rax, CR3(%r11)
67 movq %cr4, %rax
68 movq %rax, CR4(%r11)
69
32 /* zero out flags, and disable interrupts */ 70 /* zero out flags, and disable interrupts */
33 pushq $0 71 pushq $0
34 popfq 72 popfq
35 73
36 /* get physical address of control page now */ 74 /*
37 /* this is impossible after page table switch */ 75 * get physical address of control page now
76 * this is impossible after page table switch
77 */
38 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 78 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
39 79
40 /* get physical address of page table now too */ 80 /* get physical address of page table now too */
41 movq PTR(PA_TABLE_PAGE)(%rsi), %rcx 81 movq PTR(PA_TABLE_PAGE)(%rsi), %r9
82
83 /* get physical address of swap page now */
84 movq PTR(PA_SWAP_PAGE)(%rsi), %r10
85
86 /* save some information for jumping back */
87 movq %r9, CP_PA_TABLE_PAGE(%r11)
88 movq %r10, CP_PA_SWAP_PAGE(%r11)
89 movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
42 90
43 /* Switch to the identity mapped page tables */ 91 /* Switch to the identity mapped page tables */
44 movq %rcx, %cr3 92 movq %r9, %cr3
45 93
46 /* setup a new stack at the end of the physical control page */ 94 /* setup a new stack at the end of the physical control page */
47 lea PAGE_SIZE(%r8), %rsp 95 lea PAGE_SIZE(%r8), %rsp
@@ -55,7 +103,8 @@ identity_mapped:
55 /* store the start address on the stack */ 103 /* store the start address on the stack */
56 pushq %rdx 104 pushq %rdx
57 105
58 /* Set cr0 to a known state: 106 /*
107 * Set cr0 to a known state:
59 * - Paging enabled 108 * - Paging enabled
60 * - Alignment check disabled 109 * - Alignment check disabled
61 * - Write protect disabled 110 * - Write protect disabled
@@ -68,7 +117,8 @@ identity_mapped:
68 orl $(X86_CR0_PG | X86_CR0_PE), %eax 117 orl $(X86_CR0_PG | X86_CR0_PE), %eax
69 movq %rax, %cr0 118 movq %rax, %cr0
70 119
71 /* Set cr4 to a known state: 120 /*
121 * Set cr4 to a known state:
72 * - physical address extension enabled 122 * - physical address extension enabled
73 */ 123 */
74 movq $X86_CR4_PAE, %rax 124 movq $X86_CR4_PAE, %rax
@@ -78,9 +128,87 @@ identity_mapped:
781: 1281:
79 129
80 /* Flush the TLB (needed?) */ 130 /* Flush the TLB (needed?) */
81 movq %rcx, %cr3 131 movq %r9, %cr3
132
133 movq %rcx, %r11
134 call swap_pages
135
136 /*
137 * To be certain of avoiding problems with self-modifying code
138 * I need to execute a serializing instruction here.
139 * So I flush the TLB by reloading %cr3 here, it's handy,
140 * and not processor dependent.
141 */
142 movq %cr3, %rax
143 movq %rax, %cr3
144
145 /*
146 * set all of the registers to known values
147 * leave %rsp alone
148 */
149
150 testq %r11, %r11
151 jnz 1f
152 xorq %rax, %rax
153 xorq %rbx, %rbx
154 xorq %rcx, %rcx
155 xorq %rdx, %rdx
156 xorq %rsi, %rsi
157 xorq %rdi, %rdi
158 xorq %rbp, %rbp
159 xorq %r8, %r8
160 xorq %r9, %r9
161	xorq %r10, %r10
162 xorq %r11, %r11
163 xorq %r12, %r12
164 xorq %r13, %r13
165 xorq %r14, %r14
166 xorq %r15, %r15
167
168 ret
169
1701:
171 popq %rdx
172 leaq PAGE_SIZE(%r10), %rsp
173 call *%rdx
174
175 /* get the re-entry point of the peer system */
176 movq 0(%rsp), %rbp
177 call 1f
1781:
179 popq %r8
180 subq $(1b - relocate_kernel), %r8
181 movq CP_PA_SWAP_PAGE(%r8), %r10
182 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
183 movq CP_PA_TABLE_PAGE(%r8), %rax
184 movq %rax, %cr3
185 lea PAGE_SIZE(%r8), %rsp
186 call swap_pages
187 movq $virtual_mapped, %rax
188 pushq %rax
189 ret
190
191virtual_mapped:
192 movq RSP(%r8), %rsp
193 movq CR4(%r8), %rax
194 movq %rax, %cr4
195 movq CR3(%r8), %rax
196 movq CR0(%r8), %r8
197 movq %rax, %cr3
198 movq %r8, %cr0
199 movq %rbp, %rax
200
201 popf
202 popq %r15
203 popq %r14
204 popq %r13
205 popq %r12
206 popq %rbp
207 popq %rbx
208 ret
82 209
83 /* Do the copies */ 210 /* Do the copies */
211swap_pages:
84 movq %rdi, %rcx /* Put the page_list in %rcx */ 212 movq %rdi, %rcx /* Put the page_list in %rcx */
85 xorq %rdi, %rdi 213 xorq %rdi, %rdi
86 xorq %rsi, %rsi 214 xorq %rsi, %rsi
@@ -112,36 +240,27 @@ identity_mapped:
112 movq %rcx, %rsi /* For every source page do a copy */ 240 movq %rcx, %rsi /* For every source page do a copy */
113 andq $0xfffffffffffff000, %rsi 241 andq $0xfffffffffffff000, %rsi
114 242
243 movq %rdi, %rdx
244 movq %rsi, %rax
245
246 movq %r10, %rdi
115 movq $512, %rcx 247 movq $512, %rcx
116 rep ; movsq 248 rep ; movsq
117 jmp 0b
1183:
119
120 /* To be certain of avoiding problems with self-modifying code
121 * I need to execute a serializing instruction here.
122 * So I flush the TLB by reloading %cr3 here, it's handy,
123 * and not processor dependent.
124 */
125 movq %cr3, %rax
126 movq %rax, %cr3
127 249
128 /* set all of the registers to known values */ 250 movq %rax, %rdi
129 /* leave %rsp alone */ 251 movq %rdx, %rsi
252 movq $512, %rcx
253 rep ; movsq
130 254
131 xorq %rax, %rax 255 movq %rdx, %rdi
132 xorq %rbx, %rbx 256 movq %r10, %rsi
133 xorq %rcx, %rcx 257 movq $512, %rcx
134 xorq %rdx, %rdx 258 rep ; movsq
135 xorq %rsi, %rsi
136 xorq %rdi, %rdi
137 xorq %rbp, %rbp
138 xorq %r8, %r8
139 xorq %r9, %r9
140 xorq %r10, %r9
141 xorq %r11, %r11
142 xorq %r12, %r12
143 xorq %r13, %r13
144 xorq %r14, %r14
145 xorq %r15, %r15
146 259
260 lea PAGE_SIZE(%rax), %rsi
261 jmp 0b
2623:
147 ret 263 ret
264
265 .globl kexec_control_code_size
266.set kexec_control_code_size, . - relocate_kernel
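
The DATA() offsets carve a save area out of the control page, starting at KEXEC_CONTROL_CODE_MAX_SIZE. Seen from C, the layout corresponds to something like (a sketch; the assembly addresses these fields only by raw offsets):

struct kexec_jump_save {		/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE */
	u64 rsp;			/* DATA(0x00) */
	u64 cr0;			/* DATA(0x08) */
	u64 cr3;			/* DATA(0x10) */
	u64 cr4;			/* DATA(0x18) */
	u64 pa_table_page;		/* DATA(0x20) */
	u64 pa_swap_page;		/* DATA(0x28) */
	u64 pa_backup_pages_map;	/* DATA(0x30) */
};
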
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 4c54bc0d8ff3..f28c56e6bf94 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -202,7 +202,9 @@ struct ist_info ist_info;
202#endif 202#endif
203 203
204#else 204#else
205struct cpuinfo_x86 boot_cpu_data __read_mostly; 205struct cpuinfo_x86 boot_cpu_data __read_mostly = {
206 .x86_phys_bits = MAX_PHYSMEM_BITS,
207};
206EXPORT_SYMBOL(boot_cpu_data); 208EXPORT_SYMBOL(boot_cpu_data);
207#endif 209#endif
208 210
@@ -770,6 +772,9 @@ void __init setup_arch(char **cmdline_p)
770 772
771 finish_e820_parsing(); 773 finish_e820_parsing();
772 774
775 if (efi_enabled)
776 efi_init();
777
773 dmi_scan_machine(); 778 dmi_scan_machine();
774 779
775 dmi_check_system(bad_bios_dmi_table); 780 dmi_check_system(bad_bios_dmi_table);
@@ -789,8 +794,6 @@ void __init setup_arch(char **cmdline_p)
789 insert_resource(&iomem_resource, &data_resource); 794 insert_resource(&iomem_resource, &data_resource);
790 insert_resource(&iomem_resource, &bss_resource); 795 insert_resource(&iomem_resource, &bss_resource);
791 796
792 if (efi_enabled)
793 efi_init();
794 797
795#ifdef CONFIG_X86_32 798#ifdef CONFIG_X86_32
796 if (ppro_with_ram_bug()) { 799 if (ppro_with_ram_bug()) {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d992e6cff730..efa615f2bf43 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -7,6 +7,7 @@
7#include <linux/crash_dump.h> 7#include <linux/crash_dump.h>
8#include <linux/smp.h> 8#include <linux/smp.h>
9#include <linux/topology.h> 9#include <linux/topology.h>
10#include <linux/pfn.h>
10#include <asm/sections.h> 11#include <asm/sections.h>
11#include <asm/processor.h> 12#include <asm/processor.h>
12#include <asm/setup.h> 13#include <asm/setup.h>
@@ -41,6 +42,352 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
41}; 42};
42EXPORT_SYMBOL(__per_cpu_offset); 43EXPORT_SYMBOL(__per_cpu_offset);
43 44
45/*
46 * On x86_64 symbols referenced from code should be reachable using
47 * 32bit relocations. Reserve space for static percpu variables in
48 * modules so that they are always served from the first chunk which
49 * is located at the percpu segment base. On x86_32, anything can
50 * address anywhere. No need to reserve space in the first chunk.
51 */
52#ifdef CONFIG_X86_64
53#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE
54#else
55#define PERCPU_FIRST_CHUNK_RESERVE 0
56#endif
57
58/**
59 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
60 *
61 * If NUMA is not configured or there is only one NUMA node available,
62 * there is no reason to consider NUMA. This function determines
63 * whether percpu allocation should consider NUMA or not.
64 *
65 * RETURNS:
66 * true if NUMA should be considered; otherwise, false.
67 */
68static bool __init pcpu_need_numa(void)
69{
70#ifdef CONFIG_NEED_MULTIPLE_NODES
71 pg_data_t *last = NULL;
72 unsigned int cpu;
73
74 for_each_possible_cpu(cpu) {
75 int node = early_cpu_to_node(cpu);
76
77 if (node_online(node) && NODE_DATA(node) &&
78 last && last != NODE_DATA(node))
79 return true;
80
81 last = NODE_DATA(node);
82 }
83#endif
84 return false;
85}
86
87/**
88 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
89 * @cpu: cpu to allocate for
90 * @size: size allocation in bytes
91 * @align: alignment
92 *
93 * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
94 * does the right thing for NUMA regardless of the current
95 * configuration.
96 *
97 * RETURNS:
98 * Pointer to the allocated area on success, NULL on failure.
99 */
100static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
101 unsigned long align)
102{
103 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
104#ifdef CONFIG_NEED_MULTIPLE_NODES
105 int node = early_cpu_to_node(cpu);
106 void *ptr;
107
108 if (!node_online(node) || !NODE_DATA(node)) {
109 ptr = __alloc_bootmem_nopanic(size, align, goal);
110 pr_info("cpu %d has no node %d or node-local memory\n",
111 cpu, node);
112 pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
113 cpu, size, __pa(ptr));
114 } else {
115 ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
116 size, align, goal);
117 pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
118 "%016lx\n", cpu, size, node, __pa(ptr));
119 }
120 return ptr;
121#else
122 return __alloc_bootmem_nopanic(size, align, goal);
123#endif
124}
125
126/*
127 * Remap allocator
128 *
129 * This allocator uses a PMD page as its unit. A PMD page is allocated
130 * for each cpu and remapped into the vmalloc area with a PMD mapping.
131 * As a PMD page is quite large, only part of it is used for the first
132 * chunk. The unused part is returned to the bootmem allocator.
133 *
134 * So, the PMD pages are mapped twice - once in the physical mapping
135 * and once in the vmalloc area for the first percpu chunk. The double
136 * mapping adds one more PMD TLB entry of pressure but is still much
137 * better than using only 4k mappings while remaining NUMA friendly.
138 */
139#ifdef CONFIG_NEED_MULTIPLE_NODES
140static size_t pcpur_size __initdata;
141static void **pcpur_ptrs __initdata;
142
143static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
144{
145 size_t off = (size_t)pageno << PAGE_SHIFT;
146
147 if (off >= pcpur_size)
148 return NULL;
149
150 return virt_to_page(pcpur_ptrs[cpu] + off);
151}
152
153static ssize_t __init setup_pcpu_remap(size_t static_size)
154{
155 static struct vm_struct vm;
156 pg_data_t *last;
157 size_t ptrs_size, dyn_size;
158 unsigned int cpu;
159 ssize_t ret;
160
161 /*
162 * If large page isn't supported, there's no benefit in doing
163 * this. Also, on non-NUMA, embedding is better.
164 */
165 if (!cpu_has_pse || pcpu_need_numa())
166 return -EINVAL;
167
168 last = NULL;
169 for_each_possible_cpu(cpu) {
170 int node = early_cpu_to_node(cpu);
171
172 if (node_online(node) && NODE_DATA(node) &&
173 last && last != NODE_DATA(node))
174 goto proceed;
175
176 last = NODE_DATA(node);
177 }
178 return -EINVAL;
179
180proceed:
181 /*
182 * Currently supports only a single page. Supporting multiple
183 * pages won't be too difficult if it ever becomes necessary.
184 */
185 pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
186 PERCPU_DYNAMIC_RESERVE);
187 if (pcpur_size > PMD_SIZE) {
188 pr_warning("PERCPU: static data is larger than large page, "
189 "can't use large page\n");
190 return -EINVAL;
191 }
192 dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
193
194 /* allocate pointer array and alloc large pages */
195 ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
196 pcpur_ptrs = alloc_bootmem(ptrs_size);
197
198 for_each_possible_cpu(cpu) {
199 pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
200 if (!pcpur_ptrs[cpu])
201 goto enomem;
202
203 /*
204 * Only use pcpur_size bytes and give back the rest.
205 *
206 * Ingo: The 2MB up-rounding bootmem is needed to make
207 * sure the partial 2MB page is still fully RAM - it's
208 * not well-specified to have a PAT-incompatible area
209 * (unmapped RAM, device memory, etc.) in that hole.
210 */
211 free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
212 PMD_SIZE - pcpur_size);
213
214 memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
215 }
216
217 /* allocate address and map */
218 vm.flags = VM_ALLOC;
219 vm.size = num_possible_cpus() * PMD_SIZE;
220 vm_area_register_early(&vm, PMD_SIZE);
221
222 for_each_possible_cpu(cpu) {
223 pmd_t *pmd;
224
225 pmd = populate_extra_pmd((unsigned long)vm.addr
226 + cpu * PMD_SIZE);
227 set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
228 PAGE_KERNEL_LARGE));
229 }
230
231 /* we're ready, commit */
232 pr_info("PERCPU: Remapped at %p with large pages, static data "
233 "%zu bytes\n", vm.addr, static_size);
234
235 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
236 PERCPU_FIRST_CHUNK_RESERVE,
237 PMD_SIZE, dyn_size, vm.addr, NULL);
238 goto out_free_ar;
239
240enomem:
241 for_each_possible_cpu(cpu)
242 if (pcpur_ptrs[cpu])
243 free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
244 ret = -ENOMEM;
245out_free_ar:
246 free_bootmem(__pa(pcpur_ptrs), ptrs_size);
247 return ret;
248}
249#else
250static ssize_t __init setup_pcpu_remap(size_t static_size)
251{
252 return -EINVAL;
253}
254#endif
255
256/*
257 * Embedding allocator
258 *
259 * The first chunk is sized to just contain the static area plus
260 * module and dynamic reserves, allocated as a contiguous area with
261 * the bootmem allocator, and used as-is without being mapped into
262 * the vmalloc area. This lets the first chunk piggyback on the
263 * linear physical PMD mapping and adds no extra pressure on the
264 * TLB. Note that if the needed size is smaller than the minimum
265 * unit size, the leftover is returned to the bootmem allocator.
266 */
267static void *pcpue_ptr __initdata;
268static size_t pcpue_size __initdata;
269static size_t pcpue_unit_size __initdata;
270
271static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
272{
273 size_t off = (size_t)pageno << PAGE_SHIFT;
274
275 if (off >= pcpue_size)
276 return NULL;
277
278 return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
279}
280
281static ssize_t __init setup_pcpu_embed(size_t static_size)
282{
283 unsigned int cpu;
284 size_t dyn_size;
285
286 /*
287 * If large page isn't supported, there's no benefit in doing
288 * this. Also, embedding allocation doesn't play well with
289 * NUMA.
290 */
291 if (!cpu_has_pse || pcpu_need_numa())
292 return -EINVAL;
293
294 /* allocate and copy */
295 pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
296 PERCPU_DYNAMIC_RESERVE);
297 pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
298 dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
299
300 pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
301 PAGE_SIZE);
302 if (!pcpue_ptr)
303 return -ENOMEM;
304
305 for_each_possible_cpu(cpu) {
306 void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
307
308 free_bootmem(__pa(ptr + pcpue_size),
309 pcpue_unit_size - pcpue_size);
310 memcpy(ptr, __per_cpu_load, static_size);
311 }
312
313 /* we're ready, commit */
314 pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
315 pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
316
317 return pcpu_setup_first_chunk(pcpue_get_page, static_size,
318 PERCPU_FIRST_CHUNK_RESERVE,
319 pcpue_unit_size, dyn_size,
320 pcpue_ptr, NULL);
321}
322
323/*
324 * 4k page allocator
325 *
326 * This is the basic allocator. Static percpu area is allocated
327 * page-by-page and most of initialization is done by the generic
328 * setup function.
329 */
330static struct page **pcpu4k_pages __initdata;
331static int pcpu4k_nr_static_pages __initdata;
332
333static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
334{
335 if (pageno < pcpu4k_nr_static_pages)
336 return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
337 return NULL;
338}
339
340static void __init pcpu4k_populate_pte(unsigned long addr)
341{
342 populate_extra_pte(addr);
343}
344
345static ssize_t __init setup_pcpu_4k(size_t static_size)
346{
347 size_t pages_size;
348 unsigned int cpu;
349 int i, j;
350 ssize_t ret;
351
352 pcpu4k_nr_static_pages = PFN_UP(static_size);
353
354 /* unaligned allocations can't be freed, round up to page size */
355 pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
356 * sizeof(pcpu4k_pages[0]));
357 pcpu4k_pages = alloc_bootmem(pages_size);
358
359 /* allocate and copy */
360 j = 0;
361 for_each_possible_cpu(cpu)
362 for (i = 0; i < pcpu4k_nr_static_pages; i++) {
363 void *ptr;
364
365 ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
366 if (!ptr)
367 goto enomem;
368
369 memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
370 pcpu4k_pages[j++] = virt_to_page(ptr);
371 }
372
373 /* we're ready, commit */
374 pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
375 pcpu4k_nr_static_pages, static_size);
376
377 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
378 PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
379 pcpu4k_populate_pte);
380 goto out_free_ar;
381
382enomem:
383 while (--j >= 0)
384 free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
385 ret = -ENOMEM;
386out_free_ar:
387 free_bootmem(__pa(pcpu4k_pages), pages_size);
388 return ret;
389}
390
44static inline void setup_percpu_segment(int cpu) 391static inline void setup_percpu_segment(int cpu)
45{ 392{
46#ifdef CONFIG_X86_32 393#ifdef CONFIG_X86_32
@@ -61,38 +408,35 @@ static inline void setup_percpu_segment(int cpu)
61 */ 408 */
62void __init setup_per_cpu_areas(void) 409void __init setup_per_cpu_areas(void)
63{ 410{
64 ssize_t size; 411 size_t static_size = __per_cpu_end - __per_cpu_start;
65 char *ptr; 412 unsigned int cpu;
66 int cpu; 413 unsigned long delta;
67 414 size_t pcpu_unit_size;
68 /* Copy section for each CPU (we discard the original) */ 415 ssize_t ret;
69 size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
70 416
71 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 417 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
72 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 418 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
73 419
74 pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); 420 /*
421 * Allocate percpu area. If PSE is supported, try to make use
422 * of large page mappings. Please read comments on top of
423 * each allocator for details.
424 */
425 ret = setup_pcpu_remap(static_size);
426 if (ret < 0)
427 ret = setup_pcpu_embed(static_size);
428 if (ret < 0)
429 ret = setup_pcpu_4k(static_size);
430 if (ret < 0)
431 panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
432 static_size, ret);
75 433
76 for_each_possible_cpu(cpu) { 434 pcpu_unit_size = ret;
77#ifndef CONFIG_NEED_MULTIPLE_NODES
78 ptr = alloc_bootmem_pages(size);
79#else
80 int node = early_cpu_to_node(cpu);
81 if (!node_online(node) || !NODE_DATA(node)) {
82 ptr = alloc_bootmem_pages(size);
83 pr_info("cpu %d has no node %d or node-local memory\n",
84 cpu, node);
85 pr_debug("per cpu data for cpu%d at %016lx\n",
86 cpu, __pa(ptr));
87 } else {
88 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
89 pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
90 cpu, node, __pa(ptr));
91 }
92#endif
93 435
94 memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); 436 /* alrighty, percpu areas up and running */
95 per_cpu_offset(cpu) = ptr - __per_cpu_start; 437 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
438 for_each_possible_cpu(cpu) {
439 per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
96 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); 440 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
97 per_cpu(cpu_number, cpu) = cpu; 441 per_cpu(cpu_number, cpu) = cpu;
98 setup_percpu_segment(cpu); 442 setup_percpu_segment(cpu);
@@ -125,8 +469,6 @@ void __init setup_per_cpu_areas(void)
125 */ 469 */
126 if (cpu == boot_cpu_id) 470 if (cpu == boot_cpu_id)
127 switch_to_new_gdt(cpu); 471 switch_to_new_gdt(cpu);
128
129 DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
130 } 472 }
131 473
132 /* indicate the early static arrays will soon be gone */ 474 /* indicate the early static arrays will soon be gone */
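
setup_per_cpu_areas() now tries the three first-chunk allocators in decreasing order of ambition and panics only when even the page-by-page path fails; on success each allocator returns the unit size. The control flow reduces to (a distillation of the hunk above, not new code):

	ssize_t ret;

	ret = setup_pcpu_remap(static_size);		/* 2MB pages, NUMA-aware */
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);	/* rides the linear mapping */
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);	/* always-possible fallback */
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;
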
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 249334f5080a..ef7d10170c30 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -114,10 +114,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
114 114
115atomic_t init_deasserted; 115atomic_t init_deasserted;
116 116
117
118/* Set if we find a B stepping CPU */
119static int __cpuinitdata smp_b_stepping;
120
121#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 117#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
122 118
123/* which logical CPUs are on which nodes */ 119/* which logical CPUs are on which nodes */
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
271 cpumask_set_cpu(cpuid, cpu_callin_mask); 267 cpumask_set_cpu(cpuid, cpu_callin_mask);
272} 268}
273 269
274static int __cpuinitdata unsafe_smp;
275
276/* 270/*
277 * Activate a secondary processor. 271 * Activate a secondary processor.
278 */ 272 */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
340 cpu_idle(); 334 cpu_idle();
341} 335}
342 336
343static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
344{
345 /*
346 * Mask B, Pentium, but not Pentium MMX
347 */
348 if (c->x86_vendor == X86_VENDOR_INTEL &&
349 c->x86 == 5 &&
350 c->x86_mask >= 1 && c->x86_mask <= 4 &&
351 c->x86_model <= 3)
352 /*
353 * Remember we have B step Pentia with bugs
354 */
355 smp_b_stepping = 1;
356
357 /*
358 * Certain Athlons might work (for various values of 'work') in SMP
359 * but they are not certified as MP capable.
360 */
361 if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
362
363 if (num_possible_cpus() == 1)
364 goto valid_k7;
365
366 /* Athlon 660/661 is valid. */
367 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
368 (c->x86_mask == 1)))
369 goto valid_k7;
370
371 /* Duron 670 is valid */
372 if ((c->x86_model == 7) && (c->x86_mask == 0))
373 goto valid_k7;
374
375 /*
376 * Athlon 662, Duron 671, and Athlon >model 7 have capability
377 * bit. It's worth noting that the A5 stepping (662) of some
378 * Athlon XP's have the MP bit set.
379 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
380 * more.
381 */
382 if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
383 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
384 (c->x86_model > 7))
385 if (cpu_has_mp)
386 goto valid_k7;
387
388 /* If we get here, not a certified SMP capable AMD system. */
389 unsafe_smp = 1;
390 }
391
392valid_k7:
393 ;
394}
395
396static void __cpuinit smp_checks(void)
397{
398 if (smp_b_stepping)
399 printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
400 "with B stepping processors.\n");
401
402 /*
403 * Don't taint if we are running SMP kernel on a single non-MP
404 * approved Athlon
405 */
406 if (unsafe_smp && num_online_cpus() > 1) {
407 printk(KERN_INFO "WARNING: This combination of AMD"
408 "processors is not suitable for SMP.\n");
409 add_taint(TAINT_UNSAFE_SMP);
410 }
411}
412
413/* 337/*
414 * The bootstrap kernel entry code has set these up. Save them for 338 * The bootstrap kernel entry code has set these up. Save them for
415 * a given CPU 339 * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
423 c->cpu_index = id; 347 c->cpu_index = id;
424 if (id != 0) 348 if (id != 0)
425 identify_secondary_cpu(c); 349 identify_secondary_cpu(c);
426 smp_apply_quirks(c);
427} 350}
428 351
429 352
@@ -1193,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1193 pr_debug("Boot done.\n"); 1116 pr_debug("Boot done.\n");
1194 1117
1195 impress_friends(); 1118 impress_friends();
1196 smp_checks();
1197#ifdef CONFIG_X86_IO_APIC 1119#ifdef CONFIG_X86_IO_APIC
1198 setup_ioapic_dest(); 1120 setup_ioapic_dest();
1199#endif 1121#endif
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f04549afcfe9..d038b9c45cf8 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -314,8 +314,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
314 int locals = 0; 314 int locals = 0;
315 struct bau_desc *bau_desc; 315 struct bau_desc *bau_desc;
316 316
317 WARN_ON(!in_atomic());
318
319 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 317 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
320 318
321 uv_cpu = uv_blade_processor_id(); 319 uv_cpu = uv_blade_processor_id();
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
new file mode 100644
index 000000000000..2ffb6c53326e
--- /dev/null
+++ b/arch/x86/kernel/uv_time.c
@@ -0,0 +1,393 @@
1/*
2 * SGI RTC clock/timer routines.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
19 * Copyright (c) Dimitri Sivanich
20 */
21#include <linux/clockchips.h>
22
23#include <asm/uv/uv_mmrs.h>
24#include <asm/uv/uv_hub.h>
25#include <asm/uv/bios.h>
26#include <asm/uv/uv.h>
27#include <asm/apic.h>
28#include <asm/cpu.h>
29
30#define RTC_NAME "sgi_rtc"
31
32static cycle_t uv_read_rtc(void);
33static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
34static void uv_rtc_timer_setup(enum clock_event_mode,
35 struct clock_event_device *);
36
37static struct clocksource clocksource_uv = {
38 .name = RTC_NAME,
39 .rating = 400,
40 .read = uv_read_rtc,
41 .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
42 .shift = 10,
43 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
44};
45
46static struct clock_event_device clock_event_device_uv = {
47 .name = RTC_NAME,
48 .features = CLOCK_EVT_FEAT_ONESHOT,
49 .shift = 20,
50 .rating = 400,
51 .irq = -1,
52 .set_next_event = uv_rtc_next_event,
53 .set_mode = uv_rtc_timer_setup,
54 .event_handler = NULL,
55};
56
57static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
58
59/* There is one of these allocated per node */
60struct uv_rtc_timer_head {
61 spinlock_t lock;
62 /* next cpu waiting for timer, local node relative: */
63 int next_cpu;
64 /* number of cpus on this node: */
65 int ncpus;
66 struct {
67 int lcpu; /* systemwide logical cpu number */
68 u64 expires; /* next timer expiration for this cpu */
69 } cpu[1];
70};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;

static int uv_rtc_enable;

/*
 * Hardware interface routines
 */

/* Send IPIs to another node */
static void uv_rtc_send_IPI(int cpu)
{
	unsigned long apicid, val;
	int pnode;

	apicid = cpu_physical_id(cpu);
	pnode = uv_apicid_to_pnode(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
		UVH_EVENT_OCCURRED0_RTC1_MASK;
}

/* Setup interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	int pnode = uv_cpu_to_pnode(cpu);

	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
		UVH_RTC1_INT_CONFIG_M_MASK);
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
		UVH_EVENT_OCCURRED0_RTC1_MASK);

	val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

	return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
	int bid;

	for_each_possible_blade(bid) {
		kfree(blade_info[bid]);
	}
	kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;

	blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;
	memset(blade_info, 0, uv_possible_blades * sizeof(void *));

	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		struct uv_rtc_timer_head *head = blade_info[bid];

		if (!head) {
			head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
				(uv_blade_nr_possible_cpus(bid) *
					2 * sizeof(u64)),
				GFP_KERNEL, nid);
			if (!head) {
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			blade_info[bid] = head;
		}

		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}

	return 0;
}

/* Find and set the next expiring timer.  */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			UVH_RTC1_INT_CONFIG_M_MASK);
	}
}

/*
 * Set expiration time for current cpu.
 *
 * Returns 1 if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;

	spin_lock_irqsave(&head->lock, flags);

	next_cpu = head->next_cpu;
	*t = expires;
	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return 1;
		}
	}

	spin_unlock_irqrestore(&head->lock, flags);
	return 0;
}
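
/*
 * Editorial sketch (not in the original source): each node has a single
 * RTC comparator, so the per-cpu oneshot timers on a blade are
 * multiplexed onto it under head->lock, roughly:
 *
 *	set_timer(cpu, t):  cpu[bcpu].expires = t;
 *	                    if t is now the earliest deadline on the
 *	                    blade, reprogram the comparator for t;
 *	unset_timer(cpu):   cpu[bcpu].expires = ULLONG_MAX;
 *	                    if cpu owned the comparator, rescan and
 *	                    reprogram it for the new minimum.
 *
 * uv_setup_intr() returning non-zero means the deadline passed while it
 * was being programmed; the caller then either falls back to an IPI to
 * force the interrupt or reports the miss to the clockevents core.
 */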

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&head->lock, flags);

	if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
		rc = 1;

	*t = ULLONG_MAX;

	/* Was the hardware setup for this timer? */
	if (head->next_cpu == bcpu)
		uv_rtc_find_next_timer(head, pnode);

	spin_unlock_irqrestore(&head->lock, flags);

	return rc;
}


/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 */
static cycle_t uv_read_rtc(void)
{
	return (cycle_t)uv_read_local_mmr(UVH_RTC);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
			     struct clock_event_device *ced)
{
	int ced_cpu = cpumask_first(ced->cpumask);

	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
}

/*
 * Setup the RTC timer in oneshot mode
 */
static void uv_rtc_timer_setup(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	int ced_cpu = cpumask_first(evt->cpumask);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here yet */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		uv_rtc_unset_timer(ced_cpu);
		break;
	}
}

static void uv_rtc_interrupt(void)
{
	struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
	int cpu = smp_processor_id();

	if (!ced || !ced->event_handler)
		return;

	if (uv_rtc_unset_timer(cpu) != 1)
		return;

	ced->event_handler(ced);
}

static int __init uv_enable_rtc(char *str)
{
	uv_rtc_enable = 1;

	return 1;
}
__setup("uvrtc", uv_enable_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = &__get_cpu_var(cpu_ced);

	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
	int rc;

	if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
		return -ENODEV;

	generic_interrupt_extension = uv_rtc_interrupt;

	clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
				clocksource_uv.shift);

	rc = clocksource_register(&clocksource_uv);
	if (rc) {
		generic_interrupt_extension = NULL;
		return rc;
	}

	/* Setup and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc) {
		clocksource_unregister(&clocksource_uv);
		generic_interrupt_extension = NULL;
		return rc;
	}

	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);

	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;

	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);

	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		clocksource_unregister(&clocksource_uv);
		generic_interrupt_extension = NULL;
		uv_rtc_deallocate_timers();
	}

	return rc;
}
arch_initcall(uv_rtc_setup_clock);
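
/*
 * Editorial worked example (not in the original source), assuming a
 * hypothetical RTC rate of sn_rtc_cycles_per_second = 25 MHz:
 *
 *	clocksource_uv.mult        = (1e9 << 10) / 25e6 = 40960,
 *	so ns = (cycles * 40960) >> 10 = cycles * 40 (one tick = 40 ns);
 *
 *	clock_event_device_uv.mult = (25e6 << 20) / 1e9 ~= 26214,
 *	so cycles = (ns * 26214) >> 20 ~= ns / 40;
 *
 *	min_delta_ns = 1e9 / 25e6 = 40 ns, i.e. one RTC tick.
 *
 * The real rate is whatever the platform reports; the numbers above are
 * illustrative only.
 */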
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index fbfced6f6800..5bf54e40c6ef 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -275,3 +275,10 @@ ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
 ASSERT((per_cpu__irq_stack_union == 0),
         "irq_stack_union is not at start of per-cpu area");
 #endif
+
+#ifdef CONFIG_KEXEC
+#include <asm/kexec.h>
+
+ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+	"kexec control code size is too big")
+#endif
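
/*
 * Editorial note (not part of the patch): ASSERT() here is a linker
 * script assertion, so an oversized kexec control code region fails at
 * link time instead of overflowing its reserved area at run time.
 */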
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index f3a5305b8adf..9fe4ddaa8f6f 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -348,6 +348,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 	 * flush_tlb_user() for both user and kernel mappings unless
 	 * the Page Global Enable (PGE) feature bit is set. */
 	*dx |= 0x00002000;
+	/* We also lie, and say we're family id 5.  6 or greater
+	 * leads to a rdmsr in early_init_intel which we can't handle.
+	 * Family ID is returned as bits 8-12 in ax. */
+	*ax &= 0xFFFFF0FF;
+	*ax |= 0x00000500;
 	break;
 case 0x80000000:
 	/* Futureproof this a little: if they ask how much extended
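
/*
 * Editorial worked example (not part of the patch): CPUID leaf 1 reports
 * the family in bits 8-11 of EAX.  For a CPU returning EAX = 0x000006F2
 * (family 6), the two added lines rewrite it as family 5:
 *
 *	0x000006F2 & 0xFFFFF0FF = 0x000000F2
 *	0x000000F2 | 0x00000500 = 0x000005F2
 */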
@@ -594,19 +599,21 @@ static void __init lguest_init_IRQ(void)
 	/* Some systems map "vectors" to interrupts weirdly.  Lguest has
 	 * a straightforward 1 to 1 mapping, so force that here. */
 	__get_cpu_var(vector_irq)[vector] = i;
-	if (vector != SYSCALL_VECTOR) {
-		set_intr_gate(vector,
-			      interrupt[vector-FIRST_EXTERNAL_VECTOR]);
-		set_irq_chip_and_handler_name(i, &lguest_irq_controller,
-					      handle_level_irq,
-					      "level");
-	}
+	if (vector != SYSCALL_VECTOR)
+		set_intr_gate(vector, interrupt[i]);
 	}
 	/* This call is required to set up for 4k stacks, where we have
 	 * separate stacks for hard and soft interrupts. */
 	irq_ctx_init(smp_processor_id());
 }
 
+void lguest_setup_irq(unsigned int irq)
+{
+	irq_to_desc_alloc_cpu(irq, 0);
+	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
+				      handle_level_irq, "level");
+}
+
 /*
  * Time.
  *
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index 491e737ce547..aa0987088774 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -30,20 +30,29 @@ static void fclex(void)
 }
 
 /* Needs to be externally visible */
-void finit(void)
+void finit_task(struct task_struct *tsk)
 {
-	control_word = 0x037f;
-	partial_status = 0;
-	top = 0;	/* We don't keep top in the status word internally. */
-	fpu_tag_word = 0xffff;
+	struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
+	struct address *oaddr, *iaddr;
+	soft->cwd = 0x037f;
+	soft->swd = 0;
+	soft->ftop = 0;	/* We don't keep top in the status word internally. */
+	soft->twd = 0xffff;
 	/* The behaviour is different from that detailed in
 	   Section 15.1.6 of the Intel manual */
-	operand_address.offset = 0;
-	operand_address.selector = 0;
-	instruction_address.offset = 0;
-	instruction_address.selector = 0;
-	instruction_address.opcode = 0;
-	no_ip_update = 1;
+	oaddr = (struct address *)&soft->foo;
+	oaddr->offset = 0;
+	oaddr->selector = 0;
+	iaddr = (struct address *)&soft->fip;
+	iaddr->offset = 0;
+	iaddr->selector = 0;
+	iaddr->opcode = 0;
+	soft->no_update = 1;
+}
+
+void finit(void)
+{
+	finit_task(current);
 }
 
 /*
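
/*
 * Editorial note (not part of the patch): 0x037f is the x87 power-on
 * default control word (all exceptions masked, 64-bit precision,
 * round-to-nearest), and twd = 0xffff tags every stack register as
 * empty -- the same state FNINIT leaves behind on real hardware.
 */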
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 00f127c80b0e..d11745334a67 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 
-#ifdef CONFIG_NUMA
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
@@ -182,11 +181,3 @@ void __init set_highmem_pages_init(void)
 	}
 	totalram_pages += totalhigh_pages;
 }
-#else
-void __init set_highmem_pages_init(void)
-{
-	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-	totalram_pages += totalhigh_pages;
-}
-#endif /* CONFIG_NUMA */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ce6a722587d8..15219e0d1243 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1,8 +1,345 @@
+#include <linux/ioport.h>
 #include <linux/swap.h>
+
 #include <asm/cacheflush.h>
+#include <asm/e820.h>
+#include <asm/init.h>
 #include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/sections.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+unsigned long __initdata e820_table_start;
+unsigned long __meminitdata e820_table_end;
+unsigned long __meminitdata e820_table_top;
+
+int after_bootmem;
+
+int direct_gbpages
+#ifdef CONFIG_DIRECT_GBPAGES
+				= 1
+#endif
+;
+
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
+{
+	unsigned long puds, pmds, ptes, tables, start;
+
+	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
+	if (use_gbpages) {
+		unsigned long extra;
+
+		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+	} else
+		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+	if (use_pse) {
+		unsigned long extra;
+
+		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+		extra += PMD_SIZE;
+#endif
+		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	} else
+		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+#ifdef CONFIG_X86_32
+	/* for fixmap */
+	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
+
+	/*
+	 * RED-PEN putting page tables only on node 0 could
+	 * cause a hotspot and fill up ZONE_DMA. The page tables
+	 * need roughly 0.5KB per GB.
+	 */
+#ifdef CONFIG_X86_32
+	start = 0x7000;
+	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+					tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
+	start = 0x8000;
+	e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
+	if (e820_table_start == -1UL)
+		panic("Cannot find space for the kernel page tables");
+
+	e820_table_start >>= PAGE_SHIFT;
+	e820_table_end = e820_table_start;
+	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+
+	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+		end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+}
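
/*
 * Editorial worked example (not part of the patch): for end = 4 GiB on
 * x86_64 with 2M pages (use_pse set, no gbpages), the sizing above gives
 *
 *	puds = 4     -> roundup(4 * 8, 4096)    = 1 page,
 *	pmds = 2048  -> roundup(2048 * 8, 4096) = 4 pages,
 *	ptes = 0 for a 2M-aligned end (only an unaligned tail needs them),
 *
 * so about 20 KiB of early page-table space is reserved out of e820.
 */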
+
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+#ifdef CONFIG_X86_32
+#define NR_RANGE_MR 3
+#else /* CONFIG_X86_64 */
+#define NR_RANGE_MR 5
+#endif
+
+static int save_mr(struct map_range *mr, int nr_range,
+		   unsigned long start_pfn, unsigned long end_pfn,
+		   unsigned long page_size_mask)
+{
+	if (start_pfn < end_pfn) {
+		if (nr_range >= NR_RANGE_MR)
+			panic("run out of range for init_memory_mapping\n");
+		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
+		mr[nr_range].page_size_mask = page_size_mask;
+		nr_range++;
+	}
+
+	return nr_range;
+}
+
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+	if (direct_gbpages && cpu_has_gbpages)
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+	else
+		direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+					       unsigned long end)
+{
+	unsigned long page_size_mask = 0;
+	unsigned long start_pfn, end_pfn;
+	unsigned long ret = 0;
+	unsigned long pos;
+
+	struct map_range mr[NR_RANGE_MR];
+	int nr_range, i;
+	int use_pse, use_gbpages;
+
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+
+	if (!after_bootmem)
+		init_gbpages();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	use_pse = use_gbpages = 0;
+#else
+	use_pse = cpu_has_pse;
+	use_gbpages = direct_gbpages;
+#endif
+
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_PAE
+	set_nx();
+	if (nx_enabled)
+		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#endif
+
+	/* Enable PSE if available */
+	if (cpu_has_pse)
+		set_in_cr4(X86_CR4_PSE);
+
+	/* Enable PGE if available */
+	if (cpu_has_pge) {
+		set_in_cr4(X86_CR4_PGE);
+		__supported_pte_mask |= _PAGE_GLOBAL;
+	}
+#endif
+
+	if (use_gbpages)
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	if (use_pse)
+		page_size_mask |= 1 << PG_LEVEL_2M;
+
+	memset(mr, 0, sizeof(mr));
+	nr_range = 0;
+
+	/* head if not big page alignment ? */
+	start_pfn = start >> PAGE_SHIFT;
+	pos = start_pfn << PAGE_SHIFT;
+#ifdef CONFIG_X86_32
+	/*
+	 * Don't use a large page for the first 2/4MB of memory
+	 * because there are often fixed size MTRRs in there
+	 * and overlapping MTRRs into large pages can cause
+	 * slowdowns.
+	 */
+	if (pos == 0)
+		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
+	else
+		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+				 << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
+			<< (PMD_SHIFT - PAGE_SHIFT);
+#endif
+	if (end_pfn > (end >> PAGE_SHIFT))
+		end_pfn = end >> PAGE_SHIFT;
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}
+
+	/* big page (2M) range */
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+#ifdef CONFIG_X86_32
+	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+#endif
+
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
+
+#ifdef CONFIG_X86_64
+	/* big page (1G) range */
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask &
+				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}
+
+	/* tail is not big page (1G) alignment */
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
+#endif
+
+	/* tail is not big page (2M) alignment */
+	start_pfn = pos>>PAGE_SHIFT;
+	end_pfn = end>>PAGE_SHIFT;
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+	/* try to merge same page size and continuous */
+	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
+		unsigned long old_start;
+		if (mr[i].end != mr[i+1].start ||
+		    mr[i].page_size_mask != mr[i+1].page_size_mask)
+			continue;
+		/* move it */
+		old_start = mr[i].start;
+		memmove(&mr[i], &mr[i+1],
+			(nr_range - 1 - i) * sizeof(struct map_range));
+		mr[i--].start = old_start;
+		nr_range--;
+	}
+
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
+				mr[i].start, mr[i].end,
+			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
+	if (!after_bootmem)
+		find_early_table_space(end, use_pse, use_gbpages);
+
+#ifdef CONFIG_X86_32
+	for (i = 0; i < nr_range; i++)
+		kernel_physical_mapping_init(mr[i].start, mr[i].end,
+					     mr[i].page_size_mask);
+	ret = end;
+#else /* CONFIG_X86_64 */
+	for (i = 0; i < nr_range; i++)
+		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+						   mr[i].page_size_mask);
+#endif
+
+#ifdef CONFIG_X86_32
+	early_ioremap_page_table_range_init();
+
+	load_cr3(swapper_pg_dir);
+#endif
+
+#ifdef CONFIG_X86_64
+	if (!after_bootmem)
+		mmu_cr4_features = read_cr4();
+#endif
+	__flush_tlb_all();
+
+	if (!after_bootmem && e820_table_end > e820_table_start)
+		reserve_early(e820_table_start << PAGE_SHIFT,
+			      e820_table_end << PAGE_SHIFT, "PGTABLE");
+
+	if (!after_bootmem)
+		early_memtest(start, end);
+
+	return ret >> PAGE_SHIFT;
+}
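
/*
 * Editorial worked example (not part of the patch): mapping start = 0,
 * end = 0x81200000 (~2.02 GiB) on x86_64 with PSE and gbpages available
 * splits into
 *
 *	[0x000000000 - 0x080000000]  1G pages
 *	[0x080000000 - 0x081200000]  2M pages
 *
 * The "head" and initial 2M ranges come out empty because start is
 * already 1G aligned, and the 4k tail is empty because end is 2M
 * aligned; the merge loop then joins any adjacent ranges that ended up
 * with the same page_size_mask.
 */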
+
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ *
+ * On x86, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+	if (pagenr <= 256)
+		return 1;
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+		return 0;
+	if (!page_is_ram(pagenr))
+		return 1;
+	return 0;
+}
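
/*
 * Editorial note (not part of the patch): with 4 KiB pages,
 * pagenr <= 256 covers the first megabyte of physical memory
 * (256 * 4096 bytes), i.e. the legacy BIOS/VGA region that X and
 * dosemu expect to be able to map.
 */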
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
@@ -47,3 +384,10 @@ void free_initmem(void)
 		(unsigned long)(&__init_begin),
 		(unsigned long)(&__init_end));
 }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_init_pages("initrd memory", start, end);
+}
+#endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 0b087dcd2c18..db81e9a8556b 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -49,6 +49,7 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
@@ -58,19 +59,14 @@ unsigned long highstart_pfn, highend_pfn;
 
 static noinline int do_test_wp_bit(void);
 
-
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
-static int __initdata after_init_bootmem;
+bool __read_mostly __vmalloc_start_set = false;
 
 static __init void *alloc_low_page(void)
 {
-	unsigned long pfn = table_end++;
+	unsigned long pfn = e820_table_end++;
 	void *adr;
 
-	if (pfn >= table_top)
+	if (pfn >= e820_table_top)
 		panic("alloc_low_page: ran out of memory");
 
 	adr = __va(pfn * PAGE_SIZE);
@@ -90,7 +86,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-		if (after_init_bootmem)
+		if (after_bootmem)
 			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		else
 			pmd_table = (pmd_t *)alloc_low_page();
@@ -117,7 +113,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = NULL;
 
-		if (after_init_bootmem) {
+		if (after_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
@@ -135,6 +131,23 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+	int pgd_idx = pgd_index(vaddr);
+	int pmd_idx = pmd_index(vaddr);
+
+	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+	int pte_idx = pte_index(vaddr);
+	pmd_t *pmd;
+
+	pmd = populate_extra_pmd(vaddr);
+	return one_page_table_init(pmd) + pte_idx;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 					   unsigned long vaddr, pte_t *lastpte)
 {
@@ -151,12 +164,12 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
 	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
 	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
-		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+	    && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
 		pte_t *newpte;
 		int i;
 
-		BUG_ON(after_init_bootmem);
+		BUG_ON(after_bootmem);
 		newpte = alloc_low_page();
 		for (i = 0; i < PTRS_PER_PTE; i++)
 			set_pte(newpte + i, pte[i]);
@@ -225,11 +238,14 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-						unsigned long start_pfn,
-						unsigned long end_pfn,
-						int use_pse)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+			     unsigned long end,
+			     unsigned long page_size_mask)
 {
+	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+	unsigned long start_pfn, end_pfn;
+	pgd_t *pgd_base = swapper_pg_dir;
 	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
 	pgd_t *pgd;
@@ -238,6 +254,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	unsigned pages_2m, pages_4k;
 	int mapping_iter;
 
+	start_pfn = start >> PAGE_SHIFT;
+	end_pfn = end >> PAGE_SHIFT;
+
 	/*
 	 * First iteration will setup identity mapping using large/small pages
 	 * based on use_pse, with other attributes same as set by
@@ -352,26 +371,6 @@ repeat:
 		mapping_iter = 2;
 		goto repeat;
 	}
-}
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-	if (pagenr <= 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
 	return 0;
 }
 
@@ -528,8 +527,9 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
+void __init early_ioremap_page_table_range_init(void)
 {
+	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long vaddr, end;
 
 	/*
@@ -624,7 +624,7 @@ static int __init noexec_setup(char *str)
 }
 early_param("noexec", noexec_setup);
 
-static void __init set_nx(void)
+void __init set_nx(void)
 {
 	unsigned int v[4], l, h;
 
@@ -776,6 +776,8 @@ void __init initmem_init(unsigned long start_pfn,
 #ifdef CONFIG_FLATMEM
 	max_mapnr = num_physpages;
 #endif
+	__vmalloc_start_set = true;
+
 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
 			pages_to_mb(max_low_pfn));
 
@@ -797,176 +799,66 @@ static void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
+static unsigned long __init setup_node_bootmem(int nodeid,
+				 unsigned long start_pfn,
+				 unsigned long end_pfn,
+				 unsigned long bootmap)
+{
+	unsigned long bootmap_size;
+
+	/* don't touch min_low_pfn */
+	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
+					 bootmap >> PAGE_SHIFT,
+					 start_pfn, end_pfn);
+	printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
+		nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+	printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
+		nodeid, bootmap, bootmap + bootmap_size);
+	free_bootmem_with_active_regions(nodeid, end_pfn);
+	early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+
+	return bootmap + bootmap_size;
+}
+
 void __init setup_bootmem_allocator(void)
 {
-	int i;
+	int nodeid;
 	unsigned long bootmap_size, bootmap;
 	/*
 	 * Initialize the boot-time allocator (with low memory only):
 	 */
 	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
-				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
+	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
 				 PAGE_SIZE);
 	if (bootmap == -1L)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-	/* don't touch min_low_pfn */
-	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
-					 min_low_pfn, max_low_pfn);
 	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
 		 max_pfn_mapped<<PAGE_SHIFT);
-	printk(KERN_INFO "  low ram: %08lx - %08lx\n",
-		 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
-	printk(KERN_INFO "  bootmap %08lx - %08lx\n",
-		 bootmap, bootmap + bootmap_size);
-	for_each_online_node(i)
-		free_bootmem_with_active_regions(i, max_low_pfn);
-	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
+	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
 
-	after_init_bootmem = 1;
-}
-
-static void __init find_early_table_space(unsigned long end, int use_pse)
-{
-	unsigned long puds, pmds, ptes, tables, start;
-
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-	if (use_pse) {
-		unsigned long extra;
-
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-		extra += PMD_SIZE;
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-	/* for fixmap */
-	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-	start = 0x7000;
-	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-					tables, PAGE_SIZE);
-	if (table_start == -1UL)
-		panic("Cannot find space for the kernel page tables");
-
-	table_start >>= PAGE_SHIFT;
-	table_end = table_start;
-	table_top = table_start + (tables>>PAGE_SHIFT);
-
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT,
-		(table_start << PAGE_SHIFT) + tables);
-}
-
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-						unsigned long end)
-{
-	pgd_t *pgd_base = swapper_pg_dir;
-	unsigned long start_pfn, end_pfn;
-	unsigned long big_page_start;
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-	 * This will simplify cpa(), which otherwise needs to support splitting
-	 * large pages into small in interrupt context, etc.
-	 */
-	int use_pse = 0;
-#else
-	int use_pse = cpu_has_pse;
-#endif
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 */
-	if (!after_init_bootmem)
-		find_early_table_space(end, use_pse);
-
-#ifdef CONFIG_X86_PAE
-	set_nx();
-	if (nx_enabled)
-		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
-
-	/* Enable PSE if available */
-	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
-
-	/* Enable PGE if available */
-	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
-		__supported_pte_mask |= _PAGE_GLOBAL;
-	}
-
-	/*
-	 * Don't use a large page for the first 2/4MB of memory
-	 * because there are often fixed size MTRRs in there
-	 * and overlapping MTRRs into large pages can cause
-	 * slowdowns.
-	 */
-	big_page_start = PMD_SIZE;
-
-	if (start < big_page_start) {
-		start_pfn = start >> PAGE_SHIFT;
-		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
-	} else {
-		/* head is not big page alignment ? */
-		start_pfn = start >> PAGE_SHIFT;
-		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-				 << (PMD_SHIFT - PAGE_SHIFT);
-	}
-	if (start_pfn < end_pfn)
-		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
-
-	/* big page range */
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < (big_page_start >> PAGE_SHIFT))
-		start_pfn = big_page_start >> PAGE_SHIFT;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn)
-		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-					     use_pse);
-
-	/* tail is not big page alignment ? */
-	start_pfn = end_pfn;
-	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
-		end_pfn = end >> PAGE_SHIFT;
-		if (start_pfn < end_pfn)
-			kernel_physical_mapping_init(pgd_base, start_pfn,
-						     end_pfn, 0);
+	for_each_online_node(nodeid) {
+		unsigned long start_pfn, end_pfn;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+		start_pfn = node_start_pfn[nodeid];
+		end_pfn = node_end_pfn[nodeid];
+		if (start_pfn > max_low_pfn)
+			continue;
+		if (end_pfn > max_low_pfn)
+			end_pfn = max_low_pfn;
+#else
+		start_pfn = 0;
+		end_pfn = max_low_pfn;
+#endif
+		bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
+					     bootmap);
 	}
 
-	early_ioremap_page_table_range_init(pgd_base);
-
-	load_cr3(swapper_pg_dir);
-
-	__flush_tlb_all();
-
-	if (!after_init_bootmem)
-		reserve_early(table_start << PAGE_SHIFT,
-				table_end << PAGE_SHIFT, "PGTABLE");
-
-	if (!after_init_bootmem)
-		early_memtest(start, end);
-
-	return end >> PAGE_SHIFT;
+	after_bootmem = 1;
 }
 
-
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.
@@ -1200,13 +1092,6 @@ void mark_rodata_ro(void)
 }
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
 				   int flags)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 724e537432e7..54efa57d1c03 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -48,6 +48,7 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -61,12 +62,6 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-				= 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
 	direct_gbpages = 0;
@@ -87,12 +82,10 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-static int do_not_nx __cpuinitdata;
+static int disable_nx __cpuinitdata;
 
 /*
  * noexec=on|off
@@ -107,9 +100,9 @@ static int __init nonx_setup(char *str)
 		return -EINVAL;
 	if (!strncmp(str, "on", 2)) {
 		__supported_pte_mask |= _PAGE_NX;
-		do_not_nx = 0;
+		disable_nx = 0;
 	} else if (!strncmp(str, "off", 3)) {
-		do_not_nx = 1;
+		disable_nx = 1;
 		__supported_pte_mask &= ~_PAGE_NX;
 	}
 	return 0;
@@ -121,7 +114,7 @@ void __cpuinit check_efer(void)
 	unsigned long efer;
 
 	rdmsrl(MSR_EFER, efer);
-	if (!(efer & EFER_NX) || do_not_nx)
+	if (!(efer & EFER_NX) || disable_nx)
 		__supported_pte_mask &= ~_PAGE_NX;
 }
 
@@ -168,34 +161,51 @@ static __ref void *spp_getpage(void)
 	return ptr;
 }
 
-void
-set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 {
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	if (pgd_none(*pgd)) {
+		pud_t *pud = (pud_t *)spp_getpage();
+		pgd_populate(&init_mm, pgd, pud);
+		if (pud != pud_offset(pgd, 0))
+			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+			       pud, pud_offset(pgd, 0));
+	}
+	return pud_offset(pgd, vaddr);
+}
 
-	pud = pud_page + pud_index(vaddr);
+static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+{
 	if (pud_none(*pud)) {
-		pmd = (pmd_t *) spp_getpage();
+		pmd_t *pmd = (pmd_t *) spp_getpage();
 		pud_populate(&init_mm, pud, pmd);
-		if (pmd != pmd_offset(pud, 0)) {
+		if (pmd != pmd_offset(pud, 0))
 			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 			       pmd, pmd_offset(pud, 0));
-			return;
-		}
 	}
-	pmd = pmd_offset(pud, vaddr);
+	return pmd_offset(pud, vaddr);
+}
+
+static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
+{
 	if (pmd_none(*pmd)) {
-		pte = (pte_t *) spp_getpage();
+		pte_t *pte = (pte_t *) spp_getpage();
 		pmd_populate_kernel(&init_mm, pmd, pte);
-		if (pte != pte_offset_kernel(pmd, 0)) {
+		if (pte != pte_offset_kernel(pmd, 0))
 			printk(KERN_ERR "PAGETABLE BUG #02!\n");
-			return;
-		}
 	}
+	return pte_offset_kernel(pmd, vaddr);
+}
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pud = pud_page + pud_index(vaddr);
+	pmd = fill_pmd(pud, vaddr);
+	pte = fill_pte(pmd, vaddr);
 
-	pte = pte_offset_kernel(pmd, vaddr);
 	set_pte(pte, new_pte);
 
 	/*
@@ -205,8 +215,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 	__flush_tlb_one(vaddr);
 }
 
-void
-set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
 	pgd_t *pgd;
 	pud_t *pud_page;
@@ -223,6 +232,24 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(vaddr);
+	pud = fill_pud(pgd, vaddr);
+	return fill_pmd(pud, vaddr);
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+	pmd_t *pmd;
+
+	pmd = populate_extra_pmd(vaddr);
+	return fill_pte(pmd, vaddr);
+}
+
 /*
  * Create large page table mappings for a range of physical addresses.
  */
@@ -291,13 +318,9 @@ void __init cleanup_highmap(void)
 	}
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-	unsigned long pfn = table_end++;
+	unsigned long pfn = e820_table_end++;
 	void *adr;
 
 	if (after_bootmem) {
@@ -307,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
 		return adr;
 	}
 
-	if (pfn >= table_top)
+	if (pfn >= e820_table_top)
 		panic("alloc_low_page: ran out of memory");
 
 	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -547,58 +570,10 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
-{
-	unsigned long puds, pmds, ptes, tables, start;
-
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-	if (use_gbpages) {
-		unsigned long extra;
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-	if (use_pse) {
-		unsigned long extra;
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-	start = 0x8000;
-	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-	if (table_start == -1UL)
-		panic("Cannot find space for the kernel page tables");
-
-	table_start >>= PAGE_SHIFT;
-	table_end = table_start;
-	table_top = table_start + (tables >> PAGE_SHIFT);
-
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-
-static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
-						unsigned long end,
-						unsigned long page_size_mask)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+			     unsigned long end,
+			     unsigned long page_size_mask)
 {
 
 	unsigned long next, last_map_addr = end;
@@ -635,176 +610,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
 	return last_map_addr;
 }
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-#define NR_RANGE_MR 5
-
-static int save_mr(struct map_range *mr, int nr_range,
-		   unsigned long start_pfn, unsigned long end_pfn,
-		   unsigned long page_size_mask)
-{
-
-	if (start_pfn < end_pfn) {
-		if (nr_range >= NR_RANGE_MR)
-			panic("run out of range for init_memory_mapping\n");
-		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
-		mr[nr_range].page_size_mask = page_size_mask;
-		nr_range++;
-	}
-
-	return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-						unsigned long end)
-{
-	unsigned long last_map_addr = 0;
-	unsigned long page_size_mask = 0;
-	unsigned long start_pfn, end_pfn;
-	unsigned long pos;
-
-	struct map_range mr[NR_RANGE_MR];
-	int nr_range, i;
-	int use_pse, use_gbpages;
-
-	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-	 * This will simplify cpa(), which otherwise needs to support splitting
-	 * large pages into small in interrupt context, etc.
-	 */
-	use_pse = use_gbpages = 0;
-#else
-	use_pse = cpu_has_pse;
-	use_gbpages = direct_gbpages;
-#endif
-
-	if (use_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (use_pse)
-		page_size_mask |= 1 << PG_LEVEL_2M;
-
-	memset(mr, 0, sizeof(mr));
-	nr_range = 0;
-
-	/* head if not big page alignment ?*/
-	start_pfn = start >> PAGE_SHIFT;
-	pos = start_pfn << PAGE_SHIFT;
-	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-			<< (PMD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > (end >> PAGE_SHIFT))
-		end_pfn = end >> PAGE_SHIFT;
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* big page (2M) range*/
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* big page (1G) range */
-	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask &
-				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* tail is not big page (1G) alignment */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* tail is not big page (2M) alignment */
-	start_pfn = pos>>PAGE_SHIFT;
-	end_pfn = end>>PAGE_SHIFT;
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-	/* try to merge same page size and continuous */
-	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-		unsigned long old_start;
-		if (mr[i].end != mr[i+1].start ||
-		    mr[i].page_size_mask != mr[i+1].page_size_mask)
-			continue;
-		/* move it */
-		old_start = mr[i].start;
-		memmove(&mr[i], &mr[i+1],
-			(nr_range - 1 - i) * sizeof (struct map_range));
-		mr[i--].start = old_start;
-		nr_range--;
-	}
-
-	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-				mr[i].start, mr[i].end,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
-
-	for (i = 0; i < nr_range; i++)
-		last_map_addr = kernel_physical_mapping_init(
-					mr[i].start, mr[i].end,
-					mr[i].page_size_mask);
-
-	if (!after_bootmem)
-		mmu_cr4_features = read_cr4();
-	__flush_tlb_all();
-
-	if (!after_bootmem && table_end > table_start)
-		reserve_early(table_start << PAGE_SHIFT,
-			      table_end << PAGE_SHIFT, "PGTABLE");
-
-	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
-			 last_map_addr, end);
-
-	if (!after_bootmem)
-		early_memtest(start, end);
-
-	return last_map_addr >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -876,28 +681,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-	if (pagenr <= 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 			 kcore_modules, kcore_vsyscall;
 
@@ -985,13 +768,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
 				   int flags)
 {
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 433f7bd4648a..aca924a30ee6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -38,8 +38,7 @@ unsigned long __phys_addr(unsigned long x)
38 } else { 38 } else {
39 VIRTUAL_BUG_ON(x < PAGE_OFFSET); 39 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
40 x -= PAGE_OFFSET; 40 x -= PAGE_OFFSET;
41 VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM : 41 VIRTUAL_BUG_ON(!phys_addr_valid(x));
42 !phys_addr_valid(x));
43 } 42 }
44 return x; 43 return x;
45} 44}
@@ -56,10 +55,8 @@ bool __virt_addr_valid(unsigned long x)
56 if (x < PAGE_OFFSET) 55 if (x < PAGE_OFFSET)
57 return false; 56 return false;
58 x -= PAGE_OFFSET; 57 x -= PAGE_OFFSET;
59 if (system_state == SYSTEM_BOOTING ? 58 if (!phys_addr_valid(x))
60 x > MAXMEM : !phys_addr_valid(x)) {
61 return false; 59 return false;
62 }
63 } 60 }
64 61
65 return pfn_valid(x >> PAGE_SHIFT); 62 return pfn_valid(x >> PAGE_SHIFT);
@@ -76,10 +73,9 @@ static inline int phys_addr_valid(unsigned long addr)
76#ifdef CONFIG_DEBUG_VIRTUAL 73#ifdef CONFIG_DEBUG_VIRTUAL
77unsigned long __phys_addr(unsigned long x) 74unsigned long __phys_addr(unsigned long x)
78{ 75{
79 /* VMALLOC_* aren't constants; not available at the boot time */ 76 /* VMALLOC_* aren't constants */
80 VIRTUAL_BUG_ON(x < PAGE_OFFSET); 77 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
81 VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING && 78 VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
82 is_vmalloc_addr((void *) x));
83 return x - PAGE_OFFSET; 79 return x - PAGE_OFFSET;
84} 80}
85EXPORT_SYMBOL(__phys_addr); 81EXPORT_SYMBOL(__phys_addr);
@@ -89,7 +85,9 @@ bool __virt_addr_valid(unsigned long x)
89{ 85{
90 if (x < PAGE_OFFSET) 86 if (x < PAGE_OFFSET)
91 return false; 87 return false;
92 if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x)) 88 if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
89 return false;
90 if (x >= FIXADDR_START)
93 return false; 91 return false;
94 return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); 92 return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
95} 93}
@@ -508,13 +506,19 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
508 return &bm_pte[pte_index(addr)]; 506 return &bm_pte[pte_index(addr)];
509} 507}
510 508
509static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
510
511void __init early_ioremap_init(void) 511void __init early_ioremap_init(void)
512{ 512{
513 pmd_t *pmd; 513 pmd_t *pmd;
514 int i;
514 515
515 if (early_ioremap_debug) 516 if (early_ioremap_debug)
516 printk(KERN_INFO "early_ioremap_init()\n"); 517 printk(KERN_INFO "early_ioremap_init()\n");
517 518
519 for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
520 slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
521
518 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 522 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
519 memset(bm_pte, 0, sizeof(bm_pte)); 523 memset(bm_pte, 0, sizeof(bm_pte));
520 pmd_populate_kernel(&init_mm, pmd, bm_pte); 524 pmd_populate_kernel(&init_mm, pmd, bm_pte);
@@ -581,6 +585,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+
 static int __init check_early_ioremap_leak(void)
 {
 	int count = 0;
@@ -602,7 +607,8 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);

-static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *
+__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;
@@ -668,9 +674,9 @@ static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned lo
 		--nrpages;
 	}
 	if (early_ioremap_debug)
-		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

-	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
 	return prev_map[slot];
 }
676 682
@@ -738,8 +744,3 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
 	}
 	prev_map[slot] = NULL;
 }
-
-void __this_fixmap_does_not_exist(void)
-{
-	WARN_ON(1);
-}
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 93d82038af4b..6a518dd08a36 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -32,11 +32,14 @@ struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
 	unsigned long page; /* location of the fault page */
+	bool old_presence; /* page presence prior to arming */
+	bool armed;

 	/*
 	 * Number of times this page has been registered as a part
 	 * of a probe. If zero, page is disarmed and this may be freed.
-	 * Used only by writers (RCU).
+	 * Used only by writers (RCU) and post_kmmio_handler().
+	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
 };
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }

-static void set_page_present(unsigned long addr, bool present,
-							unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+	pmdval_t v = pmd_val(*pmd);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+	pteval_t v = pte_val(*pte);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-	pteval_t pteval;
-	pmdval_t pmdval;
 	unsigned int level;
-	pmd_t *pmd;
 	pte_t *pte = lookup_address(addr, &level);

 	if (!pte) {
 		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-		return;
+		return -1;
 	}

-	if (pglevel)
-		*pglevel = level;
-
 	switch (level) {
 	case PG_LEVEL_2M:
-		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
-		if (present)
-			pmdval |= _PAGE_PRESENT;
-		set_pmd(pmd, __pmd(pmdval));
+		set_pmd_presence((pmd_t *)pte, present, old);
 		break;
-
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
-		if (present)
-			pteval |= _PAGE_PRESENT;
-		set_pte_atomic(pte, __pte(pteval));
+		set_pte_presence(pte, present, old);
 		break;
-
 	default:
 		pr_err("kmmio: unexpected page level 0x%x.\n", level);
-		return;
+		return -1;
 	}

 	__flush_tlb_one(addr);
+	return 0;
 }

-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, false, pglevel);
+	int ret;
+	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+	if (f->armed) {
+		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+					f->page, f->count, f->old_presence);
+	}
+	ret = set_page_presence(f->page, false, &f->old_presence);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+	f->armed = true;
+	return ret;
 }

-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, true, pglevel);
+	bool tmp;
+	int ret = set_page_presence(f->page, f->old_presence, &tmp);
+	WARN_ONCE(ret < 0,
+			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+	f->armed = false;
 }
160 191
161/* 192/*
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage->page, NULL);
 		if (addr == ctx->addr) {
 			/*
-			 * On SMP we sometimes get recursive probe hits on the
-			 * same address. Context is already saved, fall out.
+			 * A second fault on the same page means some other
+			 * condition needs handling by do_page_fault(), the
+			 * page really not being present is the most common.
 			 */
-			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-						"address 0x%08lx.\n",
-						smp_processor_id(), addr);
-			ret = 1;
-			goto no_kmmio_ctx;
-		}
-		/*
-		 * Prevent overwriting already in-flight context.
-		 * This should not happen, let's hope disarming at least
-		 * prevents a panic.
-		 */
-		pr_emerg("kmmio: recursive probe hit on CPU %d, "
+			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+					addr, smp_processor_id());
+
+			if (!faultpage->old_presence)
+				pr_info("kmmio: unexpected secondary hit for "
+					"address 0x%08lx on CPU %d.\n", addr,
+					smp_processor_id());
+		} else {
+			/*
+			 * Prevent overwriting already in-flight context.
+			 * This should not happen, let's hope disarming at
+			 * least prevents a panic.
+			 */
+			pr_emerg("kmmio: recursive probe hit on CPU %d, "
 					"for address 0x%08lx. Ignoring.\n",
 					smp_processor_id(), addr);
-		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
-					ctx->addr);
+			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+						ctx->addr);
+			disarm_kmmio_fault_page(faultpage);
+		}
 		goto no_kmmio_ctx;
 	}
 	ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	regs->flags &= ~X86_EFLAGS_IF;

 	/* Now we set present bit in PTE and single step. */
-	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+	disarm_kmmio_fault_page(ctx->fpage);

 	/*
 	 * If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

 	if (!ctx->active) {
-		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
 							smp_processor_id());
 		goto out;
 	}
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);

-	arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	/* Prevent racing against release_kmmio_fault_page(). */
+	spin_lock(&kmmio_lock);
+	if (ctx->fpage->count)
+		arm_kmmio_fault_page(ctx->fpage);
+	spin_unlock(&kmmio_lock);

 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
 	f = get_kmmio_fault_page(page);
 	if (f) {
 		if (!f->count)
-			arm_kmmio_fault_page(f->page, NULL);
+			arm_kmmio_fault_page(f);
 		f->count++;
 		return 0;
 	}

-	f = kmalloc(sizeof(*f), GFP_ATOMIC);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
 	if (!f)
 		return -1;

 	f->count = 1;
 	f->page = page;
-	list_add_rcu(&f->list, kmmio_page_list(f->page));

-	arm_kmmio_fault_page(f->page, NULL);
+	if (arm_kmmio_fault_page(f)) {
+		kfree(f);
+		return -1;
+	}
+
+	list_add_rcu(&f->list, kmmio_page_list(f->page));

 	return 0;
 }
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
 	f->count--;
 	BUG_ON(f->count < 0);
 	if (!f->count) {
-		disarm_kmmio_fault_page(f->page, NULL);
+		disarm_kmmio_fault_page(f);
 		f->release_next = *release_list;
 		*release_list = f;
 	}
@@ -408,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
 
 static void remove_kmmio_fault_pages(struct rcu_head *head)
 {
-	struct kmmio_delayed_release *dr = container_of(
-						head,
-						struct kmmio_delayed_release,
-						rcu);
+	struct kmmio_delayed_release *dr =
+		container_of(head, struct kmmio_delayed_release, rcu);
 	struct kmmio_fault_page *p = dr->release_list;
 	struct kmmio_fault_page **prevp = &dr->release_list;
 	unsigned long flags;
+
 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (p) {
-		if (!p->count)
+		if (!p->count) {
 			list_del_rcu(&p->list);
-		else
+			prevp = &p->release_next;
+		} else {
 			*prevp = p->release_next;
-		prevp = &p->release_next;
+		}
 		p = p->release_next;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
+
 	/* This is the real RCU destroy call. */
 	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
 }
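
Editor's note: the kmmio hunks above replace a blind present/not-present toggle with a save-and-restore pattern, where arming records the page's original _PAGE_PRESENT bit in old_presence and disarming restores that saved state instead of unconditionally setting the bit. A minimal standalone C sketch of that pattern follows; the names and the flags field are illustrative stand-ins, not code from the tree.

	#include <stdbool.h>

	struct traced_page {
		unsigned long flags;	/* stand-in for the real PTE value */
		bool old_presence;	/* presence bit saved at arm time */
		bool armed;
	};

	#define PRESENT 0x1UL

	/* Arm: hide the page, remembering whether it was present before. */
	static int arm(struct traced_page *p)
	{
		if (p->armed)
			return -1;	/* double arming is a caller bug */
		p->old_presence = p->flags & PRESENT;
		p->flags &= ~PRESENT;
		p->armed = true;
		return 0;
	}

	/* Disarm: restore the saved state; safe to call twice. */
	static void disarm(struct traced_page *p)
	{
		if (p->old_presence)
			p->flags |= PRESENT;
		p->armed = false;
	}

Restoring the saved bit rather than forcing it on is what lets a page that was genuinely not present stay that way after tracing detaches.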
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 0bcd7883d036..605c8be06217 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -100,6 +100,9 @@ static int __init parse_memtest(char *arg)
 {
 	if (arg)
 		memtest_pattern = simple_strtoul(arg, NULL, 0);
+	else
+		memtest_pattern = ARRAY_SIZE(patterns);
+
 	return 0;
 }

105 108
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 451fe95a0352..3daefa04ace5 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -416,10 +416,11 @@ void __init initmem_init(unsigned long start_pfn,
 	for_each_online_node(nid)
 		propagate_e820_map_node(nid);

-	for_each_online_node(nid)
+	for_each_online_node(nid) {
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+	}

-	NODE_DATA(0)->bdata = &bootmem_node_data[0];
 	setup_bootmem_allocator();
 }

diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index ab50a8d7402c..427fd1b56df5 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -9,35 +9,74 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+				"(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+				"(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+	return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+	return i * 212371 + 13;
+}

 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
+	pr_info(MODULE_NAME ": write test.\n");
 	mmiotrace_printk("Write test.\n");
+
 	for (i = 0; i < 256; i++)
 		iowrite8(i, p + i);
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		iowrite16(i * 12 + 7, p + i);
+		iowrite16(v16(i), p + i);
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		iowrite32(i * 212371 + 13, p + i);
+		iowrite32(v32(i), p + i);
 }

 static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
+	unsigned errs[3] = { 0 };
+	pr_info(MODULE_NAME ": read test.\n");
 	mmiotrace_printk("Read test.\n");
+
 	for (i = 0; i < 256; i++)
-		ioread8(p + i);
+		if (ioread8(p + i) != i)
+			++errs[0];
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		ioread16(p + i);
+		if (ioread16(p + i) != v16(i))
+			++errs[1];
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		ioread32(p + i);
+		if (ioread32(p + i) != v32(i))
+			++errs[2];
+
+	mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+						errs[0], errs[1], errs[2]);
 }

-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
 {
-	void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+	pr_info(MODULE_NAME ": read far test.\n");
+	mmiotrace_printk("Read far test.\n");
+
+	ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+	void __iomem *p = ioremap_nocache(mmio_address, size);
 	if (!p) {
 		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
 		return;
@@ -45,11 +84,15 @@ static void do_test(void)
 	mmiotrace_printk("ioremap returned %p.\n", p);
 	do_write_test(p);
 	do_read_test(p);
+	if (read_far && read_far < size - 4)
+		do_read_far_test(p);
 	iounmap(p);
 }

 static int __init init(void)
 {
+	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
 	if (mmio_address == 0) {
 		pr_err(MODULE_NAME ": you have to use the module argument "
 							"mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
 		return -ENXIO;
 	}

-	pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-					"in PCI address space, and writing "
-					"rubbish in there.\n", mmio_address);
-	do_test();
+	pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+		"address space, and writing 16 kB of rubbish in there.\n",
+		size >> 10, mmio_address);
+	do_test(size);
+	pr_info(MODULE_NAME ": All done.\n");
 	return 0;
 }

67 111
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c52f4034c7fd..82cd39a6cbd3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -103,7 +103,7 @@ static void xen_vcpu_setup(int cpu)
 
 	vcpup = &per_cpu(xen_vcpu_info, cpu);

-	info.mfn = virt_to_mfn(vcpup);
+	info.mfn = arbitrary_virt_to_mfn(vcpup);
 	info.offset = offset_in_page(vcpup);

 	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
@@ -301,8 +301,10 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 	frames = mcs.args;

 	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-		frames[f] = virt_to_mfn(va);
+		frames[f] = arbitrary_virt_to_mfn((void *)va);
+
 		make_lowmem_page_readonly((void *)va);
+		make_lowmem_page_readonly(mfn_to_virt(frames[f]));
 	}

 	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));
@@ -314,7 +316,7 @@ static void load_TLS_descriptor(struct thread_struct *t,
 				unsigned int cpu, unsigned int i)
 {
 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
 	struct multicall_space mc = __xen_mc_entry(0);

 	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
@@ -488,7 +490,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 		break;

 	default: {
-		xmaddr_t maddr = virt_to_machine(&dt[entry]);
+		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

 		xen_mc_flush();
 		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 319bd40a57c2..cb6afa4ec95c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -276,6 +276,13 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	p2m_top[topidx][idx] = mfn;
 }

+unsigned long arbitrary_virt_to_mfn(void *vaddr)
+{
+	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
+
+	return PFN_DOWN(maddr.maddr);
+}
+
 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 035582ae815d..8d470562ffc9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -219,6 +219,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
+	unsigned long gdt_mfn;

 	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
@@ -248,9 +249,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->ldt_ents = 0;

 	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+	gdt_mfn = arbitrary_virt_to_mfn(gdt);
 	make_lowmem_page_readonly(gdt);
+	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

-	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+	ctxt->gdt_frames[0] = gdt_mfn;
 	ctxt->gdt_ents      = GDT_ENTRIES;

 	ctxt->user_regs.cs = __KERNEL_CS;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 6c873dceb177..981200830432 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -103,9 +103,6 @@ config MATH_EMULATION
 	help
 	  Can we use information of configuration file?

-config HIGHMEM
-	bool "High memory support"
-
 endmenu

 menu "Platform options"
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 9606d2bd1dd9..4ec1633c2941 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -44,6 +44,8 @@
 #include <asm/setup.h>
 #include <asm/param.h>

+#include <platform/hardware.h>
+
 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
 struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
 #endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c7a021d9f696..c44f830b6c7a 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/stringify.h>
 #include <linux/kallsyms.h>
 #include <linux/delay.h>
+#include <linux/hardirq.h>

 #include <asm/ptrace.h>
 #include <asm/timex.h>
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 33f366be323f..bdd860d93f72 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -14,6 +14,7 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index efed8897bef3..25d46c84eb08 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -140,16 +140,14 @@ static void rs_poll(unsigned long priv)
 }


-static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
 {
 	char buf[2];

-	if (!tty)
-		return;
-
 	buf[0] = ch;
 	buf[1] = '\0'; /* Is this NULL necessary? */
 	__simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
+	return 1;
 }

 static void rs_flush_chars(struct tty_struct *tty)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index a104593e70c3..5a244f05360f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -39,14 +39,13 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 }

 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio,
-					     unsigned int *seg_size_ptr)
+					     struct bio *bio)
 {
 	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int cluster, i, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
-	struct bio *fbio;
+	struct bio *fbio, *bbio;

 	if (!bio)
 		return 0;
@@ -87,26 +86,20 @@ new_segment:
 			seg_size = bv->bv_len;
 		highprv = high;
 		}
+		bbio = bio;
 	}

-	if (seg_size_ptr)
-		*seg_size_ptr = seg_size;
+	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+		fbio->bi_seg_front_size = seg_size;
+	if (seg_size > bbio->bi_seg_back_size)
+		bbio->bi_seg_back_size = seg_size;

 	return nr_phys_segs;
 }

 void blk_recalc_rq_segments(struct request *rq)
 {
-	unsigned int seg_size = 0, phys_segs;
-
-	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
-
-	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-		rq->bio->bi_seg_front_size = seg_size;
-	if (seg_size > rq->biotail->bi_seg_back_size)
-		rq->biotail->bi_seg_back_size = seg_size;
-
-	rq->nr_phys_segments = phys_segs;
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }

 void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -114,7 +107,7 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	struct bio *nxt = bio->bi_next;

 	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
 	bio->bi_next = nxt;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
diff --git a/block/blktrace.c b/block/blktrace.c
index 7cf9d1ff45a0..028120a0965a 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -363,7 +363,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;

-	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 	if (!bt->msg_data)
 		goto err;

diff --git a/crypto/api.c b/crypto/api.c
index efe77df6863f..38a2bc02a98c 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
 	type &= mask;

-	alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
-				      name);
+	alg = crypto_alg_lookup(name, type, mask);
+	if (!alg) {
+		char tmp[CRYPTO_MAX_ALG_NAME];
+
+		request_module(name);
+
+		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+			request_module(tmp);
+
+		alg = crypto_alg_lookup(name, type, mask);
+	}
+
 	if (alg)
 		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;

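
Editor's note: the crypto_larval_lookup() hunk above trades try_then_request_module() for an explicit lookup, request_module(), retry sequence, so that a second "<name>-all" meta module can also be probed before giving up. A rough self-contained C sketch of that lookup-with-fallback shape; lookup(), load_module() and the toy registry are hypothetical stand-ins, not the kernel API.

	#include <stdio.h>
	#include <string.h>

	/* Toy registry: pretend loading "aes-all" registers "aes". */
	static int aes_loaded;

	static const char *lookup(const char *name)	/* ~ crypto_alg_lookup() */
	{
		return (aes_loaded && !strcmp(name, "aes")) ? name : NULL;
	}

	static void load_module(const char *name)	/* ~ request_module() */
	{
		if (!strcmp(name, "aes-all"))
			aes_loaded = 1;
	}

	static const char *lookup_with_fallback(const char *name)
	{
		char tmp[64];
		const char *alg = lookup(name);

		if (!alg) {
			load_module(name);		/* exact name first */
			if (snprintf(tmp, sizeof(tmp), "%s-all", name) <
							(int)sizeof(tmp))
				load_module(tmp);	/* then the -all fallback */
			alg = lookup(name);		/* retry after loading */
		}
		return alg;
	}

	int main(void)
	{
		printf("%s\n", lookup_with_fallback("aes") ? "found" : "missing");
		return 0;
	}

The snprintf bound check mirrors the kernel hunk: the fallback is only requested when the composed name actually fits the buffer.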
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 9cc769b587ff..68fd3d292799 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance(
 			continue;
 		}

-		if (!performance || !percpu_ptr(performance, i)) {
+		if (!performance || !per_cpu_ptr(performance, i)) {
 			retval = -EINVAL;
 			continue;
 		}

-		pr->performance = percpu_ptr(performance, i);
+		pr->performance = per_cpu_ptr(performance, i);
 		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 		if (acpi_processor_get_psd(pr)) {
 			retval = -EINVAL;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a603bbf9b1b7..66e012cd3271 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -582,18 +582,18 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci },		/* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },		/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },		/* MCP89 */

 	/* SiS */
 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9fbf0595f3d4..060bcd601f57 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1322,14 +1322,16 @@ static u64 ata_id_n_sectors(const u16 *id)
1322{ 1322{
1323 if (ata_id_has_lba(id)) { 1323 if (ata_id_has_lba(id)) {
1324 if (ata_id_has_lba48(id)) 1324 if (ata_id_has_lba48(id))
1325 return ata_id_u64(id, 100); 1325 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1326 else 1326 else
1327 return ata_id_u32(id, 60); 1327 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1328 } else { 1328 } else {
1329 if (ata_id_current_chs_valid(id)) 1329 if (ata_id_current_chs_valid(id))
1330 return ata_id_u32(id, 57); 1330 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1331 id[ATA_ID_CUR_SECTORS];
1331 else 1332 else
1332 return id[1] * id[3] * id[6]; 1333 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1334 id[ATA_ID_SECTORS];
1333 } 1335 }
1334} 1336}
1335 1337
@@ -4612,7 +4614,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

 	if (qc->n_elem)
-		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
 	qc->sg = NULL;
@@ -4727,7 +4729,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		return -1;

 	DPRINTK("%d sg elements mapped\n", n_elem);
-
+	qc->orig_n_elem = qc->n_elem;
 	qc->n_elem = n_elem;
 	qc->flags |= ATA_QCFLAG_DMAMAP;

diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ce2ef0475339..ea890911d4fa 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2423,11 +2423,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
2423 } 2423 }
2424 2424
2425 /* prereset() might have cleared ATA_EH_RESET. If so, 2425 /* prereset() might have cleared ATA_EH_RESET. If so,
2426 * bang classes and return. 2426 * bang classes, thaw and return.
2427 */ 2427 */
2428 if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2428 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2429 ata_for_each_dev(dev, link, ALL) 2429 ata_for_each_dev(dev, link, ALL)
2430 classes[dev->devno] = ATA_DEV_NONE; 2430 classes[dev->devno] = ATA_DEV_NONE;
2431 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2432 ata_is_host_link(link))
2433 ata_eh_thaw_port(ap);
2431 rc = 0; 2434 rc = 0;
2432 goto out; 2435 goto out;
2433 } 2436 }
@@ -2901,7 +2904,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
 	int i;

 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
-		u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+		u8 *sense_buffer = dev->link->ap->sector_buf;
 		u8 sense_key = 0;
 		unsigned int err_mask;

diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 55a8eed3f3a3..f65b53785a8f 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2523,7 +2523,7 @@ static void __exit nv_exit(void)
 module_init(nv_init);
 module_exit(nv_exit);
 module_param_named(adma, adma_enabled, bool, 0444);
-MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
 module_param_named(swncq, swncq_enabled, bool, 0444);
 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");

diff --git a/drivers/base/node.c b/drivers/base/node.c
index 43fa90b837ee..f8f578a71b25 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -303,7 +303,7 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
 	sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
 	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
-		unsigned int nid;
+		int nid;

 		nid = get_nid_for_pfn(pfn);
 		if (nid < 0)
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index cc250577d405..eeea477d9601 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -173,7 +173,7 @@ skbfree(struct sk_buff *skb)
 		return;
 	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
 		msleep(Sms);
-	if (i <= 0) {
+	if (i < 0) {
 		printk(KERN_ERR
 			"aoe: %s holds ref: %s\n",
 			skb->dev ? skb->dev->name : "netif",
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b5a061114630..4f9b6d792017 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3606,11 +3606,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
 		return -ENODEV;

-	/* Some devices (notably the HP Smart Array 5i Controller)
-	   need a little pause here */
-	schedule_timeout_uninterruptible(30*HZ);
-
-	/* Now try to get the controller to respond to a no-op */
+	/* Now try to get the controller to respond to a no-op. Some
+	   devices (notably the HP Smart Array 5i Controller) need
+	   up to 30 seconds to respond. */
 	for (i=0; i<30; i++) {
 		if (cciss_noop(pdev) == 0)
 			break;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index edbaac6c0573..bf0345577672 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -392,8 +392,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	size_t size;
-	int ret;
+	int size, ret;

 	ret = buf->ops->confirm(pipe, buf);
 	if (unlikely(ret))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b6c8ce254359..8f905089b72b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -977,6 +977,8 @@ static void backend_changed(struct xenbus_device *dev,
 		break;

 	case XenbusStateClosing:
+		if (info->gd == NULL)
+			xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
 		bd = bdget_disk(info->gd, 0);
 		if (bd == NULL)
 			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 52f4361eb6e4..d765afda9c2a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -271,15 +271,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 	nb_order = (nb_order >> 1) & 7;
 	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
 	nb_aper = nb_base << 25;
-	if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
-		return 0;
-	}

 	/* Northbridge seems to contain crap. Try the AGP bridge. */

 	pci_read_config_word(agp, cap+0x14, &apsize);
-	if (apsize == 0xffff)
+	if (apsize == 0xffff) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
 		return -1;
+	}

 	apsize &= 0xfff;
 	/* Some BIOS use weird encodings not in the AGPv3 table. */
@@ -301,6 +301,11 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 		order = nb_order;
 	}

+	if (nb_order >= order) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+	}
+
 	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
 			aper, 32 << order);
 	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c7714185f831..4373adb2119a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -633,13 +633,15 @@ static void intel_i830_init_gtt_entries(void)
 			break;
 		}
 	}
-	if (gtt_entries > 0)
+	if (gtt_entries > 0) {
 		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
 		       gtt_entries / KB(1), local ? "local" : "stolen");
-	else
+		gtt_entries /= KB(4);
+	} else {
 		dev_info(&agp_bridge->dev->dev,
 		       "no pre-allocated video memory detected\n");
-	gtt_entries /= KB(4);
+		gtt_entries = 0;
+	}

 	intel_private.gtt_entries = gtt_entries;
 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b55cb67435bd..d6daf3c507d3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -754,11 +754,6 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };

-static struct kobj_type ktype_empty_cpufreq = {
-	.sysfs_ops	= &sysfs_ops,
-	.release	= cpufreq_sysfs_release,
-};
-

 /**
  * cpufreq_add_dev - add a CPU device
@@ -892,36 +887,26 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

 	/* prepare interface data */
-	if (!cpufreq_driver->hide_interface) {
-		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-					   &sys_dev->kobj, "cpufreq");
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
+				   "cpufreq");
+	if (ret)
+		goto err_out_driver_exit;
+
+	/* set up files for this cpu device */
+	drv_attr = cpufreq_driver->attr;
+	while ((drv_attr) && (*drv_attr)) {
+		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
 		if (ret)
 			goto err_out_driver_exit;
-
-		/* set up files for this cpu device */
-		drv_attr = cpufreq_driver->attr;
-		while ((drv_attr) && (*drv_attr)) {
-			ret = sysfs_create_file(&policy->kobj,
-						&((*drv_attr)->attr));
-			if (ret)
-				goto err_out_driver_exit;
-			drv_attr++;
-		}
-		if (cpufreq_driver->get) {
-			ret = sysfs_create_file(&policy->kobj,
-						&cpuinfo_cur_freq.attr);
-			if (ret)
-				goto err_out_driver_exit;
-		}
-		if (cpufreq_driver->target) {
-			ret = sysfs_create_file(&policy->kobj,
-						&scaling_cur_freq.attr);
-			if (ret)
-				goto err_out_driver_exit;
-		}
-	} else {
-		ret = kobject_init_and_add(&policy->kobj, &ktype_empty_cpufreq,
-					   &sys_dev->kobj, "cpufreq");
+		drv_attr++;
+	}
+	if (cpufreq_driver->get) {
+		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+		if (ret)
+			goto err_out_driver_exit;
+	}
+	if (cpufreq_driver->target) {
+		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 		if (ret)
 			goto err_out_driver_exit;
 	}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 2d637e0fbc03..d9e751be8c5f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -457,10 +457,12 @@ static int init_ixp_crypto(void)
 	if (!ctx_pool) {
 		goto err;
 	}
-	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+				 "ixp_crypto:out", NULL);
 	if (ret)
 		goto err;
-	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+				 "ixp_crypto:in", NULL);
 	if (ret) {
 		qmgr_release_queue(SEND_QID);
 		goto err;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 856b3cc25583..3f0fdd18255d 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");

-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a7fbadebf623..a2c8e8514b63 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -304,7 +304,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");

-MODULE_ALIAS("sha1");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
 MODULE_ALIAS("sha1-padlock");
 MODULE_ALIAS("sha256-padlock");
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 33bd75347518..25b743abfb59 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 732fa1ec36ab..e190d8b30700 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -430,13 +430,15 @@ late_initcall(dmatest_init);
 static void __exit dmatest_exit(void)
 {
 	struct dmatest_chan *dtc, *_dtc;
+	struct dma_chan *chan;

 	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
 		list_del(&dtc->node);
+		chan = dtc->chan;
 		dmatest_cleanup_channel(dtc);
 		pr_debug("dmatest: dropped channel %s\n",
-			 dma_chan_name(dtc->chan));
-		dma_release_channel(dtc->chan);
+			 dma_chan_name(chan));
+		dma_release_channel(chan);
 	}
 }
 module_exit(dmatest_exit);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 70126a606239..86d6da47f558 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -158,7 +158,8 @@ static void dma_start(struct fsl_dma_chan *fsl_chan)
158 158
159static void dma_halt(struct fsl_dma_chan *fsl_chan) 159static void dma_halt(struct fsl_dma_chan *fsl_chan)
160{ 160{
161 int i = 0; 161 int i;
162
162 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 163 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
163 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, 164 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
164 32); 165 32);
@@ -166,8 +167,11 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
 		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

-	while (!dma_is_idle(fsl_chan) && (i++ < 100))
+	for (i = 0; i < 100; i++) {
+		if (dma_is_idle(fsl_chan))
+			break;
 		udelay(10);
+	}
 	if (i >= 100 && !dma_is_idle(fsl_chan))
 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
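
Editor's note: the dma_halt() hunk above swaps a while loop whose post-increment counter made the `i >= 100` timeout test ambiguous for a plain bounded for loop, so the timeout path only fires when every poll was really consumed. The same bounded-poll idiom in isolation, as a runnable C sketch; device_idle() and the fake counter stand in for dma_is_idle() and real hardware.

	#include <stdbool.h>
	#include <stdio.h>

	static int polls_left = 3;	/* fake hardware: idle after 3 polls */

	static bool device_idle(void)	/* stand-in for dma_is_idle() */
	{
		return --polls_left <= 0;
	}

	int main(void)
	{
		int i;

		/* Poll up to 100 times; break as soon as the device is idle. */
		for (i = 0; i < 100; i++) {
			if (device_idle())
				break;
			/* udelay(10) would go here in kernel code */
		}
		if (i >= 100)
			printf("halt timeout!\n");
		else
			printf("idle after %d polls\n", i + 1);
		return 0;
	}

With the for loop, `i == 100` can only mean the full budget was spent, which is exactly what the follow-up timeout check wants to know.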
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 4105d6575b64..ed83dd9df192 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 6cf622da0286..c012a1e15043 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 Intel Corporation.
+ * Copyright(c) 2007 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -49,6 +49,23 @@
 
 #define DCA_TAG_MAP_MASK 0xDF

+/* expected tag map bytes for I/OAT ver.2 */
+#define DCA2_TAG_MAP_BYTE0 0x80
+#define DCA2_TAG_MAP_BYTE1 0x0
+#define DCA2_TAG_MAP_BYTE2 0x81
+#define DCA2_TAG_MAP_BYTE3 0x82
+#define DCA2_TAG_MAP_BYTE4 0x82
+
+/* verify if tag map matches expected values */
+static inline int dca2_tag_map_valid(u8 *tag_map)
+{
+	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
+		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
+		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
+		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
+		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
+}
+
 /*
  * "Legacy" DCA systems do not implement the DCA register set in the
  * I/OAT device. Software needs direct support for their tag mappings.
@@ -452,6 +469,13 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 			ioatdca->tag_map[i] = 0;
 	}

+	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
+		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
+			"disabling DCA\n");
+		free_dca_provider(dca);
+		return NULL;
+	}
+
 	err = register_dca_provider(dca, &pdev->dev);
 	if (err) {
 		free_dca_provider(dca);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index b3759c4b6536..5905cd36bcd2 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2007 Intel Corporation.
+ * Copyright(c) 2004 - 2009 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -189,11 +189,13 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->xfercap = xfercap;
 		ioat_chan->desccount = 0;
 		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		if (ioat_chan->device->version != IOAT_VER_1_2) {
-			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
-					| IOAT_DMA_DCA_ANY_CPU,
-				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		}
+		if (ioat_chan->device->version == IOAT_VER_2_0)
+			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
+			       IOAT_DMA_DCA_ANY_CPU,
+			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+		else if (ioat_chan->device->version == IOAT_VER_3_0)
+			writel(IOAT_DMA_DCA_ANY_CPU,
+			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -1169,9 +1171,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 			 * up if the client is done with the descriptor
 			 */
 			if (async_tx_test_ack(&desc->async_tx)) {
-				list_del(&desc->node);
-				list_add_tail(&desc->node,
-					      &ioat_chan->free_desc);
+				list_move_tail(&desc->node,
+					       &ioat_chan->free_desc);
 			} else
 				desc->async_tx.cookie = 0;
 		} else {
@@ -1362,6 +1363,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_cookie_t cookie;
 	int err = 0;
 	struct completion cmp;
+	unsigned long tmo;

 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1413,9 +1415,10 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 	device->common.device_issue_pending(dma_chan);

-	wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

-	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+	if (tmo == 0 ||
+	    device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 					!= DMA_SUCCESS) {
 		dev_err(&device->pdev->dev,
 			"Self-test copy timed out, disabling\n");
@@ -1657,6 +1660,13 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1657 " %d channels, device version 0x%02x, driver version %s\n", 1660 " %d channels, device version 0x%02x, driver version %s\n",
1658 device->common.chancnt, device->version, IOAT_DMA_VERSION); 1661 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1659 1662
1663 if (!device->common.chancnt) {
1664 dev_err(&device->pdev->dev,
1665 "Intel(R) I/OAT DMA Engine problem found: "
1666 "zero channels detected\n");
1667 goto err_setup_interrupts;
1668 }
1669
1660 err = ioat_dma_setup_interrupts(device); 1670 err = ioat_dma_setup_interrupts(device);
1661 if (err) 1671 if (err)
1662 goto err_setup_interrupts; 1672 goto err_setup_interrupts;
@@ -1696,6 +1706,9 @@ void ioat_dma_remove(struct ioatdma_device *device)
1696 struct dma_chan *chan, *_chan; 1706 struct dma_chan *chan, *_chan;
1697 struct ioat_dma_chan *ioat_chan; 1707 struct ioat_dma_chan *ioat_chan;
1698 1708
1709 if (device->version != IOAT_VER_3_0)
1710 cancel_delayed_work(&device->work);
1711
1699 ioat_dma_remove_interrupts(device); 1712 ioat_dma_remove_interrupts(device);
1700 1713
1701 dma_async_device_unregister(&device->common); 1714 dma_async_device_unregister(&device->common);
@@ -1707,10 +1720,6 @@ void ioat_dma_remove(struct ioatdma_device *device)
1707 pci_release_regions(device->pdev); 1720 pci_release_regions(device->pdev);
1708 pci_disable_device(device->pdev); 1721 pci_disable_device(device->pdev);
1709 1722
1710 if (device->version != IOAT_VER_3_0) {
1711 cancel_delayed_work(&device->work);
1712 }
1713
1714 list_for_each_entry_safe(chan, _chan, 1723 list_for_each_entry_safe(chan, _chan,
1715 &device->common.channels, device_node) { 1724 &device->common.channels, device_node) {
1716 ioat_chan = to_ioat_chan(chan); 1725 ioat_chan = to_ioat_chan(chan);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index a3306d0e1372..a52ff4bd4601 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
@@ -29,7 +29,7 @@
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30#include <net/tcp.h> 30#include <net/tcp.h>
31 31
32#define IOAT_DMA_VERSION "3.30" 32#define IOAT_DMA_VERSION "3.64"
33 33
34enum ioat_interrupt { 34enum ioat_interrupt {
35 none = 0, 35 none = 0,
@@ -135,12 +135,14 @@ static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
135 #ifdef CONFIG_NET_DMA 135 #ifdef CONFIG_NET_DMA
136 switch (dev->version) { 136 switch (dev->version) {
137 case IOAT_VER_1_2: 137 case IOAT_VER_1_2:
138 case IOAT_VER_3_0:
139 sysctl_tcp_dma_copybreak = 4096; 138 sysctl_tcp_dma_copybreak = 4096;
140 break; 139 break;
141 case IOAT_VER_2_0: 140 case IOAT_VER_2_0:
142 sysctl_tcp_dma_copybreak = 2048; 141 sysctl_tcp_dma_copybreak = 2048;
143 break; 142 break;
143 case IOAT_VER_3_0:
144 sysctl_tcp_dma_copybreak = 262144;
145 break;
144 } 146 }
145 #endif 147 #endif
146} 148}
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index f1ae2c776f74..afa57eef86c9 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 827cb503cac6..49bc277424f8 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index ea5440dd10dc..16adbe61cfb2 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -928,19 +928,19 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
928 928
929 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 929 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
930 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 930 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
931 if (!xor_srcs[src_idx]) 931 if (!xor_srcs[src_idx]) {
932 while (src_idx--) { 932 while (src_idx--)
933 __free_page(xor_srcs[src_idx]); 933 __free_page(xor_srcs[src_idx]);
934 return -ENOMEM; 934 return -ENOMEM;
935 } 935 }
936 } 936 }
937 937
938 dest = alloc_page(GFP_KERNEL); 938 dest = alloc_page(GFP_KERNEL);
939 if (!dest) 939 if (!dest) {
940 while (src_idx--) { 940 while (src_idx--)
941 __free_page(xor_srcs[src_idx]); 941 __free_page(xor_srcs[src_idx]);
942 return -ENOMEM; 942 return -ENOMEM;
943 } 943 }
944 944
945 /* Fill in src buffers */ 945 /* Fill in src buffers */
946 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 946 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
@@ -1401,7 +1401,7 @@ MODULE_ALIAS("platform:iop-adma");
1401 1401
1402static struct platform_driver iop_adma_driver = { 1402static struct platform_driver iop_adma_driver = {
1403 .probe = iop_adma_probe, 1403 .probe = iop_adma_probe,
1404 .remove = iop_adma_remove, 1404 .remove = __devexit_p(iop_adma_remove),
1405 .driver = { 1405 .driver = {
1406 .owner = THIS_MODULE, 1406 .owner = THIS_MODULE,
1407 .name = "iop-adma", 1407 .name = "iop-adma",
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 1f154d08e98f..ae50a9d1a4e6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -729,7 +729,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
729 729
730 ichan->status = IPU_CHANNEL_READY; 730 ichan->status = IPU_CHANNEL_READY;
731 731
732 spin_unlock_irqrestore(ipu->lock, flags); 732 spin_unlock_irqrestore(&ipu->lock, flags);
733 733
734 return 0; 734 return 0;
735} 735}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d35cbd1ff0b3..cb7f26fb9f18 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1019,19 +1019,19 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1019 1019
1020 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 1020 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1021 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 1021 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1022 if (!xor_srcs[src_idx]) 1022 if (!xor_srcs[src_idx]) {
1023 while (src_idx--) { 1023 while (src_idx--)
1024 __free_page(xor_srcs[src_idx]); 1024 __free_page(xor_srcs[src_idx]);
1025 return -ENOMEM; 1025 return -ENOMEM;
1026 } 1026 }
1027 } 1027 }
1028 1028
1029 dest = alloc_page(GFP_KERNEL); 1029 dest = alloc_page(GFP_KERNEL);
1030 if (!dest) 1030 if (!dest) {
1031 while (src_idx--) { 1031 while (src_idx--)
1032 __free_page(xor_srcs[src_idx]); 1032 __free_page(xor_srcs[src_idx]);
1033 return -ENOMEM; 1033 return -ENOMEM;
1034 } 1034 }
1035 1035
1036 /* Fill in src buffers */ 1036 /* Fill in src buffers */
1037 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 1037 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
@@ -1287,7 +1287,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1287 1287
1288static struct platform_driver mv_xor_driver = { 1288static struct platform_driver mv_xor_driver = {
1289 .probe = mv_xor_probe, 1289 .probe = mv_xor_probe,
1290 .remove = mv_xor_remove, 1290 .remove = __devexit_p(mv_xor_remove),
1291 .driver = { 1291 .driver = {
1292 .owner = THIS_MODULE, 1292 .owner = THIS_MODULE,
1293 .name = MV_XOR_NAME, 1293 .name = MV_XOR_NAME,
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 096e2a37446d..7c8b15b22bf2 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -168,7 +168,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
168 file_priv->minor->master != file_priv->master) { 168 file_priv->minor->master != file_priv->master) {
169 mutex_lock(&dev->struct_mutex); 169 mutex_lock(&dev->struct_mutex);
170 file_priv->minor->master = drm_master_get(file_priv->master); 170 file_priv->minor->master = drm_master_get(file_priv->master);
171 mutex_lock(&dev->struct_mutex); 171 mutex_unlock(&dev->struct_mutex);
172 } 172 }
173 173
174 return 0; 174 return 0;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index cfc1ee90f5a3..b251d8674b41 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -72,6 +72,7 @@ I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
72#define LM85_COMPANY_SMSC 0x5c 72#define LM85_COMPANY_SMSC 0x5c
73#define LM85_VERSTEP_VMASK 0xf0 73#define LM85_VERSTEP_VMASK 0xf0
74#define LM85_VERSTEP_GENERIC 0x60 74#define LM85_VERSTEP_GENERIC 0x60
75#define LM85_VERSTEP_GENERIC2 0x70
75#define LM85_VERSTEP_LM85C 0x60 76#define LM85_VERSTEP_LM85C 0x60
76#define LM85_VERSTEP_LM85B 0x62 77#define LM85_VERSTEP_LM85B 0x62
77#define LM85_VERSTEP_ADM1027 0x60 78#define LM85_VERSTEP_ADM1027 0x60
@@ -334,6 +335,7 @@ static struct lm85_data *lm85_update_device(struct device *dev);
334static const struct i2c_device_id lm85_id[] = { 335static const struct i2c_device_id lm85_id[] = {
335 { "adm1027", adm1027 }, 336 { "adm1027", adm1027 },
336 { "adt7463", adt7463 }, 337 { "adt7463", adt7463 },
338 { "adt7468", adt7468 },
337 { "lm85", any_chip }, 339 { "lm85", any_chip },
338 { "lm85b", lm85b }, 340 { "lm85b", lm85b },
339 { "lm85c", lm85c }, 341 { "lm85c", lm85c },
@@ -408,7 +410,8 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
408 struct lm85_data *data = lm85_update_device(dev); 410 struct lm85_data *data = lm85_update_device(dev);
409 int vid; 411 int vid;
410 412
411 if (data->type == adt7463 && (data->vid & 0x80)) { 413 if ((data->type == adt7463 || data->type == adt7468) &&
414 (data->vid & 0x80)) {
412 /* 6-pin VID (VRM 10) */ 415 /* 6-pin VID (VRM 10) */
413 vid = vid_from_reg(data->vid & 0x3f, data->vrm); 416 vid = vid_from_reg(data->vid & 0x3f, data->vrm);
414 } else { 417 } else {
@@ -1153,7 +1156,8 @@ static int lm85_detect(struct i2c_client *client, int kind,
1153 address, company, verstep); 1156 address, company, verstep);
1154 1157
1155 /* All supported chips have the version in common */ 1158 /* All supported chips have the version in common */
1156 if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC) { 1159 if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC &&
1160 (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) {
1157 dev_dbg(&adapter->dev, "Autodetection failed: " 1161 dev_dbg(&adapter->dev, "Autodetection failed: "
1158 "unsupported version\n"); 1162 "unsupported version\n");
1159 return -ENODEV; 1163 return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index eeda276f8f16..7f186bbcb99d 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -482,7 +482,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
482 return 0; 482 return 0;
483} 483}
484 484
485static void __devexit 485static void
486mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data) 486mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
487{ 487{
488 if (drv_data->reg_base) { 488 if (drv_data->reg_base) {
@@ -577,7 +577,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
577 577
578static struct platform_driver mv64xxx_i2c_driver = { 578static struct platform_driver mv64xxx_i2c_driver = {
579 .probe = mv64xxx_i2c_probe, 579 .probe = mv64xxx_i2c_probe,
580 .remove = mv64xxx_i2c_remove, 580 .remove = __devexit_p(mv64xxx_i2c_remove),
581 .driver = { 581 .driver = {
582 .owner = THIS_MODULE, 582 .owner = THIS_MODULE,
583 .name = MV64XXX_I2C_CTLR_NAME, 583 .name = MV64XXX_I2C_CTLR_NAME,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e072903b12f0..5ea3bfad172a 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -721,6 +721,11 @@ config BLK_DEV_IDE_TX4939
721 depends on SOC_TX4939 721 depends on SOC_TX4939
722 select BLK_DEV_IDEDMA_SFF 722 select BLK_DEV_IDEDMA_SFF
723 723
724config BLK_DEV_IDE_AT91
725 tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
726 depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
727 select IDE_TIMINGS
728
724config IDE_ARM 729config IDE_ARM
725 tristate "ARM IDE support" 730 tristate "ARM IDE support"
726 depends on ARM && (ARCH_RPC || ARCH_SHARK) 731 depends on ARM && (ARCH_RPC || ARCH_SHARK)
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index d0e3d7d5b467..1c326d94aa6d 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -116,3 +116,4 @@ obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
116 116
117obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o 117obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
118obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o 118obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
119obj-$(CONFIG_BLK_DEV_IDE_AT91) += at91_ide.o
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
new file mode 100644
index 000000000000..1bb50f46388d
--- /dev/null
+++ b/drivers/ide/at91_ide.c
@@ -0,0 +1,467 @@
1/*
2 * IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
3 * with Compact Flash True IDE logic
4 *
5 * Copyright (c) 2008, 2009 Kelvatek Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23#include <linux/version.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/ide.h>
29#include <linux/platform_device.h>
30
31#include <mach/board.h>
32#include <mach/gpio.h>
33#include <mach/at91sam9263.h>
34#include <mach/at91sam9_smc.h>
35#include <mach/at91sam9263_matrix.h>
36
37#define DRV_NAME "at91_ide"
38
39#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
40#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
41
42/*
43 * Access to the IDE device is possible through the EBI Static Memory
44 * Controller with Compact Flash logic. For details see the EBI and SMC
45 * datasheet sections of any microcontroller from the AT91SAM9 family.
46 *
47 * Within the SMC chip select address space, lines A[23:21] distinguish the
48 * Compact Flash modes (I/O, common memory, attribute memory, True IDE):
49 * 0x00c00000 - True IDE
50 * 0x00e00000 - Alternate True IDE (Alt Status Register)
51 *
52 * In True IDE mode the Task File and the Data Register are mapped at the
53 * same address, and accesses are distinguished by bus data width:
54 * 8-bit for the Task File, 16-bit for Data I/O.
55 *
56 * After initialization we do 8/16-bit flipping (changes in the SMC MODE
57 * register) only inside IDE callback routines, which are serialized by the
58 * IDE layer, so no additional locking is needed.
59 */
60
61#define TASK_FILE 0x00c00000
62#define ALT_MODE 0x00e00000
63#define REGS_SIZE 8
64
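/*
 * A sketch of the decode described above, for illustration: bits A[23:21]
 * select the CF mode, so
 *   TASK_FILE = 0x00c00000 -> A[23:21] = 0b110 (True IDE)
 *   ALT_MODE  = 0x00e00000 -> A[23:21] = 0b111 (Alternate True IDE)
 * Both register windows therefore live in the same chip-select space,
 * REGS_SIZE bytes each.
 */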
65#define enter_16bit(cs, mode) do { \
66 mode = at91_sys_read(AT91_SMC_MODE(cs)); \
67 at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16); \
68} while (0)
69
70#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode)
71
72static void set_smc_timings(const u8 chipselect, const u16 cycle,
73 const u16 setup, const u16 pulse,
74 const u16 data_float, int use_iordy)
75{
76 unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
77 AT91_SMC_BAT_SELECT;
78
79 /* disable or enable waiting for IORDY signal */
80 if (use_iordy)
81 mode |= AT91_SMC_EXNWMODE_READY;
82
83 /* add data float cycles if needed */
84 if (data_float)
85 mode |= AT91_SMC_TDF_(data_float);
86
87 at91_sys_write(AT91_SMC_MODE(chipselect), mode);
88
89 /* setup timings in SMC */
90 at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
91 AT91_SMC_NCS_WRSETUP_(0) |
92 AT91_SMC_NRDSETUP_(setup) |
93 AT91_SMC_NCS_RDSETUP_(0));
94 at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
95 AT91_SMC_NCS_WRPULSE_(cycle) |
96 AT91_SMC_NRDPULSE_(pulse) |
97 AT91_SMC_NCS_RDPULSE_(cycle));
98 at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
99 AT91_SMC_NRDCYCLE_(cycle));
100}
101
102static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
103{
104 u64 tmp = ns;
105
106 tmp *= mck_hz;
107 tmp += 1000*1000*1000 - 1; /* round up */
108 do_div(tmp, 1000*1000*1000);
109 return (unsigned int) tmp;
110}
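/*
 * Worked example, assuming a 100 MHz master clock: for ns = 120,
 * tmp = 120 * 100000000 = 12000000000; adding (10^9 - 1) and dividing
 * by 10^9 yields exactly 12 cycles, while ns = 125 would yield
 * ceil(12.5) = 13 cycles. The "+ 10^9 - 1" turns the integer division
 * into a round-up, so a requested timing is never shortened.
 */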
111
112static void apply_timings(const u8 chipselect, const u8 pio,
113 const struct ide_timing *timing, int use_iordy)
114{
115 unsigned int t0, t1, t2, t6z;
116 unsigned int cycle, setup, pulse, data_float;
117 unsigned int mck_hz;
118 struct clk *mck;
119
120 /* see table 22 of the Compact Flash standard 4.1 for the meaning;
121 * we do not stretch the active (t2) time, so setup (t1) + hold time (th)
122 * must assure at least the minimal recovery (t2i) time */
123 t0 = timing->cyc8b;
124 t1 = timing->setup;
125 t2 = timing->act8b;
126 t6z = (pio < 5) ? 30 : 20;
127
128 pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
129
130 mck = clk_get(NULL, "mck");
131 BUG_ON(IS_ERR(mck));
132 mck_hz = clk_get_rate(mck);
133 pdbg("mck_hz=%u\n", mck_hz);
134
135 cycle = calc_mck_cycles(t0, mck_hz);
136 setup = calc_mck_cycles(t1, mck_hz);
137 pulse = calc_mck_cycles(t2, mck_hz);
138 data_float = calc_mck_cycles(t6z, mck_hz);
139
140 pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
141 cycle, setup, pulse, data_float);
142
143 set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
144}
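/*
 * Rough numbers for the default case, assuming mck = 100 MHz and PIO 0
 * (whose 8-bit taskfile timings are about t0 = 600 ns, t1 = 70 ns and
 * t2 = 290 ns): cycle = 60, setup = 7, pulse = 29 and, with t6z = 30 ns,
 * data_float = 3 master clock cycles.
 */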
145
146static void at91_ide_input_data(ide_drive_t *drive, struct request *rq,
147 void *buf, unsigned int len)
148{
149 ide_hwif_t *hwif = drive->hwif;
150 struct ide_io_ports *io_ports = &hwif->io_ports;
151 u8 chipselect = hwif->select_data;
152 unsigned long mode;
153
154 pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
155
156 len++;
157
158 enter_16bit(chipselect, mode);
159 __ide_mm_insw((void __iomem *) io_ports->data_addr, buf, len / 2);
160 leave_16bit(chipselect, mode);
161}
162
163static void at91_ide_output_data(ide_drive_t *drive, struct request *rq,
164 void *buf, unsigned int len)
165{
166 ide_hwif_t *hwif = drive->hwif;
167 struct ide_io_ports *io_ports = &hwif->io_ports;
168 u8 chipselect = hwif->select_data;
169 unsigned long mode;
170
171 pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
172
173 enter_16bit(chipselect, mode);
174 __ide_mm_outsw((void __iomem *) io_ports->data_addr, buf, len / 2);
175 leave_16bit(chipselect, mode);
176}
177
178static u8 ide_mm_inb(unsigned long port)
179{
180 return readb((void __iomem *) port);
181}
182
183static void ide_mm_outb(u8 value, unsigned long port)
184{
185 writeb(value, (void __iomem *) port);
186}
187
188static void at91_ide_tf_load(ide_drive_t *drive, ide_task_t *task)
189{
190 ide_hwif_t *hwif = drive->hwif;
191 struct ide_io_ports *io_ports = &hwif->io_ports;
192 struct ide_taskfile *tf = &task->tf;
193 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
194
195 if (task->tf_flags & IDE_TFLAG_FLAGGED)
196 HIHI = 0xFF;
197
198 if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
199 u16 data = (tf->hob_data << 8) | tf->data;
200
201 at91_ide_output_data(drive, NULL, &data, 2);
202 }
203
204 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
205 ide_mm_outb(tf->hob_feature, io_ports->feature_addr);
206 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
207 ide_mm_outb(tf->hob_nsect, io_ports->nsect_addr);
208 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
209 ide_mm_outb(tf->hob_lbal, io_ports->lbal_addr);
210 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
211 ide_mm_outb(tf->hob_lbam, io_ports->lbam_addr);
212 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
213 ide_mm_outb(tf->hob_lbah, io_ports->lbah_addr);
214
215 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
216 ide_mm_outb(tf->feature, io_ports->feature_addr);
217 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
218 ide_mm_outb(tf->nsect, io_ports->nsect_addr);
219 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
220 ide_mm_outb(tf->lbal, io_ports->lbal_addr);
221 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
222 ide_mm_outb(tf->lbam, io_ports->lbam_addr);
223 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
224 ide_mm_outb(tf->lbah, io_ports->lbah_addr);
225
226 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
227 ide_mm_outb((tf->device & HIHI) | drive->select, io_ports->device_addr);
228}
229
230static void at91_ide_tf_read(ide_drive_t *drive, ide_task_t *task)
231{
232 ide_hwif_t *hwif = drive->hwif;
233 struct ide_io_ports *io_ports = &hwif->io_ports;
234 struct ide_taskfile *tf = &task->tf;
235
236 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
237 u16 data;
238
239 at91_ide_input_data(drive, NULL, &data, 2);
240 tf->data = data & 0xff;
241 tf->hob_data = (data >> 8) & 0xff;
242 }
243
244 /* be sure we're looking at the low order bits */
245 ide_mm_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
246
247 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
248 tf->feature = ide_mm_inb(io_ports->feature_addr);
249 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
250 tf->nsect = ide_mm_inb(io_ports->nsect_addr);
251 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
252 tf->lbal = ide_mm_inb(io_ports->lbal_addr);
253 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
254 tf->lbam = ide_mm_inb(io_ports->lbam_addr);
255 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
256 tf->lbah = ide_mm_inb(io_ports->lbah_addr);
257 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
258 tf->device = ide_mm_inb(io_ports->device_addr);
259
260 if (task->tf_flags & IDE_TFLAG_LBA48) {
261 ide_mm_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
262
263 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
264 tf->hob_feature = ide_mm_inb(io_ports->feature_addr);
265 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
266 tf->hob_nsect = ide_mm_inb(io_ports->nsect_addr);
267 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
268 tf->hob_lbal = ide_mm_inb(io_ports->lbal_addr);
269 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
270 tf->hob_lbam = ide_mm_inb(io_ports->lbam_addr);
271 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
272 tf->hob_lbah = ide_mm_inb(io_ports->lbah_addr);
273 }
274}
275
276static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
277{
278 struct ide_timing *timing;
279 u8 chipselect = drive->hwif->select_data;
280 int use_iordy = 0;
281
282 pdbg("chipselect %u pio %u\n", chipselect, pio);
283
284 timing = ide_timing_find_mode(XFER_PIO_0 + pio);
285 BUG_ON(!timing);
286
287 if ((pio > 2 || ata_id_has_iordy(drive->id)) &&
288 !(ata_id_is_cfa(drive->id) && pio > 4))
289 use_iordy = 1;
290
291 apply_timings(chipselect, pio, timing, use_iordy);
292}
293
294static const struct ide_tp_ops at91_ide_tp_ops = {
295 .exec_command = ide_exec_command,
296 .read_status = ide_read_status,
297 .read_altstatus = ide_read_altstatus,
298 .set_irq = ide_set_irq,
299
300 .tf_load = at91_ide_tf_load,
301 .tf_read = at91_ide_tf_read,
302
303 .input_data = at91_ide_input_data,
304 .output_data = at91_ide_output_data,
305};
306
307static const struct ide_port_ops at91_ide_port_ops = {
308 .set_pio_mode = at91_ide_set_pio_mode,
309};
310
311static const struct ide_port_info at91_ide_port_info __initdata = {
312 .port_ops = &at91_ide_port_ops,
313 .tp_ops = &at91_ide_tp_ops,
314 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
315 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
316 .pio_mask = ATA_PIO5,
317};
318
319/*
320 * If the interrupt is delivered through GPIO, IRQs trigger on both the
321 * falling and rising edges of the signal, whereas the IDE device requests
322 * an interrupt on high level (the rising edge in our case). This means we
323 * get spurious interrupts, so we must check the interrupt pin and return
324 * from the ISR immediately when the line is at low level.
325 */
326
327irqreturn_t at91_irq_handler(int irq, void *dev_id)
328{
329 int ntries = 8;
330 int pin_val1, pin_val2;
331
332 /* additional deglitching; the line can be noisy on a badly designed PCB */
333 do {
334 pin_val1 = at91_get_gpio_value(irq);
335 pin_val2 = at91_get_gpio_value(irq);
336 } while (pin_val1 != pin_val2 && --ntries > 0);
337
338 if (pin_val1 == 0 || ntries <= 0)
339 return IRQ_HANDLED;
340
341 return ide_intr(irq, dev_id);
342}
343
344static int __init at91_ide_probe(struct platform_device *pdev)
345{
346 int ret;
347 hw_regs_t hw;
348 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
349 struct ide_host *host;
350 struct resource *res;
351 unsigned long tf_base = 0, ctl_base = 0;
352 struct at91_cf_data *board = pdev->dev.platform_data;
353
354 if (!board)
355 return -ENODEV;
356
357 if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
358 perr("no device detected\n");
359 return -ENODEV;
360 }
361
362 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
363 if (!res) {
364 perr("can't get memory resource\n");
365 return -ENODEV;
366 }
367
368 if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
369 REGS_SIZE, "ide") ||
370 !devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
371 REGS_SIZE, "alt")) {
372 perr("memory resources in use\n");
373 return -EBUSY;
374 }
375
376 pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
377 board->irq_pin, (unsigned long) res->start);
378
379 tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
380 REGS_SIZE);
381 ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
382 REGS_SIZE);
383 if (!tf_base || !ctl_base) {
384 perr("can't map memory regions\n");
385 return -EBUSY;
386 }
387
388 memset(&hw, 0, sizeof(hw));
389
390 if (board->flags & AT91_IDE_SWAP_A0_A2) {
391 /* workaround for stupid hardware bug */
392 hw.io_ports.data_addr = tf_base + 0;
393 hw.io_ports.error_addr = tf_base + 4;
394 hw.io_ports.nsect_addr = tf_base + 2;
395 hw.io_ports.lbal_addr = tf_base + 6;
396 hw.io_ports.lbam_addr = tf_base + 1;
397 hw.io_ports.lbah_addr = tf_base + 5;
398 hw.io_ports.device_addr = tf_base + 3;
399 hw.io_ports.command_addr = tf_base + 7;
400 hw.io_ports.ctl_addr = ctl_base + 3;
401 } else
402 ide_std_init_ports(&hw, tf_base, ctl_base + 6);
403
404 hw.irq = board->irq_pin;
405 hw.chipset = ide_generic;
406 hw.dev = &pdev->dev;
407
408 host = ide_host_alloc(&at91_ide_port_info, hws);
409 if (!host) {
410 perr("failed to allocate ide host\n");
411 return -ENOMEM;
412 }
413
414 /* set up the Static Memory Controller with PIO 0 as the default */
415 apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
416
417 /* with a GPIO interrupt we have to apply quirks in the handler */
418 if (board->irq_pin >= PIN_BASE)
419 host->irq_handler = at91_irq_handler;
420
421 host->ports[0]->select_data = board->chipselect;
422
423 ret = ide_host_register(host, &at91_ide_port_info, hws);
424 if (ret) {
425 perr("failed to register ide host\n");
426 goto err_free_host;
427 }
428 platform_set_drvdata(pdev, host);
429 return 0;
430
431err_free_host:
432 ide_host_free(host);
433 return ret;
434}
435
436static int __exit at91_ide_remove(struct platform_device *pdev)
437{
438 struct ide_host *host = platform_get_drvdata(pdev);
439
440 ide_host_remove(host);
441 return 0;
442}
443
444static struct platform_driver at91_ide_driver = {
445 .driver = {
446 .name = DRV_NAME,
447 .owner = THIS_MODULE,
448 },
449 .remove = __exit_p(at91_ide_remove),
450};
451
452static int __init at91_ide_init(void)
453{
454 return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
455}
456
457static void __exit at91_ide_exit(void)
458{
459 platform_driver_unregister(&at91_ide_driver);
460}
461
462module_init(at91_ide_init);
463module_exit(at91_ide_exit);
464
465MODULE_LICENSE("GPL");
466MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
467
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index 1146f4204c6e..1f86dcbd2b1c 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -125,5 +125,5 @@ const struct ide_proc_devset ide_disk_settings[] = {
125 IDE_PROC_DEVSET(multcount, 0, 16), 125 IDE_PROC_DEVSET(multcount, 0, 16),
126 IDE_PROC_DEVSET(nowerr, 0, 1), 126 IDE_PROC_DEVSET(nowerr, 0, 1),
127 IDE_PROC_DEVSET(wcache, 0, 1), 127 IDE_PROC_DEVSET(wcache, 0, 1),
128 { 0 }, 128 { NULL },
129}; 129};
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
index 3ec762cb60ab..fcd4d8153df5 100644
--- a/drivers/ide/ide-floppy_proc.c
+++ b/drivers/ide/ide-floppy_proc.c
@@ -29,5 +29,5 @@ const struct ide_proc_devset ide_floppy_settings[] = {
29 IDE_PROC_DEVSET(bios_head, 0, 255), 29 IDE_PROC_DEVSET(bios_head, 0, 255),
30 IDE_PROC_DEVSET(bios_sect, 0, 63), 30 IDE_PROC_DEVSET(bios_sect, 0, 63),
31 IDE_PROC_DEVSET(ticks, 0, 255), 31 IDE_PROC_DEVSET(ticks, 0, 255),
32 { 0 }, 32 { NULL },
33}; 33};
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 9ee51adf567f..a9a6c208288a 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -908,7 +908,7 @@ void ide_timer_expiry (unsigned long data)
908 ide_drive_t *uninitialized_var(drive); 908 ide_drive_t *uninitialized_var(drive);
909 ide_handler_t *handler; 909 ide_handler_t *handler;
910 unsigned long flags; 910 unsigned long flags;
911 unsigned long wait = -1; 911 int wait = -1;
912 int plug_device = 0; 912 int plug_device = 0;
913 913
914 spin_lock_irqsave(&hwif->lock, flags); 914 spin_lock_irqsave(&hwif->lock, flags);
@@ -1162,6 +1162,7 @@ out_early:
1162 1162
1163 return irq_ret; 1163 return irq_ret;
1164} 1164}
1165EXPORT_SYMBOL_GPL(ide_intr);
1165 1166
1166/** 1167/**
1167 * ide_do_drive_cmd - issue IDE special command 1168 * ide_do_drive_cmd - issue IDE special command
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 753b92ebe0ae..b1892bd95c6f 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -315,6 +315,8 @@ void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
315 u8 io_32bit = drive->io_32bit; 315 u8 io_32bit = drive->io_32bit;
316 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 316 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
317 317
318 len++;
319
318 if (io_32bit) { 320 if (io_32bit) {
319 unsigned long uninitialized_var(flags); 321 unsigned long uninitialized_var(flags);
320 322
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index ce0818a993f6..ee8e3e7cad51 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -950,6 +950,7 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
950static int init_irq (ide_hwif_t *hwif) 950static int init_irq (ide_hwif_t *hwif)
951{ 951{
952 struct ide_io_ports *io_ports = &hwif->io_ports; 952 struct ide_io_ports *io_ports = &hwif->io_ports;
953 irq_handler_t irq_handler;
953 int sa = 0; 954 int sa = 0;
954 955
955 mutex_lock(&ide_cfg_mtx); 956 mutex_lock(&ide_cfg_mtx);
@@ -959,6 +960,10 @@ static int init_irq (ide_hwif_t *hwif)
959 hwif->timer.function = &ide_timer_expiry; 960 hwif->timer.function = &ide_timer_expiry;
960 hwif->timer.data = (unsigned long)hwif; 961 hwif->timer.data = (unsigned long)hwif;
961 962
963 irq_handler = hwif->host->irq_handler;
964 if (irq_handler == NULL)
965 irq_handler = ide_intr;
966
962#if defined(__mc68000__) 967#if defined(__mc68000__)
963 sa = IRQF_SHARED; 968 sa = IRQF_SHARED;
964#endif /* __mc68000__ */ 969#endif /* __mc68000__ */
@@ -969,7 +974,7 @@ static int init_irq (ide_hwif_t *hwif)
969 if (io_ports->ctl_addr) 974 if (io_ports->ctl_addr)
970 hwif->tp_ops->set_irq(hwif, 1); 975 hwif->tp_ops->set_irq(hwif, 1);
971 976
972 if (request_irq(hwif->irq, &ide_intr, sa, hwif->name, hwif)) 977 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
973 goto out_up; 978 goto out_up;
974 979
975 if (!hwif->rqsize) { 980 if (!hwif->rqsize) {
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 1d8978b3314a..a7b9287ee0d4 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -231,7 +231,7 @@ static const struct ide_proc_devset ide_generic_settings[] = {
231 IDE_PROC_DEVSET(pio_mode, 0, 255), 231 IDE_PROC_DEVSET(pio_mode, 0, 255),
232 IDE_PROC_DEVSET(unmaskirq, 0, 1), 232 IDE_PROC_DEVSET(unmaskirq, 0, 1),
233 IDE_PROC_DEVSET(using_dma, 0, 1), 233 IDE_PROC_DEVSET(using_dma, 0, 1),
234 { 0 }, 234 { NULL },
235}; 235};
236 236
237static void proc_ide_settings_warn(void) 237static void proc_ide_settings_warn(void)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index bb450a7608c2..4e6181c7bbda 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2166,7 +2166,7 @@ static const struct ide_proc_devset idetape_settings[] = {
2166 __IDE_PROC_DEVSET(speed, 0, 0xffff, NULL, NULL), 2166 __IDE_PROC_DEVSET(speed, 0, 0xffff, NULL, NULL),
2167 __IDE_PROC_DEVSET(tdsc, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 2167 __IDE_PROC_DEVSET(tdsc, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX,
2168 mulf_tdsc, divf_tdsc), 2168 mulf_tdsc, divf_tdsc),
2169 { 0 }, 2169 { NULL },
2170}; 2170};
2171#endif 2171#endif
2172 2172
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index b4d44e571d76..8132533d71f9 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -212,6 +212,9 @@ static void lg_notify(struct virtqueue *vq)
212 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0); 212 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0);
213} 213}
214 214
215/* An extern declaration inside a C file is bad form. Don't do it. */
216extern void lguest_setup_irq(unsigned int irq);
217
215/* This routine finds the first virtqueue described in the configuration of 218/* This routine finds the first virtqueue described in the configuration of
216 * this device and sets it up. 219 * this device and sets it up.
217 * 220 *
@@ -266,6 +269,9 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
266 goto unmap; 269 goto unmap;
267 } 270 }
268 271
272 /* Make sure the interrupt is allocated. */
273 lguest_setup_irq(lvq->config.irq);
274
269 /* Tell the interrupt for this virtqueue to go to the virtio_ring 275 /* Tell the interrupt for this virtqueue to go to the virtio_ring
270 * interrupt handler. */ 276 * interrupt handler. */
271 /* FIXME: We used to have a flag for the Host to tell us we could use 277 /* FIXME: We used to have a flag for the Host to tell us we could use
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 03b4cd0a6344..a307f87eb90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -214,12 +214,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
214 return mddev; 214 return mddev;
215} 215}
216 216
217static void mddev_delayed_delete(struct work_struct *ws) 217static void mddev_delayed_delete(struct work_struct *ws);
218{
219 mddev_t *mddev = container_of(ws, mddev_t, del_work);
220 kobject_del(&mddev->kobj);
221 kobject_put(&mddev->kobj);
222}
223 218
224static void mddev_put(mddev_t *mddev) 219static void mddev_put(mddev_t *mddev)
225{ 220{
@@ -3542,6 +3537,21 @@ static struct kobj_type md_ktype = {
3542 3537
3543int mdp_major = 0; 3538int mdp_major = 0;
3544 3539
3540static void mddev_delayed_delete(struct work_struct *ws)
3541{
3542 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3543
3544 if (mddev->private == &md_redundancy_group) {
3545 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3546 if (mddev->sysfs_action)
3547 sysfs_put(mddev->sysfs_action);
3548 mddev->sysfs_action = NULL;
3549 mddev->private = NULL;
3550 }
3551 kobject_del(&mddev->kobj);
3552 kobject_put(&mddev->kobj);
3553}
3554
3545static int md_alloc(dev_t dev, char *name) 3555static int md_alloc(dev_t dev, char *name)
3546{ 3556{
3547 static DEFINE_MUTEX(disks_mutex); 3557 static DEFINE_MUTEX(disks_mutex);
@@ -4033,13 +4043,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4033 mddev->queue->merge_bvec_fn = NULL; 4043 mddev->queue->merge_bvec_fn = NULL;
4034 mddev->queue->unplug_fn = NULL; 4044 mddev->queue->unplug_fn = NULL;
4035 mddev->queue->backing_dev_info.congested_fn = NULL; 4045 mddev->queue->backing_dev_info.congested_fn = NULL;
4036 if (mddev->pers->sync_request) {
4037 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4038 if (mddev->sysfs_action)
4039 sysfs_put(mddev->sysfs_action);
4040 mddev->sysfs_action = NULL;
4041 }
4042 module_put(mddev->pers->owner); 4046 module_put(mddev->pers->owner);
4047 if (mddev->pers->sync_request)
4048 mddev->private = &md_redundancy_group;
4043 mddev->pers = NULL; 4049 mddev->pers = NULL;
4044 /* tell userspace to handle 'inactive' */ 4050 /* tell userspace to handle 'inactive' */
4045 sysfs_notify_dirent(mddev->sysfs_state); 4051 sysfs_notify_dirent(mddev->sysfs_state);
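/*
 * A sketch of the intent visible in these md.c hunks: do_md_stop() no
 * longer removes the md_redundancy_group sysfs attributes inline; it only
 * tags the mddev (mddev->private = &md_redundancy_group), and the group
 * removal plus sysfs_put() now run later from the mddev_delayed_delete()
 * work item. Presumably this moves the sysfs teardown out of contexts
 * that can themselves be entered from a sysfs attribute access, where an
 * inline sysfs_remove_group() could deadlock.
 */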
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 9c50e6f1c236..34ce2703d29a 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -248,12 +248,15 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 248
249 sg_init_one(&sg, data_buf, len); 249 sg_init_one(&sg, data_buf, len);
250 250
251 /* 251 if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
252 * The spec states that CSR and CID accesses have a timeout 252 /*
253 * of 64 clock cycles. 253 * The spec states that CSR and CID accesses have a timeout
254 */ 254 * of 64 clock cycles.
255 data.timeout_ns = 0; 255 */
256 data.timeout_clks = 64; 256 data.timeout_ns = 0;
257 data.timeout_clks = 64;
258 } else
259 mmc_set_data_timeout(&data, card);
257 260
258 mmc_wait_for_req(host, &mrq); 261 mmc_wait_for_req(host, &mrq);
259 262
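/*
 * Reading of this hunk: the fixed 64-clock timeout only applies to the
 * CSD/CID reads that the original comment described; any other opcode
 * passed to mmc_send_cxd_data() now gets a card-appropriate timeout
 * computed by mmc_set_data_timeout() instead of the too-short constant.
 */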
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d44f741ae229..6d9f810565c8 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -821,7 +821,8 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
821 if (!(info->flags & IS_POW2PS)) 821 if (!(info->flags & IS_POW2PS))
822 return info; 822 return info;
823 } 823 }
824 } 824 } else
825 return info;
825 } 826 }
826 } 827 }
827 828
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 4b122e7ab4b3..229718222db7 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -46,16 +46,19 @@ static int physmap_flash_remove(struct platform_device *dev)
46 46
47 physmap_data = dev->dev.platform_data; 47 physmap_data = dev->dev.platform_data;
48 48
49 if (info->cmtd) {
49#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
50 if (info->nr_parts) { 51 if (info->nr_parts || physmap_data->nr_parts)
51 del_mtd_partitions(info->cmtd); 52 del_mtd_partitions(info->cmtd);
52 kfree(info->parts); 53 else
53 } else if (physmap_data->nr_parts) 54 del_mtd_device(info->cmtd);
54 del_mtd_partitions(info->cmtd);
55 else
56 del_mtd_device(info->cmtd);
57#else 55#else
58 del_mtd_device(info->cmtd); 56 del_mtd_device(info->cmtd);
57#endif
58 }
59#ifdef CONFIG_MTD_PARTITIONS
60 if (info->nr_parts)
61 kfree(info->parts);
59#endif 62#endif
60 63
61#ifdef CONFIG_MTD_CONCAT 64#ifdef CONFIG_MTD_CONCAT
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 917cf8d3ae95..c2dfd3ea353d 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -149,7 +149,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
149 149
150static struct platform_driver orion_nand_driver = { 150static struct platform_driver orion_nand_driver = {
151 .probe = orion_nand_probe, 151 .probe = orion_nand_probe,
152 .remove = orion_nand_remove, 152 .remove = __devexit_p(orion_nand_remove),
153 .driver = { 153 .driver = {
154 .name = "orion_nand", 154 .name = "orion_nand",
155 .owner = THIS_MODULE, 155 .owner = THIS_MODULE,
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index c69c0cdba4a2..811a3ccd14c1 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -4,7 +4,7 @@
4# 4#
5 5
6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o 6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
7obj-$(CONFIG_ARM_ETHERH) += etherh.o ../8390.o 7obj-$(CONFIG_ARM_ETHERH) += etherh.o
8obj-$(CONFIG_ARM_ETHER3) += ether3.o 8obj-$(CONFIG_ARM_ETHER3) += ether3.o
9obj-$(CONFIG_ARM_ETHER1) += ether1.o 9obj-$(CONFIG_ARM_ETHER1) += ether1.o
10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o 10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 54b52e5b1821..f52f668c49bf 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -641,15 +641,15 @@ static const struct net_device_ops etherh_netdev_ops = {
641 .ndo_open = etherh_open, 641 .ndo_open = etherh_open,
642 .ndo_stop = etherh_close, 642 .ndo_stop = etherh_close,
643 .ndo_set_config = etherh_set_config, 643 .ndo_set_config = etherh_set_config,
644 .ndo_start_xmit = ei_start_xmit, 644 .ndo_start_xmit = __ei_start_xmit,
645 .ndo_tx_timeout = ei_tx_timeout, 645 .ndo_tx_timeout = __ei_tx_timeout,
646 .ndo_get_stats = ei_get_stats, 646 .ndo_get_stats = __ei_get_stats,
647 .ndo_set_multicast_list = ei_set_multicast_list, 647 .ndo_set_multicast_list = __ei_set_multicast_list,
648 .ndo_validate_addr = eth_validate_addr, 648 .ndo_validate_addr = eth_validate_addr,
649 .ndo_set_mac_address = eth_mac_addr, 649 .ndo_set_mac_address = eth_mac_addr,
650 .ndo_change_mtu = eth_change_mtu, 650 .ndo_change_mtu = eth_change_mtu,
651#ifdef CONFIG_NET_POLL_CONTROLLER 651#ifdef CONFIG_NET_POLL_CONTROLLER
652 .ndo_poll_controller = ei_poll, 652 .ndo_poll_controller = __ei_poll,
653#endif 653#endif
654}; 654};
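/*
 * This pairs with the Makefile hunk above that stops linking ../8390.o:
 * the driver now points its net_device_ops at the exported __ei_* entry
 * points, which are presumably provided by the shared 8390 library
 * object built independently of etherh.
 */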
655 655
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 1cf2f949c0b4..f3a127434897 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -560,7 +560,7 @@ ks8695_reset(struct ks8695_priv *ksp)
560 msleep(1); 560 msleep(1);
561 } 561 }
562 562
563 if (reset_timeout == 0) { 563 if (reset_timeout < 0) {
564 dev_crit(ksp->dev, 564 dev_crit(ksp->dev,
565 "Timeout waiting for DMA engines to reset\n"); 565 "Timeout waiting for DMA engines to reset\n");
566 /* And blithely carry on */ 566 /* And blithely carry on */
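/*
 * A sketch of the countdown idiom this hunk (and the sungem and
 * uec_mdio hunks later in this series) corrects; LIMIT and done() are
 * hypothetical stand-ins for the elided driver code:
 *
 *	int timeout = LIMIT;
 *	while (timeout--) {
 *		if (done())
 *			break;	// may legitimately leave timeout == 0
 *		msleep(1);
 *	}
 *
 * On expiry the post-decrement leaves timeout at -1, never 0, so a
 * "timeout == 0" test both misses real timeouts and misfires when
 * done() succeeds on the final pass; a signed counter checked with
 * "timeout < 0" is the reliable form.
 */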
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9fb388388fb7..e0578fe8c0db 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4113,7 +4113,7 @@ static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
4113 const struct net_device_ops *slave_ops 4113 const struct net_device_ops *slave_ops
4114 = slave->dev->netdev_ops; 4114 = slave->dev->netdev_ops;
4115 if (slave_ops->ndo_neigh_setup) 4115 if (slave_ops->ndo_neigh_setup)
4116 return slave_ops->ndo_neigh_setup(dev, parms); 4116 return slave_ops->ndo_neigh_setup(slave->dev, parms);
4117 } 4117 }
4118 return 0; 4118 return 0;
4119} 4119}
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 08b34051c646..a6e1a35a13cb 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -957,13 +957,14 @@ jme_process_receive(struct jme_adapter *jme, int limit)
957 goto out_inc; 957 goto out_inc;
958 958
959 i = atomic_read(&rxring->next_to_clean); 959 i = atomic_read(&rxring->next_to_clean);
960 while (limit-- > 0) { 960 while (limit > 0) {
961 rxdesc = rxring->desc; 961 rxdesc = rxring->desc;
962 rxdesc += i; 962 rxdesc += i;
963 963
964 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 964 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
966 goto out; 966 goto out;
967 --limit;
967 968
968 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 969 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
969 970
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index e5cb6b1f0ebd..2404a838b1fe 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1035,7 +1035,8 @@ static int el3_rx(struct net_device *dev, int worklimit)
1035 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 1035 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
1036 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); 1036 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
1037 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && 1037 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
1038 (--worklimit >= 0)) { 1038 worklimit > 0) {
1039 worklimit--;
1039 if (rx_status & 0x4000) { /* Error, update stats. */ 1040 if (rx_status & 0x4000) { /* Error, update stats. */
1040 short error = rx_status & 0x3800; 1041 short error = rx_status & 0x3800;
1041 dev->stats.rx_errors++; 1042 dev->stats.rx_errors++;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 73ecc657999d..1e01b8a6dbf3 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -857,7 +857,8 @@ static int el3_rx(struct net_device *dev)
857 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 857 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
858 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 858 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
859 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 859 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
860 (--worklimit >= 0)) { 860 worklimit > 0) {
861 worklimit--;
861 if (rx_status & 0x4000) { /* Error, update stats. */ 862 if (rx_status & 0x4000) { /* Error, update stats. */
862 short error = rx_status & 0x3800; 863 short error = rx_status & 0x3800;
863 dev->stats.rx_errors++; 864 dev->stats.rx_errors++;
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 870b4c33f108..a45952e72018 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -42,6 +42,16 @@
42 #define SMC_USE_16BIT 0 42 #define SMC_USE_16BIT 0
43 #define SMC_USE_32BIT 1 43 #define SMC_USE_32BIT 1
44 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW 44 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
45#elif defined(CONFIG_ARCH_OMAP34XX)
46 #define SMC_USE_16BIT 0
47 #define SMC_USE_32BIT 1
48 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
49 #define SMC_MEM_RESERVED 1
50#elif defined(CONFIG_ARCH_OMAP24XX)
51 #define SMC_USE_16BIT 0
52 #define SMC_USE_32BIT 1
53 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
54 #define SMC_MEM_RESERVED 1
45#else 55#else
46/* 56/*
47 * Default configuration 57 * Default configuration
@@ -675,6 +685,7 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
675#define CHIP_9116 0x0116 685#define CHIP_9116 0x0116
676#define CHIP_9117 0x0117 686#define CHIP_9117 0x0117
677#define CHIP_9118 0x0118 687#define CHIP_9118 0x0118
688#define CHIP_9211 0x9211
678#define CHIP_9215 0x115A 689#define CHIP_9215 0x115A
679#define CHIP_9217 0x117A 690#define CHIP_9217 0x117A
680#define CHIP_9218 0x118A 691#define CHIP_9218 0x118A
@@ -689,6 +700,7 @@ static const struct chip_id chip_ids[] = {
689 { CHIP_9116, "LAN9116" }, 700 { CHIP_9116, "LAN9116" },
690 { CHIP_9117, "LAN9117" }, 701 { CHIP_9117, "LAN9117" },
691 { CHIP_9118, "LAN9118" }, 702 { CHIP_9118, "LAN9118" },
703 { CHIP_9211, "LAN9211" },
692 { CHIP_9215, "LAN9215" }, 704 { CHIP_9215, "LAN9215" },
693 { CHIP_9217, "LAN9217" }, 705 { CHIP_9217, "LAN9217" },
694 { CHIP_9218, "LAN9218" }, 706 { CHIP_9218, "LAN9218" },
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 8d64b1da0465..0fcb7503363d 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1229,7 +1229,7 @@ static void gem_reset(struct gem *gp)
1229 break; 1229 break;
1230 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1230 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
1231 1231
1232 if (limit <= 0) 1232 if (limit < 0)
1233 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1233 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1234 1234
1235 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) 1235 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index b080f9493d83..dabdf59f8016 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -1473,7 +1473,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1473{ 1473{
1474 u32 reg; 1474 u32 reg;
1475 1475
1476 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 1476 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1477 return; 1478 return;
1478 1479
1479 reg = MII_TG3_MISC_SHDW_WREN | 1480 reg = MII_TG3_MISC_SHDW_WREN |
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 5f601773c260..e2150b3c83d9 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -121,11 +121,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
121 goto err_out_trdev; 121 goto err_out_trdev;
122 } 122 }
123 123
124 ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
125 dev->name, dev);
126 if (ret)
127 goto err_out_region;
128
129 dev->base_addr = pci_ioaddr; 124 dev->base_addr = pci_ioaddr;
130 dev->irq = pci_irq_line; 125 dev->irq = pci_irq_line;
131 dev->dma = 0; 126 dev->dma = 0;
@@ -142,7 +137,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
142 ret = tmsdev_init(dev, &pdev->dev); 137 ret = tmsdev_init(dev, &pdev->dev);
143 if (ret) { 138 if (ret) {
144 printk("%s: unable to get memory for dev->priv.\n", dev->name); 139 printk("%s: unable to get memory for dev->priv.\n", dev->name);
145 goto err_out_irq; 140 goto err_out_region;
146 } 141 }
147 142
148 tp = netdev_priv(dev); 143 tp = netdev_priv(dev);
@@ -157,6 +152,11 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
157 152
158 tp->tmspriv = cardinfo; 153 tp->tmspriv = cardinfo;
159 154
155 ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
156 dev->name, dev);
157 if (ret)
158 goto err_out_tmsdev;
159
160 dev->open = tms380tr_open; 160 dev->open = tms380tr_open;
161 dev->stop = tms380tr_close; 161 dev->stop = tms380tr_close;
162 pci_set_drvdata(pdev, dev); 162 pci_set_drvdata(pdev, dev);
@@ -164,15 +164,15 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
164 164
165 ret = register_netdev(dev); 165 ret = register_netdev(dev);
166 if (ret) 166 if (ret)
167 goto err_out_tmsdev; 167 goto err_out_irq;
168 168
169 return 0; 169 return 0;
170 170
171err_out_irq:
172 free_irq(pdev->irq, dev);
171err_out_tmsdev: 173err_out_tmsdev:
172 pci_set_drvdata(pdev, NULL); 174 pci_set_drvdata(pdev, NULL);
173 tmsdev_term(dev); 175 tmsdev_term(dev);
174err_out_irq:
175 free_irq(pdev->irq, dev);
176err_out_region: 176err_out_region:
177 release_region(pci_ioaddr, TMS_PCI_IO_EXTENT); 177 release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
178err_out_trdev: 178err_out_trdev:
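/*
 * A sketch of the reordering in these tmspci hunks: the IRQ is now
 * requested only after tmsdev_init() has set up the private data the
 * handler may touch, and the error labels are resequenced so the unwind
 * mirrors the setup order (free_irq before tmsdev_term), instead of the
 * old layout where a shared interrupt could fire into a handler whose
 * device state was not yet initialized.
 */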
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 54635911305c..0ada4edd56eb 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -107,7 +107,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
107static int uec_mdio_reset(struct mii_bus *bus) 107static int uec_mdio_reset(struct mii_bus *bus)
108{ 108{
109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; 109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
110 unsigned int timeout = PHY_INIT_TIMEOUT; 110 int timeout = PHY_INIT_TIMEOUT;
111 111
112 mutex_lock(&bus->mdio_lock); 112 mutex_lock(&bus->mdio_lock);
113 113
@@ -123,7 +123,7 @@ static int uec_mdio_reset(struct mii_bus *bus)
123 123
124 mutex_unlock(&bus->mdio_lock); 124 mutex_unlock(&bus->mdio_lock);
125 125
126 if (timeout <= 0) { 126 if (timeout < 0) {
127 printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name); 127 printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
128 return -EBUSY; 128 return -EBUSY;
129 } 129 }
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5b67bbf1987e..81682c6defa0 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -635,6 +635,10 @@ static const struct usb_device_id products[] = {
635 USB_DEVICE(0x0a47, 0x9601), /* Hirose USB-100 */ 635 USB_DEVICE(0x0a47, 0x9601), /* Hirose USB-100 */
636 .driver_info = (unsigned long)&dm9601_info, 636 .driver_info = (unsigned long)&dm9601_info,
637 }, 637 },
638 {
639 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
640 .driver_info = (unsigned long)&dm9601_info,
641 },
638 {}, // END 642 {}, // END
639}; 643};
640 644
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36bafeb353ce..129e2d330abb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3868,7 +3868,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3868 } 3868 }
3869 err = iwl_eeprom_check_version(priv); 3869 err = iwl_eeprom_check_version(priv);
3870 if (err) 3870 if (err)
3871 goto out_iounmap; 3871 goto out_free_eeprom;
3872 3872
3873 /* extract MAC Address */ 3873 /* extract MAC Address */
3874 iwl_eeprom_get_mac(priv, priv->mac_addr); 3874 iwl_eeprom_get_mac(priv, priv->mac_addr);
@@ -3945,6 +3945,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3945 return 0; 3945 return 0;
3946 3946
3947 out_remove_sysfs: 3947 out_remove_sysfs:
3948 destroy_workqueue(priv->workqueue);
3949 priv->workqueue = NULL;
3948 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); 3950 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3949 out_uninit_drv: 3951 out_uninit_drv:
3950 iwl_uninit_drv(priv); 3952 iwl_uninit_drv(priv);
@@ -3953,8 +3955,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3953 out_iounmap: 3955 out_iounmap:
3954 pci_iounmap(pdev, priv->hw_base); 3956 pci_iounmap(pdev, priv->hw_base);
3955 out_pci_release_regions: 3957 out_pci_release_regions:
3956 pci_release_regions(pdev);
3957 pci_set_drvdata(pdev, NULL); 3958 pci_set_drvdata(pdev, NULL);
3959 pci_release_regions(pdev);
3958 out_pci_disable_device: 3960 out_pci_disable_device:
3959 pci_disable_device(pdev); 3961 pci_disable_device(pdev);
3960 out_ieee80211_free_hw: 3962 out_ieee80211_free_hw:
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 93be74a1f139..57dd34e256d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -7911,7 +7911,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
 	if (err < 0) {
 		IWL_DEBUG_INFO("Failed to init the card\n");
-		goto out_remove_sysfs;
+		goto out_iounmap;
 	}
 
 	/***********************
@@ -7921,7 +7921,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	err = iwl3945_eeprom_init(priv);
 	if (err) {
 		IWL_ERROR("Unable to init EEPROM\n");
-		goto out_remove_sysfs;
+		goto out_iounmap;
 	}
 	/* MAC Address location in EEPROM same for 3945/4965 */
 	get_eeprom_mac(priv, priv->mac_addr);
@@ -7975,7 +7975,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	err = iwl3945_init_channel_map(priv);
 	if (err) {
 		IWL_ERROR("initializing regulatory failed: %d\n", err);
-		goto out_release_irq;
+		goto out_unset_hw_setting;
 	}
 
 	err = iwl3945_init_geos(priv);
@@ -8045,25 +8045,22 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	return 0;
 
  out_remove_sysfs:
+	destroy_workqueue(priv->workqueue);
+	priv->workqueue = NULL;
 	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
  out_free_geos:
 	iwl3945_free_geos(priv);
  out_free_channel_map:
 	iwl3945_free_channel_map(priv);
-
-
- out_release_irq:
-	destroy_workqueue(priv->workqueue);
-	priv->workqueue = NULL;
+ out_unset_hw_setting:
 	iwl3945_unset_hw_setting(priv);
-
  out_iounmap:
 	pci_iounmap(pdev, priv->hw_base);
  out_pci_release_regions:
 	pci_release_regions(pdev);
  out_pci_disable_device:
-	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
  out_ieee80211_free_hw:
 	ieee80211_free_hw(priv->hw);
  out:
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 34561e6e816b..f170106bf0ae 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -710,10 +710,11 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
 					   __le32 req_id)
 {
 	struct p54_common *priv = dev->priv;
-	struct sk_buff *entry = priv->tx_queue.next;
+	struct sk_buff *entry;
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->tx_queue.lock, flags);
+	entry = priv->tx_queue.next;
 	while (entry != (struct sk_buff *)&priv->tx_queue) {
 		struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
 
@@ -732,7 +733,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
 	struct p54_common *priv = dev->priv;
 	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
 	struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
-	struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
+	struct sk_buff *entry;
 	u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
 	struct memrecord *range = NULL;
 	u32 freed = 0;
@@ -741,6 +742,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
 	int count, idx;
 
 	spin_lock_irqsave(&priv->tx_queue.lock, flags);
+	entry = (struct sk_buff *) priv->tx_queue.next;
 	while (entry != (struct sk_buff *)&priv->tx_queue) {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
 		struct p54_hdr *entry_hdr;
@@ -976,7 +978,7 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
 			       struct p54_hdr *data, u32 len)
 {
 	struct p54_common *priv = dev->priv;
-	struct sk_buff *entry = priv->tx_queue.next;
+	struct sk_buff *entry;
 	struct sk_buff *target_skb = NULL;
 	struct ieee80211_tx_info *info;
 	struct memrecord *range;
@@ -1014,6 +1016,7 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
 		}
 	}
 
+	entry = priv->tx_queue.next;
 	while (left--) {
 		u32 hole_size;
 		info = IEEE80211_SKB_CB(entry);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index af6b5847be5c..3e2ac2bbb12f 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1952,6 +1952,8 @@ static struct usb_device_id rt2500usb_device_table[] = {
 	{ USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) },
+	/* CNet */
+	{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* Conceptronic */
 	{ USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* D-LINK */
@@ -1976,14 +1978,20 @@ static struct usb_device_id rt2500usb_device_table[] = {
 	{ USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) },
 	{ USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) },
+	/* Sagem */
+	{ USB_DEVICE(0x079b, 0x004b), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* Siemens */
 	{ USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* SMC */
 	{ USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* Spairon */
 	{ USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) },
+	/* SURECOM */
+	{ USB_DEVICE(0x0769, 0x11f3), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* Trust */
 	{ USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) },
+	/* VTech */
+	{ USB_DEVICE(0x0f88, 0x3012), USB_DEVICE_DATA(&rt2500usb_ops) },
 	/* Zinwell */
 	{ USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) },
 	{ 0, }
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 96a8d69f8790..cefee1b26cd8 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2281,7 +2281,18 @@ static const struct rt2x00_ops rt73usb_ops = {
  */
 static struct usb_device_id rt73usb_device_table[] = {
 	/* AboCom */
+	{ USB_DEVICE(0x07b8, 0xb21b), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x07b8, 0xb21c), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x07b8, 0xb21e), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x07b8, 0xb21f), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* AL */
+	{ USB_DEVICE(0x14b2, 0x3c10), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Amigo */
+	{ USB_DEVICE(0x148f, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0eb0, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* AMIT */
+	{ USB_DEVICE(0x18c5, 0x0002), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Askey */
 	{ USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* ASUS */
@@ -2294,7 +2305,9 @@ static struct usb_device_id rt73usb_device_table[] = {
 	{ USB_DEVICE(0x050d, 0x905c), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Billionton */
 	{ USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Buffalo */
+	{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* CNet */
 	{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) },
@@ -2308,6 +2321,11 @@ static struct usb_device_id rt73usb_device_table[] = {
 	{ USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Edimax */
+	{ USB_DEVICE(0x7392, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x7392, 0x7618), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* EnGenius */
+	{ USB_DEVICE(0x1740, 0x3701), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Gemtek */
 	{ USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Gigabyte */
@@ -2328,22 +2346,34 @@ static struct usb_device_id rt73usb_device_table[] = {
 	{ USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Ralink */
+	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Qcom */
 	{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Samsung */
+	{ USB_DEVICE(0x04e8, 0x4471), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Senao */
 	{ USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Sitecom */
-	{ USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x0024), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x0027), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x002f), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Surecom */
 	{ USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Philips */
+	{ USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Zcom */
+	{ USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* ZyXEL */
+	{ USB_DEVICE(0x0586, 0x3415), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ 0, }
 };
 
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index a24e680d2b9c..2e940199fc89 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -993,6 +993,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
 	struct i810fb_par *par = info->par;
 	int line_length, vidmem, mode_valid = 0, retval = 0;
 	u32 vyres = var->yres_virtual, vxres = var->xres_virtual;
+
 	/*
 	 * Memory limit
 	 */
@@ -1002,12 +1003,12 @@ static int i810_check_params(struct fb_var_screeninfo *var,
 	if (vidmem > par->fb.size) {
 		vyres = par->fb.size/line_length;
 		if (vyres < var->yres) {
-			vyres = yres;
+			vyres = info->var.yres;
 			vxres = par->fb.size/vyres;
 			vxres /= var->bits_per_pixel >> 3;
 			line_length = get_line_length(par, vxres,
 						      var->bits_per_pixel);
-			vidmem = line_length * yres;
+			vidmem = line_length * info->var.yres;
 			if (vxres < var->xres) {
 				printk("i810fb: required video memory, "
 				       "%d bytes, for %dx%d-%d (virtual) "
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 48ff701d3a72..2552b9f325ee 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -2230,7 +2230,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
 
 static struct platform_driver pxafb_driver = {
 	.probe		= pxafb_probe,
-	.remove		= pxafb_remove,
+	.remove		= __devexit_p(pxafb_remove),
 	.suspend	= pxafb_suspend,
 	.resume		= pxafb_resume,
 	.driver		= {
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0e2b8fd24df1..2c5d069e5f06 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -446,7 +446,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
 {
 	struct sh_mobile_lcdc_chan *ch;
 	struct sh_mobile_lcdc_board_cfg *board_cfg;
-	unsigned long tmp;
 	int k;
 
 	/* tell the board code to disable the panel */
@@ -456,9 +455,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
 		if (board_cfg->display_off)
 			board_cfg->display_off(board_cfg->board_data);
 
-		/* cleanup deferred io if SYS bus */
-		tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
-		if (ch->ldmt1r_value & (1 << 12) && tmp) {
+		/* cleanup deferred io if enabled */
+		if (ch->info.fbdefio) {
 			fb_deferred_io_cleanup(&ch->info);
 			ch->info.fbdefio = NULL;
 		}
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index f0c2b7a1a175..734d9806a872 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -269,7 +269,7 @@ static int __devinit gef_wdt_probe(struct of_device *dev,
 		bus_clk = 133; /* in MHz */
 
 	freq = fsl_get_sys_freq();
-	if (freq > 0)
+	if (freq != -1)
 		bus_clk = freq;
 
 	/* Map devices registers into memory */
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 0b798fdaa378..74c92d384112 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -21,6 +21,7 @@
 #include <linux/watchdog.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <mach/timex.h>
 #include <mach/regs-timer.h>
 
 #define WDT_DEFAULT_TIME	5	/* seconds */
diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c
index 14a339f58b6a..b64ae1a17832 100644
--- a/drivers/watchdog/orion5x_wdt.c
+++ b/drivers/watchdog/orion5x_wdt.c
@@ -29,6 +29,7 @@
 #define WDT_EN			0x0010
 #define WDT_VAL			(TIMER_VIRT_BASE + 0x0024)
 
+#define ORION5X_TCLK		166666667
 #define WDT_MAX_DURATION	(0xffffffff / ORION5X_TCLK)
 #define WDT_IN_USE		0
 #define WDT_OK_TO_CLOSE		1
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 57027f4653ce..f3553fa40b17 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -34,104 +34,89 @@
 #include <asm/time.h>
 #include <asm/mach-rc32434/integ.h>
 
-#define MAX_TIMEOUT			20
-#define RC32434_WDT_INTERVAL		(15 * HZ)
-
-#define VERSION "0.2"
+#define VERSION "0.4"
 
 static struct {
-	struct completion stop;
-	int running;
-	struct timer_list timer;
-	int queue;
-	int default_ticks;
 	unsigned long inuse;
 } rc32434_wdt_device;
 
 static struct integ __iomem *wdt_reg;
-static int ticks = 100 * HZ;
 
 static int expect_close;
-static int timeout;
+
+/* Board internal clock speed in Hz,
+ * the watchdog timer ticks at. */
+extern unsigned int idt_cpu_freq;
+
+/* translate wtcompare value to seconds and vice versa */
+#define WTCOMP2SEC(x)	(x / idt_cpu_freq)
+#define SEC2WTCOMP(x)	(x * idt_cpu_freq)
+
+/* Use a default timeout of 20s. This should be
+ * safe for CPU clock speeds up to 400MHz, as
+ * ((2 ^ 32) - 1) / (400MHz / 2) = 21s. */
+#define WATCHDOG_TIMEOUT 20
+
+static int timeout = WATCHDOG_TIMEOUT;
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+/* apply or and nand masks to data read from addr and write back */
+#define SET_BITS(addr, or, nand) \
+	writel((readl(&addr) | or) & ~nand, &addr)
 
 static void rc32434_wdt_start(void)
 {
-	u32 val;
-
-	if (!rc32434_wdt_device.inuse) {
-		writel(0, &wdt_reg->wtcount);
+	u32 or, nand;
 
-		val = RC32434_ERR_WRE;
-		writel(readl(&wdt_reg->errcs) | val, &wdt_reg->errcs);
+	/* zero the counter before enabling */
+	writel(0, &wdt_reg->wtcount);
 
-		val = RC32434_WTC_EN;
-		writel(readl(&wdt_reg->wtc) | val, &wdt_reg->wtc);
-	}
-	rc32434_wdt_device.running++;
-}
+	/* don't generate a non-maskable interrupt,
+	 * do a warm reset instead */
+	nand = 1 << RC32434_ERR_WNE;
+	or = 1 << RC32434_ERR_WRE;
 
-static void rc32434_wdt_stop(void)
-{
-	u32 val;
+	/* reset the ERRCS timeout bit in case it's set */
+	nand |= 1 << RC32434_ERR_WTO;
 
-	if (rc32434_wdt_device.running) {
+	SET_BITS(wdt_reg->errcs, or, nand);
 
-		val = ~RC32434_WTC_EN;
-		writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
+	/* reset WTC timeout bit and enable WDT */
+	nand = 1 << RC32434_WTC_TO;
+	or = 1 << RC32434_WTC_EN;
 
-		val = ~RC32434_ERR_WRE;
-		writel(readl(&wdt_reg->errcs) & val, &wdt_reg->errcs);
+	SET_BITS(wdt_reg->wtc, or, nand);
+}
 
-		rc32434_wdt_device.running = 0;
-	}
+static void rc32434_wdt_stop(void)
+{
+	/* Disable WDT */
+	SET_BITS(wdt_reg->wtc, 0, 1 << RC32434_WTC_EN);
 }
 
-static void rc32434_wdt_set(int new_timeout)
+static int rc32434_wdt_set(int new_timeout)
 {
-	u32 cmp = new_timeout * HZ;
-	u32 state, val;
+	int max_to = WTCOMP2SEC((u32)-1);
 
+	if (new_timeout < 0 || new_timeout > max_to) {
+		printk(KERN_ERR KBUILD_MODNAME
+			": timeout value must be between 0 and %d",
+			max_to);
+		return -EINVAL;
+	}
 	timeout = new_timeout;
-	/*
-	 * store and disable WTC
-	 */
-	state = (u32)(readl(&wdt_reg->wtc) & RC32434_WTC_EN);
-	val = ~RC32434_WTC_EN;
-	writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
-
-	writel(0, &wdt_reg->wtcount);
-	writel(cmp, &wdt_reg->wtcompare);
-
-	/*
-	 * restore WTC
-	 */
-
-	writel(readl(&wdt_reg->wtc) | state, &wdt_reg);
-}
+	writel(SEC2WTCOMP(timeout), &wdt_reg->wtcompare);
 
-static void rc32434_wdt_reset(void)
-{
-	ticks = rc32434_wdt_device.default_ticks;
+	return 0;
 }
 
-static void rc32434_wdt_update(unsigned long unused)
+static void rc32434_wdt_ping(void)
 {
-	if (rc32434_wdt_device.running)
-		ticks--;
-
 	writel(0, &wdt_reg->wtcount);
-
-	if (rc32434_wdt_device.queue && ticks)
-		mod_timer(&rc32434_wdt_device.timer,
-			jiffies + RC32434_WDT_INTERVAL);
-	else
-		complete(&rc32434_wdt_device.stop);
 }
 
 static int rc32434_wdt_open(struct inode *inode, struct file *file)
@@ -142,19 +127,23 @@ static int rc32434_wdt_open(struct inode *inode, struct file *file)
 	if (nowayout)
 		__module_get(THIS_MODULE);
 
+	rc32434_wdt_start();
+	rc32434_wdt_ping();
+
 	return nonseekable_open(inode, file);
 }
 
 static int rc32434_wdt_release(struct inode *inode, struct file *file)
 {
-	if (expect_close && nowayout == 0) {
+	if (expect_close == 42) {
 		rc32434_wdt_stop();
 		printk(KERN_INFO KBUILD_MODNAME ": disabling watchdog timer\n");
 		module_put(THIS_MODULE);
-	} else
+	} else {
 		printk(KERN_CRIT KBUILD_MODNAME
 			": device closed unexpectedly. WDT will not stop !\n");
-
+		rc32434_wdt_ping();
+	}
 	clear_bit(0, &rc32434_wdt_device.inuse);
 	return 0;
 }
@@ -174,10 +163,10 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data,
 				if (get_user(c, data + i))
 					return -EFAULT;
 				if (c == 'V')
-					expect_close = 1;
+					expect_close = 42;
 			}
 		}
-		rc32434_wdt_update(0);
+		rc32434_wdt_ping();
 		return len;
 	}
 	return 0;
@@ -197,11 +186,11 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
 	};
 	switch (cmd) {
 	case WDIOC_KEEPALIVE:
-		rc32434_wdt_reset();
+		rc32434_wdt_ping();
 		break;
 	case WDIOC_GETSTATUS:
 	case WDIOC_GETBOOTSTATUS:
-		value = readl(&wdt_reg->wtcount);
+		value = 0;
 		if (copy_to_user(argp, &value, sizeof(int)))
 			return -EFAULT;
 		break;
@@ -218,6 +207,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
 			break;
 		case WDIOS_DISABLECARD:
 			rc32434_wdt_stop();
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -225,11 +215,9 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
 	case WDIOC_SETTIMEOUT:
 		if (copy_from_user(&new_timeout, argp, sizeof(int)))
 			return -EFAULT;
-		if (new_timeout < 1)
+		if (rc32434_wdt_set(new_timeout))
 			return -EINVAL;
-		if (new_timeout > MAX_TIMEOUT)
-			return -EINVAL;
-		rc32434_wdt_set(new_timeout);
+		/* Fall through */
 	case WDIOC_GETTIMEOUT:
 		return copy_to_user(argp, &timeout, sizeof(int));
 	default:
@@ -254,15 +242,15 @@ static struct miscdevice rc32434_wdt_miscdev = {
 	.fops	= &rc32434_wdt_fops,
 };
 
-static char banner[] = KERN_INFO KBUILD_MODNAME
+static char banner[] __devinitdata = KERN_INFO KBUILD_MODNAME
 	": Watchdog Timer version " VERSION ", timer margin: %d sec\n";
 
-static int rc32434_wdt_probe(struct platform_device *pdev)
+static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
 {
 	int ret;
 	struct resource *r;
 
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb500_wdt_res");
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res");
 	if (!r) {
 		printk(KERN_ERR KBUILD_MODNAME
 			"failed to retrieve resources\n");
@@ -277,24 +265,12 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
 	}
 
 	ret = misc_register(&rc32434_wdt_miscdev);
-
 	if (ret < 0) {
 		printk(KERN_ERR KBUILD_MODNAME
 			"failed to register watchdog device\n");
 		goto unmap;
 	}
 
-	init_completion(&rc32434_wdt_device.stop);
-	rc32434_wdt_device.queue = 0;
-
-	clear_bit(0, &rc32434_wdt_device.inuse);
-
-	setup_timer(&rc32434_wdt_device.timer, rc32434_wdt_update, 0L);
-
-	rc32434_wdt_device.default_ticks = ticks;
-
-	rc32434_wdt_start();
-
 	printk(banner, timeout);
 
 	return 0;
@@ -304,23 +280,17 @@ unmap:
 	return ret;
 }
 
-static int rc32434_wdt_remove(struct platform_device *pdev)
+static int __devexit rc32434_wdt_remove(struct platform_device *pdev)
 {
-	if (rc32434_wdt_device.queue) {
-		rc32434_wdt_device.queue = 0;
-		wait_for_completion(&rc32434_wdt_device.stop);
-	}
 	misc_deregister(&rc32434_wdt_miscdev);
-
 	iounmap(wdt_reg);
-
 	return 0;
 }
 
 static struct platform_driver rc32434_wdt = {
 	.probe	= rc32434_wdt_probe,
-	.remove	= rc32434_wdt_remove,
+	.remove	= __devexit_p(rc32434_wdt_remove),
 	.driver	= {
 			.name = "rc32434_wdt",
 	}
 };
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 42491d728e99..37f31b5529aa 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -277,7 +277,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	if (*cow_ret == buf)
 		unlock_orig = 1;
 
-	WARN_ON(!btrfs_tree_locked(buf));
+	btrfs_assert_tree_locked(buf);
 
 	if (parent)
 		parent_start = parent->start;
@@ -2365,7 +2365,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (slot >= btrfs_header_nritems(upper) - 1)
 		return 1;
 
-	WARN_ON(!btrfs_tree_locked(path->nodes[1]));
+	btrfs_assert_tree_locked(path->nodes[1]);
 
 	right = read_node_slot(root, upper, slot + 1);
 	btrfs_tree_lock(right);
@@ -2562,7 +2562,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (right_nritems == 0)
 		return 1;
 
-	WARN_ON(!btrfs_tree_locked(path->nodes[1]));
+	btrfs_assert_tree_locked(path->nodes[1]);
 
 	left = read_node_slot(root, path->nodes[1], slot - 1);
 	btrfs_tree_lock(left);
@@ -4101,7 +4101,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 
 		next = read_node_slot(root, c, slot);
 		if (!path->skip_locking) {
-			WARN_ON(!btrfs_tree_locked(c));
+			btrfs_assert_tree_locked(c);
 			btrfs_tree_lock(next);
 			btrfs_set_lock_blocking(next);
 		}
@@ -4126,7 +4126,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 			reada_for_search(root, path, level, slot, 0);
 			next = read_node_slot(root, next, 0);
 			if (!path->skip_locking) {
-				WARN_ON(!btrfs_tree_locked(path->nodes[level]));
+				btrfs_assert_tree_locked(path->nodes[level]);
 				btrfs_tree_lock(next);
 				btrfs_set_lock_blocking(next);
 			}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index adda739a0215..3e18175248e0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -857,7 +857,7 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	struct inode *btree_inode = root->fs_info->btree_inode;
 	if (btrfs_header_generation(buf) ==
 	    root->fs_info->running_transaction->transid) {
-		WARN_ON(!btrfs_tree_locked(buf));
+		btrfs_assert_tree_locked(buf);
 
 		/* ugh, clear_extent_buffer_dirty can be expensive */
 		btrfs_set_lock_blocking(buf);
@@ -2361,7 +2361,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 
 	btrfs_set_lock_blocking(buf);
 
-	WARN_ON(!btrfs_tree_locked(buf));
+	btrfs_assert_tree_locked(buf);
 	if (transid != root->fs_info->generation) {
 		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
 		       "found %llu running %llu\n",
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6b5966aacf44..9abf81f71c46 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4418,13 +4418,13 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
-	BUG_ON(!btrfs_tree_locked(parent));
+	btrfs_assert_tree_locked(parent);
 	parent_level = btrfs_header_level(parent);
 	extent_buffer_get(parent);
 	path->nodes[parent_level] = parent;
 	path->slots[parent_level] = btrfs_header_nritems(parent);
 
-	BUG_ON(!btrfs_tree_locked(node));
+	btrfs_assert_tree_locked(node);
 	level = btrfs_header_level(node);
 	extent_buffer_get(node);
 	path->nodes[level] = node;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 85506c4a3af7..47b0a88c12a2 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -220,8 +220,8 @@ int btrfs_tree_unlock(struct extent_buffer *eb)
 	return 0;
 }
 
-int btrfs_tree_locked(struct extent_buffer *eb)
+void btrfs_assert_tree_locked(struct extent_buffer *eb)
 {
-	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
-		spin_is_locked(&eb->lock);
+	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+		assert_spin_locked(&eb->lock);
 }
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 6bb0afbff928..6c4ce457168c 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -21,11 +21,11 @@
 
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
-int btrfs_tree_locked(struct extent_buffer *eb);
 
 int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
 void btrfs_clear_lock_blocking(struct extent_buffer *eb);
+void btrfs_assert_tree_locked(struct extent_buffer *eb);
 #endif
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 5f3231b9633f..bff4052b05e7 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -198,9 +198,6 @@ static int mknod_ptmx(struct super_block *sb)
 
 	fsi->ptmx_dentry = dentry;
 	rc = 0;
-
-	printk(KERN_DEBUG "Created ptmx node in devpts ino %lu\n",
-			inode->i_ino);
 out:
 	mutex_unlock(&root->d_inode->i_mutex);
 	return rc;
@@ -369,8 +366,6 @@ static int new_pts_mount(struct file_system_type *fs_type, int flags,
 	struct pts_fs_info *fsi;
 	struct pts_mount_opts *opts;
 
-	printk(KERN_NOTICE "devpts: newinstance mount\n");
-
 	err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
 	if (err)
 		return err;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f18a919be70b..627f8c3337a3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -188,7 +188,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	struct ext4_group_desc *gdp;
 	struct ext4_super_block *es;
 	struct ext4_sb_info *sbi;
-	int fatal = 0, err, count;
+	int fatal = 0, err, count, cleared;
 	ext4_group_t flex_group;
 
 	if (atomic_read(&inode->i_count) > 1) {
@@ -248,8 +248,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		goto error_return;
 
 	/* Ok, now we can actually update the inode bitmaps.. */
-	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-					bit, bitmap_bh->b_data))
+	spin_lock(sb_bgl_lock(sbi, block_group));
+	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+	spin_unlock(sb_bgl_lock(sbi, block_group));
+	if (!cleared)
 		ext4_error(sb, "ext4_free_inode",
 			   "bit already cleared for inode %lu", ino);
 	else {
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index c837dfc2b3c6..321728f48f2d 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -80,7 +80,7 @@ static struct buffer_head *get_block_length(struct super_block *sb,
  * generated a larger block - this does occasionally happen with zlib).
  */
 int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
-			int length, u64 *next_index, int srclength)
+			int length, u64 *next_index, int srclength, int pages)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	struct buffer_head **bh;
@@ -185,6 +185,14 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
 		}
 
 		if (msblk->stream.avail_out == 0) {
+			if (page == pages) {
+				ERROR("zlib_inflate tried to "
+					"decompress too much data, "
+					"expected %d bytes. Zlib "
+					"data probably corrupt\n",
+					srclength);
+				goto release_mutex;
+			}
 			msblk->stream.next_out = buffer[page++];
 			msblk->stream.avail_out = PAGE_CACHE_SIZE;
 		}
@@ -268,7 +276,8 @@ block_release:
 		put_bh(bh[k]);
 
 read_failure:
-	ERROR("sb_bread failed reading block 0x%llx\n", cur_index);
+	ERROR("squashfs_read_data failed to read block 0x%llx\n",
+					(unsigned long long) index);
 	kfree(bh);
 	return -EIO;
 }
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index f29eda16d25e..1c4739e33af6 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -119,7 +119,7 @@ struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
 
 		entry->length = squashfs_read_data(sb, entry->data,
 					block, length, &entry->next_index,
-					cache->block_size);
+					cache->block_size, cache->pages);
 
 		spin_lock(&cache->lock);
 
@@ -406,7 +406,7 @@ int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
 	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
 		data[i] = buffer;
 	res = squashfs_read_data(sb, data, block, length |
-		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
+		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
 	kfree(data);
 	return res;
 }
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 7a63398bb855..9101dbde39ec 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -133,7 +133,8 @@ int squashfs_read_inode(struct inode *inode, long long ino)
 	type = le16_to_cpu(sqshb_ino->inode_type);
 	switch (type) {
 	case SQUASHFS_REG_TYPE: {
-		unsigned int frag_offset, frag_size, frag;
+		unsigned int frag_offset, frag;
+		int frag_size;
 		u64 frag_blk;
 		struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
 
@@ -175,7 +176,8 @@ int squashfs_read_inode(struct inode *inode, long long ino)
 		break;
 	}
 	case SQUASHFS_LREG_TYPE: {
-		unsigned int frag_offset, frag_size, frag;
+		unsigned int frag_offset, frag;
+		int frag_size;
 		u64 frag_blk;
 		struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
 
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 6b2515d027d5..0e9feb6adf7e 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -34,7 +34,7 @@ static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
 
 /* block.c */
 extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
-				int);
+				int, int);
 
 /* cache.c */
 extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 071df5b5b491..681ec0d83799 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -389,7 +389,7 @@ static int __init init_squashfs_fs(void)
 		return err;
 	}
 
-	printk(KERN_INFO "squashfs: version 4.0 (2009/01/03) "
+	printk(KERN_INFO "squashfs: version 4.0 (2009/01/31) "
 		"Phillip Lougher\n");
 
 	return 0;
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 08a86d5cdf1b..9a061accd8b8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -89,6 +89,8 @@ enum {
 	ATA_ID_DLF		= 128,
 	ATA_ID_CSFO		= 129,
 	ATA_ID_CFA_POWER	= 160,
+	ATA_ID_CFA_KEY_MGMT	= 162,
+	ATA_ID_CFA_MODES	= 163,
 	ATA_ID_ROT_SPEED	= 217,
 	ATA_ID_PIO4		= (1 << 1),
 
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-			       unsigned long align,
-			       unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
 				  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 384b38d3e8e2..161042746afc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -234,7 +234,6 @@ struct cpufreq_driver {
 	int	(*suspend)	(struct cpufreq_policy *policy, pm_message_t pmsg);
 	int	(*resume)	(struct cpufreq_policy *policy);
 	struct freq_attr	**attr;
-	bool			hide_interface;
 };
 
 /* flags */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f0413845f20e..1956c8d46d32 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
-* @refcount: reference count
-* @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
+* @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
 struct dma_device {
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index c37e9241fae7..ed21bd3dbd25 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -511,7 +511,6 @@ struct hd_driveid {
 	unsigned short	words69_70[2];	/* reserved words 69-70
 					 * future command overlap and queuing
 					 */
-	/* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
 	unsigned short	words71_74[4];	/* reserved words 71-74
 					 * for IDENTIFY PACKET DEVICE command
 					 */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index fe235b65207e..e0cedfe9fad4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -866,6 +866,7 @@ struct ide_host {
 	unsigned int	n_ports;
 	struct device	*dev[2];
 	unsigned int	(*init_chipset)(struct pci_dev *);
+	irq_handler_t	irq_handler;
 	unsigned long	host_flags;
 	void		*host_priv;
 	ide_hwif_t	*cur_port;	/* for hosts requiring serialization */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5d87bc09a1f5..dc18b87ed722 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -275,7 +275,7 @@ enum {
 	 * advised to wait only for the following duration before
 	 * doing SRST.
 	 */
-	ATA_TMOUT_PMP_SRST_WAIT	= 1000,
+	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
 
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		orig_n_elem;
 
 	int			dma_dir;
 
@@ -750,7 +751,8 @@ struct ata_port {
 	acpi_handle		acpi_handle;
 	struct ata_acpi_gtm	__acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+	/* owned by EH */
+	u8			sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
 
 /* The following initializer overrides a method to NULL whether one of
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f9..659366734f3f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1079,6 +1079,7 @@ extern void		synchronize_net(void);
 extern int		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
 extern int		init_dummy_netdev(struct net_device *dev);
+extern void		netdev_resync_ops(struct net_device *dev);
 
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d45..54a968b4b924 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
@@ -52,17 +53,18 @@
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE	8192
+#define PERCPU_MODULE_RESERVE	(8 << 10)
 #else
 #define PERCPU_MODULE_RESERVE	0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM						\
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif	/* PERCPU_ENOUGH_ROOM */
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
+	 PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
@@ -76,52 +78,90 @@
76 78
77#ifdef CONFIG_SMP 79#ifdef CONFIG_SMP
78 80
81#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
82
83/* minimum unit size, also is the maximum supported allocation size */
84#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
85
86/*
87 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
88 * back on the first chunk for dynamic percpu allocation if arch is
89 * manually allocating and mapping it for faster access (as a part of
90 * large page mapping for example).
91 *
92 * The following values give between one and two pages of free space
93 * after typical minimal boot (2-way SMP, single disk and NIC) with
 94 * both defconfig and a distro config on x86_64 and 32. A more
95 * intelligent way to determine this would be nice.
96 */
97#if BITS_PER_LONG > 32
98#define PERCPU_DYNAMIC_RESERVE (20 << 10)
99#else
100#define PERCPU_DYNAMIC_RESERVE (12 << 10)
101#endif
102
103extern void *pcpu_base_addr;
104
105typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
106typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
107
108extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
109 size_t static_size, size_t reserved_size,
110 ssize_t unit_size, ssize_t dyn_size,
111 void *base_addr,
112 pcpu_populate_pte_fn_t populate_pte_fn);
113
114/*
115 * Use this to get to a cpu's version of the dynamically
116 * allocated per-cpu object. Non-atomic access to the current CPU's
117 * version should probably be combined with get_cpu()/put_cpu().
118 */
119#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
120
121extern void *__alloc_reserved_percpu(size_t size, size_t align);
122
123#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
124
79struct percpu_data { 125struct percpu_data {
80 void *ptrs[1]; 126 void *ptrs[1];
81}; 127};
82 128
83#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) 129#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
84/* 130
85 * Use this to get to a cpu's version of the per-cpu object dynamically 131#define per_cpu_ptr(ptr, cpu) \
86 * allocated. Non-atomic access to the current CPU's version should 132({ \
87 * probably be combined with get_cpu()/put_cpu(). 133 struct percpu_data *__p = __percpu_disguise(ptr); \
88 */ 134 (__typeof__(ptr))__p->ptrs[(cpu)]; \
89#define percpu_ptr(ptr, cpu) \
90({ \
91 struct percpu_data *__p = __percpu_disguise(ptr); \
92 (__typeof__(ptr))__p->ptrs[(cpu)]; \
93}) 135})
94 136
95extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); 137#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
96extern void percpu_free(void *__pdata); 138
139extern void *__alloc_percpu(size_t size, size_t align);
140extern void free_percpu(void *__pdata);
97 141
98#else /* CONFIG_SMP */ 142#else /* CONFIG_SMP */
99 143
100#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 144#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
101 145
102static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) 146static inline void *__alloc_percpu(size_t size, size_t align)
103{ 147{
104 return kzalloc(size, gfp); 148 /*
149 * Can't easily make larger alignment work with kmalloc. WARN
150 * on it. Larger alignment should only be used for module
151 * percpu sections on SMP for which this path isn't used.
152 */
153 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
154 return kzalloc(size, GFP_KERNEL);
105} 155}
106 156
107static inline void percpu_free(void *__pdata) 157static inline void free_percpu(void *p)
108{ 158{
109 kfree(__pdata); 159 kfree(p);
110} 160}
111 161
112#endif /* CONFIG_SMP */ 162#endif /* CONFIG_SMP */
113 163
114#define percpu_alloc_mask(size, gfp, mask) \ 164#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
115 __percpu_alloc_mask((size), (gfp), &(mask)) 165 __alignof__(type))
116
117#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
118
119/* (legacy) interface for use without CPU hotplug handling */
120
121#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
122 cpu_possible_map)
123#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
124#define free_percpu(ptr) percpu_free((ptr))
125#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
126 166
127#endif /* __LINUX_PERCPU_H */ 167#endif /* __LINUX_PERCPU_H */
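Taken together, the percpu.h changes retire the percpu_alloc_mask()/percpu_free() family: __alloc_percpu() gains an alignment argument, free_percpu() becomes the canonical free, and per_cpu_ptr() is defined directly for both the dynamic allocator and the kmalloc fallback. A minimal usage sketch of the surviving interface (hypothetical caller, not part of this patch):

    #include <linux/percpu.h>

    struct my_counter {
            unsigned long hits;
    };

    static struct my_counter *counters;

    static int my_counters_init(void)
    {
            int cpu;

            /* alloc_percpu() now forwards __alignof__(type) */
            counters = alloc_percpu(struct my_counter);
            if (!counters)
                    return -ENOMEM;

            /* per_cpu_ptr() resolves each CPU's copy on either backend */
            for_each_possible_cpu(cpu)
                    per_cpu_ptr(counters, cpu)->hits = 0;
            return 0;
    }

    static void my_counters_exit(void)
    {
            free_percpu(counters);
    }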
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697df1d71..80044a4f3ab9 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
181#define rcu_enter_nohz() do { } while (0) 181#define rcu_enter_nohz() do { } while (0)
182#define rcu_exit_nohz() do { } while (0) 182#define rcu_exit_nohz() do { } while (0)
183 183
184/* A context switch is a grace period for rcuclassic. */
185static inline int rcu_blocking_is_gp(void)
186{
187 return num_online_cpus() == 1;
188}
189
184#endif /* __LINUX_RCUCLASSIC_H */ 190#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a7b71c..528343e6da51 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
52 void (*func)(struct rcu_head *head); 52 void (*func)(struct rcu_head *head);
53}; 53};
54 54
55/* Internal to kernel, but needed by rcupreempt.h. */
56extern int rcu_scheduler_active;
57
55#if defined(CONFIG_CLASSIC_RCU) 58#if defined(CONFIG_CLASSIC_RCU)
56#include <linux/rcuclassic.h> 59#include <linux/rcuclassic.h>
57#elif defined(CONFIG_TREE_RCU) 60#elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
265 268
266/* Internal to kernel */ 269/* Internal to kernel */
267extern void rcu_init(void); 270extern void rcu_init(void);
271extern void rcu_scheduler_starting(void);
268extern int rcu_needs_cpu(int cpu); 272extern int rcu_needs_cpu(int cpu);
269 273
270#endif /* __LINUX_RCUPDATE_H */ 274#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09b54a2..74304b4538d8 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
142#define rcu_exit_nohz() do { } while (0) 142#define rcu_exit_nohz() do { } while (0)
143#endif /* CONFIG_NO_HZ */ 143#endif /* CONFIG_NO_HZ */
144 144
145/*
146 * A context switch is a grace period for rcupreempt synchronize_rcu()
147 * only during early boot, before the scheduler has been initialized.
148 * So, how the heck do we get a context switch? Well, if the caller
149 * invokes synchronize_rcu(), they are willing to accept a context
150 * switch, so we simply pretend that one happened.
151 *
152 * After boot, there might be a blocked or preempted task in an RCU
153 * read-side critical section, so we cannot then take the fastpath.
154 */
155static inline int rcu_blocking_is_gp(void)
156{
157 return num_online_cpus() == 1 && !rcu_scheduler_active;
158}
159
145#endif /* __LINUX_RCUPREEMPT_H */ 160#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7975c3..a722fb67bb2d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
326} 326}
327#endif /* CONFIG_NO_HZ */ 327#endif /* CONFIG_NO_HZ */
328 328
329/* A context switch is a grace period for rcutree. */
330static inline int rcu_blocking_is_gp(void)
331{
332 return num_online_cpus() == 1;
333}
334
329#endif /* __LINUX_RCUTREE_H */ 335#endif /* __LINUX_RCUTREE_H */
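All three flavors now provide rcu_blocking_is_gp(): with a single online CPU (and, for rcupreempt, only before the scheduler is running) a context switch already implies a grace period, so a blocking wait can be skipped entirely. Condensed from the kernel/rcupdate.c hunk further down, the fast path this enables reads:

    void synchronize_rcu(void)
    {
            struct rcu_synchronize rcu;

            if (rcu_blocking_is_gp())
                    return;         /* nothing can be inside a read-side
                                       critical section concurrently */

            init_completion(&rcu.completion);
            /* will wake us after a real grace period */
            call_rcu(&rcu.head, wakeme_after_rcu);
            wait_for_completion(&rcu.completion);
    }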
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a0..a7c7698583bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
2303extern int sched_group_set_rt_period(struct task_group *tg, 2303extern int sched_group_set_rt_period(struct task_group *tg,
2304 long rt_period_us); 2304 long rt_period_us);
2305extern long sched_group_rt_period(struct task_group *tg); 2305extern long sched_group_rt_period(struct task_group *tg);
2306extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2306#endif 2307#endif
2307#endif 2308#endif
2308 2309
2310extern int task_can_switch_user(struct user_struct *up,
2311 struct task_struct *tsk);
2312
2309#ifdef CONFIG_TASK_XACCT 2313#ifdef CONFIG_TASK_XACCT
2310static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2314static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2311{ 2315{
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 1bcb357a01a1..e0417e4d3f15 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
212#define SERIO_FUJITSU 0x35 212#define SERIO_FUJITSU 0x35
213#define SERIO_ZHENHUA 0x36 213#define SERIO_ZHENHUA 0x36
214#define SERIO_INEXIO 0x37 214#define SERIO_INEXIO 0x37
215#define SERIO_TOUCHIT213 0x37 215#define SERIO_TOUCHIT213 0x38
216#define SERIO_W8001 0x39 216#define SERIO_W8001 0x39
217 217
218#endif 218#endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
95 95
96extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 96extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
97 struct page ***pages); 97 struct page ***pages);
98extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
99 pgprot_t prot, struct page **pages);
100extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
98extern void unmap_kernel_range(unsigned long addr, unsigned long size); 101extern void unmap_kernel_range(unsigned long addr, unsigned long size);
99 102
100/* Allocate/destroy a 'vmalloc' VM area. */ 103/* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
110 */ 113 */
111extern rwlock_t vmlist_lock; 114extern rwlock_t vmlist_lock;
112extern struct vm_struct *vmlist; 115extern struct vm_struct *vmlist;
116extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
113 117
114#endif /* _LINUX_VMALLOC_H */ 118#endif /* _LINUX_VMALLOC_H */
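Both _noflush variants leave TLB and cache maintenance to the caller, so code touching many ranges can flush once at the end, and vm_area_register_early() lets boot code claim vmalloc space before the allocator is initialized. A hedged sketch of the batching pattern (hypothetical arrays; pairing with flush_cache_vmap() is this editor's assumption, not shown in the hunk):

    /* map n page ranges back to back, then flush once */
    for (i = 0; i < n; i++)
            if (map_kernel_range_noflush(addrs[i], range_size,
                                         PAGE_KERNEL, pages[i]) < 0)
                    goto fail;
    flush_cache_vmap(addrs[0], addrs[n - 1] + range_size);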
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 6fc13d905c5f..ded434b032a4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -109,11 +109,6 @@ extern struct list_head net_namespace_list;
109#ifdef CONFIG_NET_NS 109#ifdef CONFIG_NET_NS
110extern void __put_net(struct net *net); 110extern void __put_net(struct net *net);
111 111
112static inline int net_alive(struct net *net)
113{
114 return net && atomic_read(&net->count);
115}
116
117static inline struct net *get_net(struct net *net) 112static inline struct net *get_net(struct net *net)
118{ 113{
119 atomic_inc(&net->count); 114 atomic_inc(&net->count);
@@ -145,11 +140,6 @@ int net_eq(const struct net *net1, const struct net *net2)
145} 140}
146#else 141#else
147 142
148static inline int net_alive(struct net *net)
149{
150 return 1;
151}
152
153static inline struct net *get_net(struct net *net) 143static inline struct net *get_net(struct net *net)
154{ 144{
155 return net; 145 return net;
@@ -234,6 +224,23 @@ struct pernet_operations {
234 void (*exit)(struct net *net); 224 void (*exit)(struct net *net);
235}; 225};
236 226
227/*
228 * Use these carefully. If you implement a network device and it
229 * needs per network namespace operations, use device pernet operations;
230 * otherwise use pernet subsys operations.
231 *
232 * This is critically important. Most of the network code cleanup
233 * runs with the assumption that dev_remove_pack has been called so no
234 * new packets will arrive during and after the cleanup functions have
235 * been called. dev_remove_pack is not per namespace so instead the
236 * guarantee of no more packets arriving in a network namespace is
237 * provided by ensuring that all network devices and all sockets have
238 * left the network namespace before the cleanup methods are called.
239 *
240 * For the longest time the ipv4 icmp code was registered as a pernet
241 * device, which caused kernel oopses and panics during network
242 * namespace cleanup. So please don't get this wrong.
243 */
237extern int register_pernet_subsys(struct pernet_operations *); 244extern int register_pernet_subsys(struct pernet_operations *);
238extern void unregister_pernet_subsys(struct pernet_operations *); 245extern void unregister_pernet_subsys(struct pernet_operations *);
239extern int register_pernet_gen_subsys(int *id, struct pernet_operations *); 246extern int register_pernet_gen_subsys(int *id, struct pernet_operations *);
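Per the comment's guidance, a protocol that keeps per-namespace state (and is not a network device) registers subsys operations. A hedged sketch with hypothetical names:

    static int my_proto_net_init(struct net *net)
    {
            /* allocate this protocol's per-namespace state */
            return 0;
    }

    static void my_proto_net_exit(struct net *net)
    {
            /* all devices and sockets have left the namespace by now */
    }

    static struct pernet_operations my_proto_net_ops = {
            .init = my_proto_net_init,
            .exit = my_proto_net_exit,
    };

    /* module init: register_pernet_subsys(&my_proto_net_ops);
     * a network device's namespace hooks would instead go through
     * register_pernet_device(), per the warning above. */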
diff --git a/init/Kconfig b/init/Kconfig
index 95a66131403a..38396ec7ee36 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -735,6 +735,9 @@ config CC_OPTIMIZE_FOR_SIZE
735config SYSCTL 735config SYSCTL
736 bool 736 bool
737 737
738config ANON_INODES
739 bool
740
738menuconfig EMBEDDED 741menuconfig EMBEDDED
739 bool "Configure standard kernel features (for small systems)" 742 bool "Configure standard kernel features (for small systems)"
740 help 743 help
@@ -840,18 +843,6 @@ config PCSPKR_PLATFORM
840 This option allows to disable the internal PC-Speaker 843 This option allows to disable the internal PC-Speaker
841 support, saving some memory. 844 support, saving some memory.
842 845
843config COMPAT_BRK
844 bool "Disable heap randomization"
845 default y
846 help
847 Randomizing heap placement makes heap exploits harder, but it
848 also breaks ancient binaries (including anything libc5 based).
849 This option changes the bootup default to heap randomization
850 disabled, and can be overridden at runtime by setting
851 /proc/sys/kernel/randomize_va_space to 2.
852
853 On non-ancient distros (post-2000 ones) N is usually a safe choice.
854
855config BASE_FULL 846config BASE_FULL
856 default y 847 default y
857 bool "Enable full-sized data structures for core" if EMBEDDED 848 bool "Enable full-sized data structures for core" if EMBEDDED
@@ -869,9 +860,6 @@ config FUTEX
869 support for "fast userspace mutexes". The resulting kernel may not 860 support for "fast userspace mutexes". The resulting kernel may not
870 run glibc-based applications correctly. 861 run glibc-based applications correctly.
871 862
872config ANON_INODES
873 bool
874
875config EPOLL 863config EPOLL
876 bool "Enable eventpoll support" if EMBEDDED 864 bool "Enable eventpoll support" if EMBEDDED
877 default y 865 default y
@@ -957,6 +945,18 @@ config SLUB_DEBUG
957 SLUB sysfs support. /sys/slab will not exist and there will be 945 SLUB sysfs support. /sys/slab will not exist and there will be
958 no support for cache validation etc. 946 no support for cache validation etc.
959 947
948config COMPAT_BRK
949 bool "Disable heap randomization"
950 default y
951 help
952 Randomizing heap placement makes heap exploits harder, but it
953 also breaks ancient binaries (including anything libc5 based).
954 This option changes the bootup default to heap randomization
955 disabled, and can be overridden at runtime by setting
956 /proc/sys/kernel/randomize_va_space to 2.
957
958 On non-ancient distros (post-2000 ones) N is usually a safe choice.
959
960choice 960choice
961 prompt "Choose SLAB allocator" 961 prompt "Choose SLAB allocator"
962 default SLUB 962 default SLUB
diff --git a/init/main.c b/init/main.c
index 6441083f8273..6bf83afd654d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -98,7 +98,7 @@ static inline void mark_rodata_ro(void) { }
98extern void tc_init(void); 98extern void tc_init(void);
99#endif 99#endif
100 100
101enum system_states system_state; 101enum system_states system_state __read_mostly;
102EXPORT_SYMBOL(system_state); 102EXPORT_SYMBOL(system_state);
103 103
104/* 104/*
@@ -464,6 +464,7 @@ static noinline void __init_refok rest_init(void)
464 * at least once to get things moving: 464 * at least once to get things moving:
465 */ 465 */
466 init_idle_bootup_task(current); 466 init_idle_bootup_task(current);
467 rcu_scheduler_starting();
467 preempt_enable_no_resched(); 468 preempt_enable_no_resched();
468 schedule(); 469 schedule();
469 preempt_disable(); 470 preempt_disable();
diff --git a/kernel/fork.c b/kernel/fork.c
index 8de303bdd4e5..6715ebc3761d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1184,10 +1184,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1184#endif 1184#endif
1185 clear_all_latency_tracing(p); 1185 clear_all_latency_tracing(p);
1186 1186
1187 /* Our parent execution domain becomes current domain
1188 These must match for thread signalling to apply */
1189 p->parent_exec_id = p->self_exec_id;
1190
1191 /* ok, now we should be set up.. */ 1187 /* ok, now we should be set up.. */
1192 p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); 1188 p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1193 p->pdeath_signal = 0; 1189 p->pdeath_signal = 0;
@@ -1225,10 +1221,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1225 set_task_cpu(p, smp_processor_id()); 1221 set_task_cpu(p, smp_processor_id());
1226 1222
1227 /* CLONE_PARENT re-uses the old parent */ 1223 /* CLONE_PARENT re-uses the old parent */
1228 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) 1224 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1229 p->real_parent = current->real_parent; 1225 p->real_parent = current->real_parent;
1230 else 1226 p->parent_exec_id = current->parent_exec_id;
1227 } else {
1231 p->real_parent = current; 1228 p->real_parent = current;
1229 p->parent_exec_id = current->self_exec_id;
1230 }
1232 1231
1233 spin_lock(&current->sighand->siglock); 1232 spin_lock(&current->sighand->siglock);
1234 1233
diff --git a/kernel/module.c b/kernel/module.c
index ba22484a987e..f0e04d6b67d8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -51,6 +51,7 @@
51#include <linux/tracepoint.h> 51#include <linux/tracepoint.h>
52#include <linux/ftrace.h> 52#include <linux/ftrace.h>
53#include <linux/async.h> 53#include <linux/async.h>
54#include <linux/percpu.h>
54 55
55#if 0 56#if 0
56#define DEBUGP printk 57#define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
366} 367}
367 368
368#ifdef CONFIG_SMP 369#ifdef CONFIG_SMP
370
371#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
372
373static void *percpu_modalloc(unsigned long size, unsigned long align,
374 const char *name)
375{
376 void *ptr;
377
378 if (align > PAGE_SIZE) {
379 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
380 name, align, PAGE_SIZE);
381 align = PAGE_SIZE;
382 }
383
384 ptr = __alloc_reserved_percpu(size, align);
385 if (!ptr)
386 printk(KERN_WARNING
387 "Could not allocate %lu bytes percpu data\n", size);
388 return ptr;
389}
390
391static void percpu_modfree(void *freeme)
392{
393 free_percpu(freeme);
394}
395
396#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
397
369/* Number of blocks used and allocated. */ 398/* Number of blocks used and allocated. */
370static unsigned int pcpu_num_used, pcpu_num_allocated; 399static unsigned int pcpu_num_used, pcpu_num_allocated;
371/* Size of each block. -ve means used. */ 400/* Size of each block. -ve means used. */
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
480 } 509 }
481} 510}
482 511
483static unsigned int find_pcpusec(Elf_Ehdr *hdr,
484 Elf_Shdr *sechdrs,
485 const char *secstrings)
486{
487 return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
488}
489
490static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
491{
492 int cpu;
493
494 for_each_possible_cpu(cpu)
495 memcpy(pcpudest + per_cpu_offset(cpu), from, size);
496}
497
498static int percpu_modinit(void) 512static int percpu_modinit(void)
499{ 513{
500 pcpu_num_used = 2; 514 pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
513 return 0; 527 return 0;
514} 528}
515__initcall(percpu_modinit); 529__initcall(percpu_modinit);
530
531#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
532
533static unsigned int find_pcpusec(Elf_Ehdr *hdr,
534 Elf_Shdr *sechdrs,
535 const char *secstrings)
536{
537 return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
538}
539
540static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
541{
542 int cpu;
543
544 for_each_possible_cpu(cpu)
545 memcpy(pcpudest + per_cpu_offset(cpu), from, size);
546}
547
516#else /* ... !CONFIG_SMP */ 548#else /* ... !CONFIG_SMP */
549
517static inline void *percpu_modalloc(unsigned long size, unsigned long align, 550static inline void *percpu_modalloc(unsigned long size, unsigned long align,
518 const char *name) 551 const char *name)
519{ 552{
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
535 /* pcpusec should be 0, and size of that section should be 0. */ 568 /* pcpusec should be 0, and size of that section should be 0. */
536 BUG_ON(size != 0); 569 BUG_ON(size != 0);
537} 570}
571
538#endif /* CONFIG_SMP */ 572#endif /* CONFIG_SMP */
539 573
540#define MODINFO_ATTR(field) \ 574#define MODINFO_ATTR(field) \
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index bd5a9003497c..654c640a6b9c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
679void rcu_check_callbacks(int cpu, int user) 679void rcu_check_callbacks(int cpu, int user)
680{ 680{
681 if (user || 681 if (user ||
682 (idle_cpu(cpu) && !in_softirq() && 682 (idle_cpu(cpu) && rcu_scheduler_active &&
683 hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 683 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
684 684
685 /* 685 /*
686 * Get here if this CPU took its interrupt from user 686 * Get here if this CPU took its interrupt from user
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d92a76a881aa..cae8a059cf47 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,6 +44,7 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
47 48
48enum rcu_barrier { 49enum rcu_barrier {
49 RCU_BARRIER_STD, 50 RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
55static atomic_t rcu_barrier_cpu_count; 56static atomic_t rcu_barrier_cpu_count;
56static DEFINE_MUTEX(rcu_barrier_mutex); 57static DEFINE_MUTEX(rcu_barrier_mutex);
57static struct completion rcu_barrier_completion; 58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly;
58 60
59/* 61/*
60 * Awaken the corresponding synchronize_rcu() instance now that a 62 * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
80void synchronize_rcu(void) 82void synchronize_rcu(void)
81{ 83{
82 struct rcu_synchronize rcu; 84 struct rcu_synchronize rcu;
85
86 if (rcu_blocking_is_gp())
87 return;
88
83 init_completion(&rcu.completion); 89 init_completion(&rcu.completion);
84 /* Will wake me after RCU finished. */ 90 /* Will wake me after RCU finished. */
85 call_rcu(&rcu.head, wakeme_after_rcu); 91 call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
175 __rcu_init(); 181 __rcu_init();
176} 182}
177 183
184void rcu_scheduler_starting(void)
185{
186 WARN_ON(num_online_cpus() != 1);
187 WARN_ON(nr_context_switches() > 0);
188 rcu_scheduler_active = 1;
189}
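The two WARN_ONs encode the boot ordering this depends on, visible in the init/main.c hunk below:

    /* rest_init(), condensed:
     *
     *      init_idle_bootup_task(current);
     *      rcu_scheduler_starting();       <- one CPU online, zero
     *                                         context switches so far
     *      preempt_enable_no_resched();
     *      schedule();                     <- first context switch
     */

Flipping rcu_scheduler_active any later would let rcupreempt's rcu_blocking_is_gp() fast path keep firing after tasks can already be preempted inside read-side critical sections.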
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 33cfc50781f9..5d59e850fb71 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
1181{ 1181{
1182 struct rcu_synchronize rcu; 1182 struct rcu_synchronize rcu;
1183 1183
1184 if (num_online_cpus() == 1)
1185 return; /* blocking is gp if only one CPU! */
1186
1184 init_completion(&rcu.completion); 1187 init_completion(&rcu.completion);
1185 /* Will wake me after RCU finished. */ 1188 /* Will wake me after RCU finished. */
1186 call_rcu_sched(&rcu.head, wakeme_after_rcu); 1189 call_rcu_sched(&rcu.head, wakeme_after_rcu);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b2fd602a6f6f..97ce31579ec0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
948void rcu_check_callbacks(int cpu, int user) 948void rcu_check_callbacks(int cpu, int user)
949{ 949{
950 if (user || 950 if (user ||
951 (idle_cpu(cpu) && !in_softirq() && 951 (idle_cpu(cpu) && rcu_scheduler_active &&
952 hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 952 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
953 953
954 /* 954 /*
955 * Get here if this CPU took its interrupt from user 955 * Get here if this CPU took its interrupt from user
diff --git a/kernel/sched.c b/kernel/sched.c
index 7d97ff7c4478..0a76d0b6f215 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
223{ 223{
224 ktime_t now; 224 ktime_t now;
225 225
226 if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) 226 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
227 return; 227 return;
228 228
229 if (hrtimer_active(&rt_b->rt_period_timer)) 229 if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9219,6 +9219,16 @@ static int sched_rt_global_constraints(void)
9219 9219
9220 return ret; 9220 return ret;
9221} 9221}
9222
9223int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
9224{
9225 /* Don't accept realtime tasks when there is no way for them to run */
9226 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
9227 return 0;
9228
9229 return 1;
9230}
9231
9222#else /* !CONFIG_RT_GROUP_SCHED */ 9232#else /* !CONFIG_RT_GROUP_SCHED */
9223static int sched_rt_global_constraints(void) 9233static int sched_rt_global_constraints(void)
9224{ 9234{
@@ -9312,8 +9322,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9312 struct task_struct *tsk) 9322 struct task_struct *tsk)
9313{ 9323{
9314#ifdef CONFIG_RT_GROUP_SCHED 9324#ifdef CONFIG_RT_GROUP_SCHED
9315 /* Don't accept realtime tasks when there is no way for them to run */ 9325 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
9316 if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
9317 return -EINVAL; 9326 return -EINVAL;
9318#else 9327#else
9319 /* We don't support RT-tasks being in separate groups */ 9328 /* We don't support RT-tasks being in separate groups */
@@ -9476,7 +9485,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9476 9485
9477static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) 9486static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9478{ 9487{
9479 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 9488 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9480 u64 data; 9489 u64 data;
9481 9490
9482#ifndef CONFIG_64BIT 9491#ifndef CONFIG_64BIT
@@ -9495,7 +9504,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9495 9504
9496static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) 9505static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9497{ 9506{
9498 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 9507 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9499 9508
9500#ifndef CONFIG_64BIT 9509#ifndef CONFIG_64BIT
9501 /* 9510 /*
@@ -9591,7 +9600,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9591 ca = task_ca(tsk); 9600 ca = task_ca(tsk);
9592 9601
9593 for (; ca; ca = ca->parent) { 9602 for (; ca; ca = ca->parent) {
9594 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 9603 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9595 *cpuusage += cputime; 9604 *cpuusage += cputime;
9596 } 9605 }
9597} 9606}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0365b4899a3d..57d3f67f6f38 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -626,6 +626,7 @@ static int ksoftirqd(void * __bind_cpu)
626 preempt_enable_no_resched(); 626 preempt_enable_no_resched();
627 cond_resched(); 627 cond_resched();
628 preempt_disable(); 628 preempt_disable();
629 rcu_qsctr_inc((long)__bind_cpu);
629 } 630 }
630 preempt_enable(); 631 preempt_enable();
631 set_current_state(TASK_INTERRUPTIBLE); 632 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 0cd415ee62a2..74541ca49536 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
170 * doesn't hit this CPU until we're ready. */ 170 * doesn't hit this CPU until we're ready. */
171 get_cpu(); 171 get_cpu();
172 for_each_online_cpu(i) { 172 for_each_online_cpu(i) {
173 sm_work = percpu_ptr(stop_machine_work, i); 173 sm_work = per_cpu_ptr(stop_machine_work, i);
174 INIT_WORK(sm_work, stop_cpu); 174 INIT_WORK(sm_work, stop_cpu);
175 queue_work_on(i, stop_machine_wq, sm_work); 175 queue_work_on(i, stop_machine_wq, sm_work);
176 } 176 }
diff --git a/kernel/sys.c b/kernel/sys.c
index f145c415bc16..37f458e6882a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -559,7 +559,7 @@ error:
559 abort_creds(new); 559 abort_creds(new);
560 return retval; 560 return retval;
561} 561}
562 562
563/* 563/*
564 * change the user struct in a credentials set to match the new UID 564 * change the user struct in a credentials set to match the new UID
565 */ 565 */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
571 if (!new_user) 571 if (!new_user)
572 return -EAGAIN; 572 return -EAGAIN;
573 573
574 if (!task_can_switch_user(new_user, current)) {
575 free_uid(new_user);
576 return -EINVAL;
577 }
578
574 if (atomic_read(&new_user->processes) >= 579 if (atomic_read(&new_user->processes) >=
575 current->signal->rlim[RLIMIT_NPROC].rlim_cur && 580 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
576 new_user != INIT_USER) { 581 new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
631 goto error; 636 goto error;
632 } 637 }
633 638
634 retval = -EAGAIN; 639 if (new->uid != old->uid) {
635 if (new->uid != old->uid && set_user(new) < 0) 640 retval = set_user(new);
636 goto error; 641 if (retval < 0)
637 642 goto error;
643 }
638 if (ruid != (uid_t) -1 || 644 if (ruid != (uid_t) -1 ||
639 (euid != (uid_t) -1 && euid != old->uid)) 645 (euid != (uid_t) -1 && euid != old->uid))
640 new->suid = new->euid; 646 new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
680 retval = -EPERM; 686 retval = -EPERM;
681 if (capable(CAP_SETUID)) { 687 if (capable(CAP_SETUID)) {
682 new->suid = new->uid = uid; 688 new->suid = new->uid = uid;
683 if (uid != old->uid && set_user(new) < 0) { 689 if (uid != old->uid) {
684 retval = -EAGAIN; 690 retval = set_user(new);
685 goto error; 691 if (retval < 0)
692 goto error;
686 } 693 }
687 } else if (uid != old->uid && uid != new->suid) { 694 } else if (uid != old->uid && uid != new->suid) {
688 goto error; 695 goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
734 goto error; 741 goto error;
735 } 742 }
736 743
737 retval = -EAGAIN;
738 if (ruid != (uid_t) -1) { 744 if (ruid != (uid_t) -1) {
739 new->uid = ruid; 745 new->uid = ruid;
740 if (ruid != old->uid && set_user(new) < 0) 746 if (ruid != old->uid) {
741 goto error; 747 retval = set_user(new);
748 if (retval < 0)
749 goto error;
750 }
742 } 751 }
743 if (euid != (uid_t) -1) 752 if (euid != (uid_t) -1)
744 new->euid = euid; 753 new->euid = euid;
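Since set_user() now returns a real error code, the setuid family can report -EINVAL from task_can_switch_user() instead of folding every failure into -EAGAIN. A hedged userspace sketch of the distinction:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            if (setuid(1000) < 0) {         /* hypothetical target uid */
                    if (errno == EINVAL)
                            /* e.g. an RT task that may not join the
                               target user's task group (kernel/user.c) */
                            fprintf(stderr, "switch refused\n");
                    else if (errno == EAGAIN)
                            /* target uid is over RLIMIT_NPROC */
                            fprintf(stderr, "process limit\n");
                    return 1;
            }
            return 0;
    }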
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 43f891b05a4b..00d59d048edf 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -122,8 +122,10 @@ void acct_update_integrals(struct task_struct *tsk)
122 if (likely(tsk->mm)) { 122 if (likely(tsk->mm)) {
123 cputime_t time, dtime; 123 cputime_t time, dtime;
124 struct timeval value; 124 struct timeval value;
125 unsigned long flags;
125 u64 delta; 126 u64 delta;
126 127
128 local_irq_save(flags);
127 time = tsk->stime + tsk->utime; 129 time = tsk->stime + tsk->utime;
128 dtime = cputime_sub(time, tsk->acct_timexpd); 130 dtime = cputime_sub(time, tsk->acct_timexpd);
129 jiffies_to_timeval(cputime_to_jiffies(dtime), &value); 131 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
@@ -131,10 +133,12 @@ void acct_update_integrals(struct task_struct *tsk)
131 delta = delta * USEC_PER_SEC + value.tv_usec; 133 delta = delta * USEC_PER_SEC + value.tv_usec;
132 134
133 if (delta == 0) 135 if (delta == 0)
134 return; 136 goto out;
135 tsk->acct_timexpd = time; 137 tsk->acct_timexpd = time;
136 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); 138 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
137 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; 139 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
140 out:
141 local_irq_restore(flags);
138 } 142 }
139} 143}
140 144
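The fix is the standard irq-exclusion idiom, with the early return turned into a goto so the restore always runs:

    /* shape of the patched function, condensed:
     *
     *      unsigned long flags;
     *
     *      local_irq_save(flags);
     *      ...read tsk->stime/utime, compute delta...
     *      if (delta == 0)
     *              goto out;
     *      ...update acct_timexpd and the rss/vm integrals...
     *  out:
     *      local_irq_restore(flags);
     */

presumably so the time snapshot and the integral updates cannot be torn by accounting work done from this CPU's interrupts (the rationale is this editor's hedged reading; the hunk shows only the mechanics).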
diff --git a/kernel/user.c b/kernel/user.c
index 3551ac742395..fbb300e6191f 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -286,14 +286,12 @@ int __init uids_sysfs_init(void)
286/* work function to remove sysfs directory for a user and free up 286/* work function to remove sysfs directory for a user and free up
287 * corresponding structures. 287 * corresponding structures.
288 */ 288 */
289static void remove_user_sysfs_dir(struct work_struct *w) 289static void cleanup_user_struct(struct work_struct *w)
290{ 290{
291 struct user_struct *up = container_of(w, struct user_struct, work); 291 struct user_struct *up = container_of(w, struct user_struct, work);
292 unsigned long flags; 292 unsigned long flags;
293 int remove_user = 0; 293 int remove_user = 0;
294 294
295 if (up->user_ns != &init_user_ns)
296 return;
297 /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() 295 /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
298 * atomic. 296 * atomic.
299 */ 297 */
@@ -312,9 +310,11 @@ static void remove_user_sysfs_dir(struct work_struct *w)
312 if (!remove_user) 310 if (!remove_user)
313 goto done; 311 goto done;
314 312
315 kobject_uevent(&up->kobj, KOBJ_REMOVE); 313 if (up->user_ns == &init_user_ns) {
316 kobject_del(&up->kobj); 314 kobject_uevent(&up->kobj, KOBJ_REMOVE);
317 kobject_put(&up->kobj); 315 kobject_del(&up->kobj);
316 kobject_put(&up->kobj);
317 }
318 318
319 sched_destroy_user(up); 319 sched_destroy_user(up);
320 key_put(up->uid_keyring); 320 key_put(up->uid_keyring);
@@ -335,7 +335,7 @@ static void free_user(struct user_struct *up, unsigned long flags)
335 atomic_inc(&up->__count); 335 atomic_inc(&up->__count);
336 spin_unlock_irqrestore(&uidhash_lock, flags); 336 spin_unlock_irqrestore(&uidhash_lock, flags);
337 337
338 INIT_WORK(&up->work, remove_user_sysfs_dir); 338 INIT_WORK(&up->work, cleanup_user_struct);
339 schedule_work(&up->work); 339 schedule_work(&up->work);
340} 340}
341 341
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
362 362
363#endif 363#endif
364 364
365#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
366/*
367 * We need to check if a setuid can take place. This function should be called
368 * before successfully completing the setuid.
369 */
370int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
371{
372
373 return sched_rt_can_attach(up->tg, tsk);
374
375}
376#else
377int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
378{
379 return 1;
380}
381#endif
382
365/* 383/*
366 * Locate the user_struct for the passed UID. If found, take a ref on it. The 384 * Locate the user_struct for the passed UID. If found, take a ref on it. The
367 * caller must undo that ref with free_uid(). 385 * caller must undo that ref with free_uid().
diff --git a/lib/idr.c b/lib/idr.c
index c11c5765cdef..dab4bca86f5d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -449,6 +449,7 @@ void idr_remove_all(struct idr *idp)
449 449
450 n = idp->layers * IDR_BITS; 450 n = idp->layers * IDR_BITS;
451 p = idp->top; 451 p = idp->top;
452 rcu_assign_pointer(idp->top, NULL);
452 max = 1 << n; 453 max = 1 << n;
453 454
454 id = 0; 455 id = 0;
@@ -467,7 +468,6 @@ void idr_remove_all(struct idr *idp)
467 p = *--paa; 468 p = *--paa;
468 } 469 }
469 } 470 }
470 rcu_assign_pointer(idp->top, NULL);
471 idp->layers = 0; 471 idp->layers = 0;
472} 472}
473EXPORT_SYMBOL(idr_remove_all); 473EXPORT_SYMBOL(idr_remove_all);
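Publishing NULL before the teardown loop means a new RCU reader can no longer reach layers that are about to be recycled. A hedged sketch of the reader side this ordering protects (idr lookups use RCU in this tree):

    rcu_read_lock();
    p = rcu_dereference(idp->top);  /* sees NULL once removal has begun */
    if (p) {
            /* walk the layer tree; anything reached from a non-NULL
               top must stay valid for this read-side section */
    }
    rcu_read_unlock();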
diff --git a/mm/Makefile b/mm/Makefile
index 72255be57f89..818569b68f46 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
30obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o 30obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
31obj-$(CONFIG_FS_XIP) += filemap_xip.o 31obj-$(CONFIG_FS_XIP) += filemap_xip.o
32obj-$(CONFIG_MIGRATION) += migrate.o 32obj-$(CONFIG_MIGRATION) += migrate.o
33ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
34obj-$(CONFIG_SMP) += percpu.o
35else
33obj-$(CONFIG_SMP) += allocpercpu.o 36obj-$(CONFIG_SMP) += allocpercpu.o
37endif
34obj-$(CONFIG_QUICKLIST) += quicklist.o 38obj-$(CONFIG_QUICKLIST) += quicklist.o
35obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o 39obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 4297bc41bfd2..3653c570232b 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
99 __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) 99 __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
100 100
101/** 101/**
102 * percpu_alloc_mask - initial setup of per-cpu data 102 * alloc_percpu - initial setup of per-cpu data
103 * @size: size of per-cpu object 103 * @size: size of per-cpu object
104 * @gfp: may sleep or not etc. 104 * @align: alignment
105 * @mask: populate per-data for cpu's selected through mask bits
106 * 105 *
107 * Populating per-cpu data for all online cpu's would be a typical use case, 106 * Allocate dynamic percpu area. Percpu objects are populated with
108 * which is simplified by the percpu_alloc() wrapper. 107 * zeroed buffers.
109 * Per-cpu objects are populated with zeroed buffers.
110 */ 108 */
111void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) 109void *__alloc_percpu(size_t size, size_t align)
112{ 110{
113 /* 111 /*
114 * We allocate whole cache lines to avoid false sharing 112 * We allocate whole cache lines to avoid false sharing
115 */ 113 */
116 size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); 114 size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
117 void *pdata = kzalloc(sz, gfp); 115 void *pdata = kzalloc(sz, GFP_KERNEL);
118 void *__pdata = __percpu_disguise(pdata); 116 void *__pdata = __percpu_disguise(pdata);
119 117
118 /*
119 * Can't easily make larger alignment work with kmalloc. WARN
120 * on it. Larger alignment should only be used for module
121 * percpu sections on SMP for which this path isn't used.
122 */
123 WARN_ON_ONCE(align > __alignof__(unsigned long long));
124
120 if (unlikely(!pdata)) 125 if (unlikely(!pdata))
121 return NULL; 126 return NULL;
122 if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask))) 127 if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
128 &cpu_possible_map)))
123 return __pdata; 129 return __pdata;
124 kfree(pdata); 130 kfree(pdata);
125 return NULL; 131 return NULL;
126} 132}
127EXPORT_SYMBOL_GPL(__percpu_alloc_mask); 133EXPORT_SYMBOL_GPL(__alloc_percpu);
128 134
129/** 135/**
130 * percpu_free - final cleanup of per-cpu data 136 * free_percpu - final cleanup of per-cpu data
131 * @__pdata: object to clean up 137 * @__pdata: object to clean up
132 * 138 *
133 * We simply clean up any per-cpu object left. No need for the client to 139 * We simply clean up any per-cpu object left. No need for the client to
134 * track and specify through a bis mask which per-cpu objects are to free. 140 * track and specify through a bis mask which per-cpu objects are to free.
135 */ 141 */
136void percpu_free(void *__pdata) 142void free_percpu(void *__pdata)
137{ 143{
138 if (unlikely(!__pdata)) 144 if (unlikely(!__pdata))
139 return; 145 return;
140 __percpu_depopulate_mask(__pdata, &cpu_possible_map); 146 __percpu_depopulate_mask(__pdata, &cpu_possible_map);
141 kfree(__percpu_disguise(__pdata)); 147 kfree(__percpu_disguise(__pdata));
142} 148}
143EXPORT_SYMBOL_GPL(percpu_free); 149EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 51a0ccf61e0e..daf92713f7de 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -382,7 +382,6 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
382 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); 382 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
383} 383}
384 384
385#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
386/** 385/**
387 * reserve_bootmem - mark a page range as usable 386 * reserve_bootmem - mark a page range as usable
388 * @addr: starting address of the range 387 * @addr: starting address of the range
@@ -403,7 +402,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
403 402
404 return mark_bootmem(start, end, 1, flags); 403 return mark_bootmem(start, end, 1, flags);
405} 404}
406#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
407 405
408static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, 406static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
409 unsigned long step) 407 unsigned long step)
@@ -429,8 +427,8 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
429} 427}
430 428
431static void * __init alloc_bootmem_core(struct bootmem_data *bdata, 429static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
432 unsigned long size, unsigned long align, 430 unsigned long size, unsigned long align,
433 unsigned long goal, unsigned long limit) 431 unsigned long goal, unsigned long limit)
434{ 432{
435 unsigned long fallback = 0; 433 unsigned long fallback = 0;
436 unsigned long min, max, start, sidx, midx, step; 434 unsigned long min, max, start, sidx, midx, step;
@@ -530,17 +528,34 @@ find_block:
530 return NULL; 528 return NULL;
531} 529}
532 530
531static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
532 unsigned long size, unsigned long align,
533 unsigned long goal, unsigned long limit)
534{
535#ifdef CONFIG_HAVE_ARCH_BOOTMEM
536 bootmem_data_t *p_bdata;
537
538 p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
539 if (p_bdata)
540 return alloc_bootmem_core(p_bdata, size, align, goal, limit);
541#endif
542 return NULL;
543}
544
533static void * __init ___alloc_bootmem_nopanic(unsigned long size, 545static void * __init ___alloc_bootmem_nopanic(unsigned long size,
534 unsigned long align, 546 unsigned long align,
535 unsigned long goal, 547 unsigned long goal,
536 unsigned long limit) 548 unsigned long limit)
537{ 549{
538 bootmem_data_t *bdata; 550 bootmem_data_t *bdata;
551 void *region;
539 552
540restart: 553restart:
541 list_for_each_entry(bdata, &bdata_list, list) { 554 region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
542 void *region; 555 if (region)
556 return region;
543 557
558 list_for_each_entry(bdata, &bdata_list, list) {
544 if (goal && bdata->node_low_pfn <= PFN_DOWN(goal)) 559 if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
545 continue; 560 continue;
546 if (limit && bdata->node_min_pfn >= PFN_DOWN(limit)) 561 if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
@@ -618,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
618{ 633{
619 void *ptr; 634 void *ptr;
620 635
636 ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
637 if (ptr)
638 return ptr;
639
621 ptr = alloc_bootmem_core(bdata, size, align, goal, limit); 640 ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
622 if (ptr) 641 if (ptr)
623 return ptr; 642 return ptr;
@@ -674,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
674{ 693{
675 void *ptr; 694 void *ptr;
676 695
696 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
697 if (ptr)
698 return ptr;
699
677 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); 700 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
678 if (ptr) 701 if (ptr)
679 return ptr; 702 return ptr;
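The hook's contract, reconstructed from the call site above (hedged; the declaration itself lives outside this hunk): an architecture that defines CONFIG_HAVE_ARCH_BOOTMEM supplies

    bootmem_data_t *bootmem_arch_preferred_node(bootmem_data_t *bdata,
                            unsigned long size, unsigned long align,
                            unsigned long goal, unsigned long limit);

returning the bootmem node it wants this allocation served from first, or NULL to fall through to the generic node walk.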
diff --git a/mm/percpu.c b/mm/percpu.c
new file mode 100644
index 000000000000..bfe6a3afaf45
--- /dev/null
+++ b/mm/percpu.c
@@ -0,0 +1,1226 @@
1/*
2 * linux/mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks in the vmalloc area. Each
11 * chunk consists of num_possible_cpus() units and the first chunk
12 * is used for static percpu variables in the kernel image (special
13 * boot time alloc/init handling necessary as these areas need to be
14 * brought up before allocation services are running). Each unit grows
15 * as necessary and all units grow or shrink in unison. When a chunk is
16 * filled up, another chunk is allocated in the vmalloc area:
17 *
18 * c0 c1 c2
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
22 *
23 * Allocation is done in offset-size areas of a single unit's space; i.e.,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
26 * percpu base registers UNIT_SIZE apart.
27 *
28 * There are usually many small percpu allocations, many of them as
29 * small as 4 bytes. The allocator organizes chunks into lists
30 * according to free size and tries to allocate from the fullest one.
31 * Each chunk keeps the maximum contiguous area size hint which is
32 * guaranteed to be equal to or larger than the maximum contiguous
33 * area in the chunk. This helps the allocator not to iterate the
34 * chunk maps unnecessarily.
35 *
36 * Allocation state in each chunk is kept using an array of integers
37 * on chunk->map. A positive value in the map represents a free
38 * region and negative allocated. Allocation inside a chunk is done
39 * by scanning this map sequentially and serving the first matching
40 * entry. This is mostly copied from the percpu_modalloc() allocator.
41 * Chunks are also linked into an rb tree to ease address-to-chunk
42 * mapping during free.
43 *
44 * To use this allocator, arch code should do the following:
45 *
46 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back
50 *
51 * - use pcpu_setup_first_chunk() during percpu area initialization to
52 * setup the first chunk containing the kernel static percpu area
53 */
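To make the layout described above concrete (illustrative numbers only): with 4 possible CPUs and a 64k unit, chunk c0 spans 256k of vmalloc space, and an area at offset 6k is replicated at

    c0->vm->addr + cpu * 65536 + 6144       /* cpu = 0..3 */

which matches, at page granularity, the arithmetic pcpu_chunk_addr() performs below; percpu accessors reach their copy because the per-cpu base offsets are exactly unit_size apart.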
54
55#include <linux/bitmap.h>
56#include <linux/bootmem.h>
57#include <linux/list.h>
58#include <linux/mm.h>
59#include <linux/module.h>
60#include <linux/mutex.h>
61#include <linux/percpu.h>
62#include <linux/pfn.h>
63#include <linux/rbtree.h>
64#include <linux/slab.h>
65#include <linux/spinlock.h>
66#include <linux/vmalloc.h>
67#include <linux/workqueue.h>
68
69#include <asm/cacheflush.h>
70#include <asm/tlbflush.h>
71
72#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
73#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
74
75struct pcpu_chunk {
76 struct list_head list; /* linked to pcpu_slot lists */
77 struct rb_node rb_node; /* key is chunk->vm->addr */
78 int free_size; /* free bytes in the chunk */
79 int contig_hint; /* max contiguous size hint */
80 struct vm_struct *vm; /* mapped vmalloc region */
81 int map_used; /* # of map entries used */
82 int map_alloc; /* # of map entries allocated */
83 int *map; /* allocation map */
84 bool immutable; /* no [de]population allowed */
85 struct page **page; /* points to page array */
86 struct page *page_ar[]; /* #cpus * UNIT_PAGES */
87};
88
89static int pcpu_unit_pages __read_mostly;
90static int pcpu_unit_size __read_mostly;
91static int pcpu_chunk_size __read_mostly;
92static int pcpu_nr_slots __read_mostly;
93static size_t pcpu_chunk_struct_size __read_mostly;
94
95/* the address of the first chunk which starts with the kernel static area */
96void *pcpu_base_addr __read_mostly;
97EXPORT_SYMBOL_GPL(pcpu_base_addr);
98
99/* optional reserved chunk, only accessible for reserved allocations */
100static struct pcpu_chunk *pcpu_reserved_chunk;
101/* offset limit of the reserved chunk */
102static int pcpu_reserved_chunk_limit;
103
104/*
105 * Synchronization rules.
106 *
107 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
108 * protects allocation/reclaim paths, chunks and chunk->page arrays.
109 * The latter is a spinlock and protects the index data structures -
110 * chunk slots, rbtree, chunks and area maps in chunks.
111 *
112 * During allocation, pcpu_alloc_mutex is kept locked all the time and
113 * pcpu_lock is grabbed and released as necessary. All actual memory
114 * allocations are done using GFP_KERNEL with pcpu_lock released.
115 *
116 * Free path accesses and alters only the index data structures, so it
117 * can be safely called from atomic context. When memory needs to be
118 * returned to the system, free path schedules reclaim_work which
119 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
120 * reclaimed, release both locks and frees the chunks. Note that it's
121 * necessary to grab both locks to remove a chunk from circulation as
122 * allocation path might be referencing the chunk with only
123 * pcpu_alloc_mutex locked.
124 */
125static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
126static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
127
128static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
129static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
130
131/* reclaim work to release fully free chunks, scheduled from free path */
132static void pcpu_reclaim(struct work_struct *work);
133static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
134
135static int __pcpu_size_to_slot(int size)
136{
137 int highbit = fls(size); /* size is in bytes */
138 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
139}
140
141static int pcpu_size_to_slot(int size)
142{
143 if (size == pcpu_unit_size)
144 return pcpu_nr_slots - 1;
145 return __pcpu_size_to_slot(size);
146}
147
148static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
149{
150 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
151 return 0;
152
153 return pcpu_size_to_slot(chunk->free_size);
154}
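Worked example of the slot math (illustrative sizes):

    __pcpu_size_to_slot(64)   = max(fls(64)   - 5 + 2, 1) = 4
    __pcpu_size_to_slot(1024) = max(fls(1024) - 5 + 2, 1) = 8

so chunks are binned by free size into roughly power-of-two buckets, while a chunk whose whole unit is free always lands in the dedicated last slot.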
155
156static int pcpu_page_idx(unsigned int cpu, int page_idx)
157{
158 return cpu * pcpu_unit_pages + page_idx;
159}
160
161static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
162 unsigned int cpu, int page_idx)
163{
164 return &chunk->page[pcpu_page_idx(cpu, page_idx)];
165}
166
167static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
168 unsigned int cpu, int page_idx)
169{
170 return (unsigned long)chunk->vm->addr +
171 (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
172}
173
174static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
175 int page_idx)
176{
177 return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
178}
179
180/**
181 * pcpu_mem_alloc - allocate memory
182 * @size: bytes to allocate
183 *
184 * Allocate @size bytes. If @size is no larger than PAGE_SIZE,
185 * kzalloc() is used; otherwise, vmalloc() is used. The returned
186 * memory is always zeroed.
187 *
188 * CONTEXT:
189 * Does GFP_KERNEL allocation.
190 *
191 * RETURNS:
192 * Pointer to the allocated area on success, NULL on failure.
193 */
194static void *pcpu_mem_alloc(size_t size)
195{
196 if (size <= PAGE_SIZE)
197 return kzalloc(size, GFP_KERNEL);
198 else {
199 void *ptr = vmalloc(size);
200 if (ptr)
201 memset(ptr, 0, size);
202 return ptr;
203 }
204}
205
206/**
207 * pcpu_mem_free - free memory
208 * @ptr: memory to free
209 * @size: size of the area
210 *
211 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
212 */
213static void pcpu_mem_free(void *ptr, size_t size)
214{
215 if (size <= PAGE_SIZE)
216 kfree(ptr);
217 else
218 vfree(ptr);
219}
220
221/**
222 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
223 * @chunk: chunk of interest
224 * @oslot: the previous slot it was on
225 *
226 * This function is called after an allocation or free changed @chunk.
227 * New slot according to the changed state is determined and @chunk is
228 * moved to the slot. Note that the reserved chunk is never put on
229 * chunk slots.
230 *
231 * CONTEXT:
232 * pcpu_lock.
233 */
234static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
235{
236 int nslot = pcpu_chunk_slot(chunk);
237
238 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
239 if (oslot < nslot)
240 list_move(&chunk->list, &pcpu_slot[nslot]);
241 else
242 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
243 }
244}
245
246static struct rb_node **pcpu_chunk_rb_search(void *addr,
247 struct rb_node **parentp)
248{
249 struct rb_node **p = &pcpu_addr_root.rb_node;
250 struct rb_node *parent = NULL;
251 struct pcpu_chunk *chunk;
252
253 while (*p) {
254 parent = *p;
255 chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
256
257 if (addr < chunk->vm->addr)
258 p = &(*p)->rb_left;
259 else if (addr > chunk->vm->addr)
260 p = &(*p)->rb_right;
261 else
262 break;
263 }
264
265 if (parentp)
266 *parentp = parent;
267 return p;
268}
269
270/**
271 * pcpu_chunk_addr_search - search for chunk containing specified address
272 * @addr: address to search for
273 *
274 * Look for chunk which might contain @addr. More specifically, it
275 * searches for the chunk with the highest start address which isn't
276 * beyond @addr.
277 *
278 * CONTEXT:
279 * pcpu_lock.
280 *
281 * RETURNS:
282 * The address of the found chunk.
283 */
284static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
285{
286 struct rb_node *n, *parent;
287 struct pcpu_chunk *chunk;
288
289 /* is it in the reserved chunk? */
290 if (pcpu_reserved_chunk) {
291 void *start = pcpu_reserved_chunk->vm->addr;
292
293 if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
294 return pcpu_reserved_chunk;
295 }
296
297 /* nah... search the regular ones */
298 n = *pcpu_chunk_rb_search(addr, &parent);
299 if (!n) {
300 /* no exactly matching chunk, the parent is the closest */
301 n = parent;
302 BUG_ON(!n);
303 }
304 chunk = rb_entry(n, struct pcpu_chunk, rb_node);
305
306 if (addr < chunk->vm->addr) {
307 /* the parent was the next one, look for the previous one */
308 n = rb_prev(n);
309 BUG_ON(!n);
310 chunk = rb_entry(n, struct pcpu_chunk, rb_node);
311 }
312
313 return chunk;
314}
315
316/**
317 * pcpu_chunk_addr_insert - insert chunk into address rb tree
318 * @new: chunk to insert
319 *
320 * Insert @new into address rb tree.
321 *
322 * CONTEXT:
323 * pcpu_lock.
324 */
325static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
326{
327 struct rb_node **p, *parent;
328
329 p = pcpu_chunk_rb_search(new->vm->addr, &parent);
330 BUG_ON(*p);
331 rb_link_node(&new->rb_node, parent, p);
332 rb_insert_color(&new->rb_node, &pcpu_addr_root);
333}
334
335/**
336 * pcpu_extend_area_map - extend area map for allocation
337 * @chunk: target chunk
338 *
339 * Extend area map of @chunk so that it can accommodate an allocation.
340 * A single allocation can split an area into three areas, so this
341 * function makes sure that @chunk->map has at least two extra slots.
342 *
343 * CONTEXT:
344 * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
345 * if area map is extended.
346 *
347 * RETURNS:
348 * 0 if noop, 1 if successfully extended, -errno on failure.
349 */
350static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
351{
352 int new_alloc;
353 int *new;
354 size_t size;
355
356 /* has enough? */
357 if (chunk->map_alloc >= chunk->map_used + 2)
358 return 0;
359
360 spin_unlock_irq(&pcpu_lock);
361
362 new_alloc = PCPU_DFL_MAP_ALLOC;
363 while (new_alloc < chunk->map_used + 2)
364 new_alloc *= 2;
365
366 new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
367 if (!new) {
368 spin_lock_irq(&pcpu_lock);
369 return -ENOMEM;
370 }
371
372 /*
373 * Acquire pcpu_lock and switch to new area map. Only free
374 * could have happened in between, so map_used couldn't have
375 * grown.
376 */
377 spin_lock_irq(&pcpu_lock);
378 BUG_ON(new_alloc < chunk->map_used + 2);
379
380 size = chunk->map_alloc * sizeof(chunk->map[0]);
381 memcpy(new, chunk->map, size);
382
383 /*
384 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
385 * one of the first chunks and still using static map.
386 */
387 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
388 pcpu_mem_free(chunk->map, size);
389
390 chunk->map_alloc = new_alloc;
391 chunk->map = new;
392	return 1;
393}
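/*
 * Worked example (editor's illustration, assuming the default map
 * size of 16 entries): with map_alloc == 16 and map_used == 15,
 * 16 < 15 + 2 forces an extension.  new_alloc doubles from 16 to 32,
 * pcpu_lock is dropped around the GFP_KERNEL allocation, and because
 * only frees can run meanwhile, map_used cannot outgrow the new map
 * before the copy happens under the reacquired lock.
 */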
394
395/**
396 * pcpu_split_block - split a map block
397 * @chunk: chunk of interest
398 * @i: index of map block to split
399 * @head: head size in bytes (can be 0)
400 * @tail: tail size in bytes (can be 0)
401 *
402 * Split the @i'th map block into two or three blocks. If @head is
403 * non-zero, a block of @head bytes is inserted before block @i,
404 * moving it to @i+1 and reducing its size by @head bytes.
405 *
406 * If @tail is non-zero, the target block, which can be @i or @i+1
407 * depending on @head, is reduced by @tail bytes and a block of
408 * @tail bytes is inserted after it.
409 *
410 * @chunk->map must have enough free slots to accommodate the split.
411 *
412 * CONTEXT:
413 * pcpu_lock.
414 */
415static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
416 int head, int tail)
417{
418 int nr_extra = !!head + !!tail;
419
420 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
421
422 /* insert new subblocks */
423 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
424 sizeof(chunk->map[0]) * (chunk->map_used - i));
425 chunk->map_used += nr_extra;
426
427 if (head) {
428 chunk->map[i + 1] = chunk->map[i] - head;
429 chunk->map[i++] = head;
430 }
431 if (tail) {
432 chunk->map[i++] -= tail;
433 chunk->map[i] = tail;
434 }
435}
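/*
 * Worked example (editor's illustration): with map = { -128, 512 }
 * (negative entries are allocated, positive free), calling
 * pcpu_split_block(chunk, 1, 64, 192) first memmoves the tail to
 * open two slots, then carves the 512-byte free block into
 * 64 + 256 + 192, leaving map = { -128, 64, 256, 192 }.  The middle
 * 256 bytes are what pcpu_alloc_area() would then claim by negating
 * the entry.
 */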
436
437/**
438 * pcpu_alloc_area - allocate area from a pcpu_chunk
439 * @chunk: chunk of interest
440 * @size: wanted size in bytes
441 * @align: wanted align
442 *
443 * Try to allocate @size bytes area aligned at @align from @chunk.
444 * Note that this function only allocates the offset. It doesn't
445 * populate or map the area.
446 *
447 * @chunk->map must have at least two free slots.
448 *
449 * CONTEXT:
450 * pcpu_lock.
451 *
452 * RETURNS:
453 * Allocated offset in @chunk on success, -1 if no matching area is
454 * found.
455 */
456static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
457{
458 int oslot = pcpu_chunk_slot(chunk);
459 int max_contig = 0;
460 int i, off;
461
462 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
463 bool is_last = i + 1 == chunk->map_used;
464 int head, tail;
465
466 /* extra for alignment requirement */
467 head = ALIGN(off, align) - off;
468 BUG_ON(i == 0 && head != 0);
469
470 if (chunk->map[i] < 0)
471 continue;
472 if (chunk->map[i] < head + size) {
473 max_contig = max(chunk->map[i], max_contig);
474 continue;
475 }
476
477 /*
478 * If head is small or the previous block is free,
479 * merge'em. Note that 'small' is defined as smaller
480 * than sizeof(int), which is very small but isn't too
481 * uncommon for percpu allocations.
482 */
483 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
484 if (chunk->map[i - 1] > 0)
485 chunk->map[i - 1] += head;
486 else {
487 chunk->map[i - 1] -= head;
488 chunk->free_size -= head;
489 }
490 chunk->map[i] -= head;
491 off += head;
492 head = 0;
493 }
494
495 /* if tail is small, just keep it around */
496 tail = chunk->map[i] - head - size;
497 if (tail < sizeof(int))
498 tail = 0;
499
500 /* split if warranted */
501 if (head || tail) {
502 pcpu_split_block(chunk, i, head, tail);
503 if (head) {
504 i++;
505 off += head;
506 max_contig = max(chunk->map[i - 1], max_contig);
507 }
508 if (tail)
509 max_contig = max(chunk->map[i + 1], max_contig);
510 }
511
512 /* update hint and mark allocated */
513 if (is_last)
514 chunk->contig_hint = max_contig; /* fully scanned */
515 else
516 chunk->contig_hint = max(chunk->contig_hint,
517 max_contig);
518
519 chunk->free_size -= chunk->map[i];
520 chunk->map[i] = -chunk->map[i];
521
522 pcpu_chunk_relocate(chunk, oslot);
523 return off;
524 }
525
526 chunk->contig_hint = max_contig; /* fully scanned */
527 pcpu_chunk_relocate(chunk, oslot);
528
529 /* tell the upper layer that this chunk has no matching area */
530 return -1;
531}
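/*
 * Worked example (editor's illustration): allocating size = 256 with
 * align = 64 from map = { -128, 512 } skips the allocated first
 * entry, reaches the free block at offset 128 (already 64-byte
 * aligned, so head = 0), keeps tail = 512 - 256 = 256, splits to
 * { -128, 256, 256 } and flips the target entry's sign, returning
 * offset 128 with map = { -128, -256, 256 }.
 */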
532
533/**
534 * pcpu_free_area - free area to a pcpu_chunk
535 * @chunk: chunk of interest
536 * @freeme: offset of area to free
537 *
538 * Free the area starting at @freeme in @chunk. Note that this function
539 * only modifies the allocation map. It doesn't depopulate or unmap
540 * the area.
541 *
542 * CONTEXT:
543 * pcpu_lock.
544 */
545static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
546{
547 int oslot = pcpu_chunk_slot(chunk);
548 int i, off;
549
550 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
551 if (off == freeme)
552 break;
553 BUG_ON(off != freeme);
554 BUG_ON(chunk->map[i] > 0);
555
556 chunk->map[i] = -chunk->map[i];
557 chunk->free_size += chunk->map[i];
558
559 /* merge with previous? */
560 if (i > 0 && chunk->map[i - 1] >= 0) {
561 chunk->map[i - 1] += chunk->map[i];
562 chunk->map_used--;
563 memmove(&chunk->map[i], &chunk->map[i + 1],
564 (chunk->map_used - i) * sizeof(chunk->map[0]));
565 i--;
566 }
567 /* merge with next? */
568 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
569 chunk->map[i] += chunk->map[i + 1];
570 chunk->map_used--;
571 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
572 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
573 }
574
575 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
576 pcpu_chunk_relocate(chunk, oslot);
577}
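/*
 * Worked example (editor's illustration): continuing the map above,
 * pcpu_free_area(chunk, 128) walks to map = { -128, -256, 256 },
 * flips the entry back to 256, and the "merge with next" pass
 * coalesces the two free neighbours, restoring map = { -128, 512 }.
 */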
578
579/**
580 * pcpu_unmap - unmap pages out of a pcpu_chunk
581 * @chunk: chunk of interest
582 * @page_start: page index of the first page to unmap
583 * @page_end: page index of the last page to unmap + 1
584 * @flush: whether to flush cache and tlb or not
585 *
586 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
587 * If @flush is true, vcache is flushed before unmapping and tlb
588 * after.
589 */
590static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
591 bool flush)
592{
593 unsigned int last = num_possible_cpus() - 1;
594 unsigned int cpu;
595
596 /* unmap must not be done on immutable chunk */
597 WARN_ON(chunk->immutable);
598
599 /*
600 * Each flush can be very expensive, so issue the flush on the
601 * whole region at once rather than doing it for each cpu. This
602 * could be overkill but is more scalable.
603 */
604 if (flush)
605 flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
606 pcpu_chunk_addr(chunk, last, page_end));
607
608 for_each_possible_cpu(cpu)
609 unmap_kernel_range_noflush(
610 pcpu_chunk_addr(chunk, cpu, page_start),
611 (page_end - page_start) << PAGE_SHIFT);
612
613	/* same whole-region batching as flush_cache_vunmap() above */
614 if (flush)
615 flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
616 pcpu_chunk_addr(chunk, last, page_end));
617}
618
619/**
620 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
621 * @chunk: chunk to depopulate
622 * @off: offset to the area to depopulate
623 * @size: size of the area to depopulate in bytes
624 * @flush: whether to flush cache and tlb or not
625 *
626 * For each cpu, depopulate and unmap the pages covering [@off,
627 * @off + @size) from @chunk. If @flush is true, vcache is flushed
628 * before unmapping and tlb after.
629 *
630 * CONTEXT:
631 * pcpu_alloc_mutex.
632 */
633static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
634 bool flush)
635{
636 int page_start = PFN_DOWN(off);
637 int page_end = PFN_UP(off + size);
638 int unmap_start = -1;
639 int uninitialized_var(unmap_end);
640 unsigned int cpu;
641 int i;
642
643 for (i = page_start; i < page_end; i++) {
644 for_each_possible_cpu(cpu) {
645 struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
646
647 if (!*pagep)
648 continue;
649
650 __free_page(*pagep);
651
652 /*
653 * If it's partial depopulation, it might get
654 * populated or depopulated again. Mark the
655 * page gone.
656 */
657 *pagep = NULL;
658
659 unmap_start = unmap_start < 0 ? i : unmap_start;
660 unmap_end = i + 1;
661 }
662 }
663
664 if (unmap_start >= 0)
665 pcpu_unmap(chunk, unmap_start, unmap_end, flush);
666}
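/*
 * Worked example (editor's illustration): depopulating an area that
 * spans pages 3..6 of a chunk in which only pages 4 and 5 are
 * populated frees those two pages on every cpu and ends with
 * unmap_start == 4 and unmap_end == 6, so the single pcpu_unmap()
 * call covers exactly the range that was actually touched.
 */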
667
668/**
669 * pcpu_map - map pages into a pcpu_chunk
670 * @chunk: chunk of interest
671 * @page_start: page index of the first page to map
672 * @page_end: page index of the last page to map + 1
673 *
674 * For each cpu, map pages [@page_start,@page_end) into @chunk.
675 * vcache is flushed afterwards.
676 */
677static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
678{
679 unsigned int last = num_possible_cpus() - 1;
680 unsigned int cpu;
681 int err;
682
683 /* map must not be done on immutable chunk */
684 WARN_ON(chunk->immutable);
685
686 for_each_possible_cpu(cpu) {
687 err = map_kernel_range_noflush(
688 pcpu_chunk_addr(chunk, cpu, page_start),
689 (page_end - page_start) << PAGE_SHIFT,
690 PAGE_KERNEL,
691 pcpu_chunk_pagep(chunk, cpu, page_start));
692 if (err < 0)
693 return err;
694 }
695
696 /* flush at once, please read comments in pcpu_unmap() */
697 flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
698 pcpu_chunk_addr(chunk, last, page_end));
699 return 0;
700}
701
702/**
703 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
704 * @chunk: chunk of interest
705 * @off: offset to the area to populate
706 * @size: size of the area to populate in bytes
707 *
708 * For each cpu, populate and map the pages covering [@off,
709 * @off + @size) into @chunk. The area is cleared on return.
710 *
711 * CONTEXT:
712 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
713 */
714static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
715{
716 const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
717 int page_start = PFN_DOWN(off);
718 int page_end = PFN_UP(off + size);
719 int map_start = -1;
720 int uninitialized_var(map_end);
721 unsigned int cpu;
722 int i;
723
724 for (i = page_start; i < page_end; i++) {
725 if (pcpu_chunk_page_occupied(chunk, i)) {
726 if (map_start >= 0) {
727 if (pcpu_map(chunk, map_start, map_end))
728 goto err;
729 map_start = -1;
730 }
731 continue;
732 }
733
734 map_start = map_start < 0 ? i : map_start;
735 map_end = i + 1;
736
737 for_each_possible_cpu(cpu) {
738 struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
739
740 *pagep = alloc_pages_node(cpu_to_node(cpu),
741 alloc_mask, 0);
742 if (!*pagep)
743 goto err;
744 }
745 }
746
747 if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
748 goto err;
749
750 for_each_possible_cpu(cpu)
751 memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
752 size);
753
754 return 0;
755err:
756 /* likely under heavy memory pressure, give memory back */
757 pcpu_depopulate_chunk(chunk, off, size, true);
758 return -ENOMEM;
759}
760
761static void free_pcpu_chunk(struct pcpu_chunk *chunk)
762{
763 if (!chunk)
764 return;
765 if (chunk->vm)
766 free_vm_area(chunk->vm);
767 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
768 kfree(chunk);
769}
770
771static struct pcpu_chunk *alloc_pcpu_chunk(void)
772{
773 struct pcpu_chunk *chunk;
774
775 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
776 if (!chunk)
777 return NULL;
778
779 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
780 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
781 chunk->map[chunk->map_used++] = pcpu_unit_size;
782 chunk->page = chunk->page_ar;
783
784 chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
785 if (!chunk->vm) {
786 free_pcpu_chunk(chunk);
787 return NULL;
788 }
789
790 INIT_LIST_HEAD(&chunk->list);
791 chunk->free_size = pcpu_unit_size;
792 chunk->contig_hint = pcpu_unit_size;
793
794 return chunk;
795}
796
797/**
798 * pcpu_alloc - the percpu allocator
799 * @size: size of area to allocate in bytes
800 * @align: alignment of area (max PAGE_SIZE)
801 * @reserved: allocate from the reserved chunk if available
802 *
803 * Allocate percpu area of @size bytes aligned at @align.
804 *
805 * CONTEXT:
806 * Does GFP_KERNEL allocation.
807 *
808 * RETURNS:
809 * Percpu pointer to the allocated area on success, NULL on failure.
810 */
811static void *pcpu_alloc(size_t size, size_t align, bool reserved)
812{
813 struct pcpu_chunk *chunk;
814 int slot, off;
815
816 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
817 WARN(true, "illegal size (%zu) or align (%zu) for "
818 "percpu allocation\n", size, align);
819 return NULL;
820 }
821
822 mutex_lock(&pcpu_alloc_mutex);
823 spin_lock_irq(&pcpu_lock);
824
825 /* serve reserved allocations from the reserved chunk if available */
826 if (reserved && pcpu_reserved_chunk) {
827 chunk = pcpu_reserved_chunk;
828 if (size > chunk->contig_hint ||
829 pcpu_extend_area_map(chunk) < 0)
830 goto fail_unlock;
831 off = pcpu_alloc_area(chunk, size, align);
832 if (off >= 0)
833 goto area_found;
834 goto fail_unlock;
835 }
836
837restart:
838 /* search through normal chunks */
839 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
840 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
841 if (size > chunk->contig_hint)
842 continue;
843
844 switch (pcpu_extend_area_map(chunk)) {
845 case 0:
846 break;
847 case 1:
848 goto restart; /* pcpu_lock dropped, restart */
849 default:
850 goto fail_unlock;
851 }
852
853 off = pcpu_alloc_area(chunk, size, align);
854 if (off >= 0)
855 goto area_found;
856 }
857 }
858
859 /* hmmm... no space left, create a new chunk */
860 spin_unlock_irq(&pcpu_lock);
861
862 chunk = alloc_pcpu_chunk();
863 if (!chunk)
864 goto fail_unlock_mutex;
865
866 spin_lock_irq(&pcpu_lock);
867 pcpu_chunk_relocate(chunk, -1);
868 pcpu_chunk_addr_insert(chunk);
869 goto restart;
870
871area_found:
872 spin_unlock_irq(&pcpu_lock);
873
874 /* populate, map and clear the area */
875 if (pcpu_populate_chunk(chunk, off, size)) {
876 spin_lock_irq(&pcpu_lock);
877 pcpu_free_area(chunk, off);
878 goto fail_unlock;
879 }
880
881 mutex_unlock(&pcpu_alloc_mutex);
882
883 return __addr_to_pcpu_ptr(chunk->vm->addr + off);
884
885fail_unlock:
886 spin_unlock_irq(&pcpu_lock);
887fail_unlock_mutex:
888 mutex_unlock(&pcpu_alloc_mutex);
889 return NULL;
890}
891
892/**
893 * __alloc_percpu - allocate dynamic percpu area
894 * @size: size of area to allocate in bytes
895 * @align: alignment of area (max PAGE_SIZE)
896 *
897 * Allocate percpu area of @size bytes aligned at @align. Might
898 * sleep. Might trigger writeouts.
899 *
900 * CONTEXT:
901 * Does GFP_KERNEL allocation.
902 *
903 * RETURNS:
904 * Percpu pointer to the allocated area on success, NULL on failure.
905 */
906void *__alloc_percpu(size_t size, size_t align)
907{
908 return pcpu_alloc(size, align, false);
909}
910EXPORT_SYMBOL_GPL(__alloc_percpu);
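/*
 * Usage sketch (editor's illustration, hypothetical names throughout):
 * memory handed out by __alloc_percpu() is accessed with the usual
 * per_cpu_ptr() accessor and released with free_percpu() below.
 */
struct example_counters {
	unsigned long	rx;
	unsigned long	tx;
};

static struct example_counters *example_counters;

static int example_setup(void)
{
	unsigned int cpu;

	example_counters = __alloc_percpu(sizeof(*example_counters),
					  __alignof__(*example_counters));
	if (!example_counters)
		return -ENOMEM;

	/* the area is returned cleared; the loop just shows access */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(example_counters, cpu)->rx = 0;

	return 0;
}

static void example_teardown(void)
{
	free_percpu(example_counters);
}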
911
912/**
913 * __alloc_reserved_percpu - allocate reserved percpu area
914 * @size: size of area to allocate in bytes
915 * @align: alignment of area (max PAGE_SIZE)
916 *
917 * Allocate percpu area of @size bytes aligned at @align from reserved
918 * percpu area if arch has set it up; otherwise, allocation is served
919 * from the same dynamic area. Might sleep. Might trigger writeouts.
920 *
921 * CONTEXT:
922 * Does GFP_KERNEL allocation.
923 *
924 * RETURNS:
925 * Percpu pointer to the allocated area on success, NULL on failure.
926 */
927void *__alloc_reserved_percpu(size_t size, size_t align)
928{
929 return pcpu_alloc(size, align, true);
930}
931
932/**
933 * pcpu_reclaim - reclaim fully free chunks, workqueue function
934 * @work: unused
935 *
936 * Reclaim all fully free chunks except for the first one.
937 *
938 * CONTEXT:
939 * workqueue context.
940 */
941static void pcpu_reclaim(struct work_struct *work)
942{
943 LIST_HEAD(todo);
944 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
945 struct pcpu_chunk *chunk, *next;
946
947 mutex_lock(&pcpu_alloc_mutex);
948 spin_lock_irq(&pcpu_lock);
949
950 list_for_each_entry_safe(chunk, next, head, list) {
951 WARN_ON(chunk->immutable);
952
953 /* spare the first one */
954 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
955 continue;
956
957 rb_erase(&chunk->rb_node, &pcpu_addr_root);
958 list_move(&chunk->list, &todo);
959 }
960
961 spin_unlock_irq(&pcpu_lock);
962 mutex_unlock(&pcpu_alloc_mutex);
963
964 list_for_each_entry_safe(chunk, next, &todo, list) {
965 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
966 free_pcpu_chunk(chunk);
967 }
968}
969
970/**
971 * free_percpu - free percpu area
972 * @ptr: pointer to area to free
973 *
974 * Free percpu area @ptr.
975 *
976 * CONTEXT:
977 * Can be called from atomic context.
978 */
979void free_percpu(void *ptr)
980{
981 void *addr = __pcpu_ptr_to_addr(ptr);
982 struct pcpu_chunk *chunk;
983 unsigned long flags;
984 int off;
985
986 if (!ptr)
987 return;
988
989 spin_lock_irqsave(&pcpu_lock, flags);
990
991 chunk = pcpu_chunk_addr_search(addr);
992 off = addr - chunk->vm->addr;
993
994 pcpu_free_area(chunk, off);
995
996	/* if there is more than one fully free chunk, wake up the grim reaper */
997 if (chunk->free_size == pcpu_unit_size) {
998 struct pcpu_chunk *pos;
999
1000 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1001 if (pos != chunk) {
1002 schedule_work(&pcpu_reclaim_work);
1003 break;
1004 }
1005 }
1006
1007 spin_unlock_irqrestore(&pcpu_lock, flags);
1008}
1009EXPORT_SYMBOL_GPL(free_percpu);
1010
1011/**
1012 * pcpu_setup_first_chunk - initialize the first percpu chunk
1013 * @get_page_fn: callback to fetch page pointer
1014 * @static_size: the size of static percpu area in bytes
1015 * @reserved_size: the size of reserved percpu area in bytes
1016 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
1017 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1018 * @base_addr: mapped address, NULL for auto
1019 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
1020 *
1021 * Initialize the first percpu chunk which contains the kernel static
1022 * percpu area. This function is to be called from arch percpu area
1023 * setup path. The first two parameters are mandatory. The rest are
1024 * optional.
1025 *
1026 * @get_page_fn() should return pointer to percpu page given cpu
1027 * number and page number. It should at least return enough pages to
1028 * cover the static area. The returned pages for static area should
1029 * have been initialized with valid data. If @unit_size is specified,
1030 * it can also return pages after the static area. NULL return
1031 * indicates end of pages for the cpu. Note that @get_page_fn() must
1032 * return the same number of pages for all cpus.
1033 *
1034 * @reserved_size, if non-zero, specifies the number of bytes to
1035 * reserve after the static area in the first chunk. This reserves
1036 * the first chunk such that it's available only through reserved
1037 * percpu allocation. This is primarily used to serve module percpu
1038 * static areas on architectures where the addressing model has
1039 * limited offset range for symbol relocations to guarantee module
1040 * percpu symbols fall inside the relocatable range.
1041 *
1042 * @unit_size, if non-negative, specifies unit size and must be
1043 * aligned to PAGE_SIZE and equal to or larger than @static_size +
1044 * @reserved_size + @dyn_size.
1045 *
1046 * @dyn_size, if non-negative, limits the number of bytes available
1047 * for dynamic allocation in the first chunk. Specifying a
1048 * non-negative value makes percpu leave the area beyond
1049 * @static_size + @reserved_size + @dyn_size alone.
1050 *
1051 * Non-null @base_addr means that the caller already allocated virtual
1052 * region for the first chunk and mapped it. percpu must not mess
1053 * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
1054 * @populate_pte_fn doesn't make any sense.
1055 *
1056 * @populate_pte_fn is used to populate the pagetable. NULL means the
1057 * caller already populated the pagetable.
1058 *
1059 * If the first chunk ends up with both reserved and dynamic areas, it
1060 * is served by two chunks - one serving the core static and reserved
1061 * areas and the other serving the dynamic area. They share the same
1062 * vm and page map but use different area allocation maps to stay away
1063 * from each other. The latter chunk is circulated in the chunk slots
1064 * and is available for dynamic allocation like any other chunk.
1065 *
1066 * RETURNS:
1067 * The determined pcpu_unit_size which can be used to initialize
1068 * percpu access.
1069 */
1070size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
1071 size_t static_size, size_t reserved_size,
1072 ssize_t unit_size, ssize_t dyn_size,
1073 void *base_addr,
1074 pcpu_populate_pte_fn_t populate_pte_fn)
1075{
1076 static struct vm_struct first_vm;
1077 static int smap[2], dmap[2];
1078 struct pcpu_chunk *schunk, *dchunk = NULL;
1079 unsigned int cpu;
1080 int nr_pages;
1081 int err, i;
1082
1083	/* sanity checks */
1084 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1085 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1086 BUG_ON(!static_size);
1087 if (unit_size >= 0) {
1088 BUG_ON(unit_size < static_size + reserved_size +
1089 (dyn_size >= 0 ? dyn_size : 0));
1090 BUG_ON(unit_size & ~PAGE_MASK);
1091 } else {
1092 BUG_ON(dyn_size >= 0);
1093 BUG_ON(base_addr);
1094 }
1095 BUG_ON(base_addr && populate_pte_fn);
1096
1097 if (unit_size >= 0)
1098 pcpu_unit_pages = unit_size >> PAGE_SHIFT;
1099 else
1100 pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
1101 PFN_UP(static_size + reserved_size));
1102
1103 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1104 pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
1105 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
1106 + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
1107
1108 if (dyn_size < 0)
1109 dyn_size = pcpu_unit_size - static_size - reserved_size;
1110
1111 /*
1112 * Allocate chunk slots. The additional last slot is for
1113 * empty chunks.
1114 */
1115 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1116 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1117 for (i = 0; i < pcpu_nr_slots; i++)
1118 INIT_LIST_HEAD(&pcpu_slot[i]);
1119
1120 /*
1121 * Initialize static chunk. If reserved_size is zero, the
1122 * static chunk covers static area + dynamic allocation area
1123 * in the first chunk. If reserved_size is not zero, it
1124 * covers static area + reserved area (mostly used for module
1125 * static percpu allocation).
1126 */
1127 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1128 INIT_LIST_HEAD(&schunk->list);
1129 schunk->vm = &first_vm;
1130 schunk->map = smap;
1131 schunk->map_alloc = ARRAY_SIZE(smap);
1132 schunk->page = schunk->page_ar;
1133
1134 if (reserved_size) {
1135 schunk->free_size = reserved_size;
1136 pcpu_reserved_chunk = schunk; /* not for dynamic alloc */
1137 } else {
1138 schunk->free_size = dyn_size;
1139 dyn_size = 0; /* dynamic area covered */
1140 }
1141 schunk->contig_hint = schunk->free_size;
1142
1143 schunk->map[schunk->map_used++] = -static_size;
1144 if (schunk->free_size)
1145 schunk->map[schunk->map_used++] = schunk->free_size;
1146
1147 pcpu_reserved_chunk_limit = static_size + schunk->free_size;
1148
1149 /* init dynamic chunk if necessary */
1150 if (dyn_size) {
1151 dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
1152 INIT_LIST_HEAD(&dchunk->list);
1153 dchunk->vm = &first_vm;
1154 dchunk->map = dmap;
1155 dchunk->map_alloc = ARRAY_SIZE(dmap);
1156 dchunk->page = schunk->page_ar; /* share page map with schunk */
1157
1158 dchunk->contig_hint = dchunk->free_size = dyn_size;
1159 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1160 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1161 }
1162
1163 /* allocate vm address */
1164 first_vm.flags = VM_ALLOC;
1165 first_vm.size = pcpu_chunk_size;
1166
1167 if (!base_addr)
1168 vm_area_register_early(&first_vm, PAGE_SIZE);
1169 else {
1170 /*
1171 * Pages already mapped. No need to remap into
1172 * vmalloc area. In this case the first chunks can't
1173 * be mapped or unmapped by percpu and are marked
1174 * immutable.
1175 */
1176 first_vm.addr = base_addr;
1177 schunk->immutable = true;
1178 if (dchunk)
1179 dchunk->immutable = true;
1180 }
1181
1182 /* assign pages */
1183 nr_pages = -1;
1184 for_each_possible_cpu(cpu) {
1185 for (i = 0; i < pcpu_unit_pages; i++) {
1186 struct page *page = get_page_fn(cpu, i);
1187
1188 if (!page)
1189 break;
1190 *pcpu_chunk_pagep(schunk, cpu, i) = page;
1191 }
1192
1193 BUG_ON(i < PFN_UP(static_size));
1194
1195 if (nr_pages < 0)
1196 nr_pages = i;
1197 else
1198 BUG_ON(nr_pages != i);
1199 }
1200
1201 /* map them */
1202 if (populate_pte_fn) {
1203 for_each_possible_cpu(cpu)
1204 for (i = 0; i < nr_pages; i++)
1205 populate_pte_fn(pcpu_chunk_addr(schunk,
1206 cpu, i));
1207
1208 err = pcpu_map(schunk, 0, nr_pages);
1209 if (err)
1210 panic("failed to setup static percpu area, err=%d\n",
1211 err);
1212 }
1213
1214 /* link the first chunk in */
1215 if (!dchunk) {
1216 pcpu_chunk_relocate(schunk, -1);
1217 pcpu_chunk_addr_insert(schunk);
1218 } else {
1219 pcpu_chunk_relocate(dchunk, -1);
1220 pcpu_chunk_addr_insert(dchunk);
1221 }
1222
1223 /* we're done */
1224 pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
1225 return pcpu_unit_size;
1226}
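/*
 * Setup sketch (editor's illustration, hypothetical arch code):
 * example_pcpu_pages[][], example_populate_pte() and the
 * __per_cpu_offset[] fixup are assumptions about the arch side, not
 * part of this patch.  An arch backing the first chunk with
 * individually allocated pages could call in roughly like this,
 * letting unit size, dynamic size and the vm area all default.
 */
static struct page *example_get_page(unsigned int cpu, int pageno)
{
	return example_pcpu_pages[cpu][pageno];
}

void __init example_setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	size_t unit_size;
	unsigned int cpu;

	unit_size = pcpu_setup_first_chunk(example_get_page, static_size,
					   0, -1, -1, NULL,
					   example_populate_pte);

	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = (unsigned long)pcpu_base_addr +
			cpu * unit_size - (unsigned long)__per_cpu_start;
}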
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 11a929872ebd..af58324c361a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
24#include <linux/radix-tree.h> 24#include <linux/radix-tree.h>
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/bootmem.h> 26#include <linux/bootmem.h>
27#include <linux/pfn.h>
27 28
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
152 * 153 *
153 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] 154 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
154 */ 155 */
155static int vmap_page_range(unsigned long start, unsigned long end, 156static int vmap_page_range_noflush(unsigned long start, unsigned long end,
156 pgprot_t prot, struct page **pages) 157 pgprot_t prot, struct page **pages)
157{ 158{
158 pgd_t *pgd; 159 pgd_t *pgd;
159 unsigned long next; 160 unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
169 if (err) 170 if (err)
170 break; 171 break;
171 } while (pgd++, addr = next, addr != end); 172 } while (pgd++, addr = next, addr != end);
172 flush_cache_vmap(start, end);
173 173
174 if (unlikely(err)) 174 if (unlikely(err))
175 return err; 175 return err;
176 return nr; 176 return nr;
177} 177}
178 178
179static int vmap_page_range(unsigned long start, unsigned long end,
180 pgprot_t prot, struct page **pages)
181{
182 int ret;
183
184 ret = vmap_page_range_noflush(start, end, prot, pages);
185 flush_cache_vmap(start, end);
186 return ret;
187}
188
179static inline int is_vmalloc_or_module_addr(const void *x) 189static inline int is_vmalloc_or_module_addr(const void *x)
180{ 190{
181 /* 191 /*
@@ -990,6 +1000,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
990} 1000}
991EXPORT_SYMBOL(vm_map_ram); 1001EXPORT_SYMBOL(vm_map_ram);
992 1002
1003/**
1004 * vm_area_register_early - register vmap area early during boot
1005 * @vm: vm_struct to register
1006 * @align: requested alignment
1007 *
1008 * This function is used to register kernel vm area before
1009 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1010 * proper values on entry and other fields should be zero. On return,
1011 * vm->addr contains the allocated address.
1012 *
1013 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1014 */
1015void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1016{
1017 static size_t vm_init_off __initdata;
1018 unsigned long addr;
1019
1020 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1021 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1022
1023 vm->addr = (void *)addr;
1024
1025 vm->next = vmlist;
1026 vmlist = vm;
1027}
1028
993void __init vmalloc_init(void) 1029void __init vmalloc_init(void)
994{ 1030{
995 struct vmap_area *va; 1031 struct vmap_area *va;
@@ -1017,6 +1053,58 @@ void __init vmalloc_init(void)
1017 vmap_initialized = true; 1053 vmap_initialized = true;
1018} 1054}
1019 1055
1056/**
1057 * map_kernel_range_noflush - map kernel VM area with the specified pages
1058 * @addr: start of the VM area to map
1059 * @size: size of the VM area to map
1060 * @prot: page protection flags to use
1061 * @pages: pages to map
1062 *
1063 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and
1064 * @size specify should have been allocated using get_vm_area() and
1065 * its friends.
1066 *
1067 * NOTE:
1068 * This function does NOT do any cache flushing. The caller is
1069 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1070 * before calling this function.
1071 *
1072 * RETURNS:
1073 * The number of pages mapped on success, -errno on failure.
1074 */
1075int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1076 pgprot_t prot, struct page **pages)
1077{
1078 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1079}
1080
1081/**
1082 * unmap_kernel_range_noflush - unmap kernel VM area
1083 * @addr: start of the VM area to unmap
1084 * @size: size of the VM area to unmap
1085 *
1086 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and
1087 * @size specify should have been allocated using get_vm_area() and
1088 * its friends.
1089 *
1090 * NOTE:
1091 * This function does NOT do any cache flushing. The caller is
1092 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1093 * before calling this function and flush_tlb_kernel_range() after.
1094 */
1095void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1096{
1097 vunmap_page_range(addr, addr + size);
1098}
1099
1100/**
1101 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1102 * @addr: start of the VM area to unmap
1103 * @size: size of the VM area to unmap
1104 *
1105 * Similar to unmap_kernel_range_noflush() but flushes vcache before
1106 * the unmapping and tlb after.
1107 */
1020void unmap_kernel_range(unsigned long addr, unsigned long size) 1108void unmap_kernel_range(unsigned long addr, unsigned long size)
1021{ 1109{
1022 unsigned long end = addr + size; 1110 unsigned long end = addr + size;
diff --git a/net/802/tr.c b/net/802/tr.c
index 158150fee462..f47ae289d83b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -668,3 +668,5 @@ module_init(rif_init);
668 668
669EXPORT_SYMBOL(tr_type_trans); 669EXPORT_SYMBOL(tr_type_trans);
670EXPORT_SYMBOL(alloc_trdev); 670EXPORT_SYMBOL(alloc_trdev);
671
672MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a19acd3a32b..1b34135cf990 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
553 int err = 0; 553 int err = 0;
554 554
555 if (netif_device_present(real_dev) && ops->ndo_neigh_setup) 555 if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
556 err = ops->ndo_neigh_setup(dev, pa); 556 err = ops->ndo_neigh_setup(real_dev, pa);
557 557
558 return err; 558 return err;
559} 559}
@@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev)
639 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 639 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
640 dev->netdev_ops = &vlan_netdev_ops; 640 dev->netdev_ops = &vlan_netdev_ops;
641 } 641 }
642 netdev_resync_ops(dev);
642 643
643 if (is_vlan_dev(real_dev)) 644 if (is_vlan_dev(real_dev))
644 subclass = 1; 645 subclass = 1;
diff --git a/net/core/dev.c b/net/core/dev.c
index 72b0d26fd46d..f1129706ce7b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb)
2267 2267
2268 rcu_read_lock(); 2268 rcu_read_lock();
2269 2269
2270 /* Don't receive packets in an exiting network namespace */
2271 if (!net_alive(dev_net(skb->dev))) {
2272 kfree_skb(skb);
2273 goto out;
2274 }
2275
2276#ifdef CONFIG_NET_CLS_ACT 2270#ifdef CONFIG_NET_CLS_ACT
2277 if (skb->tc_verd & TC_NCLS) { 2271 if (skb->tc_verd & TC_NCLS) {
2278 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 2272 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -4288,6 +4282,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
4288} 4282}
4289EXPORT_SYMBOL(netdev_fix_features); 4283EXPORT_SYMBOL(netdev_fix_features);
4290 4284
4285/* Some devices need to (re-)set their netdev_ops inside
4286 * ->init() or similar. If that happens, we have to setup
4287 * the compat pointers again.
4288 */
4289void netdev_resync_ops(struct net_device *dev)
4290{
4291#ifdef CONFIG_COMPAT_NET_DEV_OPS
4292 const struct net_device_ops *ops = dev->netdev_ops;
4293
4294 dev->init = ops->ndo_init;
4295 dev->uninit = ops->ndo_uninit;
4296 dev->open = ops->ndo_open;
4297 dev->change_rx_flags = ops->ndo_change_rx_flags;
4298 dev->set_rx_mode = ops->ndo_set_rx_mode;
4299 dev->set_multicast_list = ops->ndo_set_multicast_list;
4300 dev->set_mac_address = ops->ndo_set_mac_address;
4301 dev->validate_addr = ops->ndo_validate_addr;
4302 dev->do_ioctl = ops->ndo_do_ioctl;
4303 dev->set_config = ops->ndo_set_config;
4304 dev->change_mtu = ops->ndo_change_mtu;
4305 dev->neigh_setup = ops->ndo_neigh_setup;
4306 dev->tx_timeout = ops->ndo_tx_timeout;
4307 dev->get_stats = ops->ndo_get_stats;
4308 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4309 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4310 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4311#ifdef CONFIG_NET_POLL_CONTROLLER
4312 dev->poll_controller = ops->ndo_poll_controller;
4313#endif
4314#endif
4315}
4316EXPORT_SYMBOL(netdev_resync_ops);
4317
4291/** 4318/**
4292 * register_netdevice - register a network device 4319 * register_netdevice - register a network device
4293 * @dev: device to register 4320 * @dev: device to register
@@ -4332,27 +4359,7 @@ int register_netdevice(struct net_device *dev)
4332 * This is temporary until all network devices are converted. 4359 * This is temporary until all network devices are converted.
4333 */ 4360 */
4334 if (dev->netdev_ops) { 4361 if (dev->netdev_ops) {
4335 const struct net_device_ops *ops = dev->netdev_ops; 4362 netdev_resync_ops(dev);
4336
4337 dev->init = ops->ndo_init;
4338 dev->uninit = ops->ndo_uninit;
4339 dev->open = ops->ndo_open;
4340 dev->change_rx_flags = ops->ndo_change_rx_flags;
4341 dev->set_rx_mode = ops->ndo_set_rx_mode;
4342 dev->set_multicast_list = ops->ndo_set_multicast_list;
4343 dev->set_mac_address = ops->ndo_set_mac_address;
4344 dev->validate_addr = ops->ndo_validate_addr;
4345 dev->do_ioctl = ops->ndo_do_ioctl;
4346 dev->set_config = ops->ndo_set_config;
4347 dev->change_mtu = ops->ndo_change_mtu;
4348 dev->tx_timeout = ops->ndo_tx_timeout;
4349 dev->get_stats = ops->ndo_get_stats;
4350 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4351 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4352 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4353#ifdef CONFIG_NET_POLL_CONTROLLER
4354 dev->poll_controller = ops->ndo_poll_controller;
4355#endif
4356 } else { 4363 } else {
4357 char drivername[64]; 4364 char drivername[64];
4358 pr_info("%s (%s): not using net_device_ops yet\n", 4365 pr_info("%s (%s): not using net_device_ops yet\n",
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6ac29a46e23e..484f58750eba 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -77,7 +77,9 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
77 if (endp == buf) 77 if (endp == buf)
78 goto err; 78 goto err;
79 79
80 rtnl_lock(); 80 if (!rtnl_trylock())
81 return -ERESTARTSYS;
82
81 if (dev_isalive(net)) { 83 if (dev_isalive(net)) {
82 if ((ret = (*set)(net, new)) == 0) 84 if ((ret = (*set)(net, new)) == 0)
83 ret = len; 85 ret = len;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2adb1a7d361f..e3bebd36f053 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -157,9 +157,6 @@ static void cleanup_net(struct work_struct *work)
157 struct pernet_operations *ops; 157 struct pernet_operations *ops;
158 struct net *net; 158 struct net *net;
159 159
160 /* Be very certain incoming network packets will not find us */
161 rcu_barrier();
162
163 net = container_of(work, struct net, work); 160 net = container_of(work, struct net, work);
164 161
165 mutex_lock(&net_mutex); 162 mutex_lock(&net_mutex);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 743f5542d65a..3a3dad801354 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1375,10 +1375,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
1375int snmp_mib_init(void *ptr[2], size_t mibsize) 1375int snmp_mib_init(void *ptr[2], size_t mibsize)
1376{ 1376{
1377 BUG_ON(ptr == NULL); 1377 BUG_ON(ptr == NULL);
1378 ptr[0] = __alloc_percpu(mibsize); 1378 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
1379 if (!ptr[0]) 1379 if (!ptr[0])
1380 goto err0; 1380 goto err0;
1381 ptr[1] = __alloc_percpu(mibsize); 1381 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
1382 if (!ptr[1]) 1382 if (!ptr[1])
1383 goto err1; 1383 goto err1;
1384 return 0; 1384 return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 705b33b184a3..fc562d29cc46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = {
1205 1205
1206int __init icmp_init(void) 1206int __init icmp_init(void)
1207{ 1207{
1208 return register_pernet_device(&icmp_sk_ops); 1208 return register_pernet_subsys(&icmp_sk_ops);
1209} 1209}
1210 1210
1211EXPORT_SYMBOL(icmp_err_convert); 1211EXPORT_SYMBOL(icmp_err_convert);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 97f71153584f..bf895401218f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3376,7 +3376,7 @@ int __init ip_rt_init(void)
3376 int rc = 0; 3376 int rc = 0;
3377 3377
3378#ifdef CONFIG_NET_CLS_ROUTE 3378#ifdef CONFIG_NET_CLS_ROUTE
3379 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); 3379 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3380 if (!ip_rt_acct) 3380 if (!ip_rt_acct)
3381 panic("IP: failed to allocate ip_rt_acct\n"); 3381 panic("IP: failed to allocate ip_rt_acct\n");
3382#endif 3382#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a262..cf74c416831a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
2443void __init tcp_v4_init(void) 2443void __init tcp_v4_init(void)
2444{ 2444{
2445 inet_hashinfo_init(&tcp_hashinfo); 2445 inet_hashinfo_init(&tcp_hashinfo);
2446 if (register_pernet_device(&tcp_sk_ops)) 2446 if (register_pernet_subsys(&tcp_sk_ops))
2447 panic("Failed to create the TCP control socket.\n"); 2447 panic("Failed to create the TCP control socket.\n");
2448} 2448}
2449 2449
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f9afb452249c..1220e2c7831e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
493 read_unlock(&dev_base_lock); 493 read_unlock(&dev_base_lock);
494} 494}
495 495
496static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) 496static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
497{ 497{
498 struct net *net; 498 struct net *net;
499 499
500 net = (struct net *)table->extra2; 500 net = (struct net *)table->extra2;
501 if (p == &net->ipv6.devconf_dflt->forwarding) 501 if (p == &net->ipv6.devconf_dflt->forwarding)
502 return; 502 return 0;
503
504 if (!rtnl_trylock())
505 return -ERESTARTSYS;
503 506
504 rtnl_lock();
505 if (p == &net->ipv6.devconf_all->forwarding) { 507 if (p == &net->ipv6.devconf_all->forwarding) {
506 __s32 newf = net->ipv6.devconf_all->forwarding; 508 __s32 newf = net->ipv6.devconf_all->forwarding;
507 net->ipv6.devconf_dflt->forwarding = newf; 509 net->ipv6.devconf_dflt->forwarding = newf;
@@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
512 514
513 if (*p) 515 if (*p)
514 rt6_purge_dflt_routers(net); 516 rt6_purge_dflt_routers(net);
517 return 1;
515} 518}
516#endif 519#endif
517 520
@@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2608 2611
2609 ASSERT_RTNL(); 2612 ASSERT_RTNL();
2610 2613
2611 if ((dev->flags & IFF_LOOPBACK) && how == 1)
2612 how = 0;
2613
2614 rt6_ifdown(net, dev); 2614 rt6_ifdown(net, dev);
2615 neigh_ifdown(&nd_tbl, dev); 2615 neigh_ifdown(&nd_tbl, dev);
2616 2616
@@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
3983 ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 3983 ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
3984 3984
3985 if (write) 3985 if (write)
3986 addrconf_fixup_forwarding(ctl, valp, val); 3986 ret = addrconf_fixup_forwarding(ctl, valp, val);
3987 return ret; 3987 return ret;
3988} 3988}
3989 3989
@@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
4019 } 4019 }
4020 4020
4021 *valp = new; 4021 *valp = new;
4022 addrconf_fixup_forwarding(table, valp, val); 4022 return addrconf_fixup_forwarding(table, valp, val);
4023 return 1;
4024} 4023}
4025 4024
4026static struct addrconf_sysctl_table 4025static struct addrconf_sysctl_table
@@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4446 4445
4447EXPORT_SYMBOL(unregister_inet6addr_notifier); 4446EXPORT_SYMBOL(unregister_inet6addr_notifier);
4448 4447
4449static void addrconf_net_exit(struct net *net)
4450{
4451 struct net_device *dev;
4452
4453 rtnl_lock();
4454 /* clean dev list */
4455 for_each_netdev(net, dev) {
4456 if (__in6_dev_get(dev) == NULL)
4457 continue;
4458 addrconf_ifdown(dev, 1);
4459 }
4460 addrconf_ifdown(net->loopback_dev, 2);
4461 rtnl_unlock();
4462}
4463
4464static struct pernet_operations addrconf_net_ops = {
4465 .exit = addrconf_net_exit,
4466};
4467
4468/* 4448/*
4469 * Init / cleanup code 4449 * Init / cleanup code
4470 */ 4450 */
@@ -4506,10 +4486,6 @@ int __init addrconf_init(void)
4506 if (err) 4486 if (err)
4507 goto errlo; 4487 goto errlo;
4508 4488
4509 err = register_pernet_device(&addrconf_net_ops);
4510 if (err)
4511 return err;
4512
4513 register_netdevice_notifier(&ipv6_dev_notf); 4489 register_netdevice_notifier(&ipv6_dev_notf);
4514 4490
4515 addrconf_verify(0); 4491 addrconf_verify(0);
@@ -4539,15 +4515,22 @@ errlo:
4539void addrconf_cleanup(void) 4515void addrconf_cleanup(void)
4540{ 4516{
4541 struct inet6_ifaddr *ifa; 4517 struct inet6_ifaddr *ifa;
4518 struct net_device *dev;
4542 int i; 4519 int i;
4543 4520
4544 unregister_netdevice_notifier(&ipv6_dev_notf); 4521 unregister_netdevice_notifier(&ipv6_dev_notf);
4545 unregister_pernet_device(&addrconf_net_ops);
4546
4547 unregister_pernet_subsys(&addrconf_ops); 4522 unregister_pernet_subsys(&addrconf_ops);
4548 4523
4549 rtnl_lock(); 4524 rtnl_lock();
4550 4525
4526 /* clean dev list */
4527 for_each_netdev(&init_net, dev) {
4528 if (__in6_dev_get(dev) == NULL)
4529 continue;
4530 addrconf_ifdown(dev, 1);
4531 }
4532 addrconf_ifdown(init_net.loopback_dev, 2);
4533
4551 /* 4534 /*
4552 * Check hash table. 4535 * Check hash table.
4553 */ 4536 */
@@ -4568,6 +4551,4 @@ void addrconf_cleanup(void)
4568 4551
4569 del_timer(&addr_chk_timer); 4552 del_timer(&addr_chk_timer);
4570 rtnl_unlock(); 4553 rtnl_unlock();
4571
4572 unregister_pernet_subsys(&addrconf_net_ops);
4573} 4554}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a8..da944eca2ca6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,6 +72,10 @@ MODULE_LICENSE("GPL");
72static struct list_head inetsw6[SOCK_MAX]; 72static struct list_head inetsw6[SOCK_MAX];
73static DEFINE_SPINLOCK(inetsw6_lock); 73static DEFINE_SPINLOCK(inetsw6_lock);
74 74
75static int disable_ipv6 = 0;
76module_param_named(disable, disable_ipv6, int, 0);
77MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");
78
75static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 79static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
76{ 80{
77 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); 81 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@@ -991,10 +995,21 @@ static int __init inet6_init(void)
991{ 995{
992 struct sk_buff *dummy_skb; 996 struct sk_buff *dummy_skb;
993 struct list_head *r; 997 struct list_head *r;
994 int err; 998 int err = 0;
995 999
996 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); 1000 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
997 1001
1002 /* Register the socket-side information for inet6_create. */
1003 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
1004 INIT_LIST_HEAD(r);
1005
1006 if (disable_ipv6) {
1007 printk(KERN_INFO
1008 "IPv6: Loaded, but administratively disabled, "
1009 "reboot required to enable\n");
1010 goto out;
1011 }
1012
998 err = proto_register(&tcpv6_prot, 1); 1013 err = proto_register(&tcpv6_prot, 1);
999 if (err) 1014 if (err)
1000 goto out; 1015 goto out;
@@ -1012,10 +1027,6 @@ static int __init inet6_init(void)
1012 goto out_unregister_udplite_proto; 1027 goto out_unregister_udplite_proto;
1013 1028
1014 1029
1015 /* Register the socket-side information for inet6_create. */
1016 for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
1017 INIT_LIST_HEAD(r);
1018
1019 /* We MUST register RAW sockets before we create the ICMP6, 1030 /* We MUST register RAW sockets before we create the ICMP6,
1020 * IGMP6, or NDISC control sockets. 1031 * IGMP6, or NDISC control sockets.
1021 */ 1032 */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eb895c7a2a9..3ae3cb816563 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1084,6 +1084,13 @@ out:
1084 return 0; 1084 return 0;
1085} 1085}
1086 1086
1087/**
1088 * netlink_set_err - report error to broadcast listeners
1089 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1090 * @pid: the PID of a process that we want to skip (if any)
1091 * @group: the broadcast group that will notice the error
1092 * @code: error code, must be negative (as usual in kernelspace)
1093 */
1087void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1094void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1088{ 1095{
1089 struct netlink_set_err_data info; 1096 struct netlink_set_err_data info;
@@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1093 info.exclude_sk = ssk; 1100 info.exclude_sk = ssk;
1094 info.pid = pid; 1101 info.pid = pid;
1095 info.group = group; 1102 info.group = group;
1096 info.code = code; 1103 /* sk->sk_err wants a positive error value */
1104 info.code = -code;
1097 1105
1098 read_lock(&nl_table_lock); 1106 read_lock(&nl_table_lock);
1099 1107
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5c72a116b1a4..f8f047b61245 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -183,13 +183,6 @@ override:
183 if (R_tab == NULL) 183 if (R_tab == NULL)
184 goto failure; 184 goto failure;
185 185
186 if (!est && (ret == ACT_P_CREATED ||
187 !gen_estimator_active(&police->tcf_bstats,
188 &police->tcf_rate_est))) {
189 err = -EINVAL;
190 goto failure;
191 }
192
193 if (parm->peakrate.rate) { 186 if (parm->peakrate.rate) {
194 P_tab = qdisc_get_rtab(&parm->peakrate, 187 P_tab = qdisc_get_rtab(&parm->peakrate,
195 tb[TCA_POLICE_PEAKRATE]); 188 tb[TCA_POLICE_PEAKRATE]);
@@ -205,6 +198,12 @@ override:
205 &police->tcf_lock, est); 198 &police->tcf_lock, est);
206 if (err) 199 if (err)
207 goto failure_unlock; 200 goto failure_unlock;
201 } else if (tb[TCA_POLICE_AVRATE] &&
202 (ret == ACT_P_CREATED ||
203 !gen_estimator_active(&police->tcf_bstats,
204 &police->tcf_rate_est))) {
205 err = -EINVAL;
206 goto failure_unlock;
208 } 207 }
209 208
210 /* No failure allowed after this point */ 209 /* No failure allowed after this point */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b78e3be69013..c4986d0f7419 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
717static int sctp_ctl_sock_init(void) 717static int sctp_ctl_sock_init(void)
718{ 718{
719 int err; 719 int err;
720 sa_family_t family; 720 sa_family_t family = PF_INET;
721 721
722 if (sctp_get_pf_specific(PF_INET6)) 722 if (sctp_get_pf_specific(PF_INET6))
723 family = PF_INET6; 723 family = PF_INET6;
724 else
725 family = PF_INET;
726 724
727 err = inet_ctl_sock_create(&sctp_ctl_sock, family, 725 err = inet_ctl_sock_create(&sctp_ctl_sock, family,
728 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); 726 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
727
728 /* If IPv6 socket could not be created, try the IPv4 socket */
729 if (err < 0 && family == PF_INET6)
730 err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
731 SOCK_SEQPACKET, IPPROTO_SCTP,
732 &init_net);
733
729 if (err < 0) { 734 if (err < 0) {
730 printk(KERN_ERR 735 printk(KERN_ERR
731 "SCTP: Failed to create the SCTP control socket.\n"); 736 "SCTP: Failed to create the SCTP control socket.\n");
@@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void)
1322out: 1327out:
1323 return status; 1328 return status;
1324err_v6_add_protocol: 1329err_v6_add_protocol:
1325 sctp_v6_del_protocol();
1326err_add_protocol:
1327 sctp_v4_del_protocol(); 1330 sctp_v4_del_protocol();
1331err_add_protocol:
1328 inet_ctl_sock_destroy(sctp_ctl_sock); 1332 inet_ctl_sock_destroy(sctp_ctl_sock);
1329err_ctl_sock_init: 1333err_ctl_sock_init:
1330 sctp_v6_protosw_exit(); 1334 sctp_v6_protosw_exit();
@@ -1335,7 +1339,6 @@ err_protosw_init:
1335 sctp_v4_pf_exit(); 1339 sctp_v4_pf_exit();
1336 sctp_v6_pf_exit(); 1340 sctp_v6_pf_exit();
1337 sctp_sysctl_unregister(); 1341 sctp_sysctl_unregister();
1338 list_del(&sctp_af_inet.list);
1339 free_pages((unsigned long)sctp_port_hashtable, 1342 free_pages((unsigned long)sctp_port_hashtable,
1340 get_order(sctp_port_hashsize * 1343 get_order(sctp_port_hashsize *
1341 sizeof(struct sctp_bind_hashbucket))); 1344 sizeof(struct sctp_bind_hashbucket)));
@@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void)
1383 sctp_v4_pf_exit(); 1386 sctp_v4_pf_exit();
1384 1387
1385 sctp_sysctl_unregister(); 1388 sctp_sysctl_unregister();
1386 list_del(&sctp_af_inet.list);
1387 1389
1388 free_pages((unsigned long)sctp_assoc_hashtable, 1390 free_pages((unsigned long)sctp_assoc_hashtable,
1389 get_order(sctp_assoc_hashsize * 1391 get_order(sctp_assoc_hashsize *
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e1d6076b4f59..b5495aecab60 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
787 struct sctp_association *asoc, 787 struct sctp_association *asoc,
788 struct sctp_chunk *chunk) 788 struct sctp_chunk *chunk)
789{ 789{
790 struct sctp_operr_chunk *operr_chunk;
791 struct sctp_errhdr *err_hdr; 790 struct sctp_errhdr *err_hdr;
791 struct sctp_ulpevent *ev;
792 792
793 operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; 793 while (chunk->chunk_end > chunk->skb->data) {
794 err_hdr = &operr_chunk->err_hdr; 794 err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
795 795
796 switch (err_hdr->cause) { 796 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
797 case SCTP_ERROR_UNKNOWN_CHUNK: 797 GFP_ATOMIC);
798 { 798 if (!ev)
799 struct sctp_chunkhdr *unk_chunk_hdr; 799 return;
800 800
801 unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; 801 sctp_ulpq_tail_event(&asoc->ulpq, ev);
802 switch (unk_chunk_hdr->type) { 802
803 /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an 803 switch (err_hdr->cause) {
804 * ERROR chunk reporting that it did not recognized the ASCONF 804 case SCTP_ERROR_UNKNOWN_CHUNK:
805 * chunk type, the sender of the ASCONF MUST NOT send any 805 {
806 * further ASCONF chunks and MUST stop its T-4 timer. 806 sctp_chunkhdr_t *unk_chunk_hdr;
807 */ 807
808 case SCTP_CID_ASCONF: 808 unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
809 asoc->peer.asconf_capable = 0; 809 switch (unk_chunk_hdr->type) {
810 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, 810 /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
811 * an ERROR chunk reporting that it did not recognized
812 * the ASCONF chunk type, the sender of the ASCONF MUST
813 * NOT send any further ASCONF chunks and MUST stop its
814 * T-4 timer.
815 */
816 case SCTP_CID_ASCONF:
817 if (asoc->peer.asconf_capable == 0)
818 break;
819
820 asoc->peer.asconf_capable = 0;
821 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
811 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 822 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
823 break;
824 default:
825 break;
826 }
812 break; 827 break;
828 }
813 default: 829 default:
814 break; 830 break;
815 } 831 }
816 break;
817 }
818 default:
819 break;
820 } 832 }
821} 833}
822 834
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3a0cd075914f..f88dfded0e3a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3163 sctp_cmd_seq_t *commands) 3163 sctp_cmd_seq_t *commands)
3164{ 3164{
3165 struct sctp_chunk *chunk = arg; 3165 struct sctp_chunk *chunk = arg;
3166 struct sctp_ulpevent *ev;
3167 3166
3168 if (!sctp_vtag_verify(chunk, asoc)) 3167 if (!sctp_vtag_verify(chunk, asoc))
3169 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3168 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3173 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3172 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3174 commands); 3173 commands);
3175 3174
3176 while (chunk->chunk_end > chunk->skb->data) { 3175 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3177 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, 3176 SCTP_CHUNK(chunk));
3178 GFP_ATOMIC);
3179 if (!ev)
3180 goto nomem;
3181 3177
3182 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
3183 SCTP_ULPEVENT(ev));
3184 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3185 SCTP_CHUNK(chunk));
3186 }
3187 return SCTP_DISPOSITION_CONSUME; 3178 return SCTP_DISPOSITION_CONSUME;
3188
3189nomem:
3190 return SCTP_DISPOSITION_NOMEM;
3191} 3179}
3192 3180
3193/* 3181/*
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 85c9034c59b2..bd0a16c3de5e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
380 380
381 freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; 381 freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
382 382
383 if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff) 383 if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
384 freq_range->max_bandwidth_khz > freq_diff)
384 return false; 385 return false;
385 386
386 return true; 387 return true;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0278bc083044..e7ded1326b0f 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1498,58 +1498,31 @@ static int smack_socket_post_create(struct socket *sock, int family,
  * looks for host based access restrictions
  *
  * This version will only be appropriate for really small
- * sets of single label hosts. Because of the masking
- * it cannot shortcut out on the first match. There are
- * numerious ways to address the problem, but none of them
- * have been applied here.
+ * sets of single label hosts.
  *
  * Returns the label of the far end or NULL if it's not special.
  */
 static char *smack_host_label(struct sockaddr_in *sip)
 {
 	struct smk_netlbladdr *snp;
-	char *bestlabel = NULL;
 	struct in_addr *siap = &sip->sin_addr;
-	struct in_addr *liap;
-	struct in_addr *miap;
-	struct in_addr bestmask;
 
 	if (siap->s_addr == 0)
 		return NULL;
 
-	bestmask.s_addr = 0;
-
 	for (snp = smack_netlbladdrs; snp != NULL; snp = snp->smk_next) {
-		liap = &snp->smk_host.sin_addr;
-		miap = &snp->smk_mask;
-		/*
-		 * If the addresses match after applying the list entry mask
-		 * the entry matches the address. If it doesn't move along to
-		 * the next entry.
-		 */
-		if ((liap->s_addr & miap->s_addr) !=
-		    (siap->s_addr & miap->s_addr))
-			continue;
 		/*
-		 * If the list entry mask identifies a single address
-		 * it can't get any more specific.
+		 * we break after finding the first match because
+		 * the list is sorted from longest to shortest mask
+		 * so we have found the most specific match
 		 */
-		if (miap->s_addr == 0xffffffff)
+		if ((&snp->smk_host.sin_addr)->s_addr ==
+		    (siap->s_addr & (&snp->smk_mask)->s_addr)) {
 			return snp->smk_label;
-		/*
-		 * If the list entry mask is less specific than the best
-		 * already found this entry is uninteresting.
-		 */
-		if ((miap->s_addr | bestmask.s_addr) == bestmask.s_addr)
-			continue;
-		/*
-		 * This is better than any entry found so far.
-		 */
-		bestmask.s_addr = miap->s_addr;
-		bestlabel = snp->smk_label;
+		}
 	}
 
-	return bestlabel;
+	return NULL;
 }
 
 /**
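The shortcut relies on an invariant established later in this series: smk_netlbladdr_insert() keeps smack_netlbladdrs ordered from longest netmask to shortest, so the first entry whose masked address matches is already the most specific one. A user-space sketch of that first-match rule; the struct and helper here are illustrative stand-ins, not the kernel's types:

#include <stdint.h>
#include <stddef.h>

/* illustrative stand-in for struct smk_netlbladdr */
struct host_rule {
	uint32_t addr;		/* network-order address */
	uint32_t mask;		/* network-order netmask */
	const char *label;
	struct host_rule *next;
};

/* assumes the list is sorted longest mask first, so the
 * first hit is the most specific match */
static const char *match_label(const struct host_rule *list, uint32_t addr)
{
	const struct host_rule *r;

	for (r = list; r != NULL; r = r->next)
		if (r->addr == (addr & r->mask))
			return r->label;
	return NULL;
}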
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 8e42800878f4..51f0efc50dab 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -650,10 +650,6 @@ static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
 
 	return skp;
 }
-/*
-#define BEMASK	0x80000000
-*/
-#define BEMASK	0x00000001
 #define BEBITS	(sizeof(__be32) * 8)
 
 /*
@@ -663,12 +659,10 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
 {
 	struct smk_netlbladdr *skp = (struct smk_netlbladdr *) v;
 	unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr;
-	__be32 bebits;
-	int maskn = 0;
+	int maskn;
+	u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr);
 
-	for (bebits = BEMASK; bebits != 0; maskn++, bebits <<= 1)
-		if ((skp->smk_mask.s_addr & bebits) == 0)
-			break;
+	for (maskn = 0; temp_mask; temp_mask <<= 1, maskn++);
 
 	seq_printf(s, "%u.%u.%u.%u/%d %s\n",
 		hp[0], hp[1], hp[2], hp[3], maskn, skp->smk_label);
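The new seq_show loop converts the netmask to host order once with be32_to_cpu() and counts left shifts until the value drains to zero, which for a contiguous mask equals the CIDR prefix length and no longer needs an endian-dependent BEMASK constant. A user-space equivalent, with ntohl() standing in for be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(), user-space stand-in for be32_to_cpu() */

/* count the leading one bits of a contiguous netmask, e.g.
 * 255.255.255.0 -> 24; a non-contiguous mask is miscounted,
 * just as in the kernel loop */
static int mask_to_prefix(uint32_t mask_be)
{
	uint32_t m = ntohl(mask_be);
	int n;

	for (n = 0; m; m <<= 1, n++)
		;
	return n;
}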
@@ -702,6 +696,42 @@ static int smk_open_netlbladdr(struct inode *inode, struct file *file)
 }
 
 /**
+ * smk_netlbladdr_insert
+ * @new : netlabel to insert
+ *
+ * This helper insert netlabel in the smack_netlbladdrs list
+ * sorted by netmask length (longest to smallest)
+ */
+static void smk_netlbladdr_insert(struct smk_netlbladdr *new)
+{
+	struct smk_netlbladdr *m;
+
+	if (smack_netlbladdrs == NULL) {
+		smack_netlbladdrs = new;
+		return;
+	}
+
+	/* the comparison '>' is a bit hacky, but works */
+	if (new->smk_mask.s_addr > smack_netlbladdrs->smk_mask.s_addr) {
+		new->smk_next = smack_netlbladdrs;
+		smack_netlbladdrs = new;
+		return;
+	}
+	for (m = smack_netlbladdrs; m != NULL; m = m->smk_next) {
+		if (m->smk_next == NULL) {
+			m->smk_next = new;
+			return;
+		}
+		if (new->smk_mask.s_addr > m->smk_next->smk_mask.s_addr) {
+			new->smk_next = m->smk_next;
+			m->smk_next = new;
+			return;
+		}
+	}
+}
+
+
+/**
  * smk_write_netlbladdr - write() for /smack/netlabel
  * @filp: file pointer, not actually used
  * @buf: where to get the data from
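The "'>' is a bit hacky" comparison is sound for contiguous netmasks: a longer prefix is a bitwise superset of a shorter one, and a superset of set bits stays numerically larger under any fixed bit permutation, so comparing the raw network-order words orders the list by prefix length on either endianness. A small demonstration of the ordering property (user-space, illustrative):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	/* contiguous masks as raw network-order words */
	uint32_t m24 = htonl(0xffffff00);	/* /24 */
	uint32_t m16 = htonl(0xffff0000);	/* /16 */
	uint32_t m8  = htonl(0xff000000);	/* /8  */

	/* a longer prefix compares greater whatever the host
	 * byte order, so plain '>' keeps the list longest-first */
	printf("%d %d\n", m24 > m16, m16 > m8);	/* prints "1 1" */
	return 0;
}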
@@ -724,8 +754,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
 	struct netlbl_audit audit_info;
 	struct in_addr mask;
 	unsigned int m;
-	__be32 bebits = BEMASK;
+	u32 mask_bits = (1<<31);
 	__be32 nsa;
+	u32 temp_mask;
 
 	/*
 	 * Must have privilege.
@@ -761,10 +792,13 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
 	if (sp == NULL)
 		return -EINVAL;
 
-	for (mask.s_addr = 0; m > 0; m--) {
-		mask.s_addr |= bebits;
-		bebits <<= 1;
+	for (temp_mask = 0; m > 0; m--) {
+		temp_mask |= mask_bits;
+		mask_bits >>= 1;
 	}
+	mask.s_addr = cpu_to_be32(temp_mask);
+
+	newname.sin_addr.s_addr &= mask.s_addr;
 	/*
 	 * Only allow one writer at a time. Writes should be
 	 * quite rare and small in any case.
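Building the mask in host order from the top bit down and converting once with cpu_to_be32() removes the old endianness dependence, and the added masking of newname.sin_addr guarantees that a stored prefix carries no stray host bits. A hedged user-space equivalent of those few lines, with htonl() standing in for cpu_to_be32():

#include <stdint.h>
#include <arpa/inet.h>	/* htonl(), user-space stand-in for cpu_to_be32() */

/* build a network-order mask for an m-bit prefix, mirroring
 * the rewritten loop in smk_write_netlbladdr() */
static uint32_t prefix_to_mask(unsigned int m)
{
	uint32_t mask_bits = 1u << 31;	/* start at the top bit */
	uint32_t temp_mask = 0;

	for (; m > 0; m--) {
		temp_mask |= mask_bits;
		mask_bits >>= 1;
	}
	return htonl(temp_mask);
}

With the added `newname.sin_addr.s_addr &= mask.s_addr;` normalization, a write of "10.0.0.77/24 foo" is recorded as 10.0.0.0/24, so the duplicate-prefix lookup that follows behaves consistently.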
@@ -772,6 +806,7 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
 	mutex_lock(&smk_netlbladdr_lock);
 
 	nsa = newname.sin_addr.s_addr;
+	/* try to find if the prefix is already in the list */
 	for (skp = smack_netlbladdrs; skp != NULL; skp = skp->smk_next)
 		if (skp->smk_host.sin_addr.s_addr == nsa &&
 		    skp->smk_mask.s_addr == mask.s_addr)
@@ -787,9 +822,8 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
 			rc = 0;
 			skp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr;
 			skp->smk_mask.s_addr = mask.s_addr;
-			skp->smk_next = smack_netlbladdrs;
 			skp->smk_label = sp;
-			smack_netlbladdrs = skp;
+			smk_netlbladdr_insert(skp);
 		}
 	} else {
 		rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 3bc427645da8..6094344fb223 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1207,7 +1207,7 @@ static const char *slave_vols[] = {
 	"LFE Playback Volume",
 	"Side Playback Volume",
 	"Headphone Playback Volume",
-	"Headphone Playback Volume",
+	"Headphone2 Playback Volume",
 	"Speaker Playback Volume",
 	"External Speaker Playback Volume",
 	"Speaker2 Playback Volume",
@@ -1221,7 +1221,7 @@ static const char *slave_sws[] = {
 	"LFE Playback Switch",
 	"Side Playback Switch",
 	"Headphone Playback Switch",
-	"Headphone Playback Switch",
+	"Headphone2 Playback Switch",
 	"Speaker Playback Switch",
 	"External Speaker Playback Switch",
 	"Speaker2 Playback Switch",
@@ -3516,6 +3516,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
 	if (! spec->autocfg.line_outs)
 		return 0; /* can't find valid pin config */
 
+#if 0 /* FIXME: temporarily disabled */
 	/* If we have no real line-out pin and multiple hp-outs, HPs should
 	 * be set up as multi-channel outputs.
 	 */
@@ -3535,6 +3536,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
 		spec->autocfg.line_out_type = AUTO_PIN_HP_OUT;
 		spec->autocfg.hp_outs = 0;
 	}
+#endif /* FIXME: temporarily disabled */
 	if (spec->autocfg.mono_out_pin) {
 		int dir = get_wcaps(codec, spec->autocfg.mono_out_pin) &
 			  (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP);
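The region is parked with #if 0 ... #endif rather than being commented out because the disabled code contains block comments of its own, and C block comments do not nest; the preprocessor drops the whole range while the FIXME markers keep it easy to find and re-enable. A minimal illustration of the idiom (the function call is hypothetical):

#include <stdio.h>

int main(void)
{
#if 0	/* FIXME: temporarily disabled */
	/* this region may contain block comments, which is why it
	 * cannot simply be wrapped in one more block comment:
	 * C block comments do not nest */
	printf("multi-channel HP setup\n");
#endif	/* FIXME: temporarily disabled */
	printf("the rest of the parser still runs\n");
	return 0;
}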