author    Randy Dunlap <randy.dunlap@oracle.com>  2011-05-20 12:10:04 -0400
committer Randy Dunlap <randy.dunlap@oracle.com>  2011-05-20 12:10:04 -0400
commit    2f3e4af471e38e0658e701973238ae4b5e50fcd6
tree      fbfc99c0d975e38ff80f4ff3239a9fc0567b8a4d
parent    61516587513c84ac26e68e3ab008dc6e965d0378
parent    d410fa4ef99112386de5f218dd7df7b4fca910b4

Merge branch 'docs-security' into docs-move
-rw-r--r--Documentation/00-INDEX6
-rw-r--r--Documentation/filesystems/nfs/idmapper.txt4
-rw-r--r--Documentation/networking/dns_resolver.txt4
-rw-r--r--Documentation/security/00-INDEX18
-rw-r--r--Documentation/security/SELinux.txt (renamed from Documentation/SELinux.txt)0
-rw-r--r--Documentation/security/Smack.txt (renamed from Documentation/Smack.txt)0
-rw-r--r--Documentation/security/apparmor.txt (renamed from Documentation/apparmor.txt)0
-rw-r--r--Documentation/security/credentials.txt (renamed from Documentation/credentials.txt)2
-rw-r--r--Documentation/security/keys-request-key.txt (renamed from Documentation/keys-request-key.txt)4
-rw-r--r--Documentation/security/keys-trusted-encrypted.txt (renamed from Documentation/keys-trusted-encrypted.txt)0
-rw-r--r--Documentation/security/keys.txt (renamed from Documentation/keys.txt)4
-rw-r--r--Documentation/security/tomoyo.txt (renamed from Documentation/tomoyo.txt)0
-rw-r--r--MAINTAINERS86
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/unistd.h6
-rw-r--r--arch/alpha/kernel/systbls.S12
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/compressed/head.S35
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.in1
-rw-r--r--arch/arm/include/asm/system.h2
-rw-r--r--arch/arm/kernel/ptrace.c8
-rw-r--r--arch/arm/kernel/signal.c90
-rw-r--r--arch/arm/mach-omap2/clkt34xx_dpll3m2.c1
-rw-r--r--arch/arm/mach-realview/include/mach/barriers.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/barriers.h2
-rw-r--r--arch/arm/mm/init.c16
-rw-r--r--arch/arm/plat-omap/iommu.c2
-rw-r--r--arch/mips/Kconfig5
-rw-r--r--arch/mips/alchemy/devboards/db1x00/board_setup.c61
-rw-r--r--arch/mips/alchemy/xxs1500/init.c5
-rw-r--r--arch/mips/ar7/gpio.c4
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c2
-rw-r--r--arch/mips/cavium-octeon/Kconfig15
-rw-r--r--arch/mips/include/asm/cache.h2
-rw-r--r--arch/mips/include/asm/cevt-r4k.h3
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/include/asm/hugetlb.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h2
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/jz4740/dma.c4
-rw-r--r--arch/mips/jz4740/time.c2
-rw-r--r--arch/mips/jz4740/timer.c2
-rw-r--r--arch/mips/kernel/ftrace.c5
-rw-r--r--arch/mips/kernel/ptrace.c4
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/traps.c6
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/loongson/common/env.c5
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c4
-rw-r--r--arch/mips/mti-malta/malta-init.c14
-rw-r--r--arch/mips/mti-malta/malta-int.c3
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq_per.c2
-rw-r--r--arch/mips/power/hibernate.S2
-rw-r--r--arch/mips/rb532/gpio.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-time.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c3
-rw-r--r--arch/mips/sni/time.c4
-rw-r--r--arch/powerpc/kernel/ptrace.c12
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c7
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c7
-rw-r--r--arch/s390/include/asm/diag.h17
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/kernel/diag.c21
-rw-r--r--arch/s390/kernel/dis.c1
-rw-r--r--arch/s390/kernel/entry.S2
-rw-r--r--arch/s390/kernel/entry64.S2
-rw-r--r--arch/s390/mm/cmm.c2
-rw-r--r--arch/s390/oprofile/hwsampler.c14
-rw-r--r--arch/s390/oprofile/hwsampler.h4
-rw-r--r--arch/s390/oprofile/init.c8
-rw-r--r--arch/sh/kernel/ptrace_32.c4
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/pci_sabre.c5
-rw-r--r--arch/sparc/kernel/pci_schizo.c8
-rw-r--r--arch/sparc/kernel/pmc.c2
-rw-r--r--arch/sparc/kernel/smp_32.c10
-rw-r--r--arch/sparc/kernel/time_32.c2
-rw-r--r--arch/sparc/lib/checksum_32.S12
-rw-r--r--arch/um/os-Linux/util.c23
-rw-r--r--arch/x86/include/asm/apicdef.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h17
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h16
-rw-r--r--arch/x86/include/asm/x86_init.h12
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c48
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c87
-rw-r--r--arch/x86/kernel/kprobes.c5
-rw-r--r--arch/x86/kernel/ptrace.c36
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/mm/init.c24
-rw-r--r--arch/x86/platform/uv/tlb_uv.c92
-rw-r--r--arch/x86/xen/mmu.c138
-rw-r--r--block/blk-cgroup.c7
-rw-r--r--block/blk-cgroup.h3
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-throttle.c9
-rw-r--r--block/cfq-iosched.c11
-rw-r--r--drivers/ata/libahci.c21
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/atm/fore200e.c7
-rw-r--r--drivers/block/DAC960.c1
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/paride/pcd.c1
-rw-r--r--drivers/block/paride/pd.c1
-rw-r--r--drivers/block/paride/pf.c1
-rw-r--r--drivers/block/rbd.c177
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/ub.c1
-rw-r--r--drivers/block/xsysace.c1
-rw-r--r--drivers/cdrom/cdrom.c6
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/cdrom/viocd.c1
-rw-r--r--drivers/char/hw_random/n2-drv.c7
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c7
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c14
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/drm_mm.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c8
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c17
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/ni.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c6
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c9
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c13
-rw-r--r--drivers/leds/leds-lm3530.c1
-rw-r--r--drivers/media/video/cx88/cx88-input.c2
-rw-r--r--drivers/media/video/soc_camera.c48
-rw-r--r--drivers/media/video/v4l2-device.c5
-rw-r--r--drivers/media/video/v4l2-subdev.c14
-rw-r--r--drivers/message/i2o/i2o_block.c1
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/omap-usb-host.c8
-rw-r--r--drivers/mfd/twl4030-power.c3
-rw-r--r--drivers/mmc/core/host.c9
-rw-r--r--drivers/mmc/host/sdhci-of-core.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c7
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/arm/etherh.c4
-rw-r--r--drivers/net/benet/be.h2
-rw-r--r--drivers/net/benet/be_cmds.c2
-rw-r--r--drivers/net/benet/be_main.c18
-rw-r--r--drivers/net/bonding/bond_3ad.h10
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c7
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/ehea/ehea_ethtool.c21
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c9
-rw-r--r--drivers/net/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/hydra.c14
-rw-r--r--drivers/net/ne-h8300.c16
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c23
-rw-r--r--drivers/net/sfc/mcdi.c49
-rw-r--r--drivers/net/sfc/nic.c7
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/siena.c25
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/sunhme.c7
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/ipheth.c14
-rw-r--r--drivers/net/usb/usbnet.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c8
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c7
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h6
-rw-r--r--drivers/net/wireless/libertas/cmd.c6
-rw-r--r--drivers/net/zorro8390.c12
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c57
-rw-r--r--drivers/platform/x86/sony-laptop.c130
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c6
-rw-r--r--drivers/rapidio/switches/idt_gen2.c9
-rw-r--r--drivers/rapidio/switches/idtcps.c6
-rw-r--r--drivers/rapidio/switches/tsi57x.c6
-rw-r--r--drivers/rtc/rtc-davinci.c5
-rw-r--r--drivers/rtc/rtc-ds1286.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c5
-rw-r--r--drivers/rtc/rtc-m41t80.c5
-rw-r--r--drivers/rtc/rtc-max8925.c5
-rw-r--r--drivers/rtc/rtc-max8998.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c8
-rw-r--r--drivers/rtc/rtc-msm6242.c3
-rw-r--r--drivers/rtc/rtc-mxc.c19
-rw-r--r--drivers/rtc/rtc-pcap.c4
-rw-r--r--drivers/rtc/rtc-rp5c01.c5
-rw-r--r--drivers/rtc/rtc-s3c.c13
-rw-r--r--drivers/s390/block/dasd.c11
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/tape_block.c1
-rw-r--r--drivers/scsi/qlogicpti.c7
-rw-r--r--drivers/scsi/scsi_lib.c27
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/tty/serial/of_serial.c7
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c7
-rw-r--r--drivers/video/acornfb.c26
-rw-r--r--drivers/video/fbmem.c223
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c7
-rw-r--r--fs/block_dev.c27
-rw-r--r--fs/btrfs/acl.c5
-rw-r--r--fs/btrfs/extent-tree.c37
-rw-r--r--fs/btrfs/ioctl.c24
-rw-r--r--fs/ceph/caps.c16
-rw-r--r--fs/ceph/inode.c1
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/cifs_unicode.c14
-rw-r--r--fs/cifs/connect.c125
-rw-r--r--fs/cifs/sess.c19
-rw-r--r--fs/configfs/dir.c39
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/hpfs/Kconfig1
-rw-r--r--fs/hpfs/alloc.c118
-rw-r--r--fs/hpfs/anode.c138
-rw-r--r--fs/hpfs/buffer.c24
-rw-r--r--fs/hpfs/dir.c22
-rw-r--r--fs/hpfs/dnode.c174
-rw-r--r--fs/hpfs/ea.c136
-rw-r--r--fs/hpfs/file.c31
-rw-r--r--fs/hpfs/hpfs.h439
-rw-r--r--fs/hpfs/hpfs_fn.h80
-rw-r--r--fs/hpfs/inode.c47
-rw-r--r--fs/hpfs/map.c56
-rw-r--r--fs/hpfs/name.c33
-rw-r--r--fs/hpfs/namei.c106
-rw-r--r--fs/hpfs/super.c118
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/nfs4filelayout.c27
-rw-r--r--fs/nfs/nfs4filelayout.h2
-rw-r--r--fs/nfs/nfs4filelayoutdev.c34
-rw-r--r--fs/nfs/nfs4proc.c6
-rw-r--r--fs/nfs/pnfs.c34
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/read.c4
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nilfs2/alloc.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c61
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c3
-rw-r--r--fs/ocfs2/file.c12
-rw-r--r--fs/ocfs2/journal.c3
-rw-r--r--fs/partitions/efi.c6
-rw-r--r--fs/proc/task_mmu.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c1
-rw-r--r--fs/xfs/xfs_trans_ail.c47
-rw-r--r--include/drm/drm_fb_helper.h2
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/linux/bootmem.h2
-rw-r--r--include/linux/capability.h13
-rw-r--r--include/linux/cred.h12
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/ftrace_event.h1
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/key.h2
-rw-r--r--include/linux/mm.h24
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/of_device.h8
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/ptrace.h13
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/net/inet_ecn.h16
-rw-r--r--include/net/ip_vs.h17
-rw-r--r--include/net/llc_pdu.h8
-rw-r--r--include/net/xfrm.h3
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/trace/events/gfpflags.h6
-rw-r--r--init/Kconfig1
-rw-r--r--kernel/capability.c12
-rw-r--r--kernel/cred.c14
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/ptrace.c17
-rw-r--r--kernel/time/clocksource.c4
-rw-r--r--kernel/time/tick-broadcast.c12
-rw-r--r--kernel/trace/trace.c1
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/memory.c16
-rw-r--r--mm/mmap.c11
-rw-r--r--mm/page_alloc.c57
-rw-r--r--mm/page_cgroup.c2
-rw-r--r--mm/shmem.c149
-rw-r--r--mm/swap.c3
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/protocol.c1
-rw-r--r--net/9p/trans_common.c11
-rw-r--r--net/bluetooth/sco.c9
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/bridge/netfilter/ebtables.c64
-rw-r--r--net/core/dev.c32
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/ipv4/ip_fragment.c31
-rw-r--r--net/ipv4/tcp_cubic.c9
-rw-r--r--net/ipv4/xfrm4_output.c8
-rw-r--r--net/ipv4/xfrm4_state.c1
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c4
-rw-r--r--net/ipv6/xfrm6_output.c6
-rw-r--r--net/ipv6/xfrm6_state.c1
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c103
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c126
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c11
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c65
-rw-r--r--net/netfilter/nf_conntrack_netlink.c4
-rw-r--r--net/netfilter/x_tables.c4
-rw-r--r--net/netfilter/xt_DSCP.c2
-rw-r--r--net/netfilter/xt_conntrack.c5
-rw-r--r--net/xfrm/xfrm_policy.c14
-rw-r--r--net/xfrm/xfrm_replay.c3
-rw-r--r--scripts/selinux/README2
-rw-r--r--security/apparmor/match.c2
-rw-r--r--security/apparmor/policy_unpack.c4
-rw-r--r--security/keys/encrypted.c2
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/selinux/ss/policydb.c4
-rw-r--r--sound/soc/codecs/ssm2602.c10
-rw-r--r--sound/soc/codecs/uda134x.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/davinci/davinci-mcasp.c19
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c2
-rw-r--r--sound/soc/mid-x86/sst_platform.c6
-rw-r--r--sound/soc/samsung/goni_wm8994.c8
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--tools/perf/Makefile16
-rw-r--r--tools/perf/builtin-record.c2
-rw-r--r--tools/perf/builtin-test.c2
-rw-r--r--tools/perf/builtin-top.c8
-rw-r--r--tools/perf/util/evlist.c153
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/python.c2
373 files changed, 3665 insertions, 2437 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1b777b960492..1f89424c36a6 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -192,10 +192,6 @@ kernel-docs.txt
 	- listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
 	- summary listing of command line / boot prompt args for the kernel.
-keys-request-key.txt
-	- description of the kernel key request service.
-keys.txt
-	- description of the kernel key retention service.
 kobject.txt
 	- info of the kobject infrastructure of the Linux kernel.
 kprobes.txt
@@ -294,6 +290,8 @@ scheduler/
 	- directory with info on the scheduler.
 scsi/
 	- directory with info on Linux scsi support.
+security/
+	- directory that contains security-related info
 serial/
 	- directory with info on the low level serial API.
 serial-console.txt
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index b9b4192ea8b5..9c8fd6148656 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -47,8 +47,8 @@ request-key will find the first matching line and corresponding program. In
 this case, /some/other/program will handle all uid lookups and
 /usr/sbin/nfs.idmap will handle gid, user, and group lookups.
 
-See <file:Documentation/keys-request-keys.txt> for more information about the
-request-key function.
+See <file:Documentation/security/keys-request-keys.txt> for more information
+about the request-key function.
 
 
 =========
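For context on the request-key.conf matching described in the idmapper hunk above, the configuration lines in question would look roughly like the fragment below. This is a hypothetical illustration, not part of the patch: the conventional fields are operation, key type, description pattern, callout-info pattern, and the program with its arguments, and the %k/%d substitutions and 600-second timeout are examples only.

	create	id_resolver	uid:*	*	/some/other/program %k %d 600
	create	id_resolver	*	*	/usr/sbin/nfs.idmap %k %d 600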
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index 04ca06325b08..7f531ad83285 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -139,8 +139,8 @@ the key will be discarded and recreated when the data it holds has expired.
 dns_query() returns a copy of the value attached to the key, or an error if
 that is indicated instead.
 
-See <file:Documentation/keys-request-key.txt> for further information about
-request-key function.
+See <file:Documentation/security/keys-request-key.txt> for further
+information about request-key function.
 
 
 =========
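The dns_resolver hunk above refers to dns_query(); a minimal in-kernel caller would look roughly like the sketch below. It is an illustration only, not part of the patch, assuming the 2.6.39-era prototype from include/linux/dns_resolver.h; lookup_a_record() is a made-up helper name.

	#include <linux/dns_resolver.h>
	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical helper: resolve a hostname via the dns_resolver key type. */
	static int lookup_a_record(const char *hostname)
	{
		char *result = NULL;
		int len;

		/* NULL type asks for an address record; no options, expiry not wanted. */
		len = dns_query(NULL, hostname, strlen(hostname), NULL, &result, NULL);
		if (len < 0)
			return len;	/* error propagated from the key lookup */

		pr_info("%s resolved to %.*s\n", hostname, len, result);

		/* dns_query() hands back an allocated copy; the caller frees it. */
		kfree(result);
		return 0;
	}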
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
new file mode 100644
index 000000000000..19bc49439cac
--- /dev/null
+++ b/Documentation/security/00-INDEX
@@ -0,0 +1,18 @@
+00-INDEX
+	- this file.
+SELinux.txt
+	- how to get started with the SELinux security enhancement.
+Smack.txt
+	- documentation on the Smack Linux Security Module.
+apparmor.txt
+	- documentation on the AppArmor security extension.
+credentials.txt
+	- documentation about credentials in Linux.
+keys-request-key.txt
+	- description of the kernel key request service.
+keys-trusted-encrypted.txt
+	- info on the Trusted and Encrypted keys in the kernel key ring service.
+keys.txt
+	- description of the kernel key retention service.
+tomoyo.txt
+	- documentation on the TOMOYO Linux Security Module.
diff --git a/Documentation/SELinux.txt b/Documentation/security/SELinux.txt
index 07eae00f3314..07eae00f3314 100644
--- a/Documentation/SELinux.txt
+++ b/Documentation/security/SELinux.txt
diff --git a/Documentation/Smack.txt b/Documentation/security/Smack.txt
index e9dab41c0fe0..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/security/Smack.txt
diff --git a/Documentation/apparmor.txt b/Documentation/security/apparmor.txt
index 93c1fd7d0635..93c1fd7d0635 100644
--- a/Documentation/apparmor.txt
+++ b/Documentation/security/apparmor.txt
diff --git a/Documentation/credentials.txt b/Documentation/security/credentials.txt
index 995baf379c07..fc0366cbd7ce 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/security/credentials.txt
@@ -216,7 +216,7 @@ The Linux kernel supports the following types of credentials:
  When a process accesses a key, if not already present, it will normally be
  cached on one of these keyrings for future accesses to find.
 
- For more information on using keys, see Documentation/keys.txt.
+ For more information on using keys, see Documentation/security/keys.txt.
 
  (5) LSM
 
diff --git a/Documentation/keys-request-key.txt b/Documentation/security/keys-request-key.txt
index 69686ad12c66..51987bfecfed 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/security/keys-request-key.txt
@@ -3,8 +3,8 @@
 ===================
 
 The key request service is part of the key retention service (refer to
-Documentation/keys.txt). This document explains more fully how the requesting
-algorithm works.
+Documentation/security/keys.txt). This document explains more fully how
+the requesting algorithm works.
 
 The process starts by either the kernel requesting a service by calling
 request_key*():
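The keys-request-key hunk above is describing the request_key*() family; a sketch of a kernel-side caller is shown below for orientation. It is not part of the patch and rests on assumptions: the generic "user" key type, a made-up "widget:secret" description, and payload-locking details omitted.

	#include <linux/err.h>
	#include <linux/key.h>
	#include <keys/user-type.h>

	/* Hypothetical helper: obtain (or upcall for) a user-type key. */
	static int get_widget_key(void)
	{
		struct key *key;

		/*
		 * If no matching key is found on the caller's keyrings, this may
		 * upcall to /sbin/request-key, passing the callout string along.
		 */
		key = request_key(&key_type_user, "widget:secret", "opaque-callout-data");
		if (IS_ERR(key))
			return PTR_ERR(key);

		/* ... read the payload under the key's semaphore, then drop the ref ... */
		key_put(key);
		return 0;
	}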
diff --git a/Documentation/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 8fb79bc1ac4b..8fb79bc1ac4b 100644
--- a/Documentation/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
diff --git a/Documentation/keys.txt b/Documentation/security/keys.txt
index 6523a9e6f293..4d75931d2d79 100644
--- a/Documentation/keys.txt
+++ b/Documentation/security/keys.txt
@@ -434,7 +434,7 @@ The main syscalls are:
  /sbin/request-key will be invoked in an attempt to obtain a key. The
  callout_info string will be passed as an argument to the program.
 
- See also Documentation/keys-request-key.txt.
+ See also Documentation/security/keys-request-key.txt.
 
 
 The keyctl syscall functions are:
@@ -864,7 +864,7 @@ payload contents" for more information.
  If successful, the key will have been attached to the default keyring for
  implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
 
- See also Documentation/keys-request-key.txt.
+ See also Documentation/security/keys-request-key.txt.
 
 
 (*) To search for a key, passing auxiliary data to the upcaller, call:
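As a userspace counterpart to the KEYCTL_SET_REQKEY_KEYRING setting mentioned in the keys.txt hunk above, a process can pick the keyring on which implicitly requested keys are cached. Below is a rough sketch using the keyutils library (assumed available; build with -lkeyutils); it is not part of the patch.

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		/* Cache keys obtained through request-key upcalls on the session keyring. */
		long old = keyctl(KEYCTL_SET_REQKEY_KEYRING, KEY_REQKEY_DEFL_SESSION_KEYRING);

		if (old < 0) {
			perror("keyctl(KEYCTL_SET_REQKEY_KEYRING)");
			return 1;
		}
		printf("previous default destination: %ld\n", old);
		return 0;
	}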
diff --git a/Documentation/tomoyo.txt b/Documentation/security/tomoyo.txt
index 200a2d37cbc8..200a2d37cbc8 100644
--- a/Documentation/tomoyo.txt
+++ b/Documentation/security/tomoyo.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index aa9bbd1c0e2b..ec947424f702 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2813,38 +2813,19 @@ F: Documentation/gpio.txt
 F:	drivers/gpio/
 F:	include/linux/gpio*
 
+GRE DEMULTIPLEXER DRIVER
+M:	Dmitry Kozlov <xeb@mail.ru>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	net/ipv4/gre.c
+F:	include/net/gre.h
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:	Kristoffer Glembo <kristoffer@gaisler.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/greth*
 
-HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
-M:	Frank Seidel <frank@f-seidel.de>
-L:	platform-driver-x86@vger.kernel.org
-W:	http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
-S:	Maintained
-F:	drivers/platform/x86/hdaps.c
-
-HWPOISON MEMORY FAILURE HANDLING
-M:	Andi Kleen <andi@firstfloor.org>
-L:	linux-mm@kvack.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
-S:	Maintained
-F:	mm/memory-failure.c
-F:	mm/hwpoison-inject.c
-
-HYPERVISOR VIRTUAL CONSOLE DRIVER
-L:	linuxppc-dev@lists.ozlabs.org
-S:	Odd Fixes
-F:	drivers/tty/hvc/
-
-iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
-M:	Peter Jones <pjones@redhat.com>
-M:	Konrad Rzeszutek Wilk <konrad@kernel.org>
-S:	Maintained
-F:	drivers/firmware/iscsi_ibft*
-
 GSPCA FINEPIX SUBDRIVER
 M:	Frank Zago <frank@zago.net>
 L:	linux-media@vger.kernel.org
@@ -2895,6 +2876,26 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 F:	drivers/media/video/gspca/
 
+HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
+M:	Frank Seidel <frank@f-seidel.de>
+L:	platform-driver-x86@vger.kernel.org
+W:	http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
+S:	Maintained
+F:	drivers/platform/x86/hdaps.c
+
+HWPOISON MEMORY FAILURE HANDLING
+M:	Andi Kleen <andi@firstfloor.org>
+L:	linux-mm@kvack.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
+S:	Maintained
+F:	mm/memory-failure.c
+F:	mm/hwpoison-inject.c
+
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Odd Fixes
+F:	drivers/tty/hvc/
+
 HARDWARE MONITORING
 M:	Jean Delvare <khali@linux-fr.org>
 M:	Guenter Roeck <guenter.roeck@ericsson.com>
@@ -3478,6 +3479,12 @@ F: Documentation/isapnp.txt
 F:	drivers/pnp/isapnp/
 F:	include/linux/isapnp.h
 
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M:	Peter Jones <pjones@redhat.com>
+M:	Konrad Rzeszutek Wilk <konrad@kernel.org>
+S:	Maintained
+F:	drivers/firmware/iscsi_ibft*
+
 ISCSI
 M:	Mike Christie <michaelc@cs.wisc.edu>
 L:	open-iscsi@googlegroups.com
@@ -3698,7 +3705,7 @@ KEYS/KEYRINGS:
 M:	David Howells <dhowells@redhat.com>
 L:	keyrings@linux-nfs.org
 S:	Maintained
-F:	Documentation/keys.txt
+F:	Documentation/security/keys.txt
 F:	include/linux/key.h
 F:	include/linux/key-type.h
 F:	include/keys/
@@ -3710,7 +3717,7 @@ M: Mimi Zohar <zohar@us.ibm.com>
 L:	linux-security-module@vger.kernel.org
 L:	keyrings@linux-nfs.org
 S:	Supported
-F:	Documentation/keys-trusted-encrypted.txt
+F:	Documentation/security/keys-trusted-encrypted.txt
 F:	include/keys/trusted-type.h
 F:	security/keys/trusted.c
 F:	security/keys/trusted.h
@@ -3721,7 +3728,7 @@ M: David Safford <safford@watson.ibm.com>
 L:	linux-security-module@vger.kernel.org
 L:	keyrings@linux-nfs.org
 S:	Supported
-F:	Documentation/keys-trusted-encrypted.txt
+F:	Documentation/security/keys-trusted-encrypted.txt
 F:	include/keys/encrypted-type.h
 F:	security/keys/encrypted.c
 F:	security/keys/encrypted.h
@@ -4989,6 +4996,13 @@ F: Documentation/pps/
 F:	drivers/pps/
 F:	include/linux/pps*.h
 
+PPTP DRIVER
+M:	Dmitry Kozlov <xeb@mail.ru>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/pptp.c
+W:	http://sourceforge.net/projects/accel-pptp
+
 PREEMPTIBLE KERNEL
 M:	Robert Love <rml@tech9.net>
 L:	kpreempt-tech@lists.sourceforge.net
@@ -7024,20 +7038,6 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
 F:	drivers/tty/serial/zs.*
 
-GRE DEMULTIPLEXER DRIVER
-M:	Dmitry Kozlov <xeb@mail.ru>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	net/ipv4/gre.c
-F:	include/net/gre.h
-
-PPTP DRIVER
-M:	Dmitry Kozlov <xeb@mail.ru>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/pptp.c
-W:	http://sourceforge.net/projects/accel-pptp
-
 THE REST
 M:	Linus Torvalds <torvalds@linux-foundation.org>
 L:	linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 28820f7ddf0a..123d858dae03 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 058937bf5a77..b1834166922d 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -452,10 +452,14 @@
452#define __NR_fanotify_init 494 452#define __NR_fanotify_init 494
453#define __NR_fanotify_mark 495 453#define __NR_fanotify_mark 495
454#define __NR_prlimit64 496 454#define __NR_prlimit64 496
455#define __NR_name_to_handle_at 497
456#define __NR_open_by_handle_at 498
457#define __NR_clock_adjtime 499
458#define __NR_syncfs 500
455 459
456#ifdef __KERNEL__ 460#ifdef __KERNEL__
457 461
458#define NR_SYSCALLS 497 462#define NR_SYSCALLS 501
459 463
460#define __ARCH_WANT_IPC_PARSE_VERSION 464#define __ARCH_WANT_IPC_PARSE_VERSION
461#define __ARCH_WANT_OLD_READDIR 465#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index a6a1de9db16f..15f999d41c75 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -498,23 +498,27 @@ sys_call_table:
498 .quad sys_ni_syscall /* sys_timerfd */ 498 .quad sys_ni_syscall /* sys_timerfd */
499 .quad sys_eventfd 499 .quad sys_eventfd
500 .quad sys_recvmmsg 500 .quad sys_recvmmsg
501 .quad sys_fallocate /* 480 */ 501 .quad sys_fallocate /* 480 */
502 .quad sys_timerfd_create 502 .quad sys_timerfd_create
503 .quad sys_timerfd_settime 503 .quad sys_timerfd_settime
504 .quad sys_timerfd_gettime 504 .quad sys_timerfd_gettime
505 .quad sys_signalfd4 505 .quad sys_signalfd4
506 .quad sys_eventfd2 /* 485 */ 506 .quad sys_eventfd2 /* 485 */
507 .quad sys_epoll_create1 507 .quad sys_epoll_create1
508 .quad sys_dup3 508 .quad sys_dup3
509 .quad sys_pipe2 509 .quad sys_pipe2
510 .quad sys_inotify_init1 510 .quad sys_inotify_init1
511 .quad sys_preadv /* 490 */ 511 .quad sys_preadv /* 490 */
512 .quad sys_pwritev 512 .quad sys_pwritev
513 .quad sys_rt_tgsigqueueinfo 513 .quad sys_rt_tgsigqueueinfo
514 .quad sys_perf_event_open 514 .quad sys_perf_event_open
515 .quad sys_fanotify_init 515 .quad sys_fanotify_init
516 .quad sys_fanotify_mark /* 495 */ 516 .quad sys_fanotify_mark /* 495 */
517 .quad sys_prlimit64 517 .quad sys_prlimit64
518 .quad sys_name_to_handle_at
519 .quad sys_open_by_handle_at
520 .quad sys_clock_adjtime
521 .quad sys_syncfs /* 500 */
518 522
519 .size sys_call_table, . - sys_call_table 523 .size sys_call_table, . - sys_call_table
520 .type sys_call_table, @object 524 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 918e8e0b72ff..818e74ed45dc 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {
375 375
376static inline void register_rpcc_clocksource(long cycle_freq) 376static inline void register_rpcc_clocksource(long cycle_freq)
377{ 377{
378 clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); 378 clocksource_register_hz(&clocksource_rpcc, cycle_freq);
379 clocksource_register(&clocksource_rpcc);
380} 379}
381#else /* !CONFIG_SMP */ 380#else /* !CONFIG_SMP */
382static inline void register_rpcc_clocksource(long cycle_freq) 381static inline void register_rpcc_clocksource(long cycle_freq)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 8ebbb511c783..0c6852d93506 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT)
74ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) 74ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS)
75else 75else
76ZTEXTADDR := 0 76ZTEXTADDR := 0
77ZBSSADDR := ALIGN(4) 77ZBSSADDR := ALIGN(8)
78endif 78endif
79 79
80SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ 80SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583cd0c35..49f5b2eaaa87 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -179,15 +179,14 @@ not_angel:
179 bl cache_on 179 bl cache_on
180 180
181restart: adr r0, LC0 181restart: adr r0, LC0
182 ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} 182 ldmia r0, {r1, r2, r3, r6, r9, r11, r12}
183 ldr sp, [r0, #32] 183 ldr sp, [r0, #28]
184 184
185 /* 185 /*
186 * We might be running at a different address. We need 186 * We might be running at a different address. We need
187 * to fix up various pointers. 187 * to fix up various pointers.
188 */ 188 */
189 sub r0, r0, r1 @ calculate the delta offset 189 sub r0, r0, r1 @ calculate the delta offset
190 add r5, r5, r0 @ _start
191 add r6, r6, r0 @ _edata 190 add r6, r6, r0 @ _edata
192 191
193#ifndef CONFIG_ZBOOT_ROM 192#ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart: adr r0, LC0
206/* 205/*
207 * Check to see if we will overwrite ourselves. 206 * Check to see if we will overwrite ourselves.
208 * r4 = final kernel address 207 * r4 = final kernel address
209 * r5 = start of this image
210 * r9 = size of decompressed image 208 * r9 = size of decompressed image
211 * r10 = end of this image, including bss/stack/malloc space if non XIP 209 * r10 = end of this image, including bss/stack/malloc space if non XIP
212 * We basically want: 210 * We basically want:
213 * r4 >= r10 -> OK 211 * r4 - 16k page directory >= r10 -> OK
214 * r4 + image length <= r5 -> OK 212 * r4 + image length <= current position (pc) -> OK
215 */ 213 */
214 add r10, r10, #16384
216 cmp r4, r10 215 cmp r4, r10
217 bhs wont_overwrite 216 bhs wont_overwrite
218 add r10, r4, r9 217 add r10, r4, r9
219 cmp r10, r5 218 ARM( cmp r10, pc )
219 THUMB( mov lr, pc )
220 THUMB( cmp r10, lr )
220 bls wont_overwrite 221 bls wont_overwrite
221 222
222/* 223/*
223 * Relocate ourselves past the end of the decompressed kernel. 224 * Relocate ourselves past the end of the decompressed kernel.
224 * r5 = start of this image
225 * r6 = _edata 225 * r6 = _edata
226 * r10 = end of the decompressed kernel 226 * r10 = end of the decompressed kernel
227 * Because we always copy ahead, we need to do it from the end and go 227 * Because we always copy ahead, we need to do it from the end and go
228 * backward in case the source and destination overlap. 228 * backward in case the source and destination overlap.
229 */ 229 */
230 /* Round up to next 256-byte boundary. */ 230 /*
231 add r10, r10, #256 231 * Bump to the next 256-byte boundary with the size of
232 * the relocation code added. This avoids overwriting
233 * ourself when the offset is small.
234 */
235 add r10, r10, #((reloc_code_end - restart + 256) & ~255)
232 bic r10, r10, #255 236 bic r10, r10, #255
233 237
238 /* Get start of code we want to copy and align it down. */
239 adr r5, restart
240 bic r5, r5, #31
241
234 sub r9, r6, r5 @ size to copy 242 sub r9, r6, r5 @ size to copy
235 add r9, r9, #31 @ rounded up to a multiple 243 add r9, r9, #31 @ rounded up to a multiple
236 bic r9, r9, #31 @ ... of 32 bytes 244 bic r9, r9, #31 @ ... of 32 bytes
@@ -245,6 +253,11 @@ restart: adr r0, LC0
245 /* Preserve offset to relocated code. */ 253 /* Preserve offset to relocated code. */
246 sub r6, r9, r6 254 sub r6, r9, r6
247 255
256#ifndef CONFIG_ZBOOT_ROM
257 /* cache_clean_flush may use the stack, so relocate it */
258 add sp, sp, r6
259#endif
260
248 bl cache_clean_flush 261 bl cache_clean_flush
249 262
250 adr r0, BSYM(restart) 263 adr r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated: mov r0, #0
333LC0: .word LC0 @ r1 346LC0: .word LC0 @ r1
334 .word __bss_start @ r2 347 .word __bss_start @ r2
335 .word _end @ r3 348 .word _end @ r3
336 .word _start @ r5
337 .word _edata @ r6 349 .word _edata @ r6
338 .word _image_size @ r9 350 .word _image_size @ r9
339 .word _got_start @ r11 351 .word _got_start @ r11
@@ -1062,6 +1074,7 @@ memdump: mov r12, r0
1062#endif 1074#endif
1063 1075
1064 .ltorg 1076 .ltorg
1077reloc_code_end:
1065 1078
1066 .align 1079 .align
1067 .section ".stack", "aw", %nobits 1080 .section ".stack", "aw", %nobits
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 5309909d7282..ea80abe78844 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -54,6 +54,7 @@ SECTIONS
54 .bss : { *(.bss) } 54 .bss : { *(.bss) }
55 _end = .; 55 _end = .;
56 56
57 . = ALIGN(8); /* the stack must be 64-bit aligned */
57 .stack : { *(.stack) } 58 .stack : { *(.stack) }
58 59
59 .stab 0 : { *(.stab) } 60 .stab 0 : { *(.stab) }
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 885be097769d..832888d0c20c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -159,7 +159,7 @@ extern unsigned int user_debug;
159#include <mach/barriers.h> 159#include <mach/barriers.h>
160#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) 160#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
161#define mb() do { dsb(); outer_sync(); } while (0) 161#define mb() do { dsb(); outer_sync(); } while (0)
162#define rmb() dmb() 162#define rmb() dsb()
163#define wmb() mb() 163#define wmb() mb()
164#else 164#else
165#include <asm/memory.h> 165#include <asm/memory.h>
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2bf27f364d09..8182f45ca493 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -767,12 +767,20 @@ long arch_ptrace(struct task_struct *child, long request,
767 767
768#ifdef CONFIG_HAVE_HW_BREAKPOINT 768#ifdef CONFIG_HAVE_HW_BREAKPOINT
769 case PTRACE_GETHBPREGS: 769 case PTRACE_GETHBPREGS:
770 if (ptrace_get_breakpoints(child) < 0)
771 return -ESRCH;
772
770 ret = ptrace_gethbpregs(child, addr, 773 ret = ptrace_gethbpregs(child, addr,
771 (unsigned long __user *)data); 774 (unsigned long __user *)data);
775 ptrace_put_breakpoints(child);
772 break; 776 break;
773 case PTRACE_SETHBPREGS: 777 case PTRACE_SETHBPREGS:
778 if (ptrace_get_breakpoints(child) < 0)
779 return -ESRCH;
780
774 ret = ptrace_sethbpregs(child, addr, 781 ret = ptrace_sethbpregs(child, addr,
775 (unsigned long __user *)data); 782 (unsigned long __user *)data);
783 ptrace_put_breakpoints(child);
776 break; 784 break;
777#endif 785#endif
778 786
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index cb8398317644..0340224cf73c 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
597 return err; 597 return err;
598} 598}
599 599
600static inline void setup_syscall_restart(struct pt_regs *regs)
601{
602 regs->ARM_r0 = regs->ARM_ORIG_r0;
603 regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
604}
605
606/* 600/*
607 * OK, we're invoking a handler 601 * OK, we're invoking a handler
608 */ 602 */
609static int 603static int
610handle_signal(unsigned long sig, struct k_sigaction *ka, 604handle_signal(unsigned long sig, struct k_sigaction *ka,
611 siginfo_t *info, sigset_t *oldset, 605 siginfo_t *info, sigset_t *oldset,
612 struct pt_regs * regs, int syscall) 606 struct pt_regs * regs)
613{ 607{
614 struct thread_info *thread = current_thread_info(); 608 struct thread_info *thread = current_thread_info();
615 struct task_struct *tsk = current; 609 struct task_struct *tsk = current;
@@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
617 int ret; 611 int ret;
618 612
619 /* 613 /*
620 * If we were from a system call, check for system call restarting...
621 */
622 if (syscall) {
623 switch (regs->ARM_r0) {
624 case -ERESTART_RESTARTBLOCK:
625 case -ERESTARTNOHAND:
626 regs->ARM_r0 = -EINTR;
627 break;
628 case -ERESTARTSYS:
629 if (!(ka->sa.sa_flags & SA_RESTART)) {
630 regs->ARM_r0 = -EINTR;
631 break;
632 }
633 /* fallthrough */
634 case -ERESTARTNOINTR:
635 setup_syscall_restart(regs);
636 }
637 }
638
639 /*
640 * translate the signal 614 * translate the signal
641 */ 615 */
642 if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) 616 if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
685 */ 659 */
686static void do_signal(struct pt_regs *regs, int syscall) 660static void do_signal(struct pt_regs *regs, int syscall)
687{ 661{
662 unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
688 struct k_sigaction ka; 663 struct k_sigaction ka;
689 siginfo_t info; 664 siginfo_t info;
690 int signr; 665 int signr;
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall)
698 if (!user_mode(regs)) 673 if (!user_mode(regs))
699 return; 674 return;
700 675
676 /*
677 * If we were from a system call, check for system call restarting...
678 */
679 if (syscall) {
680 continue_addr = regs->ARM_pc;
681 restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
682 retval = regs->ARM_r0;
683
684 /*
685 * Prepare for system call restart. We do this here so that a
686 * debugger will see the already changed PSW.
687 */
688 switch (retval) {
689 case -ERESTARTNOHAND:
690 case -ERESTARTSYS:
691 case -ERESTARTNOINTR:
692 regs->ARM_r0 = regs->ARM_ORIG_r0;
693 regs->ARM_pc = restart_addr;
694 break;
695 case -ERESTART_RESTARTBLOCK:
696 regs->ARM_r0 = -EINTR;
697 break;
698 }
699 }
700
701 if (try_to_freeze()) 701 if (try_to_freeze())
702 goto no_signal; 702 goto no_signal;
703 703
704 /*
705 * Get the signal to deliver. When running under ptrace, at this
706 * point the debugger may change all our registers ...
707 */
704 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 708 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
705 if (signr > 0) { 709 if (signr > 0) {
706 sigset_t *oldset; 710 sigset_t *oldset;
707 711
712 /*
713 * Depending on the signal settings we may need to revert the
714 * decision to restart the system call. But skip this if a
715 * debugger has chosen to restart at a different PC.
716 */
717 if (regs->ARM_pc == restart_addr) {
718 if (retval == -ERESTARTNOHAND
719 || (retval == -ERESTARTSYS
720 && !(ka.sa.sa_flags & SA_RESTART))) {
721 regs->ARM_r0 = -EINTR;
722 regs->ARM_pc = continue_addr;
723 }
724 }
725
708 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 726 if (test_thread_flag(TIF_RESTORE_SIGMASK))
709 oldset = &current->saved_sigmask; 727 oldset = &current->saved_sigmask;
710 else 728 else
711 oldset = &current->blocked; 729 oldset = &current->blocked;
712 if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { 730 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
713 /* 731 /*
714 * A signal was successfully delivered; the saved 732 * A signal was successfully delivered; the saved
715 * sigmask will have been stored in the signal frame, 733 * sigmask will have been stored in the signal frame,
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
723 } 741 }
724 742
725 no_signal: 743 no_signal:
726 /*
727 * No signal to deliver to the process - restart the syscall.
728 */
729 if (syscall) { 744 if (syscall) {
730 if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { 745 /*
746 * Handle restarting a different system call. As above,
747 * if a debugger has chosen to restart at a different PC,
748 * ignore the restart.
749 */
750 if (retval == -ERESTART_RESTARTBLOCK
751 && regs->ARM_pc == continue_addr) {
731 if (thumb_mode(regs)) { 752 if (thumb_mode(regs)) {
732 regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; 753 regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
733 regs->ARM_pc -= 2; 754 regs->ARM_pc -= 2;
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
750#endif 771#endif
751 } 772 }
752 } 773 }
753 if (regs->ARM_r0 == -ERESTARTNOHAND ||
754 regs->ARM_r0 == -ERESTARTSYS ||
755 regs->ARM_r0 == -ERESTARTNOINTR) {
756 setup_syscall_restart(regs);
757 }
758 774
759 /* If there's no signal to deliver, we just put the saved sigmask 775 /* If there's no signal to deliver, we just put the saved sigmask
760 * back. 776 * back.
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
index b2b1e37bb6bb..d6e34dd9e7e7 100644
--- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
+++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
@@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
115 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, 115 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
116 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, 116 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
117 0, 0, 0, 0); 117 0, 0, 0, 0);
118 clk->rate = rate;
118 119
119 return 0; 120 return 0;
120} 121}
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
index 0c5d749d7b5f..9a732195aa1c 100644
--- a/arch/arm/mach-realview/include/mach/barriers.h
+++ b/arch/arm/mach-realview/include/mach/barriers.h
@@ -4,5 +4,5 @@
4 * operation to deadlock the system. 4 * operation to deadlock the system.
5 */ 5 */
6#define mb() dsb() 6#define mb() dsb()
7#define rmb() dmb() 7#define rmb() dsb()
8#define wmb() mb() 8#define wmb() mb()
diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h
index cc115174899b..425b42e91ef6 100644
--- a/arch/arm/mach-tegra/include/mach/barriers.h
+++ b/arch/arm/mach-tegra/include/mach/barriers.h
@@ -23,7 +23,7 @@
23 23
24#include <asm/outercache.h> 24#include <asm/outercache.h>
25 25
26#define rmb() dmb() 26#define rmb() dsb()
27#define wmb() do { dsb(); outer_sync(); } while (0) 27#define wmb() do { dsb(); outer_sync(); } while (0)
28#define mb() wmb() 28#define mb() wmb()
29 29
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc428348..e591513bb53e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
392 * Convert start_pfn/end_pfn to a struct page pointer. 392 * Convert start_pfn/end_pfn to a struct page pointer.
393 */ 393 */
394 start_pg = pfn_to_page(start_pfn - 1) + 1; 394 start_pg = pfn_to_page(start_pfn - 1) + 1;
395 end_pg = pfn_to_page(end_pfn); 395 end_pg = pfn_to_page(end_pfn - 1) + 1;
396 396
397 /* 397 /*
398 * Convert to physical addresses, and 398 * Convert to physical addresses, and
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
426 426
427 bank_start = bank_pfn_start(bank); 427 bank_start = bank_pfn_start(bank);
428 428
429#ifdef CONFIG_SPARSEMEM
430 /*
431 * Take care not to free memmap entries that don't exist
432 * due to SPARSEMEM sections which aren't present.
433 */
434 bank_start = min(bank_start,
435 ALIGN(prev_bank_end, PAGES_PER_SECTION));
436#endif
429 /* 437 /*
430 * If we had a previous bank, and there is a space 438 * If we had a previous bank, and there is a space
431 * between the current bank and the previous, free it. 439 * between the current bank and the previous, free it.
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
440 */ 448 */
441 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 449 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
442 } 450 }
451
452#ifdef CONFIG_SPARSEMEM
453 if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
454 free_memmap(prev_bank_end,
455 ALIGN(prev_bank_end, PAGES_PER_SECTION));
456#endif
443} 457}
444 458
445static void __init free_highpages(void) 459static void __init free_highpages(void)
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 8a51fd58f656..34fc31ee9081 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
793 clk_enable(obj->clk); 793 clk_enable(obj->clk);
794 errs = iommu_report_fault(obj, &da); 794 errs = iommu_report_fault(obj, &da);
795 clk_disable(obj->clk); 795 clk_disable(obj->clk);
796 if (errs == 0)
797 return IRQ_HANDLED;
796 798
797 /* Fault callback or TLB/PTE Dynamic loading */ 799 /* Fault callback or TLB/PTE Dynamic loading */
798 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) 800 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8e256cc5dcd9..351c80fbba7e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -997,9 +997,6 @@ config IRQ_GT641XX
997config IRQ_GIC 997config IRQ_GIC
998 bool 998 bool
999 999
1000config IRQ_CPU_OCTEON
1001 bool
1002
1003config MIPS_BOARDS_GEN 1000config MIPS_BOARDS_GEN
1004 bool 1001 bool
1005 1002
@@ -1359,8 +1356,6 @@ config CPU_SB1
1359config CPU_CAVIUM_OCTEON 1356config CPU_CAVIUM_OCTEON
1360 bool "Cavium Octeon processor" 1357 bool "Cavium Octeon processor"
1361 depends on SYS_HAS_CPU_CAVIUM_OCTEON 1358 depends on SYS_HAS_CPU_CAVIUM_OCTEON
1362 select IRQ_CPU
1363 select IRQ_CPU_OCTEON
1364 select CPU_HAS_PREFETCH 1359 select CPU_HAS_PREFETCH
1365 select CPU_SUPPORTS_64BIT_KERNEL 1360 select CPU_SUPPORTS_64BIT_KERNEL
1366 select SYS_SUPPORTS_SMP 1361 select SYS_SUPPORTS_SMP
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
index 05f120ff90f9..5c956fe8760f 100644
--- a/arch/mips/alchemy/devboards/db1x00/board_setup.c
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -127,13 +127,10 @@ const char *get_system_type(void)
127void __init board_setup(void) 127void __init board_setup(void)
128{ 128{
129 unsigned long bcsr1, bcsr2; 129 unsigned long bcsr1, bcsr2;
130 u32 pin_func;
131 130
132 bcsr1 = DB1000_BCSR_PHYS_ADDR; 131 bcsr1 = DB1000_BCSR_PHYS_ADDR;
133 bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; 132 bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS;
134 133
135 pin_func = 0;
136
137#ifdef CONFIG_MIPS_DB1000 134#ifdef CONFIG_MIPS_DB1000
138 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); 135 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
139#endif 136#endif
@@ -164,12 +161,16 @@ void __init board_setup(void)
164 /* Not valid for Au1550 */ 161 /* Not valid for Au1550 */
165#if defined(CONFIG_IRDA) && \ 162#if defined(CONFIG_IRDA) && \
166 (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) 163 (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100))
167 /* Set IRFIRSEL instead of GPIO15 */ 164 {
168 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; 165 u32 pin_func;
169 au_writel(pin_func, SYS_PINFUNC); 166
170 /* Power off until the driver is in use */ 167 /* Set IRFIRSEL instead of GPIO15 */
171 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, 168 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
172 BCSR_RESETS_IRDA_MODE_OFF); 169 au_writel(pin_func, SYS_PINFUNC);
170 /* Power off until the driver is in use */
171 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
172 BCSR_RESETS_IRDA_MODE_OFF);
173 }
173#endif 174#endif
174 bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ 175 bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */
175 176
@@ -177,31 +178,35 @@ void __init board_setup(void)
177 alchemy_gpio1_input_enable(); 178 alchemy_gpio1_input_enable();
178 179
179#ifdef CONFIG_MIPS_MIRAGE 180#ifdef CONFIG_MIPS_MIRAGE
180 /* GPIO[20] is output */ 181 {
181 alchemy_gpio_direction_output(20, 0); 182 u32 pin_func;
182 183
183 /* Set GPIO[210:208] instead of SSI_0 */ 184 /* GPIO[20] is output */
184 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; 185 alchemy_gpio_direction_output(20, 0);
185 186
186 /* Set GPIO[215:211] for LEDs */ 187 /* Set GPIO[210:208] instead of SSI_0 */
187 pin_func |= 5 << 2; 188 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
188 189
189 /* Set GPIO[214:213] for more LEDs */ 190 /* Set GPIO[215:211] for LEDs */
190 pin_func |= 5 << 12; 191 pin_func |= 5 << 2;
191 192
192 /* Set GPIO[207:200] instead of PCMCIA/LCD */ 193 /* Set GPIO[214:213] for more LEDs */
193 pin_func |= SYS_PF_LCD | SYS_PF_PC; 194 pin_func |= 5 << 12;
194 au_writel(pin_func, SYS_PINFUNC);
195 195
196 /* 196 /* Set GPIO[207:200] instead of PCMCIA/LCD */
197 * Enable speaker amplifier. This should 197 pin_func |= SYS_PF_LCD | SYS_PF_PC;
198 * be part of the audio driver. 198 au_writel(pin_func, SYS_PINFUNC);
199 */
200 alchemy_gpio_direction_output(209, 1);
201 199
202 pm_power_off = mirage_power_off; 200 /*
203 _machine_halt = mirage_power_off; 201 * Enable speaker amplifier. This should
204 _machine_restart = (void(*)(char *))mips_softreset; 202 * be part of the audio driver.
203 */
204 alchemy_gpio_direction_output(209, 1);
205
206 pm_power_off = mirage_power_off;
207 _machine_halt = mirage_power_off;
208 _machine_restart = (void(*)(char *))mips_softreset;
209 }
205#endif 210#endif
206 211
207#ifdef CONFIG_MIPS_BOSPORUS 212#ifdef CONFIG_MIPS_BOSPORUS
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c
index 15125c2fda7d..34a90a4bb6f4 100644
--- a/arch/mips/alchemy/xxs1500/init.c
+++ b/arch/mips/alchemy/xxs1500/init.c
@@ -51,10 +51,9 @@ void __init prom_init(void)
51 prom_init_cmdline(); 51 prom_init_cmdline();
52 52
53 memsize_str = prom_getenv("memsize"); 53 memsize_str = prom_getenv("memsize");
54 if (!memsize_str) 54 if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
55 memsize = 0x04000000; 55 memsize = 0x04000000;
56 else 56
57 strict_strtoul(memsize_str, 0, &memsize);
58 add_memory_region(0, memsize, BOOT_MEM_RAM); 57 add_memory_region(0, memsize, BOOT_MEM_RAM);
59} 58}
60 59
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index 425dfa5d6e12..bb571bcdb8f2 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void)
325 size = 0x1f; 325 size = 0x1f;
326 } 326 }
327 327
328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, 328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
329 AR7_REGS_GPIO + 0x10);
330
331 if (!gpch->regs) { 329 if (!gpch->regs) {
332 printk(KERN_ERR "%s: failed to ioremap regs\n", 330 printk(KERN_ERR "%s: failed to ioremap regs\n",
333 gpch->chip.label); 331 gpch->chip.label);
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 88c9d963be88..9a6243676e22 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -16,8 +16,8 @@
16 16
17int main(int argc, char *argv[]) 17int main(int argc, char *argv[])
18{ 18{
19 unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
19 struct stat sb; 20 struct stat sb;
20 uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
21 21
22 if (argc != 3) { 22 if (argc != 3) {
23 fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", 23 fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n",
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index caae22858163..cad555ebeca3 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -1,11 +1,7 @@
1config CAVIUM_OCTEON_SPECIFIC_OPTIONS 1if CPU_CAVIUM_OCTEON
2 bool "Enable Octeon specific options"
3 depends on CPU_CAVIUM_OCTEON
4 default "y"
5 2
6config CAVIUM_CN63XXP1 3config CAVIUM_CN63XXP1
7 bool "Enable CN63XXP1 errata worarounds" 4 bool "Enable CN63XXP1 errata worarounds"
8 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
9 default "n" 5 default "n"
10 help 6 help
11 The CN63XXP1 chip requires build time workarounds to 7 The CN63XXP1 chip requires build time workarounds to
@@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1
16 12
17config CAVIUM_OCTEON_2ND_KERNEL 13config CAVIUM_OCTEON_2ND_KERNEL
18 bool "Build the kernel to be used as a 2nd kernel on the same chip" 14 bool "Build the kernel to be used as a 2nd kernel on the same chip"
19 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
20 default "n" 15 default "n"
21 help 16 help
22 This option configures this kernel to be linked at a different 17 This option configures this kernel to be linked at a different
@@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL
26 21
27config CAVIUM_OCTEON_HW_FIX_UNALIGNED 22config CAVIUM_OCTEON_HW_FIX_UNALIGNED
28 bool "Enable hardware fixups of unaligned loads and stores" 23 bool "Enable hardware fixups of unaligned loads and stores"
29 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
30 default "y" 24 default "y"
31 help 25 help
32 Configure the Octeon hardware to automatically fix unaligned loads 26 Configure the Octeon hardware to automatically fix unaligned loads
@@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED
38 32
39config CAVIUM_OCTEON_CVMSEG_SIZE 33config CAVIUM_OCTEON_CVMSEG_SIZE
40 int "Number of L1 cache lines reserved for CVMSEG memory" 34 int "Number of L1 cache lines reserved for CVMSEG memory"
41 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
42 range 0 54 35 range 0 54
43 default 1 36 default 1
44 help 37 help
@@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE
50 43
51config CAVIUM_OCTEON_LOCK_L2 44config CAVIUM_OCTEON_LOCK_L2
52 bool "Lock often used kernel code in the L2" 45 bool "Lock often used kernel code in the L2"
53 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
54 default "y" 46 default "y"
55 help 47 help
56 Enable locking parts of the kernel into the L2 cache. 48 Enable locking parts of the kernel into the L2 cache.
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
93config ARCH_SPARSEMEM_ENABLE 85config ARCH_SPARSEMEM_ENABLE
94 def_bool y 86 def_bool y
95 select SPARSEMEM_STATIC 87 select SPARSEMEM_STATIC
96 depends on CPU_CAVIUM_OCTEON
97 88
98config CAVIUM_OCTEON_HELPER 89config CAVIUM_OCTEON_HELPER
99 def_bool y 90 def_bool y
@@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH
107 98
108config SWIOTLB 99config SWIOTLB
109 def_bool y 100 def_bool y
110 depends on CPU_CAVIUM_OCTEON
111 select IOMMU_HELPER 101 select IOMMU_HELPER
112 select NEED_SG_DMA_LENGTH 102 select NEED_SG_DMA_LENGTH
103
104
105endif # CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index 650ac9ba734c..b4db69fbc40c 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -17,6 +17,6 @@
17#define SMP_CACHE_SHIFT L1_CACHE_SHIFT 17#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
18#define SMP_CACHE_BYTES L1_CACHE_BYTES 18#define SMP_CACHE_BYTES L1_CACHE_BYTES
19 19
20#define __read_mostly __attribute__((__section__(".data.read_mostly"))) 20#define __read_mostly __attribute__((__section__(".data..read_mostly")))
21 21
22#endif /* _ASM_CACHE_H */ 22#endif /* _ASM_CACHE_H */
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index fa4328f9124f..65f9bdd02f1f 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -14,6 +14,9 @@
14#ifndef __ASM_CEVT_R4K_H 14#ifndef __ASM_CEVT_R4K_H
15#define __ASM_CEVT_R4K_H 15#define __ASM_CEVT_R4K_H
16 16
17#include <linux/clockchips.h>
18#include <asm/time.h>
19
17DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); 20DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
18 21
19void mips_event_handler(struct clock_event_device *dev); 22void mips_event_handler(struct clock_event_device *dev);
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 655f849bd08d..7aa37ddfca4b 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,7 +5,9 @@
5#include <asm/cache.h> 5#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h> 6#include <asm-generic/dma-coherent.h>
7 7
8#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
8#include <dma-coherence.h> 9#include <dma-coherence.h>
10#endif
9 11
10extern struct dma_map_ops *mips_dma_map_ops; 12extern struct dma_map_ops *mips_dma_map_ops;
11 13
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index f5e856015329..c565b7c3f0b5 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
70static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 70static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
71 unsigned long addr, pte_t *ptep) 71 unsigned long addr, pte_t *ptep)
72{ 72{
73 flush_tlb_mm(vma->vm_mm);
73} 74}
74 75
75static inline int huge_pte_none(pte_t pte) 76static inline int huge_pte_none(pte_t pte)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
index 32978d32561a..ed72e6a26b73 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -88,7 +88,7 @@ struct bcm_tag {
88 char kernel_crc[CRC_LEN]; 88 char kernel_crc[CRC_LEN];
89 /* 228-235: Unused at present */ 89 /* 228-235: Unused at present */
90 char reserved1[8]; 90 char reserved1[8];
91 /* 236-239: CRC32 of header excluding tagVersion */ 91 /* 236-239: CRC32 of header excluding last 20 bytes */
92 char header_crc[CRC_LEN]; 92 char header_crc[CRC_LEN];
93 /* 240-255: Unused at present */ 93 /* 240-255: Unused at present */
94 char reserved2[16]; 94 char reserved2[16];
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 9ce9f64cb76f..2d8e447cb828 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free);
211 */ 211 */
212int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) 212int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
213{ 213{
214 int first, pages, npages; 214 int first, pages;
215 215
216 if (laddr > 0xffffff) { 216 if (laddr > 0xffffff) {
217 if (vdma_debug) 217 if (vdma_debug)
@@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
228 return -EINVAL; /* invalid physical address */ 228 return -EINVAL; /* invalid physical address */
229 } 229 }
230 230
231 npages = pages = 231 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
232 (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
233 first = laddr >> 12; 232 first = laddr >> 12;
234 if (vdma_debug) 233 if (vdma_debug)
235 printk("vdma_remap: first=%x, pages=%x\n", first, pages); 234 printk("vdma_remap: first=%x, pages=%x\n", first, pages);
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c
index 5ebe75a68350..d7feb898692c 100644
--- a/arch/mips/jz4740/dma.c
+++ b/arch/mips/jz4740/dma.c
@@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue);
242 242
243static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) 243static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma)
244{ 244{
245 uint32_t status; 245 (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
246
247 status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
248 246
249 jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, 247 jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0,
250 JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); 248 JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index fe01678d94fd..eaa853a54af6 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt,
89 89
90static struct clock_event_device jz4740_clockevent = { 90static struct clock_event_device jz4740_clockevent = {
91 .name = "jz4740-timer", 91 .name = "jz4740-timer",
92 .features = CLOCK_EVT_FEAT_PERIODIC, 92 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
93 .set_next_event = jz4740_clockevent_set_next, 93 .set_next_event = jz4740_clockevent_set_next,
94 .set_mode = jz4740_clockevent_set_mode, 94 .set_mode = jz4740_clockevent_set_mode,
95 .rating = 200, 95 .rating = 200,
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c
index b2c015129055..654d5c3900b6 100644
--- a/arch/mips/jz4740/timer.c
+++ b/arch/mips/jz4740/timer.c
@@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void)
27{ 27{
28 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); 28 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR);
29} 29}
30EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog);
30 31
31void jz4740_timer_disable_watchdog(void) 32void jz4740_timer_disable_watchdog(void)
32{ 33{
33 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); 34 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET);
34} 35}
36EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog);
35 37
36void __init jz4740_timer_init(void) 38void __init jz4740_timer_init(void)
37{ 39{
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 94ca2b018af7..feb8021a305f 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -23,6 +23,7 @@
23 23
24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
26#define JUMP_RANGE_MASK ((1UL << 28) - 1)
26 27
27#define INSN_NOP 0x00000000 /* nop */ 28#define INSN_NOP 0x00000000 /* nop */
28#define INSN_JAL(addr) \ 29#define INSN_JAL(addr) \
@@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void)
44 45
45 /* jal (ftrace_caller + 8), jump over the first two instruction */ 46 /* jal (ftrace_caller + 8), jump over the first two instruction */
46 buf = (u32 *)&insn_jal_ftrace_caller; 47 buf = (u32 *)&insn_jal_ftrace_caller;
47 uasm_i_jal(&buf, (FTRACE_ADDR + 8)); 48 uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
48 49
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER 50#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50 /* j ftrace_graph_caller */ 51 /* j ftrace_graph_caller */
51 buf = (u32 *)&insn_j_ftrace_graph_caller; 52 buf = (u32 *)&insn_j_ftrace_graph_caller;
52 uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); 53 uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
53#endif 54#endif
54} 55}
55 56
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index d21c388c0116..584e6b55c865 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -540,8 +540,8 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
540 secure_computing(regs->regs[2]); 540 secure_computing(regs->regs[2]);
541 541
542 if (unlikely(current->audit_context) && entryexit) 542 if (unlikely(current->audit_context) && entryexit)
543 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), 543 audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
544 regs->regs[2]); 544 -regs->regs[2]);
545 545
546 if (!(current->ptrace & PT_PTRACED)) 546 if (!(current->ptrace & PT_PTRACED))
547 goto out; 547 goto out;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7f5468b38d4c..7f1377eb22d3 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -565,7 +565,7 @@ einval: li v0, -ENOSYS
565 sys sys_ioprio_get 2 /* 4315 */ 565 sys sys_ioprio_get 2 /* 4315 */
566 sys sys_utimensat 4 566 sys sys_utimensat 4
567 sys sys_signalfd 3 567 sys sys_signalfd 3
568 sys sys_ni_syscall 0 568 sys sys_ni_syscall 0 /* was timerfd */
569 sys sys_eventfd 1 569 sys sys_eventfd 1
570 sys sys_fallocate 6 /* 4320 */ 570 sys sys_fallocate 6 /* 4320 */
571 sys sys_timerfd_create 2 571 sys sys_timerfd_create 2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a2e1fcbc41dc..7c0ef7f128bf 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -404,7 +404,7 @@ sys_call_table:
404 PTR sys_ioprio_get 404 PTR sys_ioprio_get
405 PTR sys_utimensat /* 5275 */ 405 PTR sys_utimensat /* 5275 */
406 PTR sys_signalfd 406 PTR sys_signalfd
407 PTR sys_ni_syscall 407 PTR sys_ni_syscall /* was timerfd */
408 PTR sys_eventfd 408 PTR sys_eventfd
409 PTR sys_fallocate 409 PTR sys_fallocate
410 PTR sys_timerfd_create /* 5280 */ 410 PTR sys_timerfd_create /* 5280 */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index b2c7624995b8..de6c5563beab 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -403,7 +403,7 @@ EXPORT(sysn32_call_table)
403 PTR sys_ioprio_get 403 PTR sys_ioprio_get
404 PTR compat_sys_utimensat 404 PTR compat_sys_utimensat
405 PTR compat_sys_signalfd /* 6280 */ 405 PTR compat_sys_signalfd /* 6280 */
406 PTR sys_ni_syscall 406 PTR sys_ni_syscall /* was timerfd */
407 PTR sys_eventfd 407 PTR sys_eventfd
408 PTR sys_fallocate 408 PTR sys_fallocate
409 PTR sys_timerfd_create 409 PTR sys_timerfd_create
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 049a9c8c49a0..b0541dda8830 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -522,7 +522,7 @@ sys_call_table:
522 PTR sys_ioprio_get /* 4315 */ 522 PTR sys_ioprio_get /* 4315 */
523 PTR compat_sys_utimensat 523 PTR compat_sys_utimensat
524 PTR compat_sys_signalfd 524 PTR compat_sys_signalfd
525 PTR sys_ni_syscall 525 PTR sys_ni_syscall /* was timerfd */
526 PTR sys_eventfd 526 PTR sys_eventfd
527 PTR sys32_fallocate /* 4320 */ 527 PTR sys32_fallocate /* 4320 */
528 PTR sys_timerfd_create 528 PTR sys_timerfd_create
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 71350f7f2d88..e9b3af27d844 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
374 unsigned long dvpret = dvpe(); 374 unsigned long dvpret = dvpe();
375#endif /* CONFIG_MIPS_MT_SMTC */ 375#endif /* CONFIG_MIPS_MT_SMTC */
376 376
377 notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); 377 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
378 sig = 0;
378 379
379 console_verbose(); 380 console_verbose();
380 spin_lock_irq(&die_lock); 381 spin_lock_irq(&die_lock);
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
383 mips_mt_regdump(dvpret); 384 mips_mt_regdump(dvpret);
384#endif /* CONFIG_MIPS_MT_SMTC */ 385#endif /* CONFIG_MIPS_MT_SMTC */
385 386
386 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
387 sig = 0;
388
389 printk("%s[#%d]:\n", str, ++die_counter); 387 printk("%s[#%d]:\n", str, ++die_counter);
390 show_registers(regs); 388 show_registers(regs);
391 add_taint(TAINT_DIE); 389 add_taint(TAINT_DIE);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 832afbb87588..e4b0b0bec039 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -74,6 +74,7 @@ SECTIONS
74 INIT_TASK_DATA(PAGE_SIZE) 74 INIT_TASK_DATA(PAGE_SIZE)
75 NOSAVE_DATA 75 NOSAVE_DATA
76 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) 76 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
77 READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
77 DATA_DATA 78 DATA_DATA
78 CONSTRUCTORS 79 CONSTRUCTORS
79 } 80 }
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 11b193f848f8..d93830ad6113 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -29,9 +29,10 @@ unsigned long memsize, highmemsize;
29 29
30#define parse_even_earlier(res, option, p) \ 30#define parse_even_earlier(res, option, p) \
31do { \ 31do { \
32 int ret; \ 32 unsigned int tmp __maybe_unused; \
33 \
33 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 34 if (strncmp(option, (char *)p, strlen(option)) == 0) \
34 ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \
35} while (0) 36} while (0)
36 37
37void __init prom_init_env(void) 38void __init prom_init_env(void)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index b4923a75cb4b..71bddf8f7d25 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1075,7 +1075,6 @@ static int __cpuinit probe_scache(void)
1075 unsigned long flags, addr, begin, end, pow2; 1075 unsigned long flags, addr, begin, end, pow2;
1076 unsigned int config = read_c0_config(); 1076 unsigned int config = read_c0_config();
1077 struct cpuinfo_mips *c = &current_cpu_data; 1077 struct cpuinfo_mips *c = &current_cpu_data;
1078 int tmp;
1079 1078
1080 if (config & CONF_SC) 1079 if (config & CONF_SC)
1081 return 0; 1080 return 0;
@@ -1108,7 +1107,6 @@ static int __cpuinit probe_scache(void)
1108 1107
1109 /* Now search for the wrap around point. */ 1108 /* Now search for the wrap around point. */
1110 pow2 = (128 * 1024); 1109 pow2 = (128 * 1024);
1111 tmp = 0;
1112 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { 1110 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1113 cache_op(Index_Load_Tag_SD, addr); 1111 cache_op(Index_Load_Tag_SD, addr);
1114 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ 1112 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5ef294fbb6e7..f5734c2c8097 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1151,8 +1151,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
1151 struct uasm_reloc *r = relocs; 1151 struct uasm_reloc *r = relocs;
1152 u32 *f; 1152 u32 *f;
1153 unsigned int final_len; 1153 unsigned int final_len;
1154 struct mips_huge_tlb_info htlb_info; 1154 struct mips_huge_tlb_info htlb_info __maybe_unused;
1155 enum vmalloc64_mode vmalloc_mode; 1155 enum vmalloc64_mode vmalloc_mode __maybe_unused;
1156 1156
1157 memset(tlb_handler, 0, sizeof(tlb_handler)); 1157 memset(tlb_handler, 0, sizeof(tlb_handler));
1158 memset(labels, 0, sizeof(labels)); 1158 memset(labels, 0, sizeof(labels));
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 414f0c99b196..31180c321a1a 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
193 193
194void __init prom_init(void) 194void __init prom_init(void)
195{ 195{
196 int result;
197
198 prom_argc = fw_arg0; 196 prom_argc = fw_arg0;
199 _prom_argv = (int *) fw_arg1; 197 _prom_argv = (int *) fw_arg1;
200 _prom_envp = (int *) fw_arg2; 198 _prom_envp = (int *) fw_arg2;
@@ -360,20 +358,14 @@ void __init prom_init(void)
360#ifdef CONFIG_SERIAL_8250_CONSOLE 358#ifdef CONFIG_SERIAL_8250_CONSOLE
361 console_config(); 359 console_config();
362#endif 360#endif
363 /* Early detection of CMP support */
364 result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
365
366#ifdef CONFIG_MIPS_CMP 361#ifdef CONFIG_MIPS_CMP
367 if (result) 362 /* Early detection of CMP support */
363 if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
368 register_smp_ops(&cmp_smp_ops); 364 register_smp_ops(&cmp_smp_ops);
365 else
369#endif 366#endif
370#ifdef CONFIG_MIPS_MT_SMP 367#ifdef CONFIG_MIPS_MT_SMP
371#ifdef CONFIG_MIPS_CMP
372 if (!result)
373 register_smp_ops(&vsmp_smp_ops); 368 register_smp_ops(&vsmp_smp_ops);
374#else
375 register_smp_ops(&vsmp_smp_ops);
376#endif
377#endif 369#endif
378#ifdef CONFIG_MIPS_MT_SMTC 370#ifdef CONFIG_MIPS_MT_SMTC
379 register_smp_ops(&msmtc_smp_ops); 371 register_smp_ops(&msmtc_smp_ops);
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 9027061f0ead..e85c977328da 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock);
56static inline int mips_pcibios_iack(void) 56static inline int mips_pcibios_iack(void)
57{ 57{
58 int irq; 58 int irq;
59 u32 dummy;
60 59
61 /* 60 /*
62 * Determine highest priority pending interrupt by performing 61 * Determine highest priority pending interrupt by performing
@@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void)
83 BONITO_PCIMAP_CFG = 0x20000; 82 BONITO_PCIMAP_CFG = 0x20000;
84 83
85 /* Flush Bonito register block */ 84 /* Flush Bonito register block */
86 dummy = BONITO_PCIMAP_CFG; 85 (void) BONITO_PCIMAP_CFG;
87 iob(); /* sync */ 86 iob(); /* sync */
88 87
89 irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); 88 irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
index f9b9dcdfa9dd..98fd0099d964 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
@@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d,
97 97
98static struct irq_chip msp_per_irq_controller = { 98static struct irq_chip msp_per_irq_controller = {
99 .name = "MSP_PER", 99 .name = "MSP_PER",
100 .irq_enable = unmask_per_irq. 100 .irq_enable = unmask_per_irq,
101 .irq_disable = mask_per_irq, 101 .irq_disable = mask_per_irq,
102 .irq_ack = msp_per_irq_ack, 102 .irq_ack = msp_per_irq_ack,
103#ifdef CONFIG_SMP 103#ifdef CONFIG_SMP
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index dbb5c7b4b70f..f8a751c03282 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume)
350: 350:
36 PTR_L t1, PBE_ADDRESS(t0) /* source */ 36 PTR_L t1, PBE_ADDRESS(t0) /* source */
37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ 37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
38 PTR_ADDIU t3, t1, PAGE_SIZE 38 PTR_ADDU t3, t1, PAGE_SIZE
391: 391:
40 REG_L t8, (t1) 40 REG_L t8, (t1)
41 REG_S t8, (t2) 41 REG_S t8, (t2)
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 37de05d595e7..6c47dfeb7be3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void)
185 struct resource *r; 185 struct resource *r;
186 186
187 r = rb532_gpio_reg0_res; 187 r = rb532_gpio_reg0_res;
188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); 188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));
189 189
190 if (!rb532_gpio_chip->regbase) { 190 if (!rb532_gpio_chip->regbase) {
191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); 191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index deddbf0ebe5c..698904daf901 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -132,7 +132,7 @@ static struct platform_device eth1_device = {
132 */ 132 */
133static int __init sgiseeq_devinit(void) 133static int __init sgiseeq_devinit(void)
134{ 134{
135 unsigned int tmp; 135 unsigned int pbdma __maybe_unused;
136 int res, i; 136 int res, i;
137 137
138 eth0_pd.hpc = hpc3c0; 138 eth0_pd.hpc = hpc3c0;
@@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void)
151 151
152 /* Second HPC is missing? */ 152 /* Second HPC is missing? */
153 if (ip22_is_fullhouse() || 153 if (ip22_is_fullhouse() ||
154 get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) 154 get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
155 return 0; 155 return 0;
156 156
157 sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | 157 sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index 603fc91c1030..1a94c9894188 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -32,7 +32,7 @@
32static unsigned long dosample(void) 32static unsigned long dosample(void)
33{ 33{
34 u32 ct0, ct1; 34 u32 ct0, ct1;
35 u8 msb, lsb; 35 u8 msb;
36 36
37 /* Start the counter. */ 37 /* Start the counter. */
38 sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | 38 sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
@@ -46,7 +46,7 @@ static unsigned long dosample(void)
46 /* Latch and spin until top byte of counter2 is zero */ 46 /* Latch and spin until top byte of counter2 is zero */
47 do { 47 do {
48 writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); 48 writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword);
49 lsb = readb(&sgint->tcnt2); 49 (void) readb(&sgint->tcnt2);
50 msb = readb(&sgint->tcnt2); 50 msb = readb(&sgint->tcnt2);
51 ct1 = read_c0_count(); 51 ct1 = read_c0_count();
52 } while (msb); 52 } while (msb);
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index a1fa4abb3f6a..cd0d5b06cd83 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
29 unsigned long xtalk_addr, size_t size) 29 unsigned long xtalk_addr, size_t size)
30{ 30{
31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
32 volatile hubreg_t junk;
33 unsigned i; 32 unsigned i;
34 33
35 /* use small-window mapping if possible */ 34 /* use small-window mapping if possible */
@@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
64 * after we write it. 63 * after we write it.
65 */ 64 */
66 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); 65 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
67 junk = HUB_L(IIO_ITTE_GET(nasid, i)); 66 (void) HUB_L(IIO_ITTE_GET(nasid, i));
68 67
69 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); 68 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
70 } 69 }
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index c3d30a88daf3..1d1919a44e88 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -54,11 +54,8 @@ void __init setup_replication_mask(void)
54 54
55static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) 55static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
56{ 56{
57 cnodeid_t client_cnode;
58 kern_vars_t *kvp; 57 kern_vars_t *kvp;
59 58
60 client_cnode = NASID_TO_COMPACT_NODEID(client_nasid);
61
62 kvp = &hub_data(client_nasid)->kern_vars; 59 kvp = &hub_data(client_nasid)->kern_vars;
63 60
64 KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; 61 KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index c76151b56568..0904d4d30cb3 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void)
95static __init unsigned long dosample(void) 95static __init unsigned long dosample(void)
96{ 96{
97 u32 ct0, ct1; 97 u32 ct0, ct1;
98 volatile u8 msb, lsb; 98 volatile u8 msb;
99 99
100 /* Start the counter. */ 100 /* Start the counter. */
101 outb_p(0x34, 0x43); 101 outb_p(0x34, 0x43);
@@ -108,7 +108,7 @@ static __init unsigned long dosample(void)
108 /* Latch and spin until top byte of counter0 is zero */ 108 /* Latch and spin until top byte of counter0 is zero */
109 do { 109 do {
110 outb(0x00, 0x43); 110 outb(0x00, 0x43);
111 lsb = inb(0x40); 111 (void) inb(0x40);
112 msb = inb(0x40); 112 msb = inb(0x40);
113 ct1 = read_c0_count(); 113 ct1 = read_c0_count();
114 } while (msb); 114 } while (msb);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 55613e33e263..a6ae1cfad86c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
933 if (data && !(data & DABR_TRANSLATION)) 933 if (data && !(data & DABR_TRANSLATION))
934 return -EIO; 934 return -EIO;
935#ifdef CONFIG_HAVE_HW_BREAKPOINT 935#ifdef CONFIG_HAVE_HW_BREAKPOINT
936 if (ptrace_get_breakpoints(task) < 0)
937 return -ESRCH;
938
936 bp = thread->ptrace_bps[0]; 939 bp = thread->ptrace_bps[0];
937 if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { 940 if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
938 if (bp) { 941 if (bp) {
939 unregister_hw_breakpoint(bp); 942 unregister_hw_breakpoint(bp);
940 thread->ptrace_bps[0] = NULL; 943 thread->ptrace_bps[0] = NULL;
941 } 944 }
945 ptrace_put_breakpoints(task);
942 return 0; 946 return 0;
943 } 947 }
944 if (bp) { 948 if (bp) {
@@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
948 (DABR_DATA_WRITE | DABR_DATA_READ), 952 (DABR_DATA_WRITE | DABR_DATA_READ),
949 &attr.bp_type); 953 &attr.bp_type);
950 ret = modify_user_hw_breakpoint(bp, &attr); 954 ret = modify_user_hw_breakpoint(bp, &attr);
951 if (ret) 955 if (ret) {
956 ptrace_put_breakpoints(task);
952 return ret; 957 return ret;
958 }
953 thread->ptrace_bps[0] = bp; 959 thread->ptrace_bps[0] = bp;
960 ptrace_put_breakpoints(task);
954 thread->dabr = data; 961 thread->dabr = data;
955 return 0; 962 return 0;
956 } 963 }
@@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
965 ptrace_triggered, task); 972 ptrace_triggered, task);
966 if (IS_ERR(bp)) { 973 if (IS_ERR(bp)) {
967 thread->ptrace_bps[0] = NULL; 974 thread->ptrace_bps[0] = NULL;
975 ptrace_put_breakpoints(task);
968 return PTR_ERR(bp); 976 return PTR_ERR(bp);
969 } 977 }
970 978
979 ptrace_put_breakpoints(task);
980
971#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 981#endif /* CONFIG_HAVE_HW_BREAKPOINT */
972 982
973 /* Move contents to the DABR register */ 983 /* Move contents to the DABR register */
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 188272934cfb..104faa8aa23c 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
318 .end = mpc83xx_suspend_end, 318 .end = mpc83xx_suspend_end,
319}; 319};
320 320
321static struct of_device_id pmc_match[];
321static int pmc_probe(struct platform_device *ofdev) 322static int pmc_probe(struct platform_device *ofdev)
322{ 323{
324 const struct of_device_id *match;
323 struct device_node *np = ofdev->dev.of_node; 325 struct device_node *np = ofdev->dev.of_node;
324 struct resource res; 326 struct resource res;
325 struct pmc_type *type; 327 struct pmc_type *type;
326 int ret = 0; 328 int ret = 0;
327 329
328 if (!ofdev->dev.of_match) 330 match = of_match_device(pmc_match, &ofdev->dev);
331 if (!match)
329 return -EINVAL; 332 return -EINVAL;
330 333
331 type = ofdev->dev.of_match->data; 334 type = match->data;
332 335
333 if (!of_device_is_available(np)) 336 if (!of_device_is_available(np))
334 return -ENODEV; 337 return -ENODEV;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index d5679dc1e20f..01cd2f089512 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
304 return 0; 304 return 0;
305} 305}
306 306
307static const struct of_device_id fsl_of_msi_ids[];
307static int __devinit fsl_of_msi_probe(struct platform_device *dev) 308static int __devinit fsl_of_msi_probe(struct platform_device *dev)
308{ 309{
310 const struct of_device_id *match;
309 struct fsl_msi *msi; 311 struct fsl_msi *msi;
310 struct resource res; 312 struct resource res;
311 int err, i, j, irq_index, count; 313 int err, i, j, irq_index, count;
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
316 u32 offset; 318 u32 offset;
317 static const u32 all_avail[] = { 0, NR_MSI_IRQS }; 319 static const u32 all_avail[] = { 0, NR_MSI_IRQS };
318 320
319 if (!dev->dev.of_match) 321 match = of_match_device(fsl_of_msi_ids, &dev->dev);
322 if (!match)
320 return -EINVAL; 323 return -EINVAL;
321 features = dev->dev.of_match->data; 324 features = match->data;
322 325
323 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 326 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
324 327
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 72b2e2f2d32d..7e91c58072e2 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -9,9 +9,22 @@
9#define _ASM_S390_DIAG_H 9#define _ASM_S390_DIAG_H
10 10
11/* 11/*
12 * Diagnose 10: Release pages 12 * Diagnose 10: Release page range
13 */ 13 */
14extern void diag10(unsigned long addr); 14static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
15{
16 unsigned long start_addr, end_addr;
17
18 start_addr = start_pfn << PAGE_SHIFT;
19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
20
21 asm volatile(
22 "0: diag %0,%1,0x10\n"
23 "1:\n"
24 EX_TABLE(0b, 1b)
25 EX_TABLE(1b, 1b)
26 : : "a" (start_addr), "a" (end_addr));
27}
15 28
16/* 29/*
17 * Diagnose 14: Input spool file manipulation 30 * Diagnose 14: Input spool file manipulation
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a6f0e7cc9cde..8c277caa8d3a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
23#ifdef CONFIG_64BIT 23#ifdef CONFIG_64BIT
24 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 24 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
25#endif 25#endif
26 if (current->mm->context.alloc_pgste) { 26 if (current->mm && current->mm->context.alloc_pgste) {
27 /* 27 /*
28 * alloc_pgste indicates, that any NEW context will be created 28 * alloc_pgste indicates, that any NEW context will be created
29 * with extended page tables. The old context is unchanged. The 29 * with extended page tables. The old context is unchanged. The
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index c032d11da8a1..8237fc07ac79 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -9,27 +9,6 @@
9#include <asm/diag.h> 9#include <asm/diag.h>
10 10
11/* 11/*
12 * Diagnose 10: Release pages
13 */
14void diag10(unsigned long addr)
15{
16 if (addr >= 0x7ff00000)
17 return;
18 asm volatile(
19#ifdef CONFIG_64BIT
20 " sam31\n"
21 " diag %0,%0,0x10\n"
22 "0: sam64\n"
23#else
24 " diag %0,%0,0x10\n"
25 "0:\n"
26#endif
27 EX_TABLE(0b, 0b)
28 : : "a" (addr));
29}
30EXPORT_SYMBOL(diag10);
31
32/*
33 * Diagnose 14: Input spool file manipulation 12 * Diagnose 14: Input spool file manipulation
34 */ 13 */
35int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) 14int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c83726c9fe03..3d4a78fc1adc 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = {
672 { "rp", 0x77, INSTR_S_RD }, 672 { "rp", 0x77, INSTR_S_RD },
673 { "stcke", 0x78, INSTR_S_RD }, 673 { "stcke", 0x78, INSTR_S_RD },
674 { "sacf", 0x79, INSTR_S_RD }, 674 { "sacf", 0x79, INSTR_S_RD },
675 { "spp", 0x80, INSTR_S_RD },
675 { "stsi", 0x7d, INSTR_S_RD }, 676 { "stsi", 0x7d, INSTR_S_RD },
676 { "srnm", 0x99, INSTR_S_RD }, 677 { "srnm", 0x99, INSTR_S_RD },
677 { "stfpc", 0x9c, INSTR_S_RD }, 678 { "stfpc", 0x9c, INSTR_S_RD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 648f64239a9d..1b67fc6ebdc2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -836,7 +836,7 @@ restart_base:
836 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 836 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
837 basr %r14,0 837 basr %r14,0
838 l %r14,restart_addr-.(%r14) 838 l %r14,restart_addr-.(%r14)
839 br %r14 # branch to start_secondary 839 basr %r14,%r14 # branch to start_secondary
840restart_addr: 840restart_addr:
841 .long start_secondary 841 .long start_secondary
842 .align 8 842 .align 8
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9d3603d6c511..9fd864563499 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -841,7 +841,7 @@ restart_base:
841 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 841 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
842 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 842 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
843 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 843 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
844 jg start_secondary 844 brasl %r14,start_secondary
845 .align 8 845 .align 8
846restart_vtime: 846restart_vtime:
847 .long 0x7fffffff,0xffffffff 847 .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index c66ffd8dbbb7..1f1dba9dcf58 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
91 } else 91 } else
92 free_page((unsigned long) npa); 92 free_page((unsigned long) npa);
93 } 93 }
94 diag10(addr); 94 diag10_range(addr >> PAGE_SHIFT, 1);
95 pa->pages[pa->index++] = addr; 95 pa->pages[pa->index++] = addr;
96 (*counter)++; 96 (*counter)++;
97 spin_unlock(&cmm_lock); 97 spin_unlock(&cmm_lock);
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 4952872d6f0a..33cbd373cce4 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1021,20 +1021,14 @@ deallocate_exit:
1021 return rc; 1021 return rc;
1022} 1022}
1023 1023
1024long hwsampler_query_min_interval(void) 1024unsigned long hwsampler_query_min_interval(void)
1025{ 1025{
1026 if (min_sampler_rate) 1026 return min_sampler_rate;
1027 return min_sampler_rate;
1028 else
1029 return -EINVAL;
1030} 1027}
1031 1028
1032long hwsampler_query_max_interval(void) 1029unsigned long hwsampler_query_max_interval(void)
1033{ 1030{
1034 if (max_sampler_rate) 1031 return max_sampler_rate;
1035 return max_sampler_rate;
1036 else
1037 return -EINVAL;
1038} 1032}
1039 1033
1040unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) 1034unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 8c72b59316b5..1912f3bb190c 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -102,8 +102,8 @@ int hwsampler_setup(void);
102int hwsampler_shutdown(void); 102int hwsampler_shutdown(void);
103int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); 103int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
104int hwsampler_deallocate(void); 104int hwsampler_deallocate(void);
105long hwsampler_query_min_interval(void); 105unsigned long hwsampler_query_min_interval(void);
106long hwsampler_query_max_interval(void); 106unsigned long hwsampler_query_max_interval(void);
107int hwsampler_start_all(unsigned long interval); 107int hwsampler_start_all(unsigned long interval);
108int hwsampler_stop_all(void); 108int hwsampler_stop_all(void);
109int hwsampler_deactivate(unsigned int cpu); 109int hwsampler_deactivate(unsigned int cpu);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index c63d7e58352b..5995e9bc72d9 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
145 * create hwsampler files only if hwsampler_setup() succeeds. 145 * create hwsampler files only if hwsampler_setup() succeeds.
146 */ 146 */
147 oprofile_min_interval = hwsampler_query_min_interval(); 147 oprofile_min_interval = hwsampler_query_min_interval();
148 if (oprofile_min_interval < 0) { 148 if (oprofile_min_interval == 0)
149 oprofile_min_interval = 0;
150 return -ENODEV; 149 return -ENODEV;
151 }
152 oprofile_max_interval = hwsampler_query_max_interval(); 150 oprofile_max_interval = hwsampler_query_max_interval();
153 if (oprofile_max_interval < 0) { 151 if (oprofile_max_interval == 0)
154 oprofile_max_interval = 0;
155 return -ENODEV; 152 return -ENODEV;
156 }
157 153
158 if (oprofile_timer_init(ops)) 154 if (oprofile_timer_init(ops))
159 return -ENODEV; 155 return -ENODEV;
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 2130ca674e9b..3d7b209b2178 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -117,7 +117,11 @@ void user_enable_single_step(struct task_struct *child)
117 117
118 set_tsk_thread_flag(child, TIF_SINGLESTEP); 118 set_tsk_thread_flag(child, TIF_SINGLESTEP);
119 119
120 if (ptrace_get_breakpoints(child) < 0)
121 return;
122
120 set_single_step(child, pc); 123 set_single_step(child, pc);
124 ptrace_put_breakpoints(child);
121} 125}
122 126
123void user_disable_single_step(struct task_struct *child) 127void user_disable_single_step(struct task_struct *child)
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index f679c57644d5..1e34f29e58bb 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
165 return 0; 165 return 0;
166} 166}
167 167
168static struct of_device_id __initdata apc_match[] = { 168static struct of_device_id apc_match[] = {
169 { 169 {
170 .name = APC_OBPNAME, 170 .name = APC_OBPNAME,
171 }, 171 },
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 948068a083fc..d1840dbdaa2f 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
452 sabre_scan_bus(pbm, &op->dev); 452 sabre_scan_bus(pbm, &op->dev);
453} 453}
454 454
455static const struct of_device_id sabre_match[];
455static int __devinit sabre_probe(struct platform_device *op) 456static int __devinit sabre_probe(struct platform_device *op)
456{ 457{
458 const struct of_device_id *match;
457 const struct linux_prom64_registers *pr_regs; 459 const struct linux_prom64_registers *pr_regs;
458 struct device_node *dp = op->dev.of_node; 460 struct device_node *dp = op->dev.of_node;
459 struct pci_pbm_info *pbm; 461 struct pci_pbm_info *pbm;
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op)
463 const u32 *vdma; 465 const u32 *vdma;
464 u64 clear_irq; 466 u64 clear_irq;
465 467
466 hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); 468 match = of_match_device(sabre_match, &op->dev);
469 hummingbird_p = match && (match->data != NULL);
467 if (!hummingbird_p) { 470 if (!hummingbird_p) {
468 struct device_node *cpu_dp; 471 struct device_node *cpu_dp;
469 472
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index fecfcb2063c8..283fbc329a43 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1458,11 +1458,15 @@ out_err:
1458 return err; 1458 return err;
1459} 1459}
1460 1460
1461static const struct of_device_id schizo_match[];
1461static int __devinit schizo_probe(struct platform_device *op) 1462static int __devinit schizo_probe(struct platform_device *op)
1462{ 1463{
1463 if (!op->dev.of_match) 1464 const struct of_device_id *match;
1465
1466 match = of_match_device(schizo_match, &op->dev);
1467 if (!match)
1464 return -EINVAL; 1468 return -EINVAL;
1465 return __schizo_init(op, (unsigned long) op->dev.of_match->data); 1469 return __schizo_init(op, (unsigned long)match->data);
1466} 1470}
1467 1471
1468/* The ordering of this table is very important. Some Tomatillo 1472/* The ordering of this table is very important. Some Tomatillo
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 93d7b4465f8d..6a585d393580 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
69 return 0; 69 return 0;
70} 70}
71 71
72static struct of_device_id __initdata pmc_match[] = { 72static struct of_device_id pmc_match[] = {
73 { 73 {
74 .name = PMC_OBPNAME, 74 .name = PMC_OBPNAME,
75 }, 75 },
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..850a1360c0d6 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
53void __cpuinit smp_store_cpu_info(int id) 53void __cpuinit smp_store_cpu_info(int id)
54{ 54{
55 int cpu_node; 55 int cpu_node;
56 int mid;
56 57
57 cpu_data(id).udelay_val = loops_per_jiffy; 58 cpu_data(id).udelay_val = loops_per_jiffy;
58 59
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
60 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 61 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
61 "clock-frequency", 0); 62 "clock-frequency", 0);
62 cpu_data(id).prom_node = cpu_node; 63 cpu_data(id).prom_node = cpu_node;
63 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 64 mid = cpu_get_hwmid(cpu_node);
64 65
65 if (cpu_data(id).mid < 0) 66 if (mid < 0) {
66 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 67 printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
68 mid = 0;
69 }
70 cpu_data(id).mid = mid;
67} 71}
68 72
69void __init smp_cpus_done(unsigned int max_cpus) 73void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 4e236391b635..96046a4024c2 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
168 return 0; 168 return 0;
169} 169}
170 170
171static struct of_device_id __initdata clock_match[] = { 171static struct of_device_id clock_match[] = {
172 { 172 {
173 .name = "eeprom", 173 .name = "eeprom",
174 }, 174 },
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 3632cb34e914..0084c3361e15 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -289,10 +289,16 @@ cc_end_cruft:
289 289
290 /* Also, handle the alignment code out of band. */ 290 /* Also, handle the alignment code out of band. */
291cc_dword_align: 291cc_dword_align:
292 cmp %g1, 6 292 cmp %g1, 16
293 bl,a ccte 293 bge 1f
294 srl %g1, 1, %o3
2952: cmp %o3, 0
296 be,a ccte
294 andcc %g1, 0xf, %o3 297 andcc %g1, 0xf, %o3
295 andcc %o0, 0x1, %g0 298 andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
299 be,a 2b
300 srl %o3, 1, %o3
3011: andcc %o0, 0x1, %g0
296 bne ccslow 302 bne ccslow
297 andcc %o0, 0x2, %g0 303 andcc %o0, 0x2, %g0
298 be 1f 304 be 1f
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 6ea77979531c..42827cafa6af 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -5,6 +5,7 @@
5 5
6#include <stdio.h> 6#include <stdio.h>
7#include <stdlib.h> 7#include <stdlib.h>
8#include <unistd.h>
8#include <errno.h> 9#include <errno.h>
9#include <signal.h> 10#include <signal.h>
10#include <string.h> 11#include <string.h>
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len)
75 host.release, host.version, host.machine); 76 host.release, host.version, host.machine);
76} 77}
77 78
79/*
80 * We cannot use glibc's abort(). It makes use of tgkill() which
81 * has no effect within UML's kernel threads.
82 * After that glibc would execute an invalid instruction to kill
83 * the calling process and UML crashes with SIGSEGV.
84 */
85static inline void __attribute__ ((noreturn)) uml_abort(void)
86{
87 sigset_t sig;
88
89 fflush(NULL);
90
91 if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
92 sigprocmask(SIG_UNBLOCK, &sig, 0);
93
94 for (;;)
95 if (kill(getpid(), SIGABRT) < 0)
96 exit(127);
97}
98
78void os_dump_core(void) 99void os_dump_core(void)
79{ 100{
80 int pid; 101 int pid;
@@ -116,5 +137,5 @@ void os_dump_core(void)
116 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) 137 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
117 os_kill_ptraced_process(pid, 0); 138 os_kill_ptraced_process(pid, 0);
118 139
119 abort(); 140 uml_abort();
120} 141}
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index d87988bacf3e..34595d5e1038 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -78,6 +78,7 @@
78#define APIC_DEST_LOGICAL 0x00800 78#define APIC_DEST_LOGICAL 0x00800
79#define APIC_DEST_PHYSICAL 0x00000 79#define APIC_DEST_PHYSICAL 0x00000
80#define APIC_DM_FIXED 0x00000 80#define APIC_DM_FIXED 0x00000
81#define APIC_DM_FIXED_MASK 0x00700
81#define APIC_DM_LOWEST 0x00100 82#define APIC_DM_LOWEST 0x00100
82#define APIC_DM_SMI 0x00200 83#define APIC_DM_SMI 0x00200
83#define APIC_DM_REMRD 0x00300 84#define APIC_DM_REMRD 0x00300
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7db7723d1f32..d56187c6b838 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
299/* Install a pte for a particular vaddr in kernel space. */ 299/* Install a pte for a particular vaddr in kernel space. */
300void set_pte_vaddr(unsigned long vaddr, pte_t pte); 300void set_pte_vaddr(unsigned long vaddr, pte_t pte);
301 301
302extern void native_pagetable_reserve(u64 start, u64 end);
302#ifdef CONFIG_X86_32 303#ifdef CONFIG_X86_32
303extern void native_pagetable_setup_start(pgd_t *base); 304extern void native_pagetable_setup_start(pgd_t *base);
304extern void native_pagetable_setup_done(pgd_t *base); 305extern void native_pagetable_setup_done(pgd_t *base);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 3e094af443c3..130f1eeee5fe 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -94,6 +94,8 @@
94/* after this # consecutive successes, bump up the throttle if it was lowered */ 94/* after this # consecutive successes, bump up the throttle if it was lowered */
95#define COMPLETE_THRESHOLD 5 95#define COMPLETE_THRESHOLD 5
96 96
97#define UV_LB_SUBNODEID 0x10
98
97/* 99/*
98 * number of entries in the destination side payload queue 100 * number of entries in the destination side payload queue
99 */ 101 */
@@ -124,7 +126,7 @@
124 * The distribution specification (32 bytes) is interpreted as a 256-bit 126 * The distribution specification (32 bytes) is interpreted as a 256-bit
125 * distribution vector. Adjacent bits correspond to consecutive even numbered 127 * distribution vector. Adjacent bits correspond to consecutive even numbered
126 * nodeIDs. The result of adding the index of a given bit to the 15-bit 128 * nodeIDs. The result of adding the index of a given bit to the 15-bit
127 * 'base_dest_nodeid' field of the header corresponds to the 129 * 'base_dest_nasid' field of the header corresponds to the
128 * destination nodeID associated with that specified bit. 130 * destination nodeID associated with that specified bit.
129 */ 131 */
130struct bau_target_uvhubmask { 132struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
176struct bau_msg_header { 178struct bau_msg_header {
177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 179 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
178 /* bits 5:0 */ 180 /* bits 5:0 */
179 unsigned int base_dest_nodeid:15; /* nasid of the */ 181 unsigned int base_dest_nasid:15; /* nasid of the */
180 /* bits 20:6 */ /* first bit in uvhub map */ 182 /* bits 20:6 */ /* first bit in uvhub map */
181 unsigned int command:8; /* message type */ 183 unsigned int command:8; /* message type */
182 /* bits 28:21 */ 184 /* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
378 unsigned long d_rcanceled; /* number of messages canceled by resets */ 380 unsigned long d_rcanceled; /* number of messages canceled by resets */
379}; 381};
380 382
383struct hub_and_pnode {
384 short uvhub;
385 short pnode;
386};
381/* 387/*
382 * one per-cpu; to locate the software tables 388 * one per-cpu; to locate the software tables
383 */ 389 */
@@ -399,10 +405,12 @@ struct bau_control {
399 int baudisabled; 405 int baudisabled;
400 int set_bau_off; 406 int set_bau_off;
401 short cpu; 407 short cpu;
408 short osnode;
402 short uvhub_cpu; 409 short uvhub_cpu;
403 short uvhub; 410 short uvhub;
404 short cpus_in_socket; 411 short cpus_in_socket;
405 short cpus_in_uvhub; 412 short cpus_in_uvhub;
413 short partition_base_pnode;
406 unsigned short message_number; 414 unsigned short message_number;
407 unsigned short uvhub_quiesce; 415 unsigned short uvhub_quiesce;
408 short socket_acknowledge_count[DEST_Q_SIZE]; 416 short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
422 int congested_period; 430 int congested_period;
423 cycles_t period_time; 431 cycles_t period_time;
424 long period_requests; 432 long period_requests;
433 struct hub_and_pnode *target_hub_and_pnode;
425}; 434};
426 435
427static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 436static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
428{ 437{
429 return constant_test_bit(uvhub, &dstp->bits[0]); 438 return constant_test_bit(uvhub, &dstp->bits[0]);
430} 439}
431static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) 440static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
432{ 441{
433 __set_bit(uvhub, &dstp->bits[0]); 442 __set_bit(pnode, &dstp->bits[0]);
434} 443}
435static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, 444static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
436 int nbits) 445 int nbits)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index a501741c2335..4298002d0c83 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -398,6 +398,8 @@ struct uv_blade_info {
398 unsigned short nr_online_cpus; 398 unsigned short nr_online_cpus;
399 unsigned short pnode; 399 unsigned short pnode;
400 short memory_nid; 400 short memory_nid;
401 spinlock_t nmi_lock;
402 unsigned long nmi_count;
401}; 403};
402extern struct uv_blade_info *uv_blade_info; 404extern struct uv_blade_info *uv_blade_info;
403extern short *uv_node_to_blade; 405extern short *uv_node_to_blade;
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 20cafeac7455..f5bb64a823d7 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
1099 } s; 1099 } s;
1100}; 1100};
1101 1101
1102/* ========================================================================= */
1103/* UVH_SCRATCH5 */
1104/* ========================================================================= */
1105#define UVH_SCRATCH5 0x2d0200UL
1106#define UVH_SCRATCH5_32 0x00778
1107
1108#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1109#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
1110union uvh_scratch5_u {
1111 unsigned long v;
1112 struct uvh_scratch5_s {
1113 unsigned long scratch5 : 64; /* RW, W1CS */
1114 } s;
1115};
1102 1116
1103#endif /* __ASM_UV_MMRS_X86_H__ */ 1117#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 643ebf2e2ad8..d3d859035af9 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -68,6 +68,17 @@ struct x86_init_oem {
68}; 68};
69 69
70/** 70/**
71 * struct x86_init_mapping - platform specific initial kernel pagetable setup
72 * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
73 *
74 * For more details on the purpose of this hook, look in
75 * init_memory_mapping and the commit that added it.
76 */
77struct x86_init_mapping {
78 void (*pagetable_reserve)(u64 start, u64 end);
79};
80
81/**
71 * struct x86_init_paging - platform specific paging functions 82 * struct x86_init_paging - platform specific paging functions
72 * @pagetable_setup_start: platform specific pre paging_init() call 83 * @pagetable_setup_start: platform specific pre paging_init() call
73 * @pagetable_setup_done: platform specific post paging_init() call 84 * @pagetable_setup_done: platform specific post paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
123 struct x86_init_mpparse mpparse; 134 struct x86_init_mpparse mpparse;
124 struct x86_init_irqs irqs; 135 struct x86_init_irqs irqs;
125 struct x86_init_oem oem; 136 struct x86_init_oem oem;
137 struct x86_init_mapping mapping;
126 struct x86_init_paging paging; 138 struct x86_init_paging paging;
127 struct x86_init_timers timers; 139 struct x86_init_timers timers;
128 struct x86_init_iommu iommu; 140 struct x86_init_iommu iommu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 33b10a0fc095..7acd2d2ac965 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -37,6 +37,13 @@
37#include <asm/smp.h> 37#include <asm/smp.h>
38#include <asm/x86_init.h> 38#include <asm/x86_init.h>
39#include <asm/emergency-restart.h> 39#include <asm/emergency-restart.h>
40#include <asm/nmi.h>
41
42/* The BMC sets a bit in this MMR before sending an NMI */
43#define UVH_NMI_MMR UVH_SCRATCH5
44#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
45#define UV_NMI_PENDING_MASK (1UL << 63)
46DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
40 47
41DEFINE_PER_CPU(int, x2apic_extra_bits); 48DEFINE_PER_CPU(int, x2apic_extra_bits);
42 49
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
642 */ 649 */
643int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) 650int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
644{ 651{
652 unsigned long real_uv_nmi;
653 int bid;
654
645 if (reason != DIE_NMIUNKNOWN) 655 if (reason != DIE_NMIUNKNOWN)
646 return NOTIFY_OK; 656 return NOTIFY_OK;
647 657
648 if (in_crash_kexec) 658 if (in_crash_kexec)
649 /* do nothing if entering the crash kernel */ 659 /* do nothing if entering the crash kernel */
650 return NOTIFY_OK; 660 return NOTIFY_OK;
661
651 /* 662 /*
652 * Use a lock so only one cpu prints at a time 663 * Each blade has an MMR that indicates when an NMI has been sent
653 * to prevent intermixed output. 664 * to cpus on the blade. If an NMI is detected, atomically
665 * clear the MMR and update a per-blade NMI count used to
666 * cause each cpu on the blade to notice a new NMI.
667 */
668 bid = uv_numa_blade_id();
669 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
670
671 if (unlikely(real_uv_nmi)) {
672 spin_lock(&uv_blade_info[bid].nmi_lock);
673 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
674 if (real_uv_nmi) {
675 uv_blade_info[bid].nmi_count++;
676 uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
677 }
678 spin_unlock(&uv_blade_info[bid].nmi_lock);
679 }
680
681 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
682 return NOTIFY_DONE;
683
684 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
685
686 /*
687 * Use a lock so only one cpu prints at a time.
688 * This prevents intermixed output.
654 */ 689 */
655 spin_lock(&uv_nmi_lock); 690 spin_lock(&uv_nmi_lock);
656 pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); 691 pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
657 dump_stack(); 692 dump_stack();
658 spin_unlock(&uv_nmi_lock); 693 spin_unlock(&uv_nmi_lock);
659 694
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
661} 696}
662 697
663static struct notifier_block uv_dump_stack_nmi_nb = { 698static struct notifier_block uv_dump_stack_nmi_nb = {
664 .notifier_call = uv_handle_nmi 699 .notifier_call = uv_handle_nmi,
700 .priority = NMI_LOCAL_LOW_PRIOR - 1,
665}; 701};
666 702
667void uv_register_nmi_notifier(void) 703void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
720 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); 756 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
721 757
722 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 758 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
723 uv_blade_info = kmalloc(bytes, GFP_KERNEL); 759 uv_blade_info = kzalloc(bytes, GFP_KERNEL);
724 BUG_ON(!uv_blade_info); 760 BUG_ON(!uv_blade_info);
761
725 for (blade = 0; blade < uv_num_possible_blades(); blade++) 762 for (blade = 0; blade < uv_num_possible_blades(); blade++)
726 uv_blade_info[blade].memory_nid = -1; 763 uv_blade_info[blade].memory_nid = -1;
727 764
@@ -747,6 +784,7 @@ void __init uv_system_init(void)
747 uv_blade_info[blade].pnode = pnode; 784 uv_blade_info[blade].pnode = pnode;
748 uv_blade_info[blade].nr_possible_cpus = 0; 785 uv_blade_info[blade].nr_possible_cpus = 0;
749 uv_blade_info[blade].nr_online_cpus = 0; 786 uv_blade_info[blade].nr_online_cpus = 0;
787 spin_lock_init(&uv_blade_info[blade].nmi_lock);
750 max_pnode = max(pnode, max_pnode); 788 max_pnode = max(pnode, max_pnode);
751 blade++; 789 blade++;
752 } 790 }
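
The uv_handle_nmi() change above is a test, lock, re-test sequence: one cpu per blade wins the race to clear the MMR and bump the per-blade count, and every cpu then compares that count with its per-cpu copy to decide whether to dump its stack. Reduced to its core (a sketch using the names from the hunk, not a drop-in replacement):

    /* sketch of the per-blade NMI claim */
    if (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK) {
            spin_lock(&uv_blade_info[bid].nmi_lock);
            /* re-check under the lock: another cpu may have cleared it */
            if (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK) {
                    uv_blade_info[bid].nmi_count++;
                    uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
            }
            spin_unlock(&uv_blade_info[bid].nmi_lock);
    }
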
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb9eb29a52dd..6f9d1f6063e9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
613#endif 613#endif
614 614
615 /* As a rule processors have APIC timer running in deep C states */ 615 /* As a rule processors have APIC timer running in deep C states */
616 if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 616 if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
617 set_cpu_cap(c, X86_FEATURE_ARAT); 617 set_cpu_cap(c, X86_FEATURE_ARAT);
618 618
619 /* 619 /*
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
698 */ 698 */
699 699
700const int amd_erratum_400[] = 700const int amd_erratum_400[] =
701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), 701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
703EXPORT_SYMBOL_GPL(amd_erratum_400); 703EXPORT_SYMBOL_GPL(amd_erratum_400);
704 704
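
The amd_erratum_400 change above narrows the OSVW fallback to family 0x0f from model 0x41, stepping 2 upward. The AMD_MODEL_RANGE()/AMD_OSVW_ERRATUM() macros pack family, model and stepping into plain ints; as a rough, purely illustrative model of the range test they enable (the helper name and packing here are hypothetical, not the kernel's encoding):

    #include <stdio.h>

    static int in_model_range(int family, int model, int stepping,
                              int rf, int mlo, int slo, int mhi, int shi)
    {
            int ms = (model << 4) | stepping;   /* model/stepping as one ordered value */
            int lo = (mlo   << 4) | slo;
            int hi = (mhi   << 4) | shi;

            return family == rf && ms >= lo && ms <= hi;
    }

    int main(void)
    {
            /* family 0x0f, model 0x41, stepping 2 sits at the start of the
             * 0x41/0x2 .. 0xff/0xf window used for erratum 400 above */
            printf("%d\n", in_model_range(0x0f, 0x41, 0x2,
                                          0x0f, 0x41, 0x2, 0xff, 0xf));
            return 0;
    }
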
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 167f97b5596e..bb0adad35143 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -509,6 +509,7 @@ recurse:
509out_free: 509out_free:
510 if (b) { 510 if (b) {
511 kobject_put(&b->kobj); 511 kobject_put(&b->kobj);
512 list_del(&b->miscj);
512 kfree(b); 513 kfree(b);
513 } 514 }
514 return err; 515 return err;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6f8c5e9da97f..0f034460260d 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
446 */ 446 */
447 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 447 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
448 448
449 h = lvtthmr_init;
449 /* 450 /*
450 * The initial value of thermal LVT entries on all APs always reads 451 * The initial value of thermal LVT entries on all APs always reads
451 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI 452 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
452 * sequence to them and LVT registers are reset to 0s except for 453 * sequence to them and LVT registers are reset to 0s except for
453 * the mask bits which are set to 1s when APs receive INIT IPI. 454 * the mask bits which are set to 1s when APs receive INIT IPI.
454 * Always restore the value that BIOS has programmed on AP based on 455 * If BIOS takes over the thermal interrupt and sets its interrupt
455 * BSP's info we saved since BIOS is always setting the same value 456 * delivery mode to SMI (not fixed), it restores the value that the
456 * for all threads/cores 457 * BIOS has programmed on AP based on BSP's info we saved since BIOS
458 * is always setting the same value for all threads/cores.
457 */ 459 */
458 apic_write(APIC_LVTTHMR, lvtthmr_init); 460 if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
461 apic_write(APIC_LVTTHMR, lvtthmr_init);
459 462
460 h = lvtthmr_init;
461 463
462 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { 464 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
463 printk(KERN_DEBUG 465 printk(KERN_DEBUG
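
The therm_throt.c reordering above reads the live LVTTHMR value into h first and only rewrites the BIOS-saved lvtthmr_init when the entry's delivery mode is not "fixed", i.e. when the BIOS really has routed thermal interrupts to SMI. A stand-alone check of that condition (constant values reproduced from memory of apicdef.h purely so this builds; the kernel code uses the real definitions):

    #include <stdio.h>

    #define APIC_DM_FIXED      0x00000
    #define APIC_DM_FIXED_MASK 0x00700   /* delivery-mode field of an LVT entry */
    #define APIC_DM_SMI        0x00200

    int main(void)
    {
            unsigned int lvt_smi   = 0x10000 | APIC_DM_SMI;  /* masked, BIOS-owned SMI entry */
            unsigned int lvt_fixed = 0x000e9;                /* ordinary fixed-mode vector */

            printf("smi entry rewritten:   %d\n",
                   (lvt_smi & APIC_DM_FIXED_MASK) != APIC_DM_FIXED);
            printf("fixed entry rewritten: %d\n",
                   (lvt_fixed & APIC_DM_FIXED_MASK) != APIC_DM_FIXED);
            return 0;
    }
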
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e61539b07d2c..447a28de6f09 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -184,26 +184,23 @@ static __initconst const u64 snb_hw_cache_event_ids
184 }, 184 },
185 }, 185 },
186 [ C(LL ) ] = { 186 [ C(LL ) ] = {
187 /*
188 * TBD: Need Off-core Response Performance Monitoring support
189 */
190 [ C(OP_READ) ] = { 187 [ C(OP_READ) ] = {
191 /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ 188 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
192 [ C(RESULT_ACCESS) ] = 0x01b7, 189 [ C(RESULT_ACCESS) ] = 0x01b7,
193 /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ 190 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
194 [ C(RESULT_MISS) ] = 0x01bb, 191 [ C(RESULT_MISS) ] = 0x01b7,
195 }, 192 },
196 [ C(OP_WRITE) ] = { 193 [ C(OP_WRITE) ] = {
197 /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ 194 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
198 [ C(RESULT_ACCESS) ] = 0x01b7, 195 [ C(RESULT_ACCESS) ] = 0x01b7,
199 /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ 196 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
200 [ C(RESULT_MISS) ] = 0x01bb, 197 [ C(RESULT_MISS) ] = 0x01b7,
201 }, 198 },
202 [ C(OP_PREFETCH) ] = { 199 [ C(OP_PREFETCH) ] = {
203 /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ 200 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
204 [ C(RESULT_ACCESS) ] = 0x01b7, 201 [ C(RESULT_ACCESS) ] = 0x01b7,
205 /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ 202 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
206 [ C(RESULT_MISS) ] = 0x01bb, 203 [ C(RESULT_MISS) ] = 0x01b7,
207 }, 204 },
208 }, 205 },
209 [ C(DTLB) ] = { 206 [ C(DTLB) ] = {
@@ -285,26 +282,26 @@ static __initconst const u64 westmere_hw_cache_event_ids
285 }, 282 },
286 [ C(LL ) ] = { 283 [ C(LL ) ] = {
287 [ C(OP_READ) ] = { 284 [ C(OP_READ) ] = {
288 /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ 285 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
289 [ C(RESULT_ACCESS) ] = 0x01b7, 286 [ C(RESULT_ACCESS) ] = 0x01b7,
290 /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ 287 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
291 [ C(RESULT_MISS) ] = 0x01bb, 288 [ C(RESULT_MISS) ] = 0x01b7,
292 }, 289 },
293 /* 290 /*
294 * Use RFO, not WRITEBACK, because a write miss would typically occur 291 * Use RFO, not WRITEBACK, because a write miss would typically occur
295 * on RFO. 292 * on RFO.
296 */ 293 */
297 [ C(OP_WRITE) ] = { 294 [ C(OP_WRITE) ] = {
298 /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ 295 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
299 [ C(RESULT_ACCESS) ] = 0x01bb, 296 [ C(RESULT_ACCESS) ] = 0x01b7,
300 /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ 297 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
301 [ C(RESULT_MISS) ] = 0x01b7, 298 [ C(RESULT_MISS) ] = 0x01b7,
302 }, 299 },
303 [ C(OP_PREFETCH) ] = { 300 [ C(OP_PREFETCH) ] = {
304 /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ 301 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
305 [ C(RESULT_ACCESS) ] = 0x01b7, 302 [ C(RESULT_ACCESS) ] = 0x01b7,
306 /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ 303 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
307 [ C(RESULT_MISS) ] = 0x01bb, 304 [ C(RESULT_MISS) ] = 0x01b7,
308 }, 305 },
309 }, 306 },
310 [ C(DTLB) ] = { 307 [ C(DTLB) ] = {
@@ -352,16 +349,36 @@ static __initconst const u64 westmere_hw_cache_event_ids
352}; 349};
353 350
354/* 351/*
355 * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 352 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
353 * See IA32 SDM Vol 3B 30.6.1.3
356 */ 354 */
357 355
358#define DMND_DATA_RD (1 << 0) 356#define NHM_DMND_DATA_RD (1 << 0)
359#define DMND_RFO (1 << 1) 357#define NHM_DMND_RFO (1 << 1)
360#define DMND_WB (1 << 3) 358#define NHM_DMND_IFETCH (1 << 2)
361#define PF_DATA_RD (1 << 4) 359#define NHM_DMND_WB (1 << 3)
362#define PF_DATA_RFO (1 << 5) 360#define NHM_PF_DATA_RD (1 << 4)
363#define RESP_UNCORE_HIT (1 << 8) 361#define NHM_PF_DATA_RFO (1 << 5)
364#define RESP_MISS (0xf600) /* non uncore hit */ 362#define NHM_PF_IFETCH (1 << 6)
363#define NHM_OFFCORE_OTHER (1 << 7)
364#define NHM_UNCORE_HIT (1 << 8)
365#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
366#define NHM_OTHER_CORE_HITM (1 << 10)
367 /* reserved */
368#define NHM_REMOTE_CACHE_FWD (1 << 12)
369#define NHM_REMOTE_DRAM (1 << 13)
370#define NHM_LOCAL_DRAM (1 << 14)
371#define NHM_NON_DRAM (1 << 15)
372
373#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
374
375#define NHM_DMND_READ (NHM_DMND_DATA_RD)
376#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
377#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
378
379#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
380#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
381#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
365 382
366static __initconst const u64 nehalem_hw_cache_extra_regs 383static __initconst const u64 nehalem_hw_cache_extra_regs
367 [PERF_COUNT_HW_CACHE_MAX] 384 [PERF_COUNT_HW_CACHE_MAX]
@@ -370,16 +387,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
370{ 387{
371 [ C(LL ) ] = { 388 [ C(LL ) ] = {
372 [ C(OP_READ) ] = { 389 [ C(OP_READ) ] = {
373 [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, 390 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
374 [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, 391 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
375 }, 392 },
376 [ C(OP_WRITE) ] = { 393 [ C(OP_WRITE) ] = {
377 [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, 394 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
378 [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, 395 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
379 }, 396 },
380 [ C(OP_PREFETCH) ] = { 397 [ C(OP_PREFETCH) ] = {
381 [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, 398 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
382 [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, 399 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
383 }, 400 },
384 } 401 }
385}; 402};
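
With the renamed NHM_* bits, each LL-cache extra-reg value above is just an OR of a request-type mask (read, write or prefetch) and a response-type mask (L3 access or L3 miss). A small self-contained check of what the three pairs expand to, with the defines copied verbatim from the hunk:

    #include <stdio.h>

    #define NHM_DMND_DATA_RD        (1 << 0)
    #define NHM_DMND_RFO            (1 << 1)
    #define NHM_DMND_WB             (1 << 3)
    #define NHM_PF_DATA_RD          (1 << 4)
    #define NHM_PF_DATA_RFO         (1 << 5)
    #define NHM_UNCORE_HIT          (1 << 8)
    #define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
    #define NHM_OTHER_CORE_HITM     (1 << 10)
    #define NHM_REMOTE_CACHE_FWD    (1 << 12)
    #define NHM_REMOTE_DRAM         (1 << 13)
    #define NHM_LOCAL_DRAM          (1 << 14)
    #define NHM_NON_DRAM            (1 << 15)

    #define NHM_ALL_DRAM      (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
    #define NHM_DMND_READ     (NHM_DMND_DATA_RD)
    #define NHM_DMND_WRITE    (NHM_DMND_RFO|NHM_DMND_WB)
    #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
    #define NHM_L3_HIT  (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
    #define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
    #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)

    int main(void)
    {
            printf("read:     access=%#x miss=%#x\n",
                   NHM_DMND_READ|NHM_L3_ACCESS,     NHM_DMND_READ|NHM_L3_MISS);
            printf("write:    access=%#x miss=%#x\n",
                   NHM_DMND_WRITE|NHM_L3_ACCESS,    NHM_DMND_WRITE|NHM_L3_MISS);
            printf("prefetch: access=%#x miss=%#x\n",
                   NHM_DMND_PREFETCH|NHM_L3_ACCESS, NHM_DMND_PREFETCH|NHM_L3_MISS);
            return 0;
    }
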
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c969fd9d1566..f1a6244d7d93 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1183 struct pt_regs *regs) 1183 struct pt_regs *regs)
1184{ 1184{
1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1186 unsigned long flags;
1186 1187
1187 /* This is possible if op is under delayed unoptimizing */ 1188 /* This is possible if op is under delayed unoptimizing */
1188 if (kprobe_disabled(&op->kp)) 1189 if (kprobe_disabled(&op->kp))
1189 return; 1190 return;
1190 1191
1191 preempt_disable(); 1192 local_irq_save(flags);
1192 if (kprobe_running()) { 1193 if (kprobe_running()) {
1193 kprobes_inc_nmissed_count(&op->kp); 1194 kprobes_inc_nmissed_count(&op->kp);
1194 } else { 1195 } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1207 opt_pre_handler(&op->kp, regs); 1208 opt_pre_handler(&op->kp, regs);
1208 __this_cpu_write(current_kprobe, NULL); 1209 __this_cpu_write(current_kprobe, NULL);
1209 } 1210 }
1210 preempt_enable_no_resched(); 1211 local_irq_restore(flags);
1211} 1212}
1212 1213
1213static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) 1214static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
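
The optimized_callback() change above swaps the preempt_disable()/preempt_enable_no_resched() bracket for local_irq_save()/local_irq_restore(), so the per-cpu kprobe state is now protected against local interrupts as well as preemption while the pre-handler runs. The bracket, in sketch form (kernel context assumed):

    unsigned long flags;

    local_irq_save(flags);        /* was: preempt_disable() */
    /* per-cpu kprobe bookkeeping and opt_pre_handler() run here,
     * now safe against both preemption and local interrupts */
    local_irq_restore(flags);     /* was: preempt_enable_no_resched() */
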
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 45892dc4b72a..f65e5b521dbd 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
608 unsigned len, type; 608 unsigned len, type;
609 struct perf_event *bp; 609 struct perf_event *bp;
610 610
611 if (ptrace_get_breakpoints(tsk) < 0)
612 return -ESRCH;
613
611 data &= ~DR_CONTROL_RESERVED; 614 data &= ~DR_CONTROL_RESERVED;
612 old_dr7 = ptrace_get_dr7(thread->ptrace_bps); 615 old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
613restore: 616restore:
@@ -655,6 +658,9 @@ restore:
655 } 658 }
656 goto restore; 659 goto restore;
657 } 660 }
661
662 ptrace_put_breakpoints(tsk);
663
658 return ((orig_ret < 0) ? orig_ret : rc); 664 return ((orig_ret < 0) ? orig_ret : rc);
659} 665}
660 666
@@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
668 674
669 if (n < HBP_NUM) { 675 if (n < HBP_NUM) {
670 struct perf_event *bp; 676 struct perf_event *bp;
677
678 if (ptrace_get_breakpoints(tsk) < 0)
679 return -ESRCH;
680
671 bp = thread->ptrace_bps[n]; 681 bp = thread->ptrace_bps[n];
672 if (!bp) 682 if (!bp)
673 return 0; 683 val = 0;
674 val = bp->hw.info.address; 684 else
685 val = bp->hw.info.address;
686
687 ptrace_put_breakpoints(tsk);
675 } else if (n == 6) { 688 } else if (n == 6) {
676 val = thread->debugreg6; 689 val = thread->debugreg6;
677 } else if (n == 7) { 690 } else if (n == 7) {
@@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
686 struct perf_event *bp; 699 struct perf_event *bp;
687 struct thread_struct *t = &tsk->thread; 700 struct thread_struct *t = &tsk->thread;
688 struct perf_event_attr attr; 701 struct perf_event_attr attr;
702 int err = 0;
703
704 if (ptrace_get_breakpoints(tsk) < 0)
705 return -ESRCH;
689 706
690 if (!t->ptrace_bps[nr]) { 707 if (!t->ptrace_bps[nr]) {
691 ptrace_breakpoint_init(&attr); 708 ptrace_breakpoint_init(&attr);
@@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
709 * writing for the user. And anyway this is the previous 726 * writing for the user. And anyway this is the previous
710 * behaviour. 727 * behaviour.
711 */ 728 */
712 if (IS_ERR(bp)) 729 if (IS_ERR(bp)) {
713 return PTR_ERR(bp); 730 err = PTR_ERR(bp);
731 goto put;
732 }
714 733
715 t->ptrace_bps[nr] = bp; 734 t->ptrace_bps[nr] = bp;
716 } else { 735 } else {
717 int err;
718
719 bp = t->ptrace_bps[nr]; 736 bp = t->ptrace_bps[nr];
720 737
721 attr = bp->attr; 738 attr = bp->attr;
722 attr.bp_addr = addr; 739 attr.bp_addr = addr;
723 err = modify_user_hw_breakpoint(bp, &attr); 740 err = modify_user_hw_breakpoint(bp, &attr);
724 if (err)
725 return err;
726 } 741 }
727 742
728 743put:
729 return 0; 744 ptrace_put_breakpoints(tsk);
745 return err;
730} 746}
731 747
732/* 748/*
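
All three debug-register paths above now bracket their use of thread->ptrace_bps[] between ptrace_get_breakpoints() and ptrace_put_breakpoints(), and failures are funneled through a single exit so the reference is always dropped. Each function ends up shaped roughly like this (sketch using the helpers named in the hunk):

    if (ptrace_get_breakpoints(tsk) < 0)
            return -ESRCH;              /* breakpoint context already gone */

    /* ... read or modify tsk->thread.ptrace_bps[nr], recording any
     * failure in err instead of returning early ... */

    ptrace_put_breakpoints(tsk);        /* always drop the reference */
    return err;
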
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c11514e9128b..75ef4b18e9b7 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
61 .banner = default_banner, 61 .banner = default_banner,
62 }, 62 },
63 63
64 .mapping = {
65 .pagetable_reserve = native_pagetable_reserve,
66 },
67
64 .paging = { 68 .paging = {
65 .pagetable_setup_start = native_pagetable_setup_start, 69 .pagetable_setup_start = native_pagetable_setup_start,
66 .pagetable_setup_done = native_pagetable_setup_done, 70 .pagetable_setup_done = native_pagetable_setup_done,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 286d289b039b..37b8b0fe8320 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); 81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
82} 82}
83 83
84void __init native_pagetable_reserve(u64 start, u64 end)
85{
86 memblock_x86_reserve_range(start, end, "PGTABLE");
87}
88
84struct map_range { 89struct map_range {
85 unsigned long start; 90 unsigned long start;
86 unsigned long end; 91 unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
272 277
273 __flush_tlb_all(); 278 __flush_tlb_all();
274 279
280 /*
281 * Reserve the kernel pagetable pages we used (pgt_buf_start -
282 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
283 * so that they can be reused for other purposes.
284 *
285 * On native it just means calling memblock_x86_reserve_range, on Xen it
286 * also means marking RW the pagetable pages that we allocated before
287 * but that haven't been used.
288 *
289 * In fact on xen we mark RO the whole range pgt_buf_start -
290 * pgt_buf_top, because we have to make sure that when
291 * init_memory_mapping reaches the pagetable pages area, it maps
292 * RO all the pagetable pages, including the ones that are beyond
293 * pgt_buf_end at that time.
294 */
275 if (!after_bootmem && pgt_buf_end > pgt_buf_start) 295 if (!after_bootmem && pgt_buf_end > pgt_buf_start)
276 memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, 296 x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
277 pgt_buf_end << PAGE_SHIFT, "PGTABLE"); 297 PFN_PHYS(pgt_buf_end));
278 298
279 if (!after_bootmem) 299 if (!after_bootmem)
280 early_memtest(start, end); 300 early_memtest(start, end);
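
init_memory_mapping() now passes PFN_PHYS(pgt_buf_start) and PFN_PHYS(pgt_buf_end) to the new hook instead of shifting by PAGE_SHIFT inline; both spellings are the same page-frame-number to physical-address conversion. A tiny stand-alone check (PAGE_SHIFT of 12 assumed, as on x86; the pfn values are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  12
    #define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
            uint64_t pgt_buf_start = 0x1000;   /* hypothetical pfns */
            uint64_t pgt_buf_end   = 0x1080;

            printf("reserve %#llx - %#llx\n",
                   (unsigned long long)PFN_PHYS(pgt_buf_start),
                   (unsigned long long)PFN_PHYS(pgt_buf_end));
            return 0;
    }
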
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 7cb6424317f6..c58e0ea39ef5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
699 struct mm_struct *mm, 699 struct mm_struct *mm,
700 unsigned long va, unsigned int cpu) 700 unsigned long va, unsigned int cpu)
701{ 701{
702 int tcpu;
703 int uvhub;
704 int locals = 0; 702 int locals = 0;
705 int remotes = 0; 703 int remotes = 0;
706 int hubs = 0; 704 int hubs = 0;
705 int tcpu;
706 int tpnode;
707 struct bau_desc *bau_desc; 707 struct bau_desc *bau_desc;
708 struct cpumask *flush_mask; 708 struct cpumask *flush_mask;
709 struct ptc_stats *stat; 709 struct ptc_stats *stat;
710 struct bau_control *bcp; 710 struct bau_control *bcp;
711 struct bau_control *tbcp; 711 struct bau_control *tbcp;
712 struct hub_and_pnode *hpp;
712 713
713 /* kernel was booted 'nobau' */ 714 /* kernel was booted 'nobau' */
714 if (nobau) 715 if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
750 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 751 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
751 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 752 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
752 753
753 /* cpu statistics */
754 for_each_cpu(tcpu, flush_mask) { 754 for_each_cpu(tcpu, flush_mask) {
755 uvhub = uv_cpu_to_blade_id(tcpu); 755 /*
756 bau_uvhub_set(uvhub, &bau_desc->distribution); 756 * The distribution vector is a bit map of pnodes, relative
757 if (uvhub == bcp->uvhub) 757 * to the partition base pnode (and the partition base nasid
758 * in the header).
759 * Translate cpu to pnode and hub using an array stored
760 * in local memory.
761 */
762 hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
763 tpnode = hpp->pnode - bcp->partition_base_pnode;
764 bau_uvhub_set(tpnode, &bau_desc->distribution);
765 if (hpp->uvhub == bcp->uvhub)
758 locals++; 766 locals++;
759 else 767 else
760 remotes++; 768 remotes++;
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
855 * an interrupt, but causes an error message to be returned to 863 * an interrupt, but causes an error message to be returned to
856 * the sender. 864 * the sender.
857 */ 865 */
858static void uv_enable_timeouts(void) 866static void __init uv_enable_timeouts(void)
859{ 867{
860 int uvhub; 868 int uvhub;
861 int nuvhubs; 869 int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
1326} 1334}
1327 1335
1328/* 1336/*
1329 * initialize the sending side's sending buffers 1337 * Initialize the sending side's sending buffers.
1330 */ 1338 */
1331static void 1339static void
1332uv_activation_descriptor_init(int node, int pnode) 1340uv_activation_descriptor_init(int node, int pnode, int base_pnode)
1333{ 1341{
1334 int i; 1342 int i;
1335 int cpu; 1343 int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
1352 n = pa >> uv_nshift; 1360 n = pa >> uv_nshift;
1353 m = pa & uv_mmask; 1361 m = pa & uv_mmask;
1354 1362
1363 /* the 14-bit pnode */
1355 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1364 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
1356 (n << UV_DESC_BASE_PNODE_SHIFT | m)); 1365 (n << UV_DESC_BASE_PNODE_SHIFT | m));
1357
1358 /* 1366 /*
1359 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1367 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
1360 * cpu even though we only use the first one; one descriptor can 1368 * cpu even though we only use the first one; one descriptor can
1361 * describe a broadcast to 256 uv hubs. 1369 * describe a broadcast to 256 uv hubs.
1362 */ 1370 */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
1365 memset(bd2, 0, sizeof(struct bau_desc)); 1373 memset(bd2, 0, sizeof(struct bau_desc));
1366 bd2->header.sw_ack_flag = 1; 1374 bd2->header.sw_ack_flag = 1;
1367 /* 1375 /*
1368 * base_dest_nodeid is the nasid of the first uvhub 1376 * The base_dest_nasid set in the message header is the nasid
1369 * in the partition. The bit map will indicate uvhub numbers, 1377 * of the first uvhub in the partition. The bit map will
1370 * which are 0-N in a partition. Pnodes are unique system-wide. 1378 * indicate destination pnode numbers relative to that base.
1379 * They may not be consecutive if nasid striding is being used.
1371 */ 1380 */
1372 bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1381 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1373 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1382 bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1374 bd2->header.command = UV_NET_ENDPOINT_INTD; 1383 bd2->header.command = UV_NET_ENDPOINT_INTD;
1375 bd2->header.int_both = 1; 1384 bd2->header.int_both = 1;
1376 /* 1385 /*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
1442/* 1451/*
1443 * Initialization of each UV hub's structures 1452 * Initialization of each UV hub's structures
1444 */ 1453 */
1445static void __init uv_init_uvhub(int uvhub, int vector) 1454static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
1446{ 1455{
1447 int node; 1456 int node;
1448 int pnode; 1457 int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
1450 1459
1451 node = uvhub_to_first_node(uvhub); 1460 node = uvhub_to_first_node(uvhub);
1452 pnode = uv_blade_to_pnode(uvhub); 1461 pnode = uv_blade_to_pnode(uvhub);
1453 uv_activation_descriptor_init(node, pnode); 1462 uv_activation_descriptor_init(node, pnode, base_pnode);
1454 uv_payload_queue_init(node, pnode); 1463 uv_payload_queue_init(node, pnode);
1455 /* 1464 /*
1456 * the below initialization can't be in firmware because the 1465 * The below initialization can't be in firmware because the
1457 * messaging IRQ will be determined by the OS 1466 * messaging IRQ will be determined by the OS.
1458 */ 1467 */
1459 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; 1468 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1460 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1469 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
1491/* 1500/*
1492 * initialize the bau_control structure for each cpu 1501 * initialize the bau_control structure for each cpu
1493 */ 1502 */
1494static int __init uv_init_per_cpu(int nuvhubs) 1503static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
1495{ 1504{
1496 int i; 1505 int i;
1497 int cpu; 1506 int cpu;
1507 int tcpu;
1498 int pnode; 1508 int pnode;
1499 int uvhub; 1509 int uvhub;
1500 int have_hmaster; 1510 int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
1528 bcp = &per_cpu(bau_control, cpu); 1538 bcp = &per_cpu(bau_control, cpu);
1529 memset(bcp, 0, sizeof(struct bau_control)); 1539 memset(bcp, 0, sizeof(struct bau_control));
1530 pnode = uv_cpu_hub_info(cpu)->pnode; 1540 pnode = uv_cpu_hub_info(cpu)->pnode;
1541 if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
1542 printk(KERN_EMERG
1543 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1544 cpu, pnode, base_part_pnode,
1545 UV_DISTRIBUTION_SIZE);
1546 return 1;
1547 }
1548 bcp->osnode = cpu_to_node(cpu);
1549 bcp->partition_base_pnode = uv_partition_base_pnode;
1531 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; 1550 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1532 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); 1551 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1533 bdp = &uvhub_descs[uvhub]; 1552 bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
1536 bdp->pnode = pnode; 1555 bdp->pnode = pnode;
1537 /* kludge: 'assuming' one node per socket, and assuming that 1556 /* kludge: 'assuming' one node per socket, and assuming that
1538 disabling a socket just leaves a gap in node numbers */ 1557 disabling a socket just leaves a gap in node numbers */
1539 socket = (cpu_to_node(cpu) & 1); 1558 socket = bcp->osnode & 1;
1540 bdp->socket_mask |= (1 << socket); 1559 bdp->socket_mask |= (1 << socket);
1541 sdp = &bdp->socket[socket]; 1560 sdp = &bdp->socket[socket];
1542 sdp->cpu_number[sdp->num_cpus] = cpu; 1561 sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
1585nextsocket: 1604nextsocket:
1586 socket++; 1605 socket++;
1587 socket_mask = (socket_mask >> 1); 1606 socket_mask = (socket_mask >> 1);
1607 /* each socket gets a local array of pnodes/hubs */
1608 bcp = smaster;
1609 bcp->target_hub_and_pnode = kmalloc_node(
1610 sizeof(struct hub_and_pnode) *
1611 num_possible_cpus(), GFP_KERNEL, bcp->osnode);
1612 memset(bcp->target_hub_and_pnode, 0,
1613 sizeof(struct hub_and_pnode) *
1614 num_possible_cpus());
1615 for_each_present_cpu(tcpu) {
1616 bcp->target_hub_and_pnode[tcpu].pnode =
1617 uv_cpu_hub_info(tcpu)->pnode;
1618 bcp->target_hub_and_pnode[tcpu].uvhub =
1619 uv_cpu_hub_info(tcpu)->numa_blade_id;
1620 }
1588 } 1621 }
1589 } 1622 }
1590 kfree(uvhub_descs); 1623 kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
1637 spin_lock_init(&disable_lock); 1670 spin_lock_init(&disable_lock);
1638 congested_cycles = microsec_2_cycles(congested_response_us); 1671 congested_cycles = microsec_2_cycles(congested_response_us);
1639 1672
1640 if (uv_init_per_cpu(nuvhubs)) {
1641 nobau = 1;
1642 return 0;
1643 }
1644
1645 uv_partition_base_pnode = 0x7fffffff; 1673 uv_partition_base_pnode = 0x7fffffff;
1646 for (uvhub = 0; uvhub < nuvhubs; uvhub++) 1674 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1647 if (uv_blade_nr_possible_cpus(uvhub) && 1675 if (uv_blade_nr_possible_cpus(uvhub) &&
1648 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) 1676 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
1649 uv_partition_base_pnode = uv_blade_to_pnode(uvhub); 1677 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
1678 }
1679
1680 if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
1681 nobau = 1;
1682 return 0;
1683 }
1650 1684
1651 vector = UV_BAU_MESSAGE; 1685 vector = UV_BAU_MESSAGE;
1652 for_each_possible_blade(uvhub) 1686 for_each_possible_blade(uvhub)
1653 if (uv_blade_nr_possible_cpus(uvhub)) 1687 if (uv_blade_nr_possible_cpus(uvhub))
1654 uv_init_uvhub(uvhub, vector); 1688 uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
1655 1689
1656 uv_enable_timeouts(); 1690 uv_enable_timeouts();
1657 alloc_intr_gate(vector, uv_bau_message_intr1); 1691 alloc_intr_gate(vector, uv_bau_message_intr1);
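
The tlb_uv.c changes above stop putting blade ids into the BAU distribution vector and instead set bits for pnodes relative to the partition base pnode, looked up per target cpu in the new per-socket target_hub_and_pnode[] array. The per-cpu arithmetic reduces to the following (stand-alone sketch; the table contents and base pnode are made-up values):

    #include <stdio.h>

    struct hub_and_pnode { short uvhub; short pnode; };

    int main(void)
    {
            struct hub_and_pnode tbl[] = {     /* hypothetical cpu -> hub/pnode map */
                    { .uvhub = 0, .pnode = 8 },
                    { .uvhub = 1, .pnode = 10 },
            };
            int partition_base_pnode = 8;
            unsigned long distribution = 0;
            int tcpu;

            for (tcpu = 0; tcpu < 2; tcpu++) {
                    int tpnode = tbl[tcpu].pnode - partition_base_pnode;
                    distribution |= 1UL << tpnode;   /* what bau_uvhub_set() records */
            }
            printf("distribution=%#lx\n", distribution);   /* bits 0 and 2: 0x5 */
            return 0;
    }
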
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 55c965b38c27..0684f3c74d53 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
1275{ 1275{
1276} 1276}
1277 1277
1278static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1279{
1280 /* reserve the range used */
1281 native_pagetable_reserve(start, end);
1282
1283 /* set as RW the rest */
1284 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1285 PFN_PHYS(pgt_buf_top));
1286 while (end < PFN_PHYS(pgt_buf_top)) {
1287 make_lowmem_page_readwrite(__va(end));
1288 end += PAGE_SIZE;
1289 }
1290}
1291
1278static void xen_post_allocator_init(void); 1292static void xen_post_allocator_init(void);
1279 1293
1280static __init void xen_pagetable_setup_done(pgd_t *base) 1294static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm)
1463 return ret; 1477 return ret;
1464} 1478}
1465 1479
1466#ifdef CONFIG_X86_64
1467static __initdata u64 __last_pgt_set_rw = 0;
1468static __initdata u64 __pgt_buf_start = 0;
1469static __initdata u64 __pgt_buf_end = 0;
1470static __initdata u64 __pgt_buf_top = 0;
1471/*
1472 * As a consequence of the commit:
1473 *
1474 * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
1475 * Author: Yinghai Lu <yinghai@kernel.org>
1476 * Date: Fri Dec 17 16:58:28 2010 -0800
1477 *
1478 * x86-64, mm: Put early page table high
1479 *
1480 * at some point init_memory_mapping is going to reach the pagetable pages
1481 * area and map those pages too (mapping them as normal memory that falls
1482 * in the range of addresses passed to init_memory_mapping as argument).
1483 * Some of those pages are already pagetable pages (they are in the range
1484 * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
1485 * everything is fine.
1486 * Some of these pages are not pagetable pages yet (they fall in the range
1487 * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
1488 * are going to be mapped RW. When these pages become pagetable pages and
1489 * are hooked into the pagetable, xen will find that the guest has already
1490 * a RW mapping of them somewhere and fail the operation.
1491 * The reason Xen requires pagetables to be RO is that the hypervisor needs
1492 * to verify that the pagetables are valid before using them. The validation
1493 * operations are called "pinning".
1494 *
1495 * In order to fix the issue we mark all the pages in the entire range
1496 * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
1497 * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
1498 * init_memory_mapping. Hence the kernel is going to crash as soon as one
1499 * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
1500 * ranges are RO).
1501 *
1502 * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
1503 * the init_memory_mapping has completed (in a perfect world we would
1504 * call this function from init_memory_mapping, but lets ignore that).
1505 *
1506 * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
1507 * end,top] have all changed to new values (b/c init_memory_mapping
1508 * is called and setting up another new page-table). Hence, the first time
1509 * we enter this function, we save away the pgt_buf_start value and update
1510 * the pgt_buf_[end,top].
1511 *
1512 * When we detect that the "old" pgt_buf_start through pgt_buf_end
1513 * PFNs have been reserved (so memblock_x86_reserve_range has been called),
1514 * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
1515 *
1516 * And then we update those "old" pgt_buf_[end|top] with the new ones
1517 * so that we can redo this on the next pagetable.
1518 */
1519static __init void mark_rw_past_pgt(void) {
1520
1521 if (pgt_buf_end > pgt_buf_start) {
1522 u64 addr, size;
1523
1524 /* Save it away. */
1525 if (!__pgt_buf_start) {
1526 __pgt_buf_start = pgt_buf_start;
1527 __pgt_buf_end = pgt_buf_end;
1528 __pgt_buf_top = pgt_buf_top;
1529 return;
1530 }
1531 /* If we get the range that starts at __pgt_buf_end that means
1532 * the range is reserved, and that in 'init_memory_mapping'
1533 * the 'memblock_x86_reserve_range' has been called with the
1534 * outdated __pgt_buf_start, __pgt_buf_end (the "new"
1535 * pgt_buf_[start|end|top] refer now to a new pagetable.
1536 * Note: we are called _after_ the pgt_buf_[..] have been
1537 * updated.*/
1538
1539 addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
1540 &size, PAGE_SIZE);
1541
1542 /* Still not reserved, meaning 'memblock_x86_reserve_range'
1543 * hasn't been called yet. Update the _end and _top.*/
1544 if (addr == PFN_PHYS(__pgt_buf_start)) {
1545 __pgt_buf_end = pgt_buf_end;
1546 __pgt_buf_top = pgt_buf_top;
1547 return;
1548 }
1549
1550 /* OK, the area is reserved, meaning it is time for us to
1551 * set RW for the old end->top PFNs. */
1552
1553 /* ..unless we had already done this. */
1554 if (__pgt_buf_end == __last_pgt_set_rw)
1555 return;
1556
1557 addr = PFN_PHYS(__pgt_buf_end);
1558
1559 /* set as RW the rest */
1560 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
1561 PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
1562
1563 while (addr < PFN_PHYS(__pgt_buf_top)) {
1564 make_lowmem_page_readwrite(__va(addr));
1565 addr += PAGE_SIZE;
1566 }
1567 /* And update everything so that we are ready for the next
1568 * pagetable (the one created for regions past 4GB) */
1569 __last_pgt_set_rw = __pgt_buf_end;
1570 __pgt_buf_start = pgt_buf_start;
1571 __pgt_buf_end = pgt_buf_end;
1572 __pgt_buf_top = pgt_buf_top;
1573 }
1574 return;
1575}
1576#else
1577static __init void mark_rw_past_pgt(void) { }
1578#endif
1579static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) 1480static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1580{ 1481{
1581#ifdef CONFIG_X86_64 1482#ifdef CONFIG_X86_64
@@ -1602,14 +1503,6 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1602 unsigned long pfn = pte_pfn(pte); 1503 unsigned long pfn = pte_pfn(pte);
1603 1504
1604 /* 1505 /*
1605 * A bit of optimization. We do not need to call the workaround
1606 * when xen_set_pte_init is called with a PTE with 0 as PFN.
1607 * That is b/c the pagetable at that point are just being populated
1608 * with empty values and we can save some cycles by not calling
1609 * the 'memblock' code.*/
1610 if (pfn)
1611 mark_rw_past_pgt();
1612 /*
1613 * If the new pfn is within the range of the newly allocated 1506 * If the new pfn is within the range of the newly allocated
1614 * kernel pagetable, and it isn't being mapped into an 1507 * kernel pagetable, and it isn't being mapped into an
1615 * early_ioremap fixmap slot as a freshly allocated page, make sure 1508 * early_ioremap fixmap slot as a freshly allocated page, make sure
@@ -2118,8 +2011,6 @@ __init void xen_ident_map_ISA(void)
2118 2011
2119static __init void xen_post_allocator_init(void) 2012static __init void xen_post_allocator_init(void)
2120{ 2013{
2121 mark_rw_past_pgt();
2122
2123#ifdef CONFIG_XEN_DEBUG 2014#ifdef CONFIG_XEN_DEBUG
2124 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); 2015 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
2125#endif 2016#endif
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
2228 2119
2229void __init xen_init_mmu_ops(void) 2120void __init xen_init_mmu_ops(void)
2230{ 2121{
2122 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
2231 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; 2123 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2232 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 2124 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2233 pv_mmu_ops = xen_mmu_ops; 2125 pv_mmu_ops = xen_mmu_ops;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f0605ab2a761..471fdcc5df85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
114} 114}
115EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); 115EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
116 116
117struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
118{
119 return container_of(task_subsys_state(tsk, blkio_subsys_id),
120 struct blkio_cgroup, css);
121}
122EXPORT_SYMBOL_GPL(task_blkio_cgroup);
123
117static inline void 124static inline void
118blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) 125blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
119{ 126{
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 10919fae2d3a..c774930cc206 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
291#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) 291#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
292extern struct blkio_cgroup blkio_root_cgroup; 292extern struct blkio_cgroup blkio_root_cgroup;
293extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); 293extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
294extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
294extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 295extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
295 struct blkio_group *blkg, void *key, dev_t dev, 296 struct blkio_group *blkg, void *key, dev_t dev,
296 enum blkio_policy_id plid); 297 enum blkio_policy_id plid);
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
314struct cgroup; 315struct cgroup;
315static inline struct blkio_cgroup * 316static inline struct blkio_cgroup *
316cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } 317cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
318static inline struct blkio_cgroup *
319task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
317 320
318static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 321static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
319 struct blkio_group *blkg, void *key, dev_t dev, 322 struct blkio_group *blkg, void *key, dev_t dev,
diff --git a/block/blk-core.c b/block/blk-core.c
index a2e58eeb3549..3fe00a14822a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue);
316 */ 316 */
317void blk_run_queue_async(struct request_queue *q) 317void blk_run_queue_async(struct request_queue *q)
318{ 318{
319 if (likely(!blk_queue_stopped(q))) 319 if (likely(!blk_queue_stopped(q))) {
320 __cancel_delayed_work(&q->delay_work);
320 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 321 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
322 }
321} 323}
322EXPORT_SYMBOL(blk_run_queue_async); 324EXPORT_SYMBOL(blk_run_queue_async);
323 325
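
blk_run_queue_async() above now cancels any already pending delayed run before queueing the work again with a 0 delay; queue_delayed_work() is a no-op while the work is still pending, so without the cancel an earlier, longer delay could postpone the "run it now" request. The pattern, reduced to a sketch (q->delay_work is the queue's delayed_work item):

    if (likely(!blk_queue_stopped(q))) {
            __cancel_delayed_work(&q->delay_work);   /* drop a pending, longer timer */
            queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
    }
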
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0475a22a420d..252a81a306f7 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
160} 160}
161 161
162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, 162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
163 struct cgroup *cgroup) 163 struct blkio_cgroup *blkcg)
164{ 164{
165 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
166 struct throtl_grp *tg = NULL; 165 struct throtl_grp *tg = NULL;
167 void *key = td; 166 void *key = td;
168 struct backing_dev_info *bdi = &td->queue->backing_dev_info; 167 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@@ -229,12 +228,12 @@ done:
229 228
230static struct throtl_grp * throtl_get_tg(struct throtl_data *td) 229static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
231{ 230{
232 struct cgroup *cgroup;
233 struct throtl_grp *tg = NULL; 231 struct throtl_grp *tg = NULL;
232 struct blkio_cgroup *blkcg;
234 233
235 rcu_read_lock(); 234 rcu_read_lock();
236 cgroup = task_cgroup(current, blkio_subsys_id); 235 blkcg = task_blkio_cgroup(current);
237 tg = throtl_find_alloc_tg(td, cgroup); 236 tg = throtl_find_alloc_tg(td, blkcg);
238 if (!tg) 237 if (!tg)
239 tg = &td->root_tg; 238 tg = &td->root_tg;
240 rcu_read_unlock(); 239 rcu_read_unlock();
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5b52011e3a40..ab7a9e6a9b1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1014 cfqg->needs_update = true; 1014 cfqg->needs_update = true;
1015} 1015}
1016 1016
1017static struct cfq_group * 1017static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
1018cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) 1018 struct blkio_cgroup *blkcg, int create)
1019{ 1019{
1020 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1021 struct cfq_group *cfqg = NULL; 1020 struct cfq_group *cfqg = NULL;
1022 void *key = cfqd; 1021 void *key = cfqd;
1023 int i, j; 1022 int i, j;
@@ -1079,12 +1078,12 @@ done:
1079 */ 1078 */
1080static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1079static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1081{ 1080{
1082 struct cgroup *cgroup; 1081 struct blkio_cgroup *blkcg;
1083 struct cfq_group *cfqg = NULL; 1082 struct cfq_group *cfqg = NULL;
1084 1083
1085 rcu_read_lock(); 1084 rcu_read_lock();
1086 cgroup = task_cgroup(current, blkio_subsys_id); 1085 blkcg = task_blkio_cgroup(current);
1087 cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); 1086 cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
1088 if (!cfqg && create) 1087 if (!cfqg && create)
1089 cfqg = &cfqd->root_group; 1088 cfqg = &cfqd->root_group;
1090 rcu_read_unlock(); 1089 rcu_read_unlock();
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ff9d832a163d..d38c40fe4ddb 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap)
561{ 561{
562 void __iomem *port_mmio = ahci_port_base(ap); 562 void __iomem *port_mmio = ahci_port_base(ap);
563 u32 tmp; 563 u32 tmp;
564 u8 status;
565
566 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
567
568 /*
569 * At end of section 10.1 of AHCI spec (rev 1.3), it states
570 * Software shall not set PxCMD.ST to 1 until it is determined
571 * that a functional device is present on the port as determined by
572 * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
573 *
574 * Even though most AHCI host controllers work without this check,
575 * specific controller will fail under this condition
576 */
577 if (status & (ATA_BUSY | ATA_DRQ))
578 return;
579 else {
580 ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
581
582 if ((tmp & 0xf) != 0x3)
583 return;
584 }
585 564
586 /* start DMA */ 565 /* start DMA */
587 tmp = readl(port_mmio + PORT_CMD); 566 tmp = readl(port_mmio + PORT_CMD);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f26f2fe3480a..dad9fd660f37 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3316 struct ata_eh_context *ehc = &link->eh_context; 3316 struct ata_eh_context *ehc = &link->eh_context;
3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3318 enum ata_lpm_policy old_policy = link->lpm_policy; 3318 enum ata_lpm_policy old_policy = link->lpm_policy;
3319 bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; 3319 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3321 unsigned int err_mask; 3321 unsigned int err_mask;
3322 int rc; 3322 int rc;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bdd2719f3f68..bc9e702186dd 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
2643} 2643}
2644 2644
2645#ifdef CONFIG_SBUS 2645#ifdef CONFIG_SBUS
2646static const struct of_device_id fore200e_sba_match[];
2646static int __devinit fore200e_sba_probe(struct platform_device *op) 2647static int __devinit fore200e_sba_probe(struct platform_device *op)
2647{ 2648{
2649 const struct of_device_id *match;
2648 const struct fore200e_bus *bus; 2650 const struct fore200e_bus *bus;
2649 struct fore200e *fore200e; 2651 struct fore200e *fore200e;
2650 static int index = 0; 2652 static int index = 0;
2651 int err; 2653 int err;
2652 2654
2653 if (!op->dev.of_match) 2655 match = of_match_device(fore200e_sba_match, &op->dev);
2656 if (!match)
2654 return -EINVAL; 2657 return -EINVAL;
2655 bus = op->dev.of_match->data; 2658 bus = match->data;
2656 2659
2657 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2660 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2658 if (!fore200e) 2661 if (!fore200e)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 8066d086578a..e086fbbbe853 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
2547 disk->major = MajorNumber; 2547 disk->major = MajorNumber;
2548 disk->first_minor = n << DAC960_MaxPartitionsBits; 2548 disk->first_minor = n << DAC960_MaxPartitionsBits;
2549 disk->fops = &DAC960_BlockDeviceOperations; 2549 disk->fops = &DAC960_BlockDeviceOperations;
2550 disk->events = DISK_EVENT_MEDIA_CHANGE;
2551 } 2550 }
2552 /* 2551 /*
2553 Indicate the Block Device Registration completed successfully, 2552 Indicate the Block Device Registration completed successfully,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 456c0cc90dcf..8eba86bba599 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void)
1736 disk->major = FLOPPY_MAJOR; 1736 disk->major = FLOPPY_MAJOR;
1737 disk->first_minor = drive; 1737 disk->first_minor = drive;
1738 disk->fops = &floppy_fops; 1738 disk->fops = &floppy_fops;
1739 disk->events = DISK_EVENT_MEDIA_CHANGE;
1740 sprintf(disk->disk_name, "fd%d", drive); 1739 sprintf(disk->disk_name, "fd%d", drive);
1741 disk->private_data = &unit[drive]; 1740 disk->private_data = &unit[drive];
1742 set_capacity(disk, 880*2); 1741 set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index c871eae14120..ede16c64ff07 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void)
1964 unit[i].disk->first_minor = i; 1964 unit[i].disk->first_minor = i;
1965 sprintf(unit[i].disk->disk_name, "fd%d", i); 1965 sprintf(unit[i].disk->disk_name, "fd%d", i);
1966 unit[i].disk->fops = &floppy_fops; 1966 unit[i].disk->fops = &floppy_fops;
1967 unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
1968 unit[i].disk->private_data = &unit[i]; 1967 unit[i].disk->private_data = &unit[i];
1969 unit[i].disk->queue = blk_init_queue(do_fd_request, 1968 unit[i].disk->queue = blk_init_queue(do_fd_request,
1970 &ataflop_lock); 1969 &ataflop_lock);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 301d7a9a41a6..db8f88586c8d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4205,7 +4205,6 @@ static int __init floppy_init(void)
4205 disks[dr]->major = FLOPPY_MAJOR; 4205 disks[dr]->major = FLOPPY_MAJOR;
4206 disks[dr]->first_minor = TOMINOR(dr); 4206 disks[dr]->first_minor = TOMINOR(dr);
4207 disks[dr]->fops = &floppy_fops; 4207 disks[dr]->fops = &floppy_fops;
4208 disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
4209 sprintf(disks[dr]->disk_name, "fd%d", dr); 4208 sprintf(disks[dr]->disk_name, "fd%d", dr);
4210 4209
4211 init_timer(&motor_off_timer[dr]); 4210 init_timer(&motor_off_timer[dr]);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 2f2ccf686251..8690e31d9932 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -320,7 +320,6 @@ static void pcd_init_units(void)
320 disk->first_minor = unit; 320 disk->first_minor = unit;
321 strcpy(disk->disk_name, cd->name); /* umm... */ 321 strcpy(disk->disk_name, cd->name); /* umm... */
322 disk->fops = &pcd_bdops; 322 disk->fops = &pcd_bdops;
323 disk->events = DISK_EVENT_MEDIA_CHANGE;
324 } 323 }
325} 324}
326 325
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 21dfdb776869..869e7676d46f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk)
837 p->fops = &pd_fops; 837 p->fops = &pd_fops;
838 p->major = major; 838 p->major = major;
839 p->first_minor = (disk - pd) << PD_BITS; 839 p->first_minor = (disk - pd) << PD_BITS;
840 p->events = DISK_EVENT_MEDIA_CHANGE;
841 disk->gd = p; 840 disk->gd = p;
842 p->private_data = disk; 841 p->private_data = disk;
843 p->queue = pd_queue; 842 p->queue = pd_queue;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7adeb1edbf43..f21b520ef419 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -294,7 +294,6 @@ static void __init pf_init_units(void)
294 disk->first_minor = unit; 294 disk->first_minor = unit;
295 strcpy(disk->disk_name, pf->name); 295 strcpy(disk->disk_name, pf->name);
296 disk->fops = &pf_fops; 296 disk->fops = &pf_fops;
297 disk->events = DISK_EVENT_MEDIA_CHANGE;
298 if (!(*drives[unit])[D_PRT]) 297 if (!(*drives[unit])[D_PRT])
299 pf_drive_count++; 298 pf_drive_count++;
300 } 299 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3e904717c1c0..9712fad82bc6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -92,6 +92,8 @@ struct rbd_client {
92 struct list_head node; 92 struct list_head node;
93}; 93};
94 94
95struct rbd_req_coll;
96
95/* 97/*
96 * a single io request 98 * a single io request
97 */ 99 */
@@ -100,6 +102,24 @@ struct rbd_request {
100 struct bio *bio; /* cloned bio */ 102 struct bio *bio; /* cloned bio */
101 struct page **pages; /* list of used pages */ 103 struct page **pages; /* list of used pages */
102 u64 len; 104 u64 len;
105 int coll_index;
106 struct rbd_req_coll *coll;
107};
108
109struct rbd_req_status {
110 int done;
111 int rc;
112 u64 bytes;
113};
114
115/*
116 * a collection of requests
117 */
118struct rbd_req_coll {
119 int total;
120 int num_done;
121 struct kref kref;
122 struct rbd_req_status status[0];
103}; 123};
104 124
105struct rbd_snap { 125struct rbd_snap {
@@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev)
416 rbd_dev->client = NULL; 436 rbd_dev->client = NULL;
417} 437}
418 438
439/*
440 * Destroy requests collection
441 */
442static void rbd_coll_release(struct kref *kref)
443{
444 struct rbd_req_coll *coll =
445 container_of(kref, struct rbd_req_coll, kref);
446
447 dout("rbd_coll_release %p\n", coll);
448 kfree(coll);
449}
419 450
420/* 451/*
421 * Create a new header structure, translate header format from the on-disk 452 * Create a new header structure, translate header format from the on-disk
@@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
590 return len; 621 return len;
591} 622}
592 623
624static int rbd_get_num_segments(struct rbd_image_header *header,
625 u64 ofs, u64 len)
626{
627 u64 start_seg = ofs >> header->obj_order;
628 u64 end_seg = (ofs + len - 1) >> header->obj_order;
629 return end_seg - start_seg + 1;
630}
631
593/* 632/*
594 * bio helpers 633 * bio helpers
595 */ 634 */
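
rbd_get_num_segments() above counts how many backing objects an I/O touches: with obj_order bits per object it is the last byte's object index minus the first byte's, plus one. A quick self-contained check of that arithmetic (object size and offsets are example values):

    #include <stdio.h>
    #include <stdint.h>

    static int get_num_segments(int obj_order, uint64_t ofs, uint64_t len)
    {
            uint64_t start_seg = ofs >> obj_order;
            uint64_t end_seg   = (ofs + len - 1) >> obj_order;

            return (int)(end_seg - start_seg + 1);
    }

    int main(void)
    {
            /* 4 MB objects (obj_order = 22): a 6 MB write at offset 3 MB
             * spans objects 0, 1 and 2, i.e. 3 segments */
            printf("%d\n", get_num_segments(22, 3ULL << 20, 6ULL << 20));
            return 0;
    }
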
@@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
735 kfree(ops); 774 kfree(ops);
736} 775}
737 776
777static void rbd_coll_end_req_index(struct request *rq,
778 struct rbd_req_coll *coll,
779 int index,
780 int ret, u64 len)
781{
782 struct request_queue *q;
783 int min, max, i;
784
785 dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n",
786 coll, index, ret, len);
787
788 if (!rq)
789 return;
790
791 if (!coll) {
792 blk_end_request(rq, ret, len);
793 return;
794 }
795
796 q = rq->q;
797
798 spin_lock_irq(q->queue_lock);
799 coll->status[index].done = 1;
800 coll->status[index].rc = ret;
801 coll->status[index].bytes = len;
802 max = min = coll->num_done;
803 while (max < coll->total && coll->status[max].done)
804 max++;
805
806 for (i = min; i<max; i++) {
807 __blk_end_request(rq, coll->status[i].rc,
808 coll->status[i].bytes);
809 coll->num_done++;
810 kref_put(&coll->kref, rbd_coll_release);
811 }
812 spin_unlock_irq(q->queue_lock);
813}
814
815static void rbd_coll_end_req(struct rbd_request *req,
816 int ret, u64 len)
817{
818 rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
819}
820
738/* 821/*
739 * Send ceph osd request 822 * Send ceph osd request
740 */ 823 */
@@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq,
749 int flags, 832 int flags,
750 struct ceph_osd_req_op *ops, 833 struct ceph_osd_req_op *ops,
751 int num_reply, 834 int num_reply,
835 struct rbd_req_coll *coll,
836 int coll_index,
752 void (*rbd_cb)(struct ceph_osd_request *req, 837 void (*rbd_cb)(struct ceph_osd_request *req,
753 struct ceph_msg *msg), 838 struct ceph_msg *msg),
754 struct ceph_osd_request **linger_req, 839 struct ceph_osd_request **linger_req,
@@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq,
763 struct ceph_osd_request_head *reqhead; 848 struct ceph_osd_request_head *reqhead;
764 struct rbd_image_header *header = &dev->header; 849 struct rbd_image_header *header = &dev->header;
765 850
766 ret = -ENOMEM;
767 req_data = kzalloc(sizeof(*req_data), GFP_NOIO); 851 req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
768 if (!req_data) 852 if (!req_data) {
769 goto done; 853 if (coll)
854 rbd_coll_end_req_index(rq, coll, coll_index,
855 -ENOMEM, len);
856 return -ENOMEM;
857 }
770 858
771 dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); 859 if (coll) {
860 req_data->coll = coll;
861 req_data->coll_index = coll_index;
862 }
863
864 dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
772 865
773 down_read(&header->snap_rwsem); 866 down_read(&header->snap_rwsem);
774 867
@@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq,
828 ret = ceph_osdc_wait_request(&dev->client->osdc, req); 921 ret = ceph_osdc_wait_request(&dev->client->osdc, req);
829 if (ver) 922 if (ver)
830 *ver = le64_to_cpu(req->r_reassert_version.version); 923 *ver = le64_to_cpu(req->r_reassert_version.version);
831 dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); 924 dout("reassert_ver=%lld\n",
925 le64_to_cpu(req->r_reassert_version.version));
832 ceph_osdc_put_request(req); 926 ceph_osdc_put_request(req);
833 } 927 }
834 return ret; 928 return ret;
@@ -837,10 +931,8 @@ done_err:
837 bio_chain_put(req_data->bio); 931 bio_chain_put(req_data->bio);
838 ceph_osdc_put_request(req); 932 ceph_osdc_put_request(req);
839done_pages: 933done_pages:
934 rbd_coll_end_req(req_data, ret, len);
840 kfree(req_data); 935 kfree(req_data);
841done:
842 if (rq)
843 blk_end_request(rq, ret, len);
844 return ret; 936 return ret;
845} 937}
846 938
@@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
874 bytes = req_data->len; 966 bytes = req_data->len;
875 } 967 }
876 968
877 blk_end_request(req_data->rq, rc, bytes); 969 rbd_coll_end_req(req_data, rc, bytes);
878 970
879 if (req_data->bio) 971 if (req_data->bio)
880 bio_chain_put(req_data->bio); 972 bio_chain_put(req_data->bio);
@@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev,
934 flags, 1026 flags,
935 ops, 1027 ops,
936 2, 1028 2,
1029 NULL, 0,
937 NULL, 1030 NULL,
938 linger_req, ver); 1031 linger_req, ver);
939 if (ret < 0) 1032 if (ret < 0)
@@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq,
959 u64 snapid, 1052 u64 snapid,
960 int opcode, int flags, int num_reply, 1053 int opcode, int flags, int num_reply,
961 u64 ofs, u64 len, 1054 u64 ofs, u64 len,
962 struct bio *bio) 1055 struct bio *bio,
1056 struct rbd_req_coll *coll,
1057 int coll_index)
963{ 1058{
964 char *seg_name; 1059 char *seg_name;
965 u64 seg_ofs; 1060 u64 seg_ofs;
@@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq,
995 flags, 1090 flags,
996 ops, 1091 ops,
997 num_reply, 1092 num_reply,
1093 coll, coll_index,
998 rbd_req_cb, 0, NULL); 1094 rbd_req_cb, 0, NULL);
1095
1096 rbd_destroy_ops(ops);
999done: 1097done:
1000 kfree(seg_name); 1098 kfree(seg_name);
1001 return ret; 1099 return ret;
@@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq,
1008 struct rbd_device *rbd_dev, 1106 struct rbd_device *rbd_dev,
1009 struct ceph_snap_context *snapc, 1107 struct ceph_snap_context *snapc,
1010 u64 ofs, u64 len, 1108 u64 ofs, u64 len,
1011 struct bio *bio) 1109 struct bio *bio,
1110 struct rbd_req_coll *coll,
1111 int coll_index)
1012{ 1112{
1013 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, 1113 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
1014 CEPH_OSD_OP_WRITE, 1114 CEPH_OSD_OP_WRITE,
1015 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1115 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1016 2, 1116 2,
1017 ofs, len, bio); 1117 ofs, len, bio, coll, coll_index);
1018} 1118}
1019 1119
1020/* 1120/*
@@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq,
1024 struct rbd_device *rbd_dev, 1124 struct rbd_device *rbd_dev,
1025 u64 snapid, 1125 u64 snapid,
1026 u64 ofs, u64 len, 1126 u64 ofs, u64 len,
1027 struct bio *bio) 1127 struct bio *bio,
1128 struct rbd_req_coll *coll,
1129 int coll_index)
1028{ 1130{
1029 return rbd_do_op(rq, rbd_dev, NULL, 1131 return rbd_do_op(rq, rbd_dev, NULL,
1030 (snapid ? snapid : CEPH_NOSNAP), 1132 (snapid ? snapid : CEPH_NOSNAP),
1031 CEPH_OSD_OP_READ, 1133 CEPH_OSD_OP_READ,
1032 CEPH_OSD_FLAG_READ, 1134 CEPH_OSD_FLAG_READ,
1033 2, 1135 2,
1034 ofs, len, bio); 1136 ofs, len, bio, coll, coll_index);
1035} 1137}
1036 1138
1037/* 1139/*
@@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1063{ 1165{
1064 struct ceph_osd_req_op *ops; 1166 struct ceph_osd_req_op *ops;
1065 struct page **pages = NULL; 1167 struct page **pages = NULL;
1066 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); 1168 int ret;
1169
1170 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1067 if (ret < 0) 1171 if (ret < 0)
1068 return ret; 1172 return ret;
1069 1173
@@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1077 CEPH_OSD_FLAG_READ, 1181 CEPH_OSD_FLAG_READ,
1078 ops, 1182 ops,
1079 1, 1183 1,
1184 NULL, 0,
1080 rbd_simple_req_cb, 0, NULL); 1185 rbd_simple_req_cb, 0, NULL);
1081 1186
1082 rbd_destroy_ops(ops); 1187 rbd_destroy_ops(ops);
@@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
1274 return ret; 1379 return ret;
1275} 1380}
1276 1381
1382static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
1383{
1384 struct rbd_req_coll *coll =
1385 kzalloc(sizeof(struct rbd_req_coll) +
1386 sizeof(struct rbd_req_status) * num_reqs,
1387 GFP_ATOMIC);
1388
1389 if (!coll)
1390 return NULL;
1391 coll->total = num_reqs;
1392 kref_init(&coll->kref);
1393 return coll;
1394}
1395
1277/* 1396/*
1278 * block device queue callback 1397 * block device queue callback
1279 */ 1398 */
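rbd_alloc_coll() makes a single allocation sized for the header plus one rbd_req_status slot per segment (the status[0] trailing-array idiom), and kref_init() establishes the submitter's reference, which rbd_rq_fn() drops after its submission loop. GFP_ATOMIC matches the bio_chain_clone() allocation on the same path, presumably to avoid sleeping while a request is being carved into per-object operations.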
@@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q)
1291 bool do_write; 1410 bool do_write;
1292 int size, op_size = 0; 1411 int size, op_size = 0;
1293 u64 ofs; 1412 u64 ofs;
1413 int num_segs, cur_seg = 0;
1414 struct rbd_req_coll *coll;
1294 1415
1295 /* peek at request from block layer */ 1416 /* peek at request from block layer */
1296 if (!rq) 1417 if (!rq)
@@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q)
1321 do_write ? "write" : "read", 1442 do_write ? "write" : "read",
1322 size, blk_rq_pos(rq) * 512ULL); 1443 size, blk_rq_pos(rq) * 512ULL);
1323 1444
1445 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
1446 coll = rbd_alloc_coll(num_segs);
1447 if (!coll) {
1448 spin_lock_irq(q->queue_lock);
1449 __blk_end_request_all(rq, -ENOMEM);
1450 goto next;
1451 }
1452
1324 do { 1453 do {
1325 /* a bio clone to be passed down to OSD req */ 1454 /* a bio clone to be passed down to OSD req */
1326 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); 1455 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
@@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q)
1328 rbd_dev->header.block_name, 1457 rbd_dev->header.block_name,
1329 ofs, size, 1458 ofs, size,
1330 NULL, NULL); 1459 NULL, NULL);
1460 kref_get(&coll->kref);
1331 bio = bio_chain_clone(&rq_bio, &next_bio, &bp, 1461 bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
1332 op_size, GFP_ATOMIC); 1462 op_size, GFP_ATOMIC);
1333 if (!bio) { 1463 if (!bio) {
1334 spin_lock_irq(q->queue_lock); 1464 rbd_coll_end_req_index(rq, coll, cur_seg,
1335 __blk_end_request_all(rq, -ENOMEM); 1465 -ENOMEM, op_size);
1336 goto next; 1466 goto next_seg;
1337 } 1467 }
1338 1468
1469
1339 /* init OSD command: write or read */ 1470 /* init OSD command: write or read */
1340 if (do_write) 1471 if (do_write)
1341 rbd_req_write(rq, rbd_dev, 1472 rbd_req_write(rq, rbd_dev,
1342 rbd_dev->header.snapc, 1473 rbd_dev->header.snapc,
1343 ofs, 1474 ofs,
1344 op_size, bio); 1475 op_size, bio,
1476 coll, cur_seg);
1345 else 1477 else
1346 rbd_req_read(rq, rbd_dev, 1478 rbd_req_read(rq, rbd_dev,
1347 cur_snap_id(rbd_dev), 1479 cur_snap_id(rbd_dev),
1348 ofs, 1480 ofs,
1349 op_size, bio); 1481 op_size, bio,
1482 coll, cur_seg);
1350 1483
1484next_seg:
1351 size -= op_size; 1485 size -= op_size;
1352 ofs += op_size; 1486 ofs += op_size;
1353 1487
1488 cur_seg++;
1354 rq_bio = next_bio; 1489 rq_bio = next_bio;
1355 } while (size > 0); 1490 } while (size > 0);
1491 kref_put(&coll->kref, rbd_coll_release);
1356 1492
1357 if (bp) 1493 if (bp)
1358 bio_pair_release(bp); 1494 bio_pair_release(bp);
1359
1360 spin_lock_irq(q->queue_lock); 1495 spin_lock_irq(q->queue_lock);
1361next: 1496next:
1362 rq = blk_fetch_request(q); 1497 rq = blk_fetch_request(q);
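The reference counting in the loop above follows a common pattern: the collection starts at one reference from kref_init() (the submitter's), an extra reference is taken before each segment is handed to the OSD layer, each completed segment drops one reference in rbd_coll_end_req_index(), and the submitter drops its own reference once all segments are queued, so the collection is freed exactly when the last outstanding segment ends. A minimal sketch of that idiom, where my_submit_segment() is a placeholder for rbd_req_read()/rbd_req_write():

	/* sketch only: collection lifetime across N asynchronous segments */
	coll = rbd_alloc_coll(num_segs);		/* kref == 1, submitter's reference */
	if (!coll)
		return -ENOMEM;

	for (i = 0; i < num_segs; i++) {
		kref_get(&coll->kref);			/* one reference per in-flight segment */
		my_submit_segment(rq, coll, i);		/* completion path does kref_put() */
	}

	kref_put(&coll->kref, rbd_coll_release);	/* drop the submitter's reference */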
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 24a482f2fbd6..fd5adcd55944 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
858 swd->unit[drive].disk->first_minor = drive; 858 swd->unit[drive].disk->first_minor = drive;
859 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); 859 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
860 swd->unit[drive].disk->fops = &floppy_fops; 860 swd->unit[drive].disk->fops = &floppy_fops;
861 swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
862 swd->unit[drive].disk->private_data = &swd->unit[drive]; 861 swd->unit[drive].disk->private_data = &swd->unit[drive];
863 swd->unit[drive].disk->queue = swd->queue; 862 swd->unit[drive].disk->queue = swd->queue;
864 set_capacity(swd->unit[drive].disk, 2880); 863 set_capacity(swd->unit[drive].disk, 2880);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 4c10f56facbf..773bfa792777 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
1163 disk->major = FLOPPY_MAJOR; 1163 disk->major = FLOPPY_MAJOR;
1164 disk->first_minor = i; 1164 disk->first_minor = i;
1165 disk->fops = &floppy_fops; 1165 disk->fops = &floppy_fops;
1166 disk->events = DISK_EVENT_MEDIA_CHANGE;
1167 disk->private_data = &floppy_states[i]; 1166 disk->private_data = &floppy_states[i];
1168 disk->queue = swim3_queue; 1167 disk->queue = swim3_queue;
1169 disk->flags |= GENHD_FL_REMOVABLE; 1168 disk->flags |= GENHD_FL_REMOVABLE;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 68b9430c7cfe..0e376d46bdd1 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2334 disk->major = UB_MAJOR; 2334 disk->major = UB_MAJOR;
2335 disk->first_minor = lun->id * UB_PARTS_PER_LUN; 2335 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2336 disk->fops = &ub_bd_fops; 2336 disk->fops = &ub_bd_fops;
2337 disk->events = DISK_EVENT_MEDIA_CHANGE;
2338 disk->private_data = lun; 2337 disk->private_data = lun;
2339 disk->driverfs_dev = &sc->intf->dev; 2338 disk->driverfs_dev = &sc->intf->dev;
2340 2339
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 645ff765cd12..6c7fd7db6dff 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace)
1005 ace->gd->major = ace_major; 1005 ace->gd->major = ace_major;
1006 ace->gd->first_minor = ace->id * ACE_NUM_MINORS; 1006 ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
1007 ace->gd->fops = &ace_fops; 1007 ace->gd->fops = &ace_fops;
1008 ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
1009 ace->gd->queue = ace->queue; 1008 ace->gd->queue = ace->queue;
1010 ace->gd->private_data = ace; 1009 ace->gd->private_data = ace;
1011 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); 1010 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 514dd8efaf73..75fb965b8f72 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
986 986
987 cdinfo(CD_OPEN, "entering cdrom_open\n"); 987 cdinfo(CD_OPEN, "entering cdrom_open\n");
988 988
989 /* open is event synchronization point, check events first */
990 check_disk_change(bdev);
991
989 /* if this was a O_NONBLOCK open and we should honor the flags, 992 /* if this was a O_NONBLOCK open and we should honor the flags,
990 * do a quick open without drive/disc integrity checks. */ 993 * do a quick open without drive/disc integrity checks. */
991 cdi->use_count++; 994 cdi->use_count++;
@@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
1012 1015
1013 cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", 1016 cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
1014 cdi->name, cdi->use_count); 1017 cdi->name, cdi->use_count);
1015 /* Do this on open. Don't wait for mount, because they might
1016 not be mounting, but opening with O_NONBLOCK */
1017 check_disk_change(bdev);
1018 return 0; 1018 return 0;
1019err_release: 1019err_release:
1020 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { 1020 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index b2b034fea34e..3ceaf006e7f0 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
803 goto probe_fail_cdrom_register; 803 goto probe_fail_cdrom_register;
804 } 804 }
805 gd.disk->fops = &gdrom_bdops; 805 gd.disk->fops = &gdrom_bdops;
806 gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
807 /* latch on to the interrupt */ 806 /* latch on to the interrupt */
808 err = gdrom_set_interrupt_handlers(); 807 err = gdrom_set_interrupt_handlers();
809 if (err) 808 if (err)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 4e874c5fa605..e427fbe45999 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
626 gendisk->queue = q; 626 gendisk->queue = q;
627 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; 628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
629 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
630 set_capacity(gendisk, 0); 629 set_capacity(gendisk, 0);
631 gendisk->private_data = d; 630 gendisk->private_data = d;
632 d->viocd_disk = gendisk; 631 d->viocd_disk = gendisk;
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 43ac61978d8b..ac6739e085e3 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void)
619 pr_info("%s", version); 619 pr_info("%s", version);
620} 620}
621 621
622static const struct of_device_id n2rng_match[];
622static int __devinit n2rng_probe(struct platform_device *op) 623static int __devinit n2rng_probe(struct platform_device *op)
623{ 624{
625 const struct of_device_id *match;
624 int victoria_falls; 626 int victoria_falls;
625 int err = -ENOMEM; 627 int err = -ENOMEM;
626 struct n2rng *np; 628 struct n2rng *np;
627 629
628 if (!op->dev.of_match) 630 match = of_match_device(n2rng_match, &op->dev);
631 if (!match)
629 return -EINVAL; 632 return -EINVAL;
630 victoria_falls = (op->dev.of_match->data != NULL); 633 victoria_falls = (match->data != NULL);
631 634
632 n2rng_driver_version(); 635 n2rng_driver_version();
633 np = kzalloc(sizeof(*np), GFP_KERNEL); 636 np = kzalloc(sizeof(*np), GFP_KERNEL);
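This hunk, and the similar ones below for ipmi_si_intf.c, xilinx_hwicap.c, ppc4xx_edac.c and i2c-mpc.c, replace uses of the dev->of_match shortcut pointer (being removed from struct device) with an explicit of_match_device() lookup against the driver's own match table. The resulting probe shape, sketched here with placeholder names:

	static const struct of_device_id my_match[];	/* table defined after probe, hence forward-declared */

	static int __devinit my_probe(struct platform_device *op)
	{
		const struct of_device_id *match;

		match = of_match_device(my_match, &op->dev);
		if (!match)
			return -EINVAL;

		/* per-variant data previously reached via op->dev.of_match->data */
		return my_setup(op, match->data);
	}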
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index cc6c9b2546a3..64c6b8530615 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = {
2554}; 2554};
2555#endif /* CONFIG_PCI */ 2555#endif /* CONFIG_PCI */
2556 2556
2557static struct of_device_id ipmi_match[];
2557static int __devinit ipmi_probe(struct platform_device *dev) 2558static int __devinit ipmi_probe(struct platform_device *dev)
2558{ 2559{
2559#ifdef CONFIG_OF 2560#ifdef CONFIG_OF
2561 const struct of_device_id *match;
2560 struct smi_info *info; 2562 struct smi_info *info;
2561 struct resource resource; 2563 struct resource resource;
2562 const __be32 *regsize, *regspacing, *regshift; 2564 const __be32 *regsize, *regspacing, *regshift;
@@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev)
2566 2568
2567 dev_info(&dev->dev, "probing via device tree\n"); 2569 dev_info(&dev->dev, "probing via device tree\n");
2568 2570
2569 if (!dev->dev.of_match) 2571 match = of_match_device(ipmi_match, &dev->dev);
2572 if (!match)
2570 return -EINVAL; 2573 return -EINVAL;
2571 2574
2572 ret = of_address_to_resource(np, 0, &resource); 2575 ret = of_address_to_resource(np, 0, &resource);
@@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev)
2601 return -ENOMEM; 2604 return -ENOMEM;
2602 } 2605 }
2603 2606
2604 info->si_type = (enum si_type) dev->dev.of_match->data; 2607 info->si_type = (enum si_type) match->data;
2605 info->addr_source = SI_DEVICETREE; 2608 info->addr_source = SI_DEVICETREE;
2606 info->irq_setup = std_irq_setup; 2609 info->irq_setup = std_irq_setup;
2607 2610
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index d6412c16385f..39ccdeada791 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev)
715} 715}
716 716
717#ifdef CONFIG_OF 717#ifdef CONFIG_OF
718static int __devinit hwicap_of_probe(struct platform_device *op) 718static int __devinit hwicap_of_probe(struct platform_device *op,
719 const struct hwicap_driver_config *config)
719{ 720{
720 struct resource res; 721 struct resource res;
721 const unsigned int *id; 722 const unsigned int *id;
722 const char *family; 723 const char *family;
723 int rc; 724 int rc;
724 const struct hwicap_driver_config *config = op->dev.of_match->data;
725 const struct config_registers *regs; 725 const struct config_registers *regs;
726 726
727 727
@@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op)
751 regs); 751 regs);
752} 752}
753#else 753#else
754static inline int hwicap_of_probe(struct platform_device *op) 754static inline int hwicap_of_probe(struct platform_device *op,
755 const struct hwicap_driver_config *config)
755{ 756{
756 return -EINVAL; 757 return -EINVAL;
757} 758}
758#endif /* CONFIG_OF */ 759#endif /* CONFIG_OF */
759 760
761static const struct of_device_id __devinitconst hwicap_of_match[];
760static int __devinit hwicap_drv_probe(struct platform_device *pdev) 762static int __devinit hwicap_drv_probe(struct platform_device *pdev)
761{ 763{
764 const struct of_device_id *match;
762 struct resource *res; 765 struct resource *res;
763 const struct config_registers *regs; 766 const struct config_registers *regs;
764 const char *family; 767 const char *family;
765 768
766 if (pdev->dev.of_match) 769 match = of_match_device(hwicap_of_match, &pdev->dev);
767 return hwicap_of_probe(pdev); 770 if (match)
771 return hwicap_of_probe(pdev, match->data);
768 772
769 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 773 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
770 if (!res) 774 if (!res)
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index c1f0045ceb8e..af8e7b1aa290 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
1019 struct ppc4xx_edac_pdata *pdata = NULL; 1019 struct ppc4xx_edac_pdata *pdata = NULL;
1020 const struct device_node *np = op->dev.of_node; 1020 const struct device_node *np = op->dev.of_node;
1021 1021
1022 if (op->dev.of_match == NULL) 1022 if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
1023 return -EINVAL; 1023 return -EINVAL;
1024 1024
1025 /* Initial driver pointers and private data */ 1025 /* Initial driver pointers and private data */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 11d7a72c22d9..140b9525b48a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1516} 1516}
1517EXPORT_SYMBOL(drm_fb_helper_initial_config); 1517EXPORT_SYMBOL(drm_fb_helper_initial_config);
1518 1518
1519bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1519/**
1520 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
1521 * probing all the outputs attached to the fb.
1522 * @fb_helper: the drm_fb_helper
1523 *
1524 * LOCKING:
1525 * Called at runtime, must take mode config lock.
1526 *
1527 * Scan the connectors attached to the fb_helper and try to put together a
1528 * setup after *notification of a change in output configuration.
1529 *
1530 * RETURNS:
1531 * 0 on success and a non-zero error code otherwise.
1532 */
1533int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1520{ 1534{
1535 struct drm_device *dev = fb_helper->dev;
1521 int count = 0; 1536 int count = 0;
1522 u32 max_width, max_height, bpp_sel; 1537 u32 max_width, max_height, bpp_sel;
1523 bool bound = false, crtcs_bound = false; 1538 bool bound = false, crtcs_bound = false;
1524 struct drm_crtc *crtc; 1539 struct drm_crtc *crtc;
1525 1540
1526 if (!fb_helper->fb) 1541 if (!fb_helper->fb)
1527 return false; 1542 return 0;
1528 1543
1529 list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { 1544 mutex_lock(&dev->mode_config.mutex);
1545 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1530 if (crtc->fb) 1546 if (crtc->fb)
1531 crtcs_bound = true; 1547 crtcs_bound = true;
1532 if (crtc->fb == fb_helper->fb) 1548 if (crtc->fb == fb_helper->fb)
@@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1535 1551
1536 if (!bound && crtcs_bound) { 1552 if (!bound && crtcs_bound) {
1537 fb_helper->delayed_hotplug = true; 1553 fb_helper->delayed_hotplug = true;
1538 return false; 1554 mutex_unlock(&dev->mode_config.mutex);
1555 return 0;
1539 } 1556 }
1540 DRM_DEBUG_KMS("\n"); 1557 DRM_DEBUG_KMS("\n");
1541 1558
@@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1546 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1563 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1547 max_height); 1564 max_height);
1548 drm_setup_crtcs(fb_helper); 1565 drm_setup_crtcs(fb_helper);
1566 mutex_unlock(&dev->mode_config.mutex);
1549 1567
1550 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1568 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1551} 1569}
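With this change drm_fb_helper_hotplug_event() takes the mode-config mutex itself and reports an int result rather than a bool. Drivers of this era typically invoke it from their output-poll/hotplug callback, roughly as in the hedged sketch below (driver names are placeholders):

	static void my_output_poll_changed(struct drm_device *dev)
	{
		struct my_driver_private *priv = dev->dev_private;

		/* re-probe outputs and re-route the fbdev console after a hotplug */
		drm_fb_helper_hotplug_event(&priv->fb_helper);
	}

	static const struct drm_mode_config_funcs my_mode_config_funcs = {
		/* .fb_create = ..., */
		.output_poll_changed = my_output_poll_changed,
	};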
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 5d00b0fc0d91..959186cbf328 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range);
431void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 431void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
432{ 432{
433 list_replace(&old->node_list, &new->node_list); 433 list_replace(&old->node_list, &new->node_list);
434 list_replace(&old->node_list, &new->hole_stack); 434 list_replace(&old->hole_stack, &new->hole_stack);
435 new->hole_follows = old->hole_follows; 435 new->hole_follows = old->hole_follows;
436 new->mm = old->mm; 436 new->mm = old->mm;
437 new->start = old->start; 437 new->start = old->start;
@@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
699 entry->size); 699 entry->size);
700 total_used += entry->size; 700 total_used += entry->size;
701 if (entry->hole_follows) { 701 if (entry->hole_follows) {
702 hole_start = drm_mm_hole_node_start(&mm->head_node); 702 hole_start = drm_mm_hole_node_start(entry);
703 hole_end = drm_mm_hole_node_end(&mm->head_node); 703 hole_end = drm_mm_hole_node_end(entry);
704 hole_size = hole_end - hole_start; 704 hole_size = hole_end - hole_start;
705 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 705 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
706 hole_start, hole_end, hole_size); 706 hole_start, hole_end, hole_size);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c34a8dd31d02..32d1b3e829c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
49unsigned int i915_powersave = 1; 49unsigned int i915_powersave = 1;
50module_param_named(powersave, i915_powersave, int, 0600); 50module_param_named(powersave, i915_powersave, int, 0600);
51 51
52unsigned int i915_semaphores = 1; 52unsigned int i915_semaphores = 0;
53module_param_named(semaphores, i915_semaphores, int, 0600); 53module_param_named(semaphores, i915_semaphores, int, 0600);
54 54
55unsigned int i915_enable_rc6 = 0; 55unsigned int i915_enable_rc6 = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e522c702b04e..2166ee071ddb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5605,9 +5605,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5605 intel_clock_t clock; 5605 intel_clock_t clock;
5606 5606
5607 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 5607 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
5608 fp = FP0(pipe); 5608 fp = I915_READ(FP0(pipe));
5609 else 5609 else
5610 fp = FP1(pipe); 5610 fp = I915_READ(FP1(pipe));
5611 5611
5612 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 5612 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
5613 if (IS_PINEVIEW(dev)) { 5613 if (IS_PINEVIEW(dev)) {
@@ -6579,8 +6579,10 @@ intel_user_framebuffer_create(struct drm_device *dev,
6579 return ERR_PTR(-ENOENT); 6579 return ERR_PTR(-ENOENT);
6580 6580
6581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6582 if (!intel_fb) 6582 if (!intel_fb) {
6583 drm_gem_object_unreference_unlocked(&obj->base);
6583 return ERR_PTR(-ENOMEM); 6584 return ERR_PTR(-ENOMEM);
6585 }
6584 6586
6585 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 6587 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6586 if (ret) { 6588 if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cb8578b7e443..a4d80314e7f8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1470 1470
1471 if (!HAS_PCH_CPT(dev) && 1471 if (!HAS_PCH_CPT(dev) &&
1472 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1472 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1473 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); 1473 struct drm_crtc *crtc = intel_dp->base.base.crtc;
1474
1474 /* Hardware workaround: leaving our transcoder select 1475 /* Hardware workaround: leaving our transcoder select
1475 * set to transcoder B while it's off will prevent the 1476 * set to transcoder B while it's off will prevent the
1476 * corresponding HDMI output on transcoder A. 1477 * corresponding HDMI output on transcoder A.
@@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1485 /* Changes to enable or select take place the vblank 1486 /* Changes to enable or select take place the vblank
1486 * after being written. 1487 * after being written.
1487 */ 1488 */
1488 intel_wait_for_vblank(dev, intel_crtc->pipe); 1489 if (crtc == NULL) {
1490 /* We can arrive here never having been attached
1491 * to a CRTC, for instance, due to inheriting
1492 * random state from the BIOS.
1493 *
1494 * If the pipe is not running, play safe and
1495 * wait for the clocks to stabilise before
1496 * continuing.
1497 */
1498 POSTING_READ(intel_dp->output_reg);
1499 msleep(50);
1500 } else
1501 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
1489 } 1502 }
1490 1503
1491 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1504 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a562bd2648c7..67cb076d271b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
539 struct drm_device *dev = dev_priv->dev; 539 struct drm_device *dev = dev_priv->dev;
540 struct drm_connector *connector = dev_priv->int_lvds_connector; 540 struct drm_connector *connector = dev_priv->int_lvds_connector;
541 541
542 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
543 return NOTIFY_OK;
544
542 /* 545 /*
543 * check and update the status of LVDS connector after receiving 546 * check and update the status of LVDS connector after receiving
544 * the LID nofication event. 547 * the LID nofication event.
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5045f8b921d6..c3e953b08992 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,8 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
152{ 152{
153 struct drm_nouveau_private *dev_priv = dev->dev_private; 153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154 154
155 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
156
157 ttm_bo_device_release(&dev_priv->ttm.bdev); 155 ttm_bo_device_release(&dev_priv->ttm.bdev);
158 156
159 nouveau_ttm_global_release(dev_priv); 157 nouveau_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4bce801bc588..c77111eca6ac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
42 42
43 nvbe->nr_pages = 0; 43 nvbe->nr_pages = 0;
44 while (num_pages--) { 44 while (num_pages--) {
45 if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { 45 /* this code path isn't called and is incorrect anyways */
46 if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
46 nvbe->pages[nvbe->nr_pages] = 47 nvbe->pages[nvbe->nr_pages] =
47 dma_addrs[nvbe->nr_pages]; 48 dma_addrs[nvbe->nr_pages];
48 nvbe->ttm_alloced[nvbe->nr_pages] = true; 49 nvbe->ttm_alloced[nvbe->nr_pages] = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a30adec5beaa..915fbce89595 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -768,6 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
768 engine->mc.takedown(dev); 768 engine->mc.takedown(dev);
769 engine->display.late_takedown(dev); 769 engine->display.late_takedown(dev);
770 770
771 if (dev_priv->vga_ram) {
772 nouveau_bo_unpin(dev_priv->vga_ram);
773 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
774 }
775
771 mutex_lock(&dev->struct_mutex); 776 mutex_lock(&dev->struct_mutex);
772 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); 777 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
773 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 778 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c20eac3379e6..9073e3bfb08c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1780 1780
1781 1781
1782 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1782 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1783 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1783 if (rdev->flags & RADEON_IS_IGP)
1784 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
1785 else
1786 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1784 1787
1785 switch (rdev->config.evergreen.max_tile_pipes) { 1788 switch (rdev->config.evergreen.max_tile_pipes) {
1786 case 1: 1789 case 1:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 94533849927e..fc40e0cc3451 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -200,6 +200,7 @@
200#define BURSTLENGTH_SHIFT 9 200#define BURSTLENGTH_SHIFT 9
201#define BURSTLENGTH_MASK 0x00000200 201#define BURSTLENGTH_MASK 0x00000200
202#define CHANSIZE_OVERRIDE (1 << 11) 202#define CHANSIZE_OVERRIDE (1 << 11)
203#define FUS_MC_ARB_RAMCFG 0x2768
203#define MC_VM_AGP_TOP 0x2028 204#define MC_VM_AGP_TOP 0x2028
204#define MC_VM_AGP_BOT 0x202C 205#define MC_VM_AGP_BOT 0x202C
205#define MC_VM_AGP_BASE 0x2030 206#define MC_VM_AGP_BASE 0x2030
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7aade20f63a8..3d8a7634bbe9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
674 674
675 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); 675 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
676 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); 676 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
677 cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); 677 cgts_tcc_disable = 0xff000000;
678 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); 678 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
679 gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); 679 gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
680 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); 680 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
@@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
871 871
872 smx_dc_ctl0 = RREG32(SMX_DC_CTL0); 872 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
873 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); 873 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
874 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 874 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
875 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 875 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
876 876
877 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); 877 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
@@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev)
887 887
888 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); 888 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
889 889
890 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 890 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
891 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 891 POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
892 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 892 SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
893 893
894 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | 894 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
895 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | 895 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
896 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); 896 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
897 897
898 898
899 WREG32(VGT_NUM_INSTANCES, 1); 899 WREG32(VGT_NUM_INSTANCES, 1);
900 900
901 WREG32(CP_PERFMON_CNTL, 0); 901 WREG32(CP_PERFMON_CNTL, 0);
902 902
903 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | 903 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
904 FETCH_FIFO_HIWATER(0x4) | 904 FETCH_FIFO_HIWATER(0x4) |
905 DONE_FIFO_HIWATER(0xe0) | 905 DONE_FIFO_HIWATER(0xe0) |
906 ALU_UPDATE_FIFO_HIWATER(0x8))); 906 ALU_UPDATE_FIFO_HIWATER(0x8)));
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f116516bfef7..90dfb2b8cf03 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
431 } 431 }
432 } 432 }
433 433
434 /* Acer laptop (Acer TravelMate 5730G) has an HDMI port 434 /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
435 * on the laptop and a DVI port on the docking station and 435 * on the laptop and a DVI port on the docking station and
436 * both share the same encoder, hpd pin, and ddc line. 436 * both share the same encoder, hpd pin, and ddc line.
437 * So while the bios table is technically correct, 437 * So while the bios table is technically correct,
@@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
440 * with different crtcs which isn't possible on the hardware 440 * with different crtcs which isn't possible on the hardware
441 * side and leaves no crtcs for LVDS or VGA. 441 * side and leaves no crtcs for LVDS or VGA.
442 */ 442 */
443 if ((dev->pdev->device == 0x95c4) && 443 if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
444 (dev->pdev->subsystem_vendor == 0x1025) && 444 (dev->pdev->subsystem_vendor == 0x1025) &&
445 (dev->pdev->subsystem_device == 0x013c)) { 445 (dev->pdev->subsystem_device == 0x013c)) {
446 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && 446 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
@@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1574 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1574 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1575 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1575 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1576 bool bad_record = false; 1576 bool bad_record = false;
1577 u8 *record = (u8 *)(mode_info->atom_context->bios + 1577 u8 *record;
1578 data_offset + 1578
1579 le16_to_cpu(lvds_info->info.usModePatchTableOffset)); 1579 if ((frev == 1) && (crev < 2))
1580 /* absolute */
1581 record = (u8 *)(mode_info->atom_context->bios +
1582 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1583 else
1584 /* relative */
1585 record = (u8 *)(mode_info->atom_context->bios +
1586 data_offset +
1587 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1580 while (*record != ATOM_RECORD_END_TYPE) { 1588 while (*record != ATOM_RECORD_END_TYPE) {
1581 switch (*record) { 1589 switch (*record) {
1582 case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1590 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index ed5dfe58f29c..9d95792bea3e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -15,6 +15,9 @@
15#define ATPX_VERSION 0 15#define ATPX_VERSION 0
16#define ATPX_GPU_PWR 2 16#define ATPX_GPU_PWR 2
17#define ATPX_MUX_SELECT 3 17#define ATPX_MUX_SELECT 3
18#define ATPX_I2C_MUX_SELECT 4
19#define ATPX_SWITCH_START 5
20#define ATPX_SWITCH_END 6
18 21
19#define ATPX_INTEGRATED 0 22#define ATPX_INTEGRATED 0
20#define ATPX_DISCRETE 1 23#define ATPX_DISCRETE 1
@@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
149 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); 152 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
150} 153}
151 154
155static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
156{
157 return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
158}
159
160static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
161{
162 return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
163}
164
165static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
166{
167 return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
168}
152 169
153static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) 170static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
154{ 171{
172 int gpu_id;
173
155 if (id == VGA_SWITCHEROO_IGD) 174 if (id == VGA_SWITCHEROO_IGD)
156 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); 175 gpu_id = ATPX_INTEGRATED;
157 else 176 else
158 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); 177 gpu_id = ATPX_DISCRETE;
178
179 radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
180 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
181 radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
182 radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
183
159 return 0; 184 return 0;
160} 185}
161 186
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index bdf2fa1189ae..3189a7efb2e9 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -167,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
167 return -EINVAL; 167 return -EINVAL;
168 } 168 }
169 169
170 radeon_crtc->cursor_width = width;
171 radeon_crtc->cursor_height = height;
172
173 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 170 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
174 if (!obj) { 171 if (!obj) {
175 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); 172 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
@@ -180,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
180 if (ret) 177 if (ret)
181 goto fail; 178 goto fail;
182 179
180 radeon_crtc->cursor_width = width;
181 radeon_crtc->cursor_height = height;
182
183 radeon_lock_cursor(crtc, true); 183 radeon_lock_cursor(crtc, true);
184 /* XXX only 27 bit offset for legacy cursor */ 184 /* XXX only 27 bit offset for legacy cursor */
185 radeon_set_cursor(crtc, obj, gpu_addr); 185 radeon_set_cursor(crtc, obj, gpu_addr);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a955bbdb608..a533f52fd163 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
181 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 181 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
182 182
183 for (i = 0; i < pages; i++, p++) { 183 for (i = 0; i < pages; i++, p++) {
184 /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 184 /* we reverted the patch using dma_addr in TTM for now but this
185 * is requested. */ 185 * code stops building on alpha so just comment it out for now */
186 if (dma_addr[i] != DMA_ERROR_CODE) { 186 if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
187 rdev->gart.ttm_alloced[p] = true; 187 rdev->gart.ttm_alloced[p] = true;
188 rdev->gart.pages_addr[p] = dma_addr[i]; 188 rdev->gart.pages_addr[p] = dma_addr[i];
189 } else { 189 } else {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 6334f8ac1209..0aa8e85a9457 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -33,6 +33,7 @@ cayman 0x9400
330x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 330x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
340x00009100 SPI_CONFIG_CNTL 340x00009100 SPI_CONFIG_CNTL
350x0000913C SPI_CONFIG_CNTL_1 350x0000913C SPI_CONFIG_CNTL_1
360x00009508 TA_CNTL_AUX
360x00009830 DB_DEBUG 370x00009830 DB_DEBUG
370x00009834 DB_DEBUG2 380x00009834 DB_DEBUG2
380x00009838 DB_DEBUG3 390x00009838 DB_DEBUG3
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 7e1637176e08..0e28cae7ea43 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -46,6 +46,7 @@ evergreen 0x9400
460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
470x00009100 SPI_CONFIG_CNTL 470x00009100 SPI_CONFIG_CNTL
480x0000913C SPI_CONFIG_CNTL_1 480x0000913C SPI_CONFIG_CNTL_1
490x00009508 TA_CNTL_AUX
490x00009700 VC_CNTL 500x00009700 VC_CNTL
500x00009714 VC_ENHANCE 510x00009714 VC_ENHANCE
510x00009830 DB_DEBUG 520x00009830 DB_DEBUG
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e01cacba685f..498b284e5ef9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
219 int i; 219 int i;
220 struct vga_switcheroo_client *active = NULL; 220 struct vga_switcheroo_client *active = NULL;
221 221
222 if (new_client->active == true)
223 return 0;
224
225 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 222 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
226 if (vgasr_priv.clients[i].active == true) { 223 if (vgasr_priv.clients[i].active == true) {
227 active = &vgasr_priv.clients[i]; 224 active = &vgasr_priv.clients[i];
@@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
372 goto out; 369 goto out;
373 } 370 }
374 371
372 if (client->active == true)
373 goto out;
374
375 /* okay we want a switch - test if devices are willing to switch */ 375 /* okay we want a switch - test if devices are willing to switch */
376 can_switch = true; 376 can_switch = true;
377 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 377 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 75b984c519ac..107397a606b4 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = {
560 .timeout = HZ, 560 .timeout = HZ,
561}; 561};
562 562
563static const struct of_device_id mpc_i2c_of_match[];
563static int __devinit fsl_i2c_probe(struct platform_device *op) 564static int __devinit fsl_i2c_probe(struct platform_device *op)
564{ 565{
566 const struct of_device_id *match;
565 struct mpc_i2c *i2c; 567 struct mpc_i2c *i2c;
566 const u32 *prop; 568 const u32 *prop;
567 u32 clock = MPC_I2C_CLOCK_LEGACY; 569 u32 clock = MPC_I2C_CLOCK_LEGACY;
568 int result = 0; 570 int result = 0;
569 int plen; 571 int plen;
570 572
571 if (!op->dev.of_match) 573 match = of_match_device(mpc_i2c_of_match, &op->dev);
574 if (!match)
572 return -EINVAL; 575 return -EINVAL;
573 576
574 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); 577 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
@@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
605 clock = *prop; 608 clock = *prop;
606 } 609 }
607 610
608 if (op->dev.of_match->data) { 611 if (match->data) {
609 struct mpc_i2c_data *data = op->dev.of_match->data; 612 struct mpc_i2c_data *data = match->data;
610 data->setup(op->dev.of_node, i2c, clock, data->prescaler); 613 data->setup(op->dev.of_node, i2c, clock, data->prescaler);
611 } else { 614 } else {
612 /* Backwards compatibility */ 615 /* Backwards compatibility */
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index a97e3fec8148..04be9f82e14b 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
65 jiffies, expires); 65 jiffies, expires);
66 66
67 timer->expires = jiffies + expires; 67 timer->expires = jiffies + expires;
68 timer->data = (unsigned long)&alg_data; 68 timer->data = (unsigned long)alg_data;
69 69
70 add_timer(timer); 70 add_timer(timer);
71} 71}
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index c24946f51256..1de1c19dad30 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -281,17 +281,24 @@ struct ser_req {
281 u8 command; 281 u8 command;
282 u8 ref_off; 282 u8 ref_off;
283 u16 scratch; 283 u16 scratch;
284 __be16 sample;
285 struct spi_message msg; 284 struct spi_message msg;
286 struct spi_transfer xfer[6]; 285 struct spi_transfer xfer[6];
286 /*
287 * DMA (thus cache coherency maintenance) requires the
288 * transfer buffers to live in their own cache lines.
289 */
290 __be16 sample ____cacheline_aligned;
287}; 291};
288 292
289struct ads7845_ser_req { 293struct ads7845_ser_req {
290 u8 command[3]; 294 u8 command[3];
291 u8 pwrdown[3];
292 u8 sample[3];
293 struct spi_message msg; 295 struct spi_message msg;
294 struct spi_transfer xfer[2]; 296 struct spi_transfer xfer[2];
297 /*
298 * DMA (thus cache coherency maintenance) requires the
299 * transfer buffers to live in their own cache lines.
300 */
301 u8 sample[3] ____cacheline_aligned;
295}; 302};
296 303
297static int ads7846_read12_ser(struct device *dev, unsigned command) 304static int ads7846_read12_ser(struct device *dev, unsigned command)
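Both ads7846 request structs are reworked so the buffers handed to the SPI core, which may be mapped for DMA, sit at the end of the struct on their own cache line; otherwise cache maintenance on the DMA buffer can clobber the adjacent CPU-written fields. The general shape of the idiom, as a hedged sketch with made-up field names:

	struct my_spi_req {
		struct spi_message msg;
		struct spi_transfer xfer;
		u16 scratch;			/* CPU-only bookkeeping */
		/*
		 * The receive buffer may be DMA'd into by the SPI controller;
		 * keeping it on its own cache line prevents cache invalidation
		 * from corrupting the CPU-updated fields above.
		 */
		__be16 sample ____cacheline_aligned;
	};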
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index e7089a1f6cb6..b37e6186d0fa 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = {
349 {LM3530_NAME, 0}, 349 {LM3530_NAME, 0},
350 {} 350 {}
351}; 351};
352MODULE_DEVICE_TABLE(i2c, lm3530_id);
352 353
353static struct i2c_driver lm3530_i2c_driver = { 354static struct i2c_driver lm3530_i2c_driver = {
354 .probe = lm3530_probe, 355 .probe = lm3530_probe,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index c820e2f53527..3f442003623d 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core)
524 for (todo = 32; todo > 0; todo -= bits) { 524 for (todo = 32; todo > 0; todo -= bits) {
525 ev.pulse = samples & 0x80000000 ? false : true; 525 ev.pulse = samples & 0x80000000 ? false : true;
526 bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); 526 bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
527 ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); 527 ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
528 ir_raw_event_store_with_filter(ir->dev, &ev); 528 ir_raw_event_store_with_filter(ir->dev, &ev);
529 samples <<= bits; 529 samples <<= bits;
530 } 530 }
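The rewritten duration expression is mathematically the same but keeps the intermediate small: with bits up to 32, bits * NSEC_PER_SEC can reach 3.2 x 10^10, which does not fit in a 32-bit long, whereas bits * (NSEC_PER_SEC / 1000) stays at or below 3.2 x 10^7 before the divide by ir_samplerate.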
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 3973f9a94753..ddb4c091dedc 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
136} 136}
137EXPORT_SYMBOL(soc_camera_apply_sensor_flags); 137EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
138 138
139#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
140 ((x) >> 24) & 0xff
141
142static int soc_camera_try_fmt(struct soc_camera_device *icd,
143 struct v4l2_format *f)
144{
145 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
146 struct v4l2_pix_format *pix = &f->fmt.pix;
147 int ret;
148
149 dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
150 pixfmtstr(pix->pixelformat), pix->width, pix->height);
151
152 pix->bytesperline = 0;
153 pix->sizeimage = 0;
154
155 ret = ici->ops->try_fmt(icd, f);
156 if (ret < 0)
157 return ret;
158
159 if (!pix->sizeimage) {
160 if (!pix->bytesperline) {
161 const struct soc_camera_format_xlate *xlate;
162
163 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
164 if (!xlate)
165 return -EINVAL;
166
167 ret = soc_mbus_bytes_per_line(pix->width,
168 xlate->host_fmt);
169 if (ret > 0)
170 pix->bytesperline = ret;
171 }
172 if (pix->bytesperline)
173 pix->sizeimage = pix->bytesperline * pix->height;
174 }
175
176 return 0;
177}
178
139static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, 179static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
140 struct v4l2_format *f) 180 struct v4l2_format *f)
141{ 181{
142 struct soc_camera_device *icd = file->private_data; 182 struct soc_camera_device *icd = file->private_data;
143 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
144 183
145 WARN_ON(priv != file->private_data); 184 WARN_ON(priv != file->private_data);
146 185
@@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
149 return -EINVAL; 188 return -EINVAL;
150 189
151 /* limit format to hardware capabilities */ 190 /* limit format to hardware capabilities */
152 return ici->ops->try_fmt(icd, f); 191 return soc_camera_try_fmt(icd, f);
153} 192}
154 193
155static int soc_camera_enum_input(struct file *file, void *priv, 194static int soc_camera_enum_input(struct file *file, void *priv,
@@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
362 icd->user_formats = NULL; 401 icd->user_formats = NULL;
363} 402}
364 403
365#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
366 ((x) >> 24) & 0xff
367
368/* Called with .vb_lock held, or from the first open(2), see comment there */ 404/* Called with .vb_lock held, or from the first open(2), see comment there */
369static int soc_camera_set_fmt(struct soc_camera_device *icd, 405static int soc_camera_set_fmt(struct soc_camera_device *icd,
370 struct v4l2_format *f) 406 struct v4l2_format *f)
@@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
377 pixfmtstr(pix->pixelformat), pix->width, pix->height); 413 pixfmtstr(pix->pixelformat), pix->width, pix->height);
378 414
379 /* We always call try_fmt() before set_fmt() or set_crop() */ 415 /* We always call try_fmt() before set_fmt() or set_crop() */
380 ret = ici->ops->try_fmt(icd, f); 416 ret = soc_camera_try_fmt(icd, f);
381 if (ret < 0) 417 if (ret < 0)
382 return ret; 418 return ret;
383 419
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 5aeaf876ba9b..4aae501f02d0 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
155 sd->v4l2_dev = v4l2_dev; 155 sd->v4l2_dev = v4l2_dev;
156 if (sd->internal_ops && sd->internal_ops->registered) { 156 if (sd->internal_ops && sd->internal_ops->registered) {
157 err = sd->internal_ops->registered(sd); 157 err = sd->internal_ops->registered(sd);
158 if (err) 158 if (err) {
159 module_put(sd->owner);
159 return err; 160 return err;
161 }
160 } 162 }
161 163
162 /* This just returns 0 if either of the two args is NULL */ 164 /* This just returns 0 if either of the two args is NULL */
@@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
164 if (err) { 166 if (err) {
165 if (sd->internal_ops && sd->internal_ops->unregistered) 167 if (sd->internal_ops && sd->internal_ops->unregistered)
166 sd->internal_ops->unregistered(sd); 168 sd->internal_ops->unregistered(sd);
169 module_put(sd->owner);
167 return err; 170 return err;
168 } 171 }
169 172
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 0b8064490676..812729ebf09e 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
155 155
156 switch (cmd) { 156 switch (cmd) {
157 case VIDIOC_QUERYCTRL: 157 case VIDIOC_QUERYCTRL:
158 return v4l2_subdev_queryctrl(sd, arg); 158 return v4l2_queryctrl(sd->ctrl_handler, arg);
159 159
160 case VIDIOC_QUERYMENU: 160 case VIDIOC_QUERYMENU:
161 return v4l2_subdev_querymenu(sd, arg); 161 return v4l2_querymenu(sd->ctrl_handler, arg);
162 162
163 case VIDIOC_G_CTRL: 163 case VIDIOC_G_CTRL:
164 return v4l2_subdev_g_ctrl(sd, arg); 164 return v4l2_g_ctrl(sd->ctrl_handler, arg);
165 165
166 case VIDIOC_S_CTRL: 166 case VIDIOC_S_CTRL:
167 return v4l2_subdev_s_ctrl(sd, arg); 167 return v4l2_s_ctrl(sd->ctrl_handler, arg);
168 168
169 case VIDIOC_G_EXT_CTRLS: 169 case VIDIOC_G_EXT_CTRLS:
170 return v4l2_subdev_g_ext_ctrls(sd, arg); 170 return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);
171 171
172 case VIDIOC_S_EXT_CTRLS: 172 case VIDIOC_S_EXT_CTRLS:
173 return v4l2_subdev_s_ext_ctrls(sd, arg); 173 return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);
174 174
175 case VIDIOC_TRY_EXT_CTRLS: 175 case VIDIOC_TRY_EXT_CTRLS:
176 return v4l2_subdev_try_ext_ctrls(sd, arg); 176 return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);
177 177
178 case VIDIOC_DQEVENT: 178 case VIDIOC_DQEVENT:
179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) 179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 643ad52e3ca2..4796bbf0ae4e 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
1000 gd->major = I2O_MAJOR; 1000 gd->major = I2O_MAJOR;
1001 gd->queue = queue; 1001 gd->queue = queue;
1002 gd->fops = &i2o_block_fops; 1002 gd->fops = &i2o_block_fops;
1003 gd->events = DISK_EVENT_MEDIA_CHANGE;
1004 gd->private_data = dev; 1003 gd->private_data = dev;
1005 1004
1006 dev->gd = gd; 1005 dev->gd = gd;
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index d4a851c6b5bf..0b4d5b23bec9 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
144 int iter, i; 144 int iter, i;
145 unsigned long flags; 145 unsigned long flags;
146 146
147 data->chip->irq_ack(irq_data); 147 data->chip->irq_ack(data);
148 148
149 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 149 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
150 u32 status; 150 u32 status;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 2e165117457b..3ab9ffa00aad 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev)
717 gpio_request(pdata->ehci_data->reset_gpio_port[0], 717 gpio_request(pdata->ehci_data->reset_gpio_port[0],
718 "USB1 PHY reset"); 718 "USB1 PHY reset");
719 gpio_direction_output 719 gpio_direction_output
720 (pdata->ehci_data->reset_gpio_port[0], 1); 720 (pdata->ehci_data->reset_gpio_port[0], 0);
721 } 721 }
722 722
723 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { 723 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
724 gpio_request(pdata->ehci_data->reset_gpio_port[1], 724 gpio_request(pdata->ehci_data->reset_gpio_port[1],
725 "USB2 PHY reset"); 725 "USB2 PHY reset");
726 gpio_direction_output 726 gpio_direction_output
727 (pdata->ehci_data->reset_gpio_port[1], 1); 727 (pdata->ehci_data->reset_gpio_port[1], 0);
728 } 728 }
729 729
730 /* Hold the PHY in RESET for enough time till DIR is high */ 730 /* Hold the PHY in RESET for enough time till DIR is high */
@@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev)
904 904
905 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 905 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
906 gpio_set_value 906 gpio_set_value
907 (pdata->ehci_data->reset_gpio_port[0], 0); 907 (pdata->ehci_data->reset_gpio_port[0], 1);
908 908
909 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 909 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
910 gpio_set_value 910 gpio_set_value
911 (pdata->ehci_data->reset_gpio_port[1], 0); 911 (pdata->ehci_data->reset_gpio_port[1], 1);
912 } 912 }
913 913
914end_count: 914end_count:
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 16422de0823a..2c0d4d16491a 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
447 if (err) 447 if (err)
448 goto out; 448 goto out;
449 } 449 }
450 if (tscript->flags & TWL4030_SLEEP_SCRIPT) 450 if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
451 if (order) 451 if (order)
452 pr_warning("TWL4030: Bad order of scripts (sleep "\ 452 pr_warning("TWL4030: Bad order of scripts (sleep "\
453 "script before wakeup) Leads to boot"\ 453 "script before wakeup) Leads to boot"\
454 "failure on some boards\n"); 454 "failure on some boards\n");
455 err = twl4030_config_sleep_sequence(address); 455 err = twl4030_config_sleep_sequence(address);
456 }
456out: 457out:
457 return err; 458 return err;
458} 459}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2b200c1cfbba..461e6a17fb90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
94 spin_unlock_irqrestore(&host->clk_lock, flags); 94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return; 95 return;
96 } 96 }
97 mmc_claim_host(host); 97 mutex_lock(&host->clk_gate_mutex);
98 spin_lock_irqsave(&host->clk_lock, flags); 98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) { 99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags); 100 spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); 104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 } 105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags); 106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mmc_release_host(host); 107 mutex_unlock(&host->clk_gate_mutex);
108} 108}
109 109
110/* 110/*
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
133 mmc_claim_host(host); 133 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags); 134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) { 135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags); 136 spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
140 } 140 }
141 host->clk_requests++; 141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags); 142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mmc_release_host(host); 143 mutex_unlock(&host->clk_gate_mutex);
144} 144}
145 145
146/** 146/**
@@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
215 host->clk_gated = false; 215 host->clk_gated = false;
216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); 216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
217 spin_lock_init(&host->clk_lock); 217 spin_lock_init(&host->clk_lock);
218 mutex_init(&host->clk_gate_mutex);
218} 219}
219 220
220/** 221/**
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index f9b611fc773e..60e4186a4345 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
124#endif 124#endif
125} 125}
126 126
127static const struct of_device_id sdhci_of_match[];
127static int __devinit sdhci_of_probe(struct platform_device *ofdev) 128static int __devinit sdhci_of_probe(struct platform_device *ofdev)
128{ 129{
130 const struct of_device_id *match;
129 struct device_node *np = ofdev->dev.of_node; 131 struct device_node *np = ofdev->dev.of_node;
130 struct sdhci_of_data *sdhci_of_data; 132 struct sdhci_of_data *sdhci_of_data;
131 struct sdhci_host *host; 133 struct sdhci_host *host;
@@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev)
134 int size; 136 int size;
135 int ret; 137 int ret;
136 138
137 if (!ofdev->dev.of_match) 139 match = of_match_device(sdhci_of_match, &ofdev->dev);
140 if (!match)
138 return -EINVAL; 141 return -EINVAL;
139 sdhci_of_data = ofdev->dev.of_match->data; 142 sdhci_of_data = match->data;
140 143
141 if (!of_device_is_available(np)) 144 if (!of_device_is_available(np))
142 return -ENODEV; 145 return -ENODEV;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index bd483f0c57e1..c1d33464aee8 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes)
214} 214}
215#endif 215#endif
216 216
217static struct of_device_id of_flash_match[];
217static int __devinit of_flash_probe(struct platform_device *dev) 218static int __devinit of_flash_probe(struct platform_device *dev)
218{ 219{
219#ifdef CONFIG_MTD_PARTITIONS 220#ifdef CONFIG_MTD_PARTITIONS
220 const char **part_probe_types; 221 const char **part_probe_types;
221#endif 222#endif
223 const struct of_device_id *match;
222 struct device_node *dp = dev->dev.of_node; 224 struct device_node *dp = dev->dev.of_node;
223 struct resource res; 225 struct resource res;
224 struct of_flash *info; 226 struct of_flash *info;
@@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
232 struct mtd_info **mtd_list = NULL; 234 struct mtd_info **mtd_list = NULL;
233 resource_size_t res_size; 235 resource_size_t res_size;
234 236
235 if (!dev->dev.of_match) 237 match = of_match_device(of_flash_match, &dev->dev);
238 if (!match)
236 return -EINVAL; 239 return -EINVAL;
237 probe_type = dev->dev.of_match->data; 240 probe_type = match->data;
238 241
239 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 242 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
240 243
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dc280bc8eba2..6c884ef1b069 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2536,7 +2536,7 @@ config S6GMAC
2536source "drivers/net/stmmac/Kconfig" 2536source "drivers/net/stmmac/Kconfig"
2537 2537
2538config PCH_GBE 2538config PCH_GBE
2539 tristate "PCH Gigabit Ethernet" 2539 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
2540 depends on PCI 2540 depends on PCI
2541 select MII 2541 select MII
2542 ---help--- 2542 ---help---
@@ -2548,6 +2548,12 @@ config PCH_GBE
2548 to Gigabit Ethernet. 2548 to Gigabit Ethernet.
2549 This driver enables Gigabit Ethernet function. 2549 This driver enables Gigabit Ethernet function.
2550 2550
2551 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
2552 Output Hub), ML7223.
2553 ML7223 IOH is for MP(Media Phone) use.
2554 ML7223 is companion chip for Intel Atom E6xx series.
2555 ML7223 is completely compatible for Intel EG20T PCH.
2556
2551endif # NETDEV_1000 2557endif # NETDEV_1000
2552 2558
2553# 2559#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 01b604ad155e..e5a7375685ad 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
144obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 144obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
145obj-$(CONFIG_B44) += b44.o 145obj-$(CONFIG_B44) += b44.o
146obj-$(CONFIG_FORCEDETH) += forcedeth.o 146obj-$(CONFIG_FORCEDETH) += forcedeth.o
147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o 147obj-$(CONFIG_NE_H8300) += ne-h8300.o
148obj-$(CONFIG_AX88796) += ax88796.o 148obj-$(CONFIG_AX88796) += ax88796.o
149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
150obj-$(CONFIG_FTMAC100) += ftmac100.o 150obj-$(CONFIG_FTMAC100) += ftmac100.o
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
219obj-$(CONFIG_LP486E) += lp486e.o 219obj-$(CONFIG_LP486E) += lp486e.o
220 220
221obj-$(CONFIG_ETH16I) += eth16i.o 221obj-$(CONFIG_ETH16I) += eth16i.o
222obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o 222obj-$(CONFIG_ZORRO8390) += zorro8390.o
223obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 223obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
224obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 224obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
225obj-$(CONFIG_EQUALIZER) += eql.o 225obj-$(CONFIG_EQUALIZER) += eql.o
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
231obj-$(CONFIG_DECLANCE) += declance.o 231obj-$(CONFIG_DECLANCE) += declance.o
232obj-$(CONFIG_ATARILANCE) += atarilance.o 232obj-$(CONFIG_ATARILANCE) += atarilance.o
233obj-$(CONFIG_A2065) += a2065.o 233obj-$(CONFIG_A2065) += a2065.o
234obj-$(CONFIG_HYDRA) += hydra.o 8390.o 234obj-$(CONFIG_HYDRA) += hydra.o
235obj-$(CONFIG_ARIADNE) += ariadne.o 235obj-$(CONFIG_ARIADNE) += ariadne.o
236obj-$(CONFIG_CS89x0) += cs89x0.o 236obj-$(CONFIG_CS89x0) += cs89x0.o
237obj-$(CONFIG_MACSONIC) += macsonic.o 237obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4af235d41fda..fbfb5b47c506 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -527,7 +527,7 @@ static void __init etherh_banner(void)
527 * Read the ethernet address string from the on board rom. 527 * Read the ethernet address string from the on board rom.
528 * This is an ascii string... 528 * This is an ascii string...
529 */ 529 */
530static int __init etherh_addr(char *addr, struct expansion_card *ec) 530static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
531{ 531{
532 struct in_chunk_dir cd; 532 struct in_chunk_dir cd;
533 char *s; 533 char *s;
@@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = {
655static u32 etherh_regoffsets[16]; 655static u32 etherh_regoffsets[16];
656static u32 etherm_regoffsets[16]; 656static u32 etherm_regoffsets[16];
657 657
658static int __init 658static int __devinit
659etherh_probe(struct expansion_card *ec, const struct ecard_id *id) 659etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
660{ 660{
661 const struct etherh_data *data = id->data; 661 const struct etherh_data *data = id->data;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 66823eded7a3..2353eca32593 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -213,7 +213,7 @@ struct be_rx_stats {
213 213
214struct be_rx_compl_info { 214struct be_rx_compl_info {
215 u32 rss_hash; 215 u32 rss_hash;
216 u16 vid; 216 u16 vlan_tag;
217 u16 pkt_size; 217 u16 pkt_size;
218 u16 rxq_idx; 218 u16 rxq_idx;
219 u16 mac_id; 219 u16 mac_id;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e2d825bb94a..9dc9394fd4ca 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
132 struct be_async_event_grp5_pvid_state *evt) 132 struct be_async_event_grp5_pvid_state *evt)
133{ 133{
134 if (evt->enabled) 134 if (evt->enabled)
135 adapter->pvid = evt->tag; 135 adapter->pvid = le16_to_cpu(evt->tag);
136 else 136 else
137 adapter->pvid = 0; 137 adapter->pvid = 0;
138} 138}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 02a0443d1821..9187fb4e08f1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1018 kfree_skb(skb); 1018 kfree_skb(skb);
1019 return; 1019 return;
1020 } 1020 }
1021 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); 1021 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1022 rxcp->vlan_tag);
1022 } else { 1023 } else {
1023 netif_receive_skb(skb); 1024 netif_receive_skb(skb);
1024 } 1025 }
@@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1076 if (likely(!rxcp->vlanf)) 1077 if (likely(!rxcp->vlanf))
1077 napi_gro_frags(&eq_obj->napi); 1078 napi_gro_frags(&eq_obj->napi);
1078 else 1079 else
1079 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); 1080 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1081 rxcp->vlan_tag);
1080} 1082}
1081 1083
1082static void be_parse_rx_compl_v1(struct be_adapter *adapter, 1084static void be_parse_rx_compl_v1(struct be_adapter *adapter,
@@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1102 rxcp->pkt_type = 1104 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); 1105 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); 1106 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
1105 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); 1107 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1108 compl);
1106} 1109}
1107 1110
1108static void be_parse_rx_compl_v0(struct be_adapter *adapter, 1111static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1128 rxcp->pkt_type = 1131 rxcp->pkt_type =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); 1132 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1130 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); 1133 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
1131 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); 1134 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1135 compl);
1132} 1136}
1133 1137
1134static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) 1138static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1155 rxcp->vlanf = 0; 1159 rxcp->vlanf = 0;
1156 1160
1157 if (!lancer_chip(adapter)) 1161 if (!lancer_chip(adapter))
1158 rxcp->vid = swab16(rxcp->vid); 1162 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1159 1163
1160 if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) 1164 if (((adapter->pvid & VLAN_VID_MASK) ==
1165 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1166 !adapter->vlan_tag[rxcp->vlan_tag])
1161 rxcp->vlanf = 0; 1167 rxcp->vlanf = 0;
1162 1168
1163 /* As the compl has been parsed, reset it; we wont touch it again */ 1169 /* As the compl has been parsed, reset it; we wont touch it again */
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index b28baff70864..01b8a6af275b 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -39,7 +39,7 @@
39 39
40typedef struct mac_addr { 40typedef struct mac_addr {
41 u8 mac_addr_value[ETH_ALEN]; 41 u8 mac_addr_value[ETH_ALEN];
42} mac_addr_t; 42} __packed mac_addr_t;
43 43
44enum { 44enum {
45 BOND_AD_STABLE = 0, 45 BOND_AD_STABLE = 0,
@@ -134,12 +134,12 @@ typedef struct lacpdu {
134 u8 tlv_type_terminator; // = terminator 134 u8 tlv_type_terminator; // = terminator
135 u8 terminator_length; // = 0 135 u8 terminator_length; // = 0
136 u8 reserved_50[50]; // = 0 136 u8 reserved_50[50]; // = 0
137} lacpdu_t; 137} __packed lacpdu_t;
138 138
139typedef struct lacpdu_header { 139typedef struct lacpdu_header {
140 struct ethhdr hdr; 140 struct ethhdr hdr;
141 struct lacpdu lacpdu; 141 struct lacpdu lacpdu;
142} lacpdu_header_t; 142} __packed lacpdu_header_t;
143 143
144// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) 144// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
145typedef struct bond_marker { 145typedef struct bond_marker {
@@ -155,12 +155,12 @@ typedef struct bond_marker {
155 u8 tlv_type_terminator; // = 0x00 155 u8 tlv_type_terminator; // = 0x00
156 u8 terminator_length; // = 0x00 156 u8 terminator_length; // = 0x00
157 u8 reserved_90[90]; // = 0 157 u8 reserved_90[90]; // = 0
158} bond_marker_t; 158} __packed bond_marker_t;
159 159
160typedef struct bond_marker_header { 160typedef struct bond_marker_header {
161 struct ethhdr hdr; 161 struct ethhdr hdr;
162 struct bond_marker marker; 162 struct bond_marker marker;
163} bond_marker_header_t; 163} __packed bond_marker_header_t;
164 164
165#pragma pack() 165#pragma pack()
166 166
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index bd1d811c204f..5fedc3375562 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
247} 247}
248#endif /* CONFIG_PPC_MPC512x */ 248#endif /* CONFIG_PPC_MPC512x */
249 249
250static struct of_device_id mpc5xxx_can_table[];
250static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) 251static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
251{ 252{
253 const struct of_device_id *match;
252 struct mpc5xxx_can_data *data; 254 struct mpc5xxx_can_data *data;
253 struct device_node *np = ofdev->dev.of_node; 255 struct device_node *np = ofdev->dev.of_node;
254 struct net_device *dev; 256 struct net_device *dev;
@@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
258 int irq, mscan_clksrc = 0; 260 int irq, mscan_clksrc = 0;
259 int err = -ENOMEM; 261 int err = -ENOMEM;
260 262
261 if (!ofdev->dev.of_match) 263 match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
264 if (!match)
262 return -EINVAL; 265 return -EINVAL;
263 data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; 266 data = match->data;
264 267
265 base = of_iomap(np, 0); 268 base = of_iomap(np, 0);
266 if (!base) { 269 if (!base) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index a358ea9445a2..f501bba1fc6f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev)
346 | (priv->read_reg(priv, REG_ID2) >> 5); 346 | (priv->read_reg(priv, REG_ID2) >> 5);
347 } 347 }
348 348
349 cf->can_dlc = get_can_dlc(fi & 0x0F);
349 if (fi & FI_RTR) { 350 if (fi & FI_RTR) {
350 id |= CAN_RTR_FLAG; 351 id |= CAN_RTR_FLAG;
351 } else { 352 } else {
352 cf->can_dlc = get_can_dlc(fi & 0x0F);
353 for (i = 0; i < cf->can_dlc; i++) 353 for (i = 0; i < cf->can_dlc; i++)
354 cf->data[i] = priv->read_reg(priv, dreg++); 354 cf->data[i] = priv->read_reg(priv, dreg++);
355 } 355 }
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index b423965a78d1..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty)
583 /* Done. We have linked the TTY line to a channel. */ 583 /* Done. We have linked the TTY line to a channel. */
584 rtnl_unlock(); 584 rtnl_unlock();
585 tty->receive_room = 65536; /* We don't flow control */ 585 tty->receive_room = 65536; /* We don't flow control */
586 return sl->dev->base_addr; 586
587 /* TTY layer expects 0 on success */
588 return 0;
587 589
588err_free_chan: 590err_free_chan:
589 sl->tty = NULL; 591 sl->tty = NULL;
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 3e2e734fecb7..f3bbdcef338c 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
55 cmd->duplex = -1; 55 cmd->duplex = -1;
56 } 56 }
57 57
58 cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full 58 if (cmd->speed == SPEED_10000) {
59 | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half 59 cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
60 | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half 60 cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
61 | SUPPORTED_Autoneg | SUPPORTED_FIBRE); 61 cmd->port = PORT_FIBRE;
62 62 } else {
63 cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg 63 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
64 | ADVERTISED_FIBRE); 64 | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
65 | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
66 | SUPPORTED_TP);
67 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
68 | ADVERTISED_TP);
69 cmd->port = PORT_TP;
70 }
65 71
66 cmd->port = PORT_FIBRE;
67 cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; 72 cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
68 73
69 return 0; 74 return 0;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 53c0f04b1b23..cf79cf759e13 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev)
2688 netif_start_queue(dev); 2688 netif_start_queue(dev);
2689 } 2689 }
2690 2690
2691 init_waitqueue_head(&port->swqe_avail_wq);
2692 init_waitqueue_head(&port->restart_wq);
2693
2694 mutex_unlock(&port->port_lock); 2691 mutex_unlock(&port->port_lock);
2695 2692
2696 return ret; 2693 return ret;
@@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3276 3273
3277 INIT_WORK(&port->reset_task, ehea_reset_port); 3274 INIT_WORK(&port->reset_task, ehea_reset_port);
3278 3275
3276 init_waitqueue_head(&port->swqe_avail_wq);
3277 init_waitqueue_head(&port->restart_wq);
3278
3279 ret = register_netdev(dev); 3279 ret = register_netdev(dev);
3280 if (ret) { 3280 if (ret) {
3281 pr_err("register_netdev failed. ret=%d\n", ret); 3281 pr_err("register_netdev failed. ret=%d\n", ret);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 24cb953900dd..5131e61c358c 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = {
998#endif 998#endif
999}; 999};
1000 1000
1001static struct of_device_id fs_enet_match[];
1001static int __devinit fs_enet_probe(struct platform_device *ofdev) 1002static int __devinit fs_enet_probe(struct platform_device *ofdev)
1002{ 1003{
1004 const struct of_device_id *match;
1003 struct net_device *ndev; 1005 struct net_device *ndev;
1004 struct fs_enet_private *fep; 1006 struct fs_enet_private *fep;
1005 struct fs_platform_info *fpi; 1007 struct fs_platform_info *fpi;
@@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
1007 const u8 *mac_addr; 1009 const u8 *mac_addr;
1008 int privsize, len, ret = -ENODEV; 1010 int privsize, len, ret = -ENODEV;
1009 1011
1010 if (!ofdev->dev.of_match) 1012 match = of_match_device(fs_enet_match, &ofdev->dev);
1013 if (!match)
1011 return -EINVAL; 1014 return -EINVAL;
1012 1015
1013 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); 1016 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
1014 if (!fpi) 1017 if (!fpi)
1015 return -ENOMEM; 1018 return -ENOMEM;
1016 1019
1017 if (!IS_FEC(ofdev->dev.of_match)) { 1020 if (!IS_FEC(match)) {
1018 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); 1021 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
1019 if (!data || len != 4) 1022 if (!data || len != 4)
1020 goto out_free_fpi; 1023 goto out_free_fpi;
@@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
1049 fep->dev = &ofdev->dev; 1052 fep->dev = &ofdev->dev;
1050 fep->ndev = ndev; 1053 fep->ndev = ndev;
1051 fep->fpi = fpi; 1054 fep->fpi = fpi;
1052 fep->ops = ofdev->dev.of_match->data; 1055 fep->ops = match->data;
1053 1056
1054 ret = fep->ops->setup_data(ndev); 1057 ret = fep->ops->setup_data(ndev);
1055 if (ret) 1058 if (ret)
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 7e840d373ab3..6a2e150e75bb 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
101 return 0; 101 return 0;
102} 102}
103 103
104static struct of_device_id fs_enet_mdio_fec_match[];
104static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) 105static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
105{ 106{
107 const struct of_device_id *match;
106 struct resource res; 108 struct resource res;
107 struct mii_bus *new_bus; 109 struct mii_bus *new_bus;
108 struct fec_info *fec; 110 struct fec_info *fec;
109 int (*get_bus_freq)(struct device_node *); 111 int (*get_bus_freq)(struct device_node *);
110 int ret = -ENOMEM, clock, speed; 112 int ret = -ENOMEM, clock, speed;
111 113
112 if (!ofdev->dev.of_match) 114 match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
115 if (!match)
113 return -EINVAL; 116 return -EINVAL;
114 get_bus_freq = ofdev->dev.of_match->data; 117 get_bus_freq = match->data;
115 118
116 new_bus = mdiobus_alloc(); 119 new_bus = mdiobus_alloc();
117 if (!new_bus) 120 if (!new_bus)
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index c5ef62ceb840..1cd481c04202 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = {
98 .ndo_open = hydra_open, 98 .ndo_open = hydra_open,
99 .ndo_stop = hydra_close, 99 .ndo_stop = hydra_close,
100 100
101 .ndo_start_xmit = ei_start_xmit, 101 .ndo_start_xmit = __ei_start_xmit,
102 .ndo_tx_timeout = ei_tx_timeout, 102 .ndo_tx_timeout = __ei_tx_timeout,
103 .ndo_get_stats = ei_get_stats, 103 .ndo_get_stats = __ei_get_stats,
104 .ndo_set_multicast_list = ei_set_multicast_list, 104 .ndo_set_multicast_list = __ei_set_multicast_list,
105 .ndo_validate_addr = eth_validate_addr, 105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_set_mac_address = eth_mac_addr, 106 .ndo_set_mac_address = eth_mac_addr,
107 .ndo_change_mtu = eth_change_mtu, 107 .ndo_change_mtu = eth_change_mtu,
108#ifdef CONFIG_NET_POLL_CONTROLLER 108#ifdef CONFIG_NET_POLL_CONTROLLER
109 .ndo_poll_controller = ei_poll, 109 .ndo_poll_controller = __ei_poll,
110#endif 110#endif
111}; 111};
112 112
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
125 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 125 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
126 }; 126 };
127 127
128 dev = alloc_ei_netdev(); 128 dev = ____alloc_ei_netdev(0);
129 if (!dev) 129 if (!dev)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 30be8c634ebd..7298a34bc795 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
167#ifndef MODULE 167#ifndef MODULE
168struct net_device * __init ne_probe(int unit) 168struct net_device * __init ne_probe(int unit)
169{ 169{
170 struct net_device *dev = alloc_ei_netdev(); 170 struct net_device *dev = ____alloc_ei_netdev(0);
171 int err; 171 int err;
172 172
173 if (!dev) 173 if (!dev)
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = {
197 .ndo_open = ne_open, 197 .ndo_open = ne_open,
198 .ndo_stop = ne_close, 198 .ndo_stop = ne_close,
199 199
200 .ndo_start_xmit = ei_start_xmit, 200 .ndo_start_xmit = __ei_start_xmit,
201 .ndo_tx_timeout = ei_tx_timeout, 201 .ndo_tx_timeout = __ei_tx_timeout,
202 .ndo_get_stats = ei_get_stats, 202 .ndo_get_stats = __ei_get_stats,
203 .ndo_set_multicast_list = ei_set_multicast_list, 203 .ndo_set_multicast_list = __ei_set_multicast_list,
204 .ndo_validate_addr = eth_validate_addr, 204 .ndo_validate_addr = eth_validate_addr,
205 .ndo_set_mac_address = eth_mac_addr, 205 .ndo_set_mac_address = eth_mac_addr,
206 .ndo_change_mtu = eth_change_mtu, 206 .ndo_change_mtu = eth_change_mtu,
207#ifdef CONFIG_NET_POLL_CONTROLLER 207#ifdef CONFIG_NET_POLL_CONTROLLER
208 .ndo_poll_controller = ei_poll, 208 .ndo_poll_controller = __ei_poll,
209#endif 209#endif
210}; 210};
211 211
@@ -637,7 +637,7 @@ int init_module(void)
637 int err; 637 int err;
638 638
639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
640 struct net_device *dev = alloc_ei_netdev(); 640 struct net_device *dev = ____alloc_ei_netdev(0);
641 if (!dev) 641 if (!dev)
642 break; 642 break;
643 if (io[this_dev]) { 643 if (io[this_dev]) {
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 2ef2f9cdefa6..56d049a472da 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION;
34#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
35#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
36 36
37/* Macros for ML7223 */
38#define PCI_VENDOR_ID_ROHM 0x10db
39#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
40
37#define PCH_GBE_TX_WEIGHT 64 41#define PCH_GBE_TX_WEIGHT 64
38#define PCH_GBE_RX_WEIGHT 64 42#define PCH_GBE_RX_WEIGHT 64
39#define PCH_GBE_RX_BUFFER_WRITE 16 43#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION;
43 47
44#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ 48#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
45 PCH_GBE_CHIP_TYPE_INTERNAL | \ 49 PCH_GBE_CHIP_TYPE_INTERNAL | \
46 PCH_GBE_RGMII_MODE_RGMII | \ 50 PCH_GBE_RGMII_MODE_RGMII \
47 PCH_GBE_CRS_SEL \
48 ) 51 )
49 52
50/* Ethertype field values */ 53/* Ethertype field values */
@@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1494 /* Write meta date of skb */ 1497 /* Write meta date of skb */
1495 skb_put(skb, length); 1498 skb_put(skb, length);
1496 skb->protocol = eth_type_trans(skb, netdev); 1499 skb->protocol = eth_type_trans(skb, netdev);
1497 if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == 1500 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1498 PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
1499 skb->ip_summed = CHECKSUM_UNNECESSARY;
1500 } else {
1501 skb->ip_summed = CHECKSUM_NONE; 1501 skb->ip_summed = CHECKSUM_NONE;
1502 } 1502 else
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504
1503 napi_gro_receive(&adapter->napi, skb); 1505 napi_gro_receive(&adapter->napi, skb);
1504 (*work_done)++; 1506 (*work_done)++;
1505 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1507 pr_debug("Receive skb->ip_summed: %d length: %d\n",
@@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2420 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2422 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2421 .class_mask = (0xFFFF00) 2423 .class_mask = (0xFFFF00)
2422 }, 2424 },
2425 {.vendor = PCI_VENDOR_ID_ROHM,
2426 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2427 .subvendor = PCI_ANY_ID,
2428 .subdevice = PCI_ANY_ID,
2429 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2430 .class_mask = (0xFFFF00)
2431 },
2423 /* required last entry */ 2432 /* required last entry */
2424 {0} 2433 {0}
2425}; 2434};
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index d98479030ef2..3dd45ed61f0a 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
53void efx_mcdi_init(struct efx_nic *efx) 67void efx_mcdi_init(struct efx_nic *efx)
54{ 68{
55 struct efx_mcdi_iface *mcdi; 69 struct efx_mcdi_iface *mcdi;
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen) 84 const u8 *inbuf, size_t inlen)
71{ 85{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 87 unsigned pdu = MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 88 unsigned doorbell = MCDI_DOORBELL(efx);
75 unsigned int i; 89 unsigned int i;
76 efx_dword_t hdr; 90 efx_dword_t hdr;
77 u32 xflags, seqno; 91 u32 xflags, seqno;
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
92 MCDI_HEADER_SEQ, seqno, 106 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags); 107 MCDI_HEADER_XFLAGS, xflags);
94 108
95 efx_writed(efx, &hdr, pdu); 109 efx_mcdi_writed(efx, &hdr, pdu);
96 110
97 for (i = 0; i < inlen; i += 4) { 111 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
99 /* use wmb() within loop to inhibit write combining */ 113 pdu + 4 + i);
100 wmb();
101 }
102 114
103 /* ring the doorbell with a distinctive value */ 115 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
105 wmb(); 117 efx_mcdi_writed(efx, &hdr, doorbell);
106} 118}
107 119
108static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
109{ 121{
110 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
111 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 123 unsigned int pdu = MCDI_PDU(efx);
112 int i; 124 int i;
113 125
114 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
115 BUG_ON(outlen & 3 || outlen >= 0x100); 127 BUG_ON(outlen & 3 || outlen >= 0x100);
116 128
117 for (i = 0; i < outlen; i += 4) 129 for (i = 0; i < outlen; i += 4)
118 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
119} 131}
120 132
121static int efx_mcdi_poll(struct efx_nic *efx) 133static int efx_mcdi_poll(struct efx_nic *efx)
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
124 unsigned int time, finish; 136 unsigned int time, finish;
125 unsigned int respseq, respcmd, error; 137 unsigned int respseq, respcmd, error;
126 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 138 unsigned int pdu = MCDI_PDU(efx);
127 unsigned int rc, spins; 139 unsigned int rc, spins;
128 efx_dword_t reg; 140 efx_dword_t reg;
129 141
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
149 161
150 time = get_seconds(); 162 time = get_seconds();
151 163
152 rmb(); 164 efx_mcdi_readd(efx, &reg, pdu);
153 efx_readd(efx, &reg, pdu);
154 165
155 /* All 1's indicates that shared memory is in reset (and is 166 /* All 1's indicates that shared memory is in reset (and is
156 * not a valid header). Wait for it to come out reset before 167 * not a valid header). Wait for it to come out reset before
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
177 respseq, mcdi->seqno); 188 respseq, mcdi->seqno);
178 rc = EIO; 189 rc = EIO;
179 } else if (error) { 190 } else if (error) {
180 efx_readd(efx, &reg, pdu + 4); 191 efx_mcdi_readd(efx, &reg, pdu + 4);
181 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
182#define TRANSLATE_ERROR(name) \ 193#define TRANSLATE_ERROR(name) \
183 case MC_CMD_ERR_ ## name: \ 194 case MC_CMD_ERR_ ## name: \
@@ -211,21 +222,21 @@ out:
211/* Test and clear MC-rebooted flag for this port/function */ 222/* Test and clear MC-rebooted flag for this port/function */
212int efx_mcdi_poll_reboot(struct efx_nic *efx) 223int efx_mcdi_poll_reboot(struct efx_nic *efx)
213{ 224{
214 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 225 unsigned int addr = MCDI_REBOOT_FLAG(efx);
215 efx_dword_t reg; 226 efx_dword_t reg;
216 uint32_t value; 227 uint32_t value;
217 228
218 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
219 return false; 230 return false;
220 231
221 efx_readd(efx, &reg, addr); 232 efx_mcdi_readd(efx, &reg, addr);
222 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
223 234
224 if (value == 0) 235 if (value == 0)
225 return 0; 236 return 0;
226 237
227 EFX_ZERO_DWORD(reg); 238 EFX_ZERO_DWORD(reg);
228 efx_writed(efx, &reg, addr); 239 efx_mcdi_writed(efx, &reg, addr);
229 240
230 if (value == MC_STATUS_DWORD_ASSERT) 241 if (value == MC_STATUS_DWORD_ASSERT)
231 return -EINTR; 242 return -EINTR;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 10f1cb79c147..9b29a8d7c449 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1937 1937
1938 size = min_t(size_t, table->step, 16); 1938 size = min_t(size_t, table->step, 16);
1939 1939
1940 if (table->offset >= efx->type->mem_map_size) {
1941 /* No longer mapped; return dummy data */
1942 memcpy(buf, "\xde\xc0\xad\xde", 4);
1943 buf += table->rows * size;
1944 continue;
1945 }
1946
1940 for (i = 0; i < table->rows; i++) { 1947 for (i = 0; i < table->rows; i++) {
1941 switch (table->step) { 1948 switch (table->step) {
1942 case 4: /* 32-bit register or SRAM */ 1949 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index a42db6e35be3..d91701abd331 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
146 * @wol_filter_id: Wake-on-LAN packet filter id 147 * @wol_filter_id: Wake-on-LAN packet filter id
147 */ 148 */
148struct siena_nic_data { 149struct siena_nic_data {
149 struct efx_mcdi_iface mcdi; 150 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
150 int wol_filter_id; 152 int wol_filter_id;
151}; 153};
152 154
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e4dd8986b1fe..837869b71db9 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
222 222
223 /* Initialise MCDI */
224 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
225 FR_CZ_MC_TREG_SMEM,
226 FR_CZ_MC_TREG_SMEM_STEP *
227 FR_CZ_MC_TREG_SMEM_ROWS);
228 if (!nic_data->mcdi_smem) {
229 netif_err(efx, probe, efx->net_dev,
230 "could not map MCDI at %llx+%x\n",
231 (unsigned long long)efx->membase_phys +
232 FR_CZ_MC_TREG_SMEM,
233 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
234 rc = -ENOMEM;
235 goto fail1;
236 }
223 efx_mcdi_init(efx); 237 efx_mcdi_init(efx);
224 238
225 /* Recover from a failed assertion before probing */ 239 /* Recover from a failed assertion before probing */
226 rc = efx_mcdi_handle_assertion(efx); 240 rc = efx_mcdi_handle_assertion(efx);
227 if (rc) 241 if (rc)
228 goto fail1; 242 goto fail2;
229 243
230 /* Let the BMC know that the driver is now in charge of link and 244 /* Let the BMC know that the driver is now in charge of link and
231 * filter settings. We must do this before we reset the NIC */ 245 * filter settings. We must do this before we reset the NIC */
@@ -280,6 +294,7 @@ fail4:
280fail3: 294fail3:
281 efx_mcdi_drv_attach(efx, false, NULL); 295 efx_mcdi_drv_attach(efx, false, NULL);
282fail2: 296fail2:
297 iounmap(nic_data->mcdi_smem);
283fail1: 298fail1:
284 kfree(efx->nic_data); 299 kfree(efx->nic_data);
285 return rc; 300 return rc;
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)
359 374
360static void siena_remove_nic(struct efx_nic *efx) 375static void siena_remove_nic(struct efx_nic *efx)
361{ 376{
377 struct siena_nic_data *nic_data = efx->nic_data;
378
362 efx_nic_free_buffer(efx, &efx->irq_status); 379 efx_nic_free_buffer(efx, &efx->irq_status);
363 380
364 siena_reset_hw(efx, RESET_TYPE_ALL); 381 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
368 efx_mcdi_drv_attach(efx, false, NULL); 385 efx_mcdi_drv_attach(efx, false, NULL);
369 386
370 /* Tear down the private nic state */ 387 /* Tear down the private nic state */
371 kfree(efx->nic_data); 388 iounmap(nic_data->mcdi_smem);
389 kfree(nic_data);
372 efx->nic_data = NULL; 390 efx->nic_data = NULL;
373} 391}
374 392
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
606 .default_mac_ops = &efx_mcdi_mac_operations, 624 .default_mac_ops = &efx_mcdi_mac_operations,
607 625
608 .revision = EFX_REV_SIENA_A0, 626 .revision = EFX_REV_SIENA_A0,
609 .mem_map_size = (FR_CZ_MC_TREG_SMEM + 627 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
610 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
611 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 628 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
612 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 629 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
613 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 630 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 86cbb9ea2f26..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty)
853 /* Done. We have linked the TTY line to a channel. */ 853 /* Done. We have linked the TTY line to a channel. */
854 rtnl_unlock(); 854 rtnl_unlock();
855 tty->receive_room = 65536; /* We don't flow control */ 855 tty->receive_room = 65536; /* We don't flow control */
856 return sl->dev->base_addr; 856
857 /* TTY layer expects 0 on success */
858 return 0;
857 859
858err_free_bufs: 860err_free_bufs:
859 sl_free_bufs(sl); 861 sl_free_bufs(sl);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index eb4f59fb01e9..bff2f7999ff0 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void)
3237#endif 3237#endif
3238 3238
3239#ifdef CONFIG_SBUS 3239#ifdef CONFIG_SBUS
3240static const struct of_device_id hme_sbus_match[];
3240static int __devinit hme_sbus_probe(struct platform_device *op) 3241static int __devinit hme_sbus_probe(struct platform_device *op)
3241{ 3242{
3243 const struct of_device_id *match;
3242 struct device_node *dp = op->dev.of_node; 3244 struct device_node *dp = op->dev.of_node;
3243 const char *model = of_get_property(dp, "model", NULL); 3245 const char *model = of_get_property(dp, "model", NULL);
3244 int is_qfe; 3246 int is_qfe;
3245 3247
3246 if (!op->dev.of_match) 3248 match = of_match_device(hme_sbus_match, &op->dev);
3249 if (!match)
3247 return -EINVAL; 3250 return -EINVAL;
3248 is_qfe = (op->dev.of_match->data != NULL); 3251 is_qfe = (match->data != NULL);
3249 3252
3250 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) 3253 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3251 is_qfe = 1; 3254 is_qfe = 1;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a301479ecc60..c924ea2bce07 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
567{ 567{
568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, 568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
570 .driver_info = 0, 570 .driver_info = (unsigned long)&wwan_info,
571}, 571},
572 572
573/* 573/*
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 7d42f9a2c068..81126ff85e05 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -65,6 +65,7 @@
65#define IPHETH_USBINTF_PROTO 1 65#define IPHETH_USBINTF_PROTO 1
66 66
67#define IPHETH_BUF_SIZE 1516 67#define IPHETH_BUF_SIZE 1516
68#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
68#define IPHETH_TX_TIMEOUT (5 * HZ) 69#define IPHETH_TX_TIMEOUT (5 * HZ)
69 70
70#define IPHETH_INTFNUM 2 71#define IPHETH_INTFNUM 2
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
202 return; 203 return;
203 } 204 }
204 205
205 len = urb->actual_length; 206 if (urb->actual_length <= IPHETH_IP_ALIGN) {
206 buf = urb->transfer_buffer; 207 dev->net->stats.rx_length_errors++;
208 return;
209 }
210 len = urb->actual_length - IPHETH_IP_ALIGN;
211 buf = urb->transfer_buffer + IPHETH_IP_ALIGN;
207 212
208 skb = dev_alloc_skb(NET_IP_ALIGN + len); 213 skb = dev_alloc_skb(len);
209 if (!skb) { 214 if (!skb) {
210 err("%s: dev_alloc_skb: -ENOMEM", __func__); 215 err("%s: dev_alloc_skb: -ENOMEM", __func__);
211 dev->net->stats.rx_dropped++; 216 dev->net->stats.rx_dropped++;
212 return; 217 return;
213 } 218 }
214 219
215 skb_reserve(skb, NET_IP_ALIGN); 220 memcpy(skb_put(skb, len), buf, len);
216 memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
217 skb->dev = dev->net; 221 skb->dev = dev->net;
218 skb->protocol = eth_type_trans(skb, dev->net); 222 skb->protocol = eth_type_trans(skb, dev->net);
219 223
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 009bba3d753e..9ab439d144ed 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net)
645 struct driver_info *info = dev->driver_info; 645 struct driver_info *info = dev->driver_info;
646 int retval; 646 int retval;
647 647
648 clear_bit(EVENT_DEV_OPEN, &dev->flags);
648 netif_stop_queue (net); 649 netif_stop_queue (net);
649 650
650 netif_info(dev, ifdown, dev->net, 651 netif_info(dev, ifdown, dev->net,
@@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf)
1524 smp_mb(); 1525 smp_mb();
1525 clear_bit(EVENT_DEV_ASLEEP, &dev->flags); 1526 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
1526 spin_unlock_irq(&dev->txq.lock); 1527 spin_unlock_irq(&dev->txq.lock);
1527 if (!(dev->txq.qlen >= TX_QLEN(dev))) 1528
1528 netif_start_queue(dev->net); 1529 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1529 tasklet_schedule (&dev->bh); 1530 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1531 netif_start_queue(dev->net);
1532 tasklet_schedule (&dev->bh);
1533 }
1530 } 1534 }
1531 return 0; 1535 return 0;
1532} 1536}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0d47c3a05307..c16ed961153a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -178,6 +178,7 @@ static void
178vmxnet3_process_events(struct vmxnet3_adapter *adapter) 178vmxnet3_process_events(struct vmxnet3_adapter *adapter)
179{ 179{
180 int i; 180 int i;
181 unsigned long flags;
181 u32 events = le32_to_cpu(adapter->shared->ecr); 182 u32 events = le32_to_cpu(adapter->shared->ecr);
182 if (!events) 183 if (!events)
183 return; 184 return;
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
190 191
191 /* Check if there is an error on xmit/recv queues */ 192 /* Check if there is an error on xmit/recv queues */
192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 193 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
193 spin_lock(&adapter->cmd_lock); 194 spin_lock_irqsave(&adapter->cmd_lock, flags);
194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 195 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
195 VMXNET3_CMD_GET_QUEUE_STATUS); 196 VMXNET3_CMD_GET_QUEUE_STATUS);
196 spin_unlock(&adapter->cmd_lock); 197 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
197 198
198 for (i = 0; i < adapter->num_tx_queues; i++) 199 for (i = 0; i < adapter->num_tx_queues; i++)
199 if (adapter->tqd_start[i].status.stopped) 200 if (adapter->tqd_start[i].status.stopped)
@@ -2733,13 +2734,14 @@ static void
2733vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2734vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2734{ 2735{
2735 u32 cfg; 2736 u32 cfg;
2737 unsigned long flags;
2736 2738
2737 /* intr settings */ 2739 /* intr settings */
2738 spin_lock(&adapter->cmd_lock); 2740 spin_lock_irqsave(&adapter->cmd_lock, flags);
2739 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2741 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2740 VMXNET3_CMD_GET_CONF_INTR); 2742 VMXNET3_CMD_GET_CONF_INTR);
2741 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2743 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2742 spin_unlock(&adapter->cmd_lock); 2744 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2743 adapter->intr.type = cfg & 0x3; 2745 adapter->intr.type = cfg & 0x3;
2744 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2746 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2745 2747
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 51f2ef142a5b..976467253d20 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
311 /* toggle the LRO feature*/ 311 /* toggle the LRO feature*/
312 netdev->features ^= NETIF_F_LRO; 312 netdev->features ^= NETIF_F_LRO;
313 313
314 /* Update private LRO flag */
315 adapter->lro = lro_requested;
316
314 /* update harware LRO capability accordingly */ 317 /* update harware LRO capability accordingly */
315 if (lro_requested) 318 if (lro_requested)
316 adapter->shared->devRead.misc.uptFeatures |= 319 adapter->shared->devRead.misc.uptFeatures |=
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 17d04ff8d678..1482fa650833 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2141static void ath9k_flush(struct ieee80211_hw *hw, bool drop) 2141static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2142{ 2142{
2143 struct ath_softc *sc = hw->priv; 2143 struct ath_softc *sc = hw->priv;
2144 struct ath_hw *ah = sc->sc_ah;
2145 struct ath_common *common = ath9k_hw_common(ah);
2144 int timeout = 200; /* ms */ 2146 int timeout = 200; /* ms */
2145 int i, j; 2147 int i, j;
2146 2148
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2149 2151
2150 cancel_delayed_work_sync(&sc->tx_complete_work); 2152 cancel_delayed_work_sync(&sc->tx_complete_work);
2151 2153
2154 if (sc->sc_flags & SC_OP_INVALID) {
2155 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
2156 mutex_unlock(&sc->mutex);
2157 return;
2158 }
2159
2152 if (drop) 2160 if (drop)
2153 timeout = 1; 2161 timeout = 1;
2154 2162
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index c1511b14b239..42db0fc8b921 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2155 goto set_ch_out; 2155 goto set_ch_out;
2156 } 2156 }
2157 2157
2158 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2159 !iwl_legacy_is_channel_ibss(ch_info)) {
2160 IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
2161 ret = -EINVAL;
2162 goto set_ch_out;
2163 }
2164
2158 spin_lock_irqsave(&priv->lock, flags); 2165 spin_lock_irqsave(&priv->lock, flags);
2159 2166
2160 for_each_context(priv, ctx) { 2167 for_each_context(priv, ctx) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index 9ee849d669f3..f43ac1eb9014 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; 1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412} 1412}
1413 1413
1414static inline int
1415iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1416{
1417 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1418}
1419
1414static inline void 1420static inline void
1415__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) 1421__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1416{ 1422{
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 7e8a658b7670..f3ac62431a30 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
1339 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { 1339 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
1340 lbs_deb_host( 1340 lbs_deb_host(
1341 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); 1341 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
1342 list_del(&cmdnode->list);
1343 spin_lock_irqsave(&priv->driver_lock, flags); 1342 spin_lock_irqsave(&priv->driver_lock, flags);
1343 list_del(&cmdnode->list);
1344 lbs_complete_command(priv, cmdnode, 0); 1344 lbs_complete_command(priv, cmdnode, 0);
1345 spin_unlock_irqrestore(&priv->driver_lock, flags); 1345 spin_unlock_irqrestore(&priv->driver_lock, flags);
1346 1346
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
1352 (priv->psstate == PS_STATE_PRE_SLEEP)) { 1352 (priv->psstate == PS_STATE_PRE_SLEEP)) {
1353 lbs_deb_host( 1353 lbs_deb_host(
1354 "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); 1354 "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
1355 list_del(&cmdnode->list);
1356 spin_lock_irqsave(&priv->driver_lock, flags); 1355 spin_lock_irqsave(&priv->driver_lock, flags);
1356 list_del(&cmdnode->list);
1357 lbs_complete_command(priv, cmdnode, 0); 1357 lbs_complete_command(priv, cmdnode, 0);
1358 spin_unlock_irqrestore(&priv->driver_lock, flags); 1358 spin_unlock_irqrestore(&priv->driver_lock, flags);
1359 priv->needtowakeup = 1; 1359 priv->needtowakeup = 1;
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv)
1366 "EXEC_NEXT_CMD: sending EXIT_PS\n"); 1366 "EXEC_NEXT_CMD: sending EXIT_PS\n");
1367 } 1367 }
1368 } 1368 }
1369 spin_lock_irqsave(&priv->driver_lock, flags);
1369 list_del(&cmdnode->list); 1370 list_del(&cmdnode->list);
1371 spin_unlock_irqrestore(&priv->driver_lock, flags);
1370 lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", 1372 lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
1371 le16_to_cpu(cmd->command)); 1373 le16_to_cpu(cmd->command));
1372 lbs_submit_command(priv, cmdnode); 1374 lbs_submit_command(priv, cmdnode);
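All three libertas hunks make the same correction: cmdnode is unlinked from the shared command list only while priv->driver_lock is held, so another CPU walking the list cannot observe a half-removed node. A minimal sketch of that locking rule, assuming a spinlock guards the list (the names below are illustrative, not the libertas ones):

#include <linux/list.h>
#include <linux/spinlock.h>

struct cmd_node {
	struct list_head list;
	/* ... command payload ... */
};

static DEFINE_SPINLOCK(queue_lock);	/* guards cmd_queue and every node->list */
static LIST_HEAD(cmd_queue);

static void take_cmd(struct cmd_node *node)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_lock, flags);
	list_del(&node->list);		/* unlink only while the lock is held */
	spin_unlock_irqrestore(&queue_lock, flags);

	/* node is no longer reachable from cmd_queue; safe to use or free. */
}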
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index b78a38d9172a..8c7c522a056a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
126 126
127 board = z->resource.start; 127 board = z->resource.start;
128 ioaddr = board+cards[i].offset; 128 ioaddr = board+cards[i].offset;
129 dev = alloc_ei_netdev(); 129 dev = ____alloc_ei_netdev(0);
130 if (!dev) 130 if (!dev)
131 return -ENOMEM; 131 return -ENOMEM;
132 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { 132 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
146static const struct net_device_ops zorro8390_netdev_ops = { 146static const struct net_device_ops zorro8390_netdev_ops = {
147 .ndo_open = zorro8390_open, 147 .ndo_open = zorro8390_open,
148 .ndo_stop = zorro8390_close, 148 .ndo_stop = zorro8390_close,
149 .ndo_start_xmit = ei_start_xmit, 149 .ndo_start_xmit = __ei_start_xmit,
150 .ndo_tx_timeout = ei_tx_timeout, 150 .ndo_tx_timeout = __ei_tx_timeout,
151 .ndo_get_stats = ei_get_stats, 151 .ndo_get_stats = __ei_get_stats,
152 .ndo_set_multicast_list = ei_set_multicast_list, 152 .ndo_set_multicast_list = __ei_set_multicast_list,
153 .ndo_validate_addr = eth_validate_addr, 153 .ndo_validate_addr = eth_validate_addr,
154 .ndo_set_mac_address = eth_mac_addr, 154 .ndo_set_mac_address = eth_mac_addr,
155 .ndo_change_mtu = eth_change_mtu, 155 .ndo_change_mtu = eth_change_mtu,
156#ifdef CONFIG_NET_POLL_CONTROLLER 156#ifdef CONFIG_NET_POLL_CONTROLLER
157 .ndo_poll_controller = ei_poll, 157 .ndo_poll_controller = __ei_poll,
158#endif 158#endif
159}; 159};
160 160
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ebf51ad1b714..a806cb321d2e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
579 } 579 }
580 size0 = calculate_iosize(size, min_size, size1, 580 size0 = calculate_iosize(size, min_size, size1,
581 resource_size(b_res), 4096); 581 resource_size(b_res), 4096);
582 size1 = !add_size? size0: 582 size1 = (!add_head || (add_head && !add_size)) ? size0 :
583 calculate_iosize(size, min_size+add_size, size1, 583 calculate_iosize(size, min_size+add_size, size1,
584 resource_size(b_res), 4096); 584 resource_size(b_res), 4096);
585 if (!size0 && !size1) { 585 if (!size0 && !size1) {
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
677 align += aligns[order]; 677 align += aligns[order];
678 } 678 }
679 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 679 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
680 size1 = !add_size ? size : 680 size1 = (!add_head || (add_head && !add_size)) ? size0 :
681 calculate_memsize(size, min_size+add_size, 0, 681 calculate_memsize(size, min_size+add_size, 0,
682 resource_size(b_res), min_align); 682 resource_size(b_res), min_align);
683 if (!size0 && !size1) { 683 if (!size0 && !size1) {
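Both setup-bus hunks gate the "optional extra size" calculation on the caller having passed both an additional-resource list (add_head) and a nonzero add_size; the condition written as (!add_head || (add_head && !add_size)) is simply the long-hand form of !(add_head && add_size). A small sketch of the selection with hypothetical names; calculate_size() stands in for calculate_iosize()/calculate_memsize():

#include <linux/types.h>

static resource_size_t calculate_size(resource_size_t base, resource_size_t extra)
{
	return base + extra;		/* placeholder for the real calculation */
}

static resource_size_t pick_size(resource_size_t base, resource_size_t extra,
				 const void *add_head, resource_size_t add_size)
{
	/* Same truth table as the diff's test: only take the enlarged size
	 * when an add list and a nonzero add_size are both present. */
	if (!(add_head && add_size))
		return base;
	return calculate_size(base, extra);
}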
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 5f2dd386152b..2c1abf63957f 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -585,8 +585,9 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
585 return true; 585 return true;
586} 586}
587 587
588static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) 588static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
589{ 589{
590 struct pci_dev *port;
590 struct pci_dev *dev; 591 struct pci_dev *dev;
591 struct pci_bus *bus; 592 struct pci_bus *bus;
592 bool blocked = eeepc_wlan_rfkill_blocked(eeepc); 593 bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
@@ -599,9 +600,16 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
599 mutex_lock(&eeepc->hotplug_lock); 600 mutex_lock(&eeepc->hotplug_lock);
600 601
601 if (eeepc->hotplug_slot) { 602 if (eeepc->hotplug_slot) {
602 bus = pci_find_bus(0, 1); 603 port = acpi_get_pci_dev(handle);
604 if (!port) {
605 pr_warning("Unable to find port\n");
606 goto out_unlock;
607 }
608
609 bus = port->subordinate;
610
603 if (!bus) { 611 if (!bus) {
604 pr_warning("Unable to find PCI bus 1?\n"); 612 pr_warning("Unable to find PCI bus?\n");
605 goto out_unlock; 613 goto out_unlock;
606 } 614 }
607 615
@@ -609,6 +617,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
609 pr_err("Unable to read PCI config space?\n"); 617 pr_err("Unable to read PCI config space?\n");
610 goto out_unlock; 618 goto out_unlock;
611 } 619 }
620
612 absent = (l == 0xffffffff); 621 absent = (l == 0xffffffff);
613 622
614 if (blocked != absent) { 623 if (blocked != absent) {
@@ -647,6 +656,17 @@ out_unlock:
647 mutex_unlock(&eeepc->hotplug_lock); 656 mutex_unlock(&eeepc->hotplug_lock);
648} 657}
649 658
659static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
660{
661 acpi_status status = AE_OK;
662 acpi_handle handle;
663
664 status = acpi_get_handle(NULL, node, &handle);
665
666 if (ACPI_SUCCESS(status))
667 eeepc_rfkill_hotplug(eeepc, handle);
668}
669
650static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) 670static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
651{ 671{
652 struct eeepc_laptop *eeepc = data; 672 struct eeepc_laptop *eeepc = data;
@@ -654,7 +674,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
654 if (event != ACPI_NOTIFY_BUS_CHECK) 674 if (event != ACPI_NOTIFY_BUS_CHECK)
655 return; 675 return;
656 676
657 eeepc_rfkill_hotplug(eeepc); 677 eeepc_rfkill_hotplug(eeepc, handle);
658} 678}
659 679
660static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, 680static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
@@ -672,6 +692,11 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
672 eeepc); 692 eeepc);
673 if (ACPI_FAILURE(status)) 693 if (ACPI_FAILURE(status))
674 pr_warning("Failed to register notify on %s\n", node); 694 pr_warning("Failed to register notify on %s\n", node);
695 /*
696 * Refresh pci hotplug in case the rfkill state was
697 * changed during setup.
698 */
699 eeepc_rfkill_hotplug(eeepc, handle);
675 } else 700 } else
676 return -ENODEV; 701 return -ENODEV;
677 702
@@ -693,6 +718,12 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
693 if (ACPI_FAILURE(status)) 718 if (ACPI_FAILURE(status))
694 pr_err("Error removing rfkill notify handler %s\n", 719 pr_err("Error removing rfkill notify handler %s\n",
695 node); 720 node);
721 /*
722 * Refresh pci hotplug in case the rfkill
723 * state was changed after
724 * eeepc_unregister_rfkill_notifier()
725 */
726 eeepc_rfkill_hotplug(eeepc, handle);
696 } 727 }
697} 728}
698 729
@@ -816,11 +847,7 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
816 rfkill_destroy(eeepc->wlan_rfkill); 847 rfkill_destroy(eeepc->wlan_rfkill);
817 eeepc->wlan_rfkill = NULL; 848 eeepc->wlan_rfkill = NULL;
818 } 849 }
819 /* 850
820 * Refresh pci hotplug in case the rfkill state was changed after
821 * eeepc_unregister_rfkill_notifier()
822 */
823 eeepc_rfkill_hotplug(eeepc);
824 if (eeepc->hotplug_slot) 851 if (eeepc->hotplug_slot)
825 pci_hp_deregister(eeepc->hotplug_slot); 852 pci_hp_deregister(eeepc->hotplug_slot);
826 853
@@ -889,11 +916,6 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
889 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5"); 916 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
890 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6"); 917 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
891 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7"); 918 eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
892 /*
893 * Refresh pci hotplug in case the rfkill state was changed during
894 * setup.
895 */
896 eeepc_rfkill_hotplug(eeepc);
897 919
898exit: 920exit:
899 if (result && result != -ENODEV) 921 if (result && result != -ENODEV)
@@ -928,8 +950,11 @@ static int eeepc_hotk_restore(struct device *device)
928 struct eeepc_laptop *eeepc = dev_get_drvdata(device); 950 struct eeepc_laptop *eeepc = dev_get_drvdata(device);
929 951
930 /* Refresh both wlan rfkill state and pci hotplug */ 952 /* Refresh both wlan rfkill state and pci hotplug */
931 if (eeepc->wlan_rfkill) 953 if (eeepc->wlan_rfkill) {
932 eeepc_rfkill_hotplug(eeepc); 954 eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
955 eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
956 eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
957 }
933 958
934 if (eeepc->bluetooth_rfkill) 959 if (eeepc->bluetooth_rfkill)
935 rfkill_set_sw_state(eeepc->bluetooth_rfkill, 960 rfkill_set_sw_state(eeepc->bluetooth_rfkill,
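The eeepc-laptop change stops hard-coding pci_find_bus(0, 1) and instead resolves the PCI root port that corresponds to the ACPI node which raised the notification, then works on that port's subordinate bus; a small helper re-resolves the handle by ACPI path for the resume case. A hedged sketch of that lookup chain (ACPI path to handle to pci_dev to subordinate bus); refresh_port() and the pci_rescan_bus() call are only illustrative stand-ins for the driver's hotplug refresh:

#include <linux/pci.h>
#include <linux/acpi.h>

static void refresh_port(const char *node)
{
	acpi_handle handle;
	struct pci_dev *port;
	struct pci_bus *bus;

	if (ACPI_FAILURE(acpi_get_handle(NULL, (acpi_string)node, &handle)))
		return;				/* node absent on this model */

	port = acpi_get_pci_dev(handle);	/* PCI device behind the handle */
	if (!port)
		return;

	bus = port->subordinate;		/* bus below the root port */
	if (bus)
		pci_rescan_bus(bus);		/* illustrative action only */

	pci_dev_put(port);			/* drop acpi_get_pci_dev() reference */
}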
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8f709aec4da0..6fe8cd6e23b5 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -934,6 +934,14 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
934/* 934/*
935 * Backlight device 935 * Backlight device
936 */ 936 */
937struct sony_backlight_props {
938 struct backlight_device *dev;
939 int handle;
940 u8 offset;
941 u8 maxlvl;
942};
943struct sony_backlight_props sony_bl_props;
944
937static int sony_backlight_update_status(struct backlight_device *bd) 945static int sony_backlight_update_status(struct backlight_device *bd)
938{ 946{
939 return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", 947 return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
@@ -954,21 +962,26 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
954{ 962{
955 int result; 963 int result;
956 int *handle = (int *)bl_get_data(bd); 964 int *handle = (int *)bl_get_data(bd);
965 struct sony_backlight_props *sdev =
966 (struct sony_backlight_props *)bl_get_data(bd);
957 967
958 sony_call_snc_handle(*handle, 0x0200, &result); 968 sony_call_snc_handle(sdev->handle, 0x0200, &result);
959 969
960 return result & 0xff; 970 return (result & 0xff) - sdev->offset;
961} 971}
962 972
963static int sony_nc_update_status_ng(struct backlight_device *bd) 973static int sony_nc_update_status_ng(struct backlight_device *bd)
964{ 974{
965 int value, result; 975 int value, result;
966 int *handle = (int *)bl_get_data(bd); 976 int *handle = (int *)bl_get_data(bd);
977 struct sony_backlight_props *sdev =
978 (struct sony_backlight_props *)bl_get_data(bd);
967 979
968 value = bd->props.brightness; 980 value = bd->props.brightness + sdev->offset;
969 sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); 981 if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
982 return -EIO;
970 983
971 return sony_nc_get_brightness_ng(bd); 984 return value;
972} 985}
973 986
974static const struct backlight_ops sony_backlight_ops = { 987static const struct backlight_ops sony_backlight_ops = {
@@ -981,8 +994,6 @@ static const struct backlight_ops sony_backlight_ng_ops = {
981 .update_status = sony_nc_update_status_ng, 994 .update_status = sony_nc_update_status_ng,
982 .get_brightness = sony_nc_get_brightness_ng, 995 .get_brightness = sony_nc_get_brightness_ng,
983}; 996};
984static int backlight_ng_handle;
985static struct backlight_device *sony_backlight_device;
986 997
987/* 998/*
988 * New SNC-only Vaios event mapping to driver known keys 999 * New SNC-only Vaios event mapping to driver known keys
@@ -1549,6 +1560,75 @@ static void sony_nc_kbd_backlight_resume(void)
1549 &ignore); 1560 &ignore);
1550} 1561}
1551 1562
1563static void sony_nc_backlight_ng_read_limits(int handle,
1564 struct sony_backlight_props *props)
1565{
1566 int offset;
1567 acpi_status status;
1568 u8 brlvl, i;
1569 u8 min = 0xff, max = 0x00;
1570 struct acpi_object_list params;
1571 union acpi_object in_obj;
1572 union acpi_object *lvl_enum;
1573 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1574
1575 props->handle = handle;
1576 props->offset = 0;
1577 props->maxlvl = 0xff;
1578
1579 offset = sony_find_snc_handle(handle);
1580 if (offset < 0)
1581 return;
1582
1583 /* try to read the boundaries from ACPI tables, if we fail the above
1584 * defaults should be reasonable
1585 */
1586 params.count = 1;
1587 params.pointer = &in_obj;
1588 in_obj.type = ACPI_TYPE_INTEGER;
1589 in_obj.integer.value = offset;
1590 status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
1591 &buffer);
1592 if (ACPI_FAILURE(status))
1593 return;
1594
1595 lvl_enum = (union acpi_object *) buffer.pointer;
1596 if (!lvl_enum) {
1597 pr_err("No SN06 return object.");
1598 return;
1599 }
1600 if (lvl_enum->type != ACPI_TYPE_BUFFER) {
1601 pr_err("Invalid SN06 return object 0x%.2x\n",
1602 lvl_enum->type);
1603 goto out_invalid;
1604 }
1605
1606 /* the buffer lists brightness levels available, brightness levels are
1607 * from 0 to 8 in the array, other values are used by ALS control.
1608 */
1609 for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
1610
1611 brlvl = *(lvl_enum->buffer.pointer + i);
1612 dprintk("Brightness level: %d\n", brlvl);
1613
1614 if (!brlvl)
1615 break;
1616
1617 if (brlvl > max)
1618 max = brlvl;
1619 if (brlvl < min)
1620 min = brlvl;
1621 }
1622 props->offset = min;
1623 props->maxlvl = max;
1624 dprintk("Brightness levels: min=%d max=%d\n", props->offset,
1625 props->maxlvl);
1626
1627out_invalid:
1628 kfree(buffer.pointer);
1629 return;
1630}
1631
1552static void sony_nc_backlight_setup(void) 1632static void sony_nc_backlight_setup(void)
1553{ 1633{
1554 acpi_handle unused; 1634 acpi_handle unused;
@@ -1557,14 +1637,14 @@ static void sony_nc_backlight_setup(void)
1557 struct backlight_properties props; 1637 struct backlight_properties props;
1558 1638
1559 if (sony_find_snc_handle(0x12f) != -1) { 1639 if (sony_find_snc_handle(0x12f) != -1) {
1560 backlight_ng_handle = 0x12f;
1561 ops = &sony_backlight_ng_ops; 1640 ops = &sony_backlight_ng_ops;
1562 max_brightness = 0xff; 1641 sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
1642 max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
1563 1643
1564 } else if (sony_find_snc_handle(0x137) != -1) { 1644 } else if (sony_find_snc_handle(0x137) != -1) {
1565 backlight_ng_handle = 0x137;
1566 ops = &sony_backlight_ng_ops; 1645 ops = &sony_backlight_ng_ops;
1567 max_brightness = 0xff; 1646 sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
1647 max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
1568 1648
1569 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1649 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1570 &unused))) { 1650 &unused))) {
@@ -1577,22 +1657,22 @@ static void sony_nc_backlight_setup(void)
1577 memset(&props, 0, sizeof(struct backlight_properties)); 1657 memset(&props, 0, sizeof(struct backlight_properties));
1578 props.type = BACKLIGHT_PLATFORM; 1658 props.type = BACKLIGHT_PLATFORM;
1579 props.max_brightness = max_brightness; 1659 props.max_brightness = max_brightness;
1580 sony_backlight_device = backlight_device_register("sony", NULL, 1660 sony_bl_props.dev = backlight_device_register("sony", NULL,
1581 &backlight_ng_handle, 1661 &sony_bl_props,
1582 ops, &props); 1662 ops, &props);
1583 1663
1584 if (IS_ERR(sony_backlight_device)) { 1664 if (IS_ERR(sony_bl_props.dev)) {
1585 pr_warning(DRV_PFX "unable to register backlight device\n"); 1665 pr_warn(DRV_PFX "unable to register backlight device\n");
1586 sony_backlight_device = NULL; 1666 sony_bl_props.dev = NULL;
1587 } else 1667 } else
1588 sony_backlight_device->props.brightness = 1668 sony_bl_props.dev->props.brightness =
1589 ops->get_brightness(sony_backlight_device); 1669 ops->get_brightness(sony_bl_props.dev);
1590} 1670}
1591 1671
1592static void sony_nc_backlight_cleanup(void) 1672static void sony_nc_backlight_cleanup(void)
1593{ 1673{
1594 if (sony_backlight_device) 1674 if (sony_bl_props.dev)
1595 backlight_device_unregister(sony_backlight_device); 1675 backlight_device_unregister(sony_bl_props.dev);
1596} 1676}
1597 1677
1598static int sony_nc_add(struct acpi_device *device) 1678static int sony_nc_add(struct acpi_device *device)
@@ -2590,7 +2670,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2590 mutex_lock(&spic_dev.lock); 2670 mutex_lock(&spic_dev.lock);
2591 switch (cmd) { 2671 switch (cmd) {
2592 case SONYPI_IOCGBRT: 2672 case SONYPI_IOCGBRT:
2593 if (sony_backlight_device == NULL) { 2673 if (sony_bl_props.dev == NULL) {
2594 ret = -EIO; 2674 ret = -EIO;
2595 break; 2675 break;
2596 } 2676 }
@@ -2603,7 +2683,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2603 ret = -EFAULT; 2683 ret = -EFAULT;
2604 break; 2684 break;
2605 case SONYPI_IOCSBRT: 2685 case SONYPI_IOCSBRT:
2606 if (sony_backlight_device == NULL) { 2686 if (sony_bl_props.dev == NULL) {
2607 ret = -EIO; 2687 ret = -EIO;
2608 break; 2688 break;
2609 } 2689 }
@@ -2617,8 +2697,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2617 break; 2697 break;
2618 } 2698 }
2619 /* sync the backlight device status */ 2699 /* sync the backlight device status */
2620 sony_backlight_device->props.brightness = 2700 sony_bl_props.dev->props.brightness =
2621 sony_backlight_get_brightness(sony_backlight_device); 2701 sony_backlight_get_brightness(sony_bl_props.dev);
2622 break; 2702 break;
2623 case SONYPI_IOCGBAT1CAP: 2703 case SONYPI_IOCGBAT1CAP:
2624 if (ec_read16(SONYPI_BAT1_FULL, &val16)) { 2704 if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
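The heart of the sony-laptop change is a translation between the backlight core's 0..max range and the firmware's min..max range discovered at probe time (offset is the lowest valid level, maxlvl the highest). A sketch of that mapping; bl_props, fw_set() and fw_get() are invented stand-ins for sony_backlight_props and the sony_call_snc_handle() calls:

#include <linux/types.h>

int fw_set(int level);	/* stand-in: write brightness to firmware */
int fw_get(void);	/* stand-in: read brightness from firmware */

struct bl_props {
	u8 offset;	/* lowest level the firmware accepts */
	u8 maxlvl;	/* highest level the firmware accepts */
};

/* Range exported to the backlight core: 0 .. (maxlvl - offset). */
static int bl_max(const struct bl_props *p)
{
	return p->maxlvl - p->offset;
}

static int bl_set(const struct bl_props *p, int level)
{
	return fw_set(level + p->offset);	/* shift into the firmware range */
}

static int bl_get(const struct bl_props *p)
{
	return fw_get() - p->offset;		/* shift back to the 0-based range */
}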
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index efb3b6b9bcdb..562fcf0dd2b5 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -128,7 +128,8 @@ enum {
128}; 128};
129 129
130/* ACPI HIDs */ 130/* ACPI HIDs */
131#define TPACPI_ACPI_HKEY_HID "IBM0068" 131#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068"
132#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068"
132#define TPACPI_ACPI_EC_HID "PNP0C09" 133#define TPACPI_ACPI_EC_HID "PNP0C09"
133 134
134/* Input IDs */ 135/* Input IDs */
@@ -3879,7 +3880,8 @@ errexit:
3879} 3880}
3880 3881
3881static const struct acpi_device_id ibm_htk_device_ids[] = { 3882static const struct acpi_device_id ibm_htk_device_ids[] = {
3882 {TPACPI_ACPI_HKEY_HID, 0}, 3883 {TPACPI_ACPI_IBM_HKEY_HID, 0},
3884 {TPACPI_ACPI_LENOVO_HKEY_HID, 0},
3883 {"", 0}, 3885 {"", 0},
3884}; 3886};
3885 3887
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index ac2701b22e71..043ee3136e40 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
95 else 95 else
96 table++; 96 table++;
97 97
98 if (route_port == RIO_INVALID_ROUTE)
99 route_port = IDT_DEFAULT_ROUTE;
100
98 rio_mport_write_config_32(mport, destid, hopcount, 101 rio_mport_write_config_32(mport, destid, hopcount,
99 LOCAL_RTE_CONF_DESTID_SEL, table); 102 LOCAL_RTE_CONF_DESTID_SEL, table);
100 103
@@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
411 rdev->rswitch->em_handle = idtg2_em_handler; 414 rdev->rswitch->em_handle = idtg2_em_handler;
412 rdev->rswitch->sw_sysfs = idtg2_sysfs; 415 rdev->rswitch->sw_sysfs = idtg2_sysfs;
413 416
417 if (do_enum) {
418 /* Ensure that default routing is disabled on startup */
419 rio_write_config_32(rdev,
420 RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
421 }
422
414 return 0; 423 return 0;
415} 424}
416 425
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 3a971077e7bf..d06ee2d44b44 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
26{ 26{
27 u32 result; 27 u32 result;
28 28
29 if (route_port == RIO_INVALID_ROUTE)
30 route_port = CPS_DEFAULT_ROUTE;
31
29 if (table == RIO_GLOBAL_TABLE) { 32 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount, 33 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); 34 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
@@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
130 /* set TVAL = ~50us */ 133 /* set TVAL = ~50us */
131 rio_write_config_32(rdev, 134 rio_write_config_32(rdev,
132 rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); 135 rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
136 /* Ensure that default routing is disabled on startup */
137 rio_write_config_32(rdev,
138 RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);
133 } 139 }
134 140
135 return 0; 141 return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 1a62934bfebc..db8b8028988d 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
303 rdev->rswitch->em_init = tsi57x_em_init; 303 rdev->rswitch->em_init = tsi57x_em_init;
304 rdev->rswitch->em_handle = tsi57x_em_handler; 304 rdev->rswitch->em_handle = tsi57x_em_handler;
305 305
306 if (do_enum) {
307 /* Ensure that default routing is disabled on startup */
308 rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
309 RIO_INVALID_ROUTE);
310 }
311
306 return 0; 312 return 0;
307} 313}
308 314
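All three RapidIO switch drivers receive the same two-part treatment: translate the generic RIO_INVALID_ROUTE value into the chip's own "drop / default route" encoding before writing a routing entry, and, when this host performs enumeration, clear the default-port register so packets to unknown destination IDs are not forwarded through a stale port. A compact sketch of the idea; the register name, values and chip_write_*() helpers are invented:

#include <linux/types.h>

struct sw_dev;
void chip_write_route(struct sw_dev *sw, u16 destid, u8 port);
void chip_write_reg(struct sw_dev *sw, u32 reg, u32 val);

#define GENERIC_INVALID_ROUTE	0xff	/* what the RapidIO core passes down */
#define CHIP_NO_ROUTE		0xdf	/* assumed chip-specific "drop" value */
#define CHIP_DEFAULT_PORT_REG	0x78	/* assumed default-port register */

static void route_add(struct sw_dev *sw, u16 destid, u8 port)
{
	if (port == GENERIC_INVALID_ROUTE)
		port = CHIP_NO_ROUTE;	/* translate before touching hardware */
	chip_write_route(sw, destid, port);
}

static void switch_init(struct sw_dev *sw, int do_enum)
{
	if (do_enum)			/* only the enumerating host cleans up */
		chip_write_reg(sw, CHIP_DEFAULT_PORT_REG, CHIP_NO_ROUTE);
}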
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 8d46838dff8a..755e1fe914af 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
524 goto fail2; 524 goto fail2;
525 } 525 }
526 526
527 platform_set_drvdata(pdev, davinci_rtc);
528
527 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, 529 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
528 &davinci_rtc_ops, THIS_MODULE); 530 &davinci_rtc_ops, THIS_MODULE);
529 if (IS_ERR(davinci_rtc->rtc)) { 531 if (IS_ERR(davinci_rtc->rtc)) {
@@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
553 555
554 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); 556 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
555 557
556 platform_set_drvdata(pdev, davinci_rtc);
557
558 device_init_wakeup(&pdev->dev, 0); 558 device_init_wakeup(&pdev->dev, 0);
559 559
560 return 0; 560 return 0;
@@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
562fail4: 562fail4:
563 rtc_device_unregister(davinci_rtc->rtc); 563 rtc_device_unregister(davinci_rtc->rtc);
564fail3: 564fail3:
565 platform_set_drvdata(pdev, NULL);
565 iounmap(davinci_rtc->base); 566 iounmap(davinci_rtc->base);
566fail2: 567fail2:
567 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); 568 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
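This rtc-davinci hunk is the first of a series below (ds1286, ep93xx, m41t80, max8925, max8998, msm6242, mxc, pcap, rp5c01) that all enforce the same ordering: driver data is set before rtc_device_register(), because the RTC core may call straight back into the driver's ops during registration and those ops fetch it with platform_get_drvdata(); on failure the drvdata is cleared again. A minimal probe sketch of that ordering, with placeholder names (my_rtc, my_rtc_ops):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

struct my_rtc {
	struct rtc_device *rtc;
};

static const struct rtc_class_ops my_rtc_ops;	/* empty placeholder ops */

static int my_rtc_probe(struct platform_device *pdev)
{
	struct my_rtc *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Before register(): the core can invoke our ops immediately,
	 * and they rely on platform_get_drvdata(pdev). */
	platform_set_drvdata(pdev, priv);

	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&my_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		int ret = PTR_ERR(priv->rtc);

		platform_set_drvdata(pdev, NULL);	/* undo on failure */
		kfree(priv);
		return ret;
	}
	return 0;
}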
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 60ce69600828..47e681df31e2 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
355 goto out; 355 goto out;
356 } 356 }
357 spin_lock_init(&priv->lock); 357 spin_lock_init(&priv->lock);
358 platform_set_drvdata(pdev, priv);
358 rtc = rtc_device_register("ds1286", &pdev->dev, 359 rtc = rtc_device_register("ds1286", &pdev->dev,
359 &ds1286_ops, THIS_MODULE); 360 &ds1286_ops, THIS_MODULE);
360 if (IS_ERR(rtc)) { 361 if (IS_ERR(rtc)) {
@@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
362 goto out; 363 goto out;
363 } 364 }
364 priv->rtc = rtc; 365 priv->rtc = rtc;
365 platform_set_drvdata(pdev, priv);
366 return 0; 366 return 0;
367 367
368out: 368out:
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 11ae64dcbf3c..335551d333b2 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc);
154 155
155 rtc = rtc_device_register(pdev->name, 156 rtc = rtc_device_register(pdev->name,
156 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
@@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
159 goto exit; 160 goto exit;
160 } 161 }
161 162
162 platform_set_drvdata(pdev, rtc);
163
164 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 163 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
165 if (err) 164 if (err)
166 goto fail; 165 goto fail;
@@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
168 return 0; 167 return 0;
169 168
170fail: 169fail:
171 platform_set_drvdata(pdev, NULL);
172 rtc_device_unregister(rtc); 170 rtc_device_unregister(rtc);
173exit: 171exit:
172 platform_set_drvdata(pdev, NULL);
174 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
175 return err; 174 return err;
176} 175}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 69fe664a2228..eda128fc1d38 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client,
783 goto exit; 783 goto exit;
784 } 784 }
785 785
786 clientdata->features = id->driver_data;
787 i2c_set_clientdata(client, clientdata);
788
786 rtc = rtc_device_register(client->name, &client->dev, 789 rtc = rtc_device_register(client->name, &client->dev,
787 &m41t80_rtc_ops, THIS_MODULE); 790 &m41t80_rtc_ops, THIS_MODULE);
788 if (IS_ERR(rtc)) { 791 if (IS_ERR(rtc)) {
@@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client,
792 } 795 }
793 796
794 clientdata->rtc = rtc; 797 clientdata->rtc = rtc;
795 clientdata->features = id->driver_data;
796 i2c_set_clientdata(client, clientdata);
797 798
798 /* Make sure HT (Halt Update) bit is cleared */ 799 /* Make sure HT (Halt Update) bit is cleared */
799 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); 800 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 20494b5edc3c..3bc046f427e0 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
258 } 258 }
259 259
260 dev_set_drvdata(&pdev->dev, info); 260 dev_set_drvdata(&pdev->dev, info);
261 /* XXX - isn't this redundant? */
262 platform_set_drvdata(pdev, info);
261 263
262 info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, 264 info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
263 &max8925_rtc_ops, THIS_MODULE); 265 &max8925_rtc_ops, THIS_MODULE);
@@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
267 goto out_rtc; 269 goto out_rtc;
268 } 270 }
269 271
270 platform_set_drvdata(pdev, info);
271
272 return 0; 272 return 0;
273out_rtc: 273out_rtc:
274 platform_set_drvdata(pdev, NULL);
274 free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); 275 free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
275out_irq: 276out_irq:
276 kfree(info); 277 kfree(info);
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 3f7bc6b9fefa..2e48aa604273 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
265 info->rtc = max8998->rtc; 265 info->rtc = max8998->rtc;
266 info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; 266 info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0;
267 267
268 platform_set_drvdata(pdev, info);
269
268 info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, 270 info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev,
269 &max8998_rtc_ops, THIS_MODULE); 271 &max8998_rtc_ops, THIS_MODULE);
270 272
@@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
274 goto out_rtc; 276 goto out_rtc;
275 } 277 }
276 278
277 platform_set_drvdata(pdev, info);
278
279 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, 279 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
280 "rtc-alarm0", info); 280 "rtc-alarm0", info);
281 281
@@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
293 return 0; 293 return 0;
294 294
295out_rtc: 295out_rtc:
296 platform_set_drvdata(pdev, NULL);
296 kfree(info); 297 kfree(info);
297 return ret; 298 return ret;
298} 299}
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index c5ac03793e79..a1a278bc340d 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev)
349 if (ret) 349 if (ret)
350 goto err_alarm_irq_request; 350 goto err_alarm_irq_request;
351 351
352 mc13xxx_unlock(mc13xxx);
353
352 priv->rtc = rtc_device_register(pdev->name, 354 priv->rtc = rtc_device_register(pdev->name,
353 &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); 355 &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE);
354 if (IS_ERR(priv->rtc)) { 356 if (IS_ERR(priv->rtc)) {
355 ret = PTR_ERR(priv->rtc); 357 ret = PTR_ERR(priv->rtc);
356 358
359 mc13xxx_lock(mc13xxx);
360
357 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); 361 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
358err_alarm_irq_request: 362err_alarm_irq_request:
359 363
@@ -365,12 +369,12 @@ err_reset_irq_status:
365 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); 369 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
366err_reset_irq_request: 370err_reset_irq_request:
367 371
372 mc13xxx_unlock(mc13xxx);
373
368 platform_set_drvdata(pdev, NULL); 374 platform_set_drvdata(pdev, NULL);
369 kfree(priv); 375 kfree(priv);
370 } 376 }
371 377
372 mc13xxx_unlock(mc13xxx);
373
374 return ret; 378 return ret;
375} 379}
376 380
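The rtc-mc13xxx hunk is a lock-ordering variant of the same theme: the PMIC lock is released before rtc_device_register() and re-taken only for the IRQ cleanup in the failure path, on the assumption that the RTC core may call ops that want the same lock. A short sketch under that assumption; my_chip, dev_lock and my_ops are placeholders:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

struct my_chip {
	struct mutex dev_lock;
	struct rtc_device *rtc;
};

static const struct rtc_class_ops my_ops;

static int my_probe(struct platform_device *pdev, struct my_chip *chip)
{
	struct rtc_device *rtc;

	mutex_lock(&chip->dev_lock);
	/* ... IRQ setup that needs the chip lock ... */
	mutex_unlock(&chip->dev_lock);	/* drop it before calling the RTC core */

	rtc = rtc_device_register("my-rtc", &pdev->dev, &my_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		mutex_lock(&chip->dev_lock);	/* locked cleanup, as in the diff */
		/* ... free the IRQs requested above ... */
		mutex_unlock(&chip->dev_lock);
		return PTR_ERR(rtc);
	}

	chip->rtc = rtc;
	return 0;
}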
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 67820626e18f..fcb113c11122 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
214 error = -ENOMEM; 214 error = -ENOMEM;
215 goto out_free_priv; 215 goto out_free_priv;
216 } 216 }
217 platform_set_drvdata(dev, priv);
217 218
218 rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, 219 rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops,
219 THIS_MODULE); 220 THIS_MODULE);
@@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
223 } 224 }
224 225
225 priv->rtc = rtc; 226 priv->rtc = rtc;
226 platform_set_drvdata(dev, priv);
227 return 0; 227 return 0;
228 228
229out_unmap: 229out_unmap:
230 platform_set_drvdata(dev, NULL);
230 iounmap(priv->regs); 231 iounmap(priv->regs);
231out_free_priv: 232out_free_priv:
232 kfree(priv); 233 kfree(priv);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 826ab64a8fa9..d814417bee8c 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
418 goto exit_put_clk; 418 goto exit_put_clk;
419 } 419 }
420 420
421 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
422 THIS_MODULE);
423 if (IS_ERR(rtc)) {
424 ret = PTR_ERR(rtc);
425 goto exit_put_clk;
426 }
427
428 pdata->rtc = rtc;
429 platform_set_drvdata(pdev, pdata); 421 platform_set_drvdata(pdev, pdata);
430 422
431 /* Configure and enable the RTC */ 423 /* Configure and enable the RTC */
@@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
438 pdata->irq = -1; 430 pdata->irq = -1;
439 } 431 }
440 432
433 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
434 THIS_MODULE);
435 if (IS_ERR(rtc)) {
436 ret = PTR_ERR(rtc);
437 goto exit_clr_drvdata;
438 }
439
440 pdata->rtc = rtc;
441
441 return 0; 442 return 0;
442 443
444exit_clr_drvdata:
445 platform_set_drvdata(pdev, NULL);
443exit_put_clk: 446exit_put_clk:
444 clk_disable(pdata->clk); 447 clk_disable(pdata->clk);
445 clk_put(pdata->clk); 448 clk_put(pdata->clk);
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index a633abc42896..cd4f198cc2ef 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
151 151
152 pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); 152 pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);
153 153
154 platform_set_drvdata(pdev, pcap_rtc);
155
154 pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, 156 pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev,
155 &pcap_rtc_ops, THIS_MODULE); 157 &pcap_rtc_ops, THIS_MODULE);
156 if (IS_ERR(pcap_rtc->rtc)) { 158 if (IS_ERR(pcap_rtc->rtc)) {
@@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
158 goto fail_rtc; 160 goto fail_rtc;
159 } 161 }
160 162
161 platform_set_drvdata(pdev, pcap_rtc);
162 163
163 timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); 164 timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
164 alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); 165 alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
@@ -177,6 +178,7 @@ fail_alarm:
177fail_timer: 178fail_timer:
178 rtc_device_unregister(pcap_rtc->rtc); 179 rtc_device_unregister(pcap_rtc->rtc);
179fail_rtc: 180fail_rtc:
181 platform_set_drvdata(pdev, NULL);
180 kfree(pcap_rtc); 182 kfree(pcap_rtc);
181 return err; 183 return err;
182} 184}
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 694da39b6dd2..359da6d020b9 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
249 249
250 spin_lock_init(&priv->lock); 250 spin_lock_init(&priv->lock);
251 251
252 platform_set_drvdata(dev, priv);
253
252 rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, 254 rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops,
253 THIS_MODULE); 255 THIS_MODULE);
254 if (IS_ERR(rtc)) { 256 if (IS_ERR(rtc)) {
255 error = PTR_ERR(rtc); 257 error = PTR_ERR(rtc);
256 goto out_unmap; 258 goto out_unmap;
257 } 259 }
258
259 priv->rtc = rtc; 260 priv->rtc = rtc;
260 platform_set_drvdata(dev, priv);
261 261
262 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); 262 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
263 if (error) 263 if (error)
@@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
268out_unregister: 268out_unregister:
269 rtc_device_unregister(rtc); 269 rtc_device_unregister(rtc);
270out_unmap: 270out_unmap:
271 platform_set_drvdata(dev, NULL);
271 iounmap(priv->regs); 272 iounmap(priv->regs);
272out_free_priv: 273out_free_priv:
273 kfree(priv); 274 kfree(priv);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index b3466c491cd3..16512ecae31a 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -46,6 +46,7 @@ static struct clk *rtc_clk;
46static void __iomem *s3c_rtc_base; 46static void __iomem *s3c_rtc_base;
47static int s3c_rtc_alarmno = NO_IRQ; 47static int s3c_rtc_alarmno = NO_IRQ;
48static int s3c_rtc_tickno = NO_IRQ; 48static int s3c_rtc_tickno = NO_IRQ;
49static bool wake_en;
49static enum s3c_cpu_type s3c_rtc_cpu_type; 50static enum s3c_cpu_type s3c_rtc_cpu_type;
50 51
51static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 52static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
@@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
562 } 563 }
563 s3c_rtc_enable(pdev, 0); 564 s3c_rtc_enable(pdev, 0);
564 565
565 if (device_may_wakeup(&pdev->dev)) 566 if (device_may_wakeup(&pdev->dev) && !wake_en) {
566 enable_irq_wake(s3c_rtc_alarmno); 567 if (enable_irq_wake(s3c_rtc_alarmno) == 0)
568 wake_en = true;
569 else
570 dev_err(&pdev->dev, "enable_irq_wake failed\n");
571 }
567 572
568 return 0; 573 return 0;
569} 574}
@@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
579 writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 584 writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
580 } 585 }
581 586
582 if (device_may_wakeup(&pdev->dev)) 587 if (device_may_wakeup(&pdev->dev) && wake_en) {
583 disable_irq_wake(s3c_rtc_alarmno); 588 disable_irq_wake(s3c_rtc_alarmno);
589 wake_en = false;
590 }
584 591
585 return 0; 592 return 0;
586} 593}
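The rtc-s3c hunk pairs enable_irq_wake() and disable_irq_wake() through a wake_en flag, so resume never disables a wake source that suspend failed to arm (unbalanced enable/disable of IRQ wake produces warnings), and a failed enable is reported. Sketch with a hypothetical wake_irq parameter:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static bool wake_armed;	/* did suspend actually arm the wake IRQ? */

static int my_suspend(struct device *dev, int wake_irq)
{
	if (device_may_wakeup(dev) && !wake_armed) {
		if (enable_irq_wake(wake_irq) == 0)
			wake_armed = true;
		else
			dev_err(dev, "enable_irq_wake failed\n");
	}
	return 0;
}

static int my_resume(struct device *dev, int wake_irq)
{
	if (device_may_wakeup(dev) && wake_armed) {
		disable_irq_wake(wake_irq);	/* only undo what suspend did */
		wake_armed = false;
	}
	return 0;
}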
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 475e603fc584..86b6f1cc1b10 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1742static inline int _dasd_term_running_cqr(struct dasd_device *device) 1742static inline int _dasd_term_running_cqr(struct dasd_device *device)
1743{ 1743{
1744 struct dasd_ccw_req *cqr; 1744 struct dasd_ccw_req *cqr;
1745 int rc;
1745 1746
1746 if (list_empty(&device->ccw_queue)) 1747 if (list_empty(&device->ccw_queue))
1747 return 0; 1748 return 0;
1748 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1749 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1749 return device->discipline->term_IO(cqr); 1750 rc = device->discipline->term_IO(cqr);
1751 if (!rc)
1752 /*
1753 * CQR terminated because a more important request is pending.
1754 * Undo decreasing of retry counter because this is
1755 * not an error case.
1756 */
1757 cqr->retries++;
1758 return rc;
1750} 1759}
1751 1760
1752int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 1761int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
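The dasd hunk compensates for a side effect of termination: term_IO() decrements the request's retry counter, but terminating the running request to make room for a more important one is not a failure, so the counter is restored on success. A sketch of that compensation; the struct names and stop_io() are stand-ins for the dasd discipline call:

#include <linux/list.h>

struct my_req {
	struct list_head list;
	int retries;
};

struct my_device {
	struct list_head queue;
	int (*stop_io)(struct my_req *req);	/* decrements req->retries */
};

static int preempt_running_req(struct my_device *dev)
{
	struct my_req *req;
	int rc;

	if (list_empty(&dev->queue))
		return 0;

	req = list_first_entry(&dev->queue, struct my_req, list);
	rc = dev->stop_io(req);
	if (!rc)
		req->retries++;		/* undo: preemption is not an error */
	return rc;
}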
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 4b60ede07f0e..be55fb2b1b1c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
518 return; 518 return;
519 new_incr->rn = rn; 519 new_incr->rn = rn;
520 new_incr->standby = standby; 520 new_incr->standby = standby;
521 if (!standby)
522 new_incr->usecount = 1;
521 last_rn = 0; 523 last_rn = 0;
522 prev = &sclp_mem_list; 524 prev = &sclp_mem_list;
523 list_for_each_entry(incr, &sclp_mem_list, list) { 525 list_for_each_entry(incr, &sclp_mem_list, list) {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 83cea9a55e2f..1b3924c2fffd 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device)
236 disk->major = tapeblock_major; 236 disk->major = tapeblock_major;
237 disk->first_minor = device->first_minor; 237 disk->first_minor = device->first_minor;
238 disk->fops = &tapeblock_fops; 238 disk->fops = &tapeblock_fops;
239 disk->events = DISK_EVENT_MEDIA_CHANGE;
240 disk->private_data = tape_get_device(device); 239 disk->private_data = tape_get_device(device);
241 disk->queue = blkdat->request_queue; 240 disk->queue = blkdat->request_queue;
242 set_capacity(disk, 0); 241 set_capacity(disk, 0);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e2d45c91b8e8..9689d41c7888 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = {
1292 .use_clustering = ENABLE_CLUSTERING, 1292 .use_clustering = ENABLE_CLUSTERING,
1293}; 1293};
1294 1294
1295static const struct of_device_id qpti_match[];
1295static int __devinit qpti_sbus_probe(struct platform_device *op) 1296static int __devinit qpti_sbus_probe(struct platform_device *op)
1296{ 1297{
1298 const struct of_device_id *match;
1297 struct scsi_host_template *tpnt; 1299 struct scsi_host_template *tpnt;
1298 struct device_node *dp = op->dev.of_node; 1300 struct device_node *dp = op->dev.of_node;
1299 struct Scsi_Host *host; 1301 struct Scsi_Host *host;
@@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op)
1301 static int nqptis; 1303 static int nqptis;
1302 const char *fcode; 1304 const char *fcode;
1303 1305
1304 if (!op->dev.of_match) 1306 match = of_match_device(qpti_match, &op->dev);
1307 if (!match)
1305 return -EINVAL; 1308 return -EINVAL;
1306 tpnt = op->dev.of_match->data; 1309 tpnt = match->data;
1307 1310
1308 /* Sometimes Antares cards come up not completely 1311 /* Sometimes Antares cards come up not completely
1309 * setup, and we get a report of a zero IRQ. 1312 * setup, and we get a report of a zero IRQ.
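This qlogicpti hunk, and the of_serial and fsl_qe_udc hunks further down, are the same conversion: the driver can no longer read ofdev->dev.of_match (the field was removed), so it calls of_match_device() itself, which in turn requires forward-declaring the match table above the probe function. A sketch of the shape with a hypothetical table:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id my_match[];	/* defined after probe() */

static int my_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	unsigned long kind;

	match = of_match_device(my_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	kind = (unsigned long)match->data;	/* per-compatible private data */
	/* ... set up the device according to kind ... */
	return 0;
}

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,device-a", .data = (void *)1 },
	{ .compatible = "vendor,device-b", .data = (void *)2 },
	{ /* sentinel */ },
};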
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9901b8f8443..ec1803a48723 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache;
74 */ 74 */
75#define SCSI_QUEUE_DELAY 3 75#define SCSI_QUEUE_DELAY 3
76 76
77static void scsi_run_queue(struct request_queue *q);
78
79/* 77/*
80 * Function: scsi_unprep_request() 78 * Function: scsi_unprep_request()
81 * 79 *
@@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
161 blk_requeue_request(q, cmd->request); 159 blk_requeue_request(q, cmd->request);
162 spin_unlock_irqrestore(q->queue_lock, flags); 160 spin_unlock_irqrestore(q->queue_lock, flags);
163 161
164 scsi_run_queue(q); 162 kblockd_schedule_work(q, &device->requeue_work);
165 163
166 return 0; 164 return 0;
167} 165}
@@ -400,10 +398,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
400static void scsi_run_queue(struct request_queue *q) 398static void scsi_run_queue(struct request_queue *q)
401{ 399{
402 struct scsi_device *sdev = q->queuedata; 400 struct scsi_device *sdev = q->queuedata;
403 struct Scsi_Host *shost = sdev->host; 401 struct Scsi_Host *shost;
404 LIST_HEAD(starved_list); 402 LIST_HEAD(starved_list);
405 unsigned long flags; 403 unsigned long flags;
406 404
405 /* if the device is dead, sdev will be NULL, so no queue to run */
406 if (!sdev)
407 return;
408
409 shost = sdev->host;
407 if (scsi_target(sdev)->single_lun) 410 if (scsi_target(sdev)->single_lun)
408 scsi_single_lun_run(sdev); 411 scsi_single_lun_run(sdev);
409 412
@@ -433,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q)
433 continue; 436 continue;
434 } 437 }
435 438
436 blk_run_queue_async(sdev->request_queue); 439 spin_unlock(shost->host_lock);
440 spin_lock(sdev->request_queue->queue_lock);
441 __blk_run_queue(sdev->request_queue);
442 spin_unlock(sdev->request_queue->queue_lock);
443 spin_lock(shost->host_lock);
437 } 444 }
438 /* put any unprocessed entries back */ 445 /* put any unprocessed entries back */
439 list_splice(&starved_list, &shost->starved_list); 446 list_splice(&starved_list, &shost->starved_list);
@@ -442,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q)
442 blk_run_queue(q); 449 blk_run_queue(q);
443} 450}
444 451
452void scsi_requeue_run_queue(struct work_struct *work)
453{
454 struct scsi_device *sdev;
455 struct request_queue *q;
456
457 sdev = container_of(work, struct scsi_device, requeue_work);
458 q = sdev->request_queue;
459 scsi_run_queue(q);
460}
461
445/* 462/*
446 * Function: scsi_requeue_command() 463 * Function: scsi_requeue_command()
447 * 464 *
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 087821fac8fe..58584dc0724a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
242 int display_failure_msg = 1, ret; 242 int display_failure_msg = 1, ret;
243 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 243 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
244 extern void scsi_evt_thread(struct work_struct *work); 244 extern void scsi_evt_thread(struct work_struct *work);
245 extern void scsi_requeue_run_queue(struct work_struct *work);
245 246
246 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 247 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
247 GFP_ATOMIC); 248 GFP_ATOMIC);
@@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
264 INIT_LIST_HEAD(&sdev->event_list); 265 INIT_LIST_HEAD(&sdev->event_list);
265 spin_lock_init(&sdev->list_lock); 266 spin_lock_init(&sdev->list_lock);
266 INIT_WORK(&sdev->event_work, scsi_evt_thread); 267 INIT_WORK(&sdev->event_work, scsi_evt_thread);
268 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
267 269
268 sdev->sdev_gendev.parent = get_device(&starget->dev); 270 sdev->sdev_gendev.parent = get_device(&starget->dev);
269 sdev->sdev_target = starget; 271 sdev->sdev_target = starget;
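The scsi_lib/scsi_scan pair defers the requeue-triggered queue run to a work item: the requeue fast path (which can run from completion context) only schedules sdev->requeue_work, the work function performs the actual scsi_run_queue() from process context, and scsi_alloc_sdev() initialises the work. A small sketch of the "defer via an embedded work_struct" pattern with invented names; run_queue() stands in for scsi_run_queue():

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct request_queue;
void run_queue(struct request_queue *q);	/* stand-in for scsi_run_queue() */

struct my_sdev {
	struct request_queue *q;
	struct work_struct requeue_work;
};

static void my_requeue_worker(struct work_struct *work)
{
	struct my_sdev *sdev = container_of(work, struct my_sdev, requeue_work);

	run_queue(sdev->q);		/* now in process context */
}

static void my_sdev_init(struct my_sdev *sdev)
{
	INIT_WORK(&sdev->requeue_work, my_requeue_worker);
}

static void my_requeue(struct my_sdev *sdev)
{
	/* Called from completion context: do not run the queue directly,
	 * just kick the worker. */
	schedule_work(&sdev->requeue_work);
}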
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 0e8eec516df4..c911b2419abb 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
80/* 80/*
81 * Try to register a serial port 81 * Try to register a serial port
82 */ 82 */
83static struct of_device_id of_platform_serial_table[];
83static int __devinit of_platform_serial_probe(struct platform_device *ofdev) 84static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
84{ 85{
86 const struct of_device_id *match;
85 struct of_serial_info *info; 87 struct of_serial_info *info;
86 struct uart_port port; 88 struct uart_port port;
87 int port_type; 89 int port_type;
88 int ret; 90 int ret;
89 91
90 if (!ofdev->dev.of_match) 92 match = of_match_device(of_platform_serial_table, &ofdev->dev);
93 if (!match)
91 return -EINVAL; 94 return -EINVAL;
92 95
93 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) 96 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
@@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
97 if (info == NULL) 100 if (info == NULL)
98 return -ENOMEM; 101 return -ENOMEM;
99 102
100 port_type = (unsigned long)ofdev->dev.of_match->data; 103 port_type = (unsigned long)match->data;
101 ret = of_platform_serial_setup(ofdev, port_type, &port); 104 ret = of_platform_serial_setup(ofdev, port_type, &port);
102 if (ret) 105 if (ret)
103 goto out; 106 goto out;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 36613b37c504..3a68e09309f7 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev)
2539} 2539}
2540 2540
2541/* Driver probe functions */ 2541/* Driver probe functions */
2542static const struct of_device_id qe_udc_match[];
2542static int __devinit qe_udc_probe(struct platform_device *ofdev) 2543static int __devinit qe_udc_probe(struct platform_device *ofdev)
2543{ 2544{
2545 const struct of_device_id *match;
2544 struct device_node *np = ofdev->dev.of_node; 2546 struct device_node *np = ofdev->dev.of_node;
2545 struct qe_ep *ep; 2547 struct qe_ep *ep;
2546 unsigned int ret = 0; 2548 unsigned int ret = 0;
2547 unsigned int i; 2549 unsigned int i;
2548 const void *prop; 2550 const void *prop;
2549 2551
2550 if (!ofdev->dev.of_match) 2552 match = of_match_device(qe_udc_match, &ofdev->dev);
2553 if (!match)
2551 return -EINVAL; 2554 return -EINVAL;
2552 2555
2553 prop = of_get_property(np, "mode", NULL); 2556 prop = of_get_property(np, "mode", NULL);
@@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev)
2561 return -ENOMEM; 2564 return -ENOMEM;
2562 } 2565 }
2563 2566
2564 udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; 2567 udc_controller->soc_type = (unsigned long)match->data;
2565 udc_controller->usb_regs = of_iomap(np, 0); 2568 udc_controller->usb_regs = of_iomap(np, 0);
2566 if (!udc_controller->usb_regs) { 2569 if (!udc_controller->usb_regs) {
2567 ret = -ENOMEM; 2570 ret = -ENOMEM;
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 82acb8dc4aa1..6183a57eb69d 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -66,7 +66,7 @@
66 * have. Allow 1% either way on the nominal for TVs. 66 * have. Allow 1% either way on the nominal for TVs.
67 */ 67 */
68#define NR_MONTYPES 6 68#define NR_MONTYPES 6
69static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { 69static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
70 { /* TV */ 70 { /* TV */
71 .hfmin = 15469, 71 .hfmin = 15469,
72 .hfmax = 15781, 72 .hfmax = 15781,
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = {
873/* 873/*
874 * Everything after here is initialisation!!! 874 * Everything after here is initialisation!!!
875 */ 875 */
876static struct fb_videomode modedb[] __initdata = { 876static struct fb_videomode modedb[] __devinitdata = {
877 { /* 320x256 @ 50Hz */ 877 { /* 320x256 @ 50Hz */
878 NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, 878 NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2,
879 FB_SYNC_COMP_HIGH_ACT, 879 FB_SYNC_COMP_HIGH_ACT,
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = {
925 } 925 }
926}; 926};
927 927
928static struct fb_videomode __initdata 928static struct fb_videomode acornfb_default_mode __devinitdata = {
929acornfb_default_mode = {
930 .name = NULL, 929 .name = NULL,
931 .refresh = 60, 930 .refresh = 60,
932 .xres = 640, 931 .xres = 640,
@@ -942,7 +941,7 @@ acornfb_default_mode = {
942 .vmode = FB_VMODE_NONINTERLACED 941 .vmode = FB_VMODE_NONINTERLACED
943}; 942};
944 943
945static void __init acornfb_init_fbinfo(void) 944static void __devinit acornfb_init_fbinfo(void)
946{ 945{
947 static int first = 1; 946 static int first = 1;
948 947
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void)
1018 * size can optionally be followed by 'M' or 'K' for 1017 * size can optionally be followed by 'M' or 'K' for
1019 * MB or KB respectively. 1018 * MB or KB respectively.
1020 */ 1019 */
1021static void __init 1020static void __devinit acornfb_parse_mon(char *opt)
1022acornfb_parse_mon(char *opt)
1023{ 1021{
1024 char *p = opt; 1022 char *p = opt;
1025 1023
@@ -1066,8 +1064,7 @@ bad:
1066 current_par.montype = -1; 1064 current_par.montype = -1;
1067} 1065}
1068 1066
1069static void __init 1067static void __devinit acornfb_parse_montype(char *opt)
1070acornfb_parse_montype(char *opt)
1071{ 1068{
1072 current_par.montype = -2; 1069 current_par.montype = -2;
1073 1070
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt)
1108 } 1105 }
1109} 1106}
1110 1107
1111static void __init 1108static void __devinit acornfb_parse_dram(char *opt)
1112acornfb_parse_dram(char *opt)
1113{ 1109{
1114 unsigned int size; 1110 unsigned int size;
1115 1111
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt)
1134static struct options { 1130static struct options {
1135 char *name; 1131 char *name;
1136 void (*parse)(char *opt); 1132 void (*parse)(char *opt);
1137} opt_table[] __initdata = { 1133} opt_table[] __devinitdata = {
1138 { "mon", acornfb_parse_mon }, 1134 { "mon", acornfb_parse_mon },
1139 { "montype", acornfb_parse_montype }, 1135 { "montype", acornfb_parse_montype },
1140 { "dram", acornfb_parse_dram }, 1136 { "dram", acornfb_parse_dram },
1141 { NULL, NULL } 1137 { NULL, NULL }
1142}; 1138};
1143 1139
1144int __init 1140static int __devinit acornfb_setup(char *options)
1145acornfb_setup(char *options)
1146{ 1141{
1147 struct options *optp; 1142 struct options *optp;
1148 char *opt; 1143 char *opt;
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options)
1179 * Detect type of monitor connected 1174 * Detect type of monitor connected
1180 * For now, we just assume SVGA 1175 * For now, we just assume SVGA
1181 */ 1176 */
1182static int __init 1177static int __devinit acornfb_detect_monitortype(void)
1183acornfb_detect_monitortype(void)
1184{ 1178{
1185 return 4; 1179 return 4;
1186} 1180}
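The acornfb hunks retag everything reachable from the probe path from __init/__initdata to __devinit/__devinitdata: __init memory is discarded after boot, so data and helpers that a (potentially late) probe still touches must use the __devinit variants, which are only discarded when hotplug support is configured out. A tiny sketch of the annotation rule, with an invented driver:

#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>

/* Consumed from the probe path: __devinitdata, not __initdata, so a
 * late probe does not read memory that free_initmem() already released. */
static struct fb_videomode default_mode __devinitdata = {
	.refresh = 60,
	.xres	 = 640,
	.yres	 = 480,
};

static int __devinit my_fb_probe(struct platform_device *pdev)
{
	/* may run long after boot-time init sections are freed */
	return default_mode.xres ? 0 : -ENODEV;
}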
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index e0c2284924b6..5aac00eb1830 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -42,9 +42,34 @@
42 42
43#define FBPIXMAPSIZE (1024 * 8) 43#define FBPIXMAPSIZE (1024 * 8)
44 44
45static DEFINE_MUTEX(registration_lock);
45struct fb_info *registered_fb[FB_MAX] __read_mostly; 46struct fb_info *registered_fb[FB_MAX] __read_mostly;
46int num_registered_fb __read_mostly; 47int num_registered_fb __read_mostly;
47 48
49static struct fb_info *get_fb_info(unsigned int idx)
50{
51 struct fb_info *fb_info;
52
53 if (idx >= FB_MAX)
54 return ERR_PTR(-ENODEV);
55
56 mutex_lock(&registration_lock);
57 fb_info = registered_fb[idx];
58 if (fb_info)
59 atomic_inc(&fb_info->count);
60 mutex_unlock(&registration_lock);
61
62 return fb_info;
63}
64
65static void put_fb_info(struct fb_info *fb_info)
66{
67 if (!atomic_dec_and_test(&fb_info->count))
68 return;
69 if (fb_info->fbops->fb_destroy)
70 fb_info->fbops->fb_destroy(fb_info);
71}
72
48int lock_fb_info(struct fb_info *info) 73int lock_fb_info(struct fb_info *info)
49{ 74{
50 mutex_lock(&info->lock); 75 mutex_lock(&info->lock);
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
647 672
648static void *fb_seq_start(struct seq_file *m, loff_t *pos) 673static void *fb_seq_start(struct seq_file *m, loff_t *pos)
649{ 674{
675 mutex_lock(&registration_lock);
650 return (*pos < FB_MAX) ? pos : NULL; 676 return (*pos < FB_MAX) ? pos : NULL;
651} 677}
652 678
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
658 684
659static void fb_seq_stop(struct seq_file *m, void *v) 685static void fb_seq_stop(struct seq_file *m, void *v)
660{ 686{
687 mutex_unlock(&registration_lock);
661} 688}
662 689
663static int fb_seq_show(struct seq_file *m, void *v) 690static int fb_seq_show(struct seq_file *m, void *v)
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = {
690 .release = seq_release, 717 .release = seq_release,
691}; 718};
692 719
693static ssize_t 720/*
694fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 721 * We hold a reference to the fb_info in file->private_data,
722 * but if the current registered fb has changed, we don't
723 * actually want to use it.
724 *
725 * So look up the fb_info using the inode minor number,
726 * and just verify it against the reference we have.
727 */
728static struct fb_info *file_fb_info(struct file *file)
695{ 729{
696 unsigned long p = *ppos;
697 struct inode *inode = file->f_path.dentry->d_inode; 730 struct inode *inode = file->f_path.dentry->d_inode;
698 int fbidx = iminor(inode); 731 int fbidx = iminor(inode);
699 struct fb_info *info = registered_fb[fbidx]; 732 struct fb_info *info = registered_fb[fbidx];
733
734 if (info != file->private_data)
735 info = NULL;
736 return info;
737}
738
739static ssize_t
740fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
741{
742 unsigned long p = *ppos;
743 struct fb_info *info = file_fb_info(file);
700 u8 *buffer, *dst; 744 u8 *buffer, *dst;
701 u8 __iomem *src; 745 u8 __iomem *src;
702 int c, cnt = 0, err = 0; 746 int c, cnt = 0, err = 0;
@@ -761,9 +805,7 @@ static ssize_t
761fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 805fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
762{ 806{
763 unsigned long p = *ppos; 807 unsigned long p = *ppos;
764 struct inode *inode = file->f_path.dentry->d_inode; 808 struct fb_info *info = file_fb_info(file);
765 int fbidx = iminor(inode);
766 struct fb_info *info = registered_fb[fbidx];
767 u8 *buffer, *src; 809 u8 *buffer, *src;
768 u8 __iomem *dst; 810 u8 __iomem *dst;
769 int c, cnt = 0, err = 0; 811 int c, cnt = 0, err = 0;
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1141 1183
1142static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1184static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1143{ 1185{
1144 struct inode *inode = file->f_path.dentry->d_inode; 1186 struct fb_info *info = file_fb_info(file);
1145 int fbidx = iminor(inode);
1146 struct fb_info *info = registered_fb[fbidx];
1147 1187
1188 if (!info)
1189 return -ENODEV;
1148 return do_fb_ioctl(info, cmd, arg); 1190 return do_fb_ioctl(info, cmd, arg);
1149} 1191}
1150 1192
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1265static long fb_compat_ioctl(struct file *file, unsigned int cmd, 1307static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1266 unsigned long arg) 1308 unsigned long arg)
1267{ 1309{
1268 struct inode *inode = file->f_path.dentry->d_inode; 1310 struct fb_info *info = file_fb_info(file);
1269 int fbidx = iminor(inode); 1311 struct fb_ops *fb;
1270 struct fb_info *info = registered_fb[fbidx];
1271 struct fb_ops *fb = info->fbops;
1272 long ret = -ENOIOCTLCMD; 1312 long ret = -ENOIOCTLCMD;
1273 1313
1314 if (!info)
1315 return -ENODEV;
1316 fb = info->fbops;
1274 switch(cmd) { 1317 switch(cmd) {
1275 case FBIOGET_VSCREENINFO: 1318 case FBIOGET_VSCREENINFO:
1276 case FBIOPUT_VSCREENINFO: 1319 case FBIOPUT_VSCREENINFO:
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1303static int 1346static int
1304fb_mmap(struct file *file, struct vm_area_struct * vma) 1347fb_mmap(struct file *file, struct vm_area_struct * vma)
1305{ 1348{
1306 int fbidx = iminor(file->f_path.dentry->d_inode); 1349 struct fb_info *info = file_fb_info(file);
1307 struct fb_info *info = registered_fb[fbidx]; 1350 struct fb_ops *fb;
1308 struct fb_ops *fb = info->fbops;
1309 unsigned long off; 1351 unsigned long off;
1310 unsigned long start; 1352 unsigned long start;
1311 u32 len; 1353 u32 len;
1312 1354
1355 if (!info)
1356 return -ENODEV;
1313 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 1357 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1314 return -EINVAL; 1358 return -EINVAL;
1315 off = vma->vm_pgoff << PAGE_SHIFT; 1359 off = vma->vm_pgoff << PAGE_SHIFT;
1360 fb = info->fbops;
1316 if (!fb) 1361 if (!fb)
1317 return -ENODEV; 1362 return -ENODEV;
1318 mutex_lock(&info->mm_lock); 1363 mutex_lock(&info->mm_lock);
@@ -1361,14 +1406,16 @@ __releases(&info->lock)
1361 struct fb_info *info; 1406 struct fb_info *info;
1362 int res = 0; 1407 int res = 0;
1363 1408
1364 if (fbidx >= FB_MAX) 1409 info = get_fb_info(fbidx);
1365 return -ENODEV; 1410 if (!info) {
1366 info = registered_fb[fbidx];
1367 if (!info)
1368 request_module("fb%d", fbidx); 1411 request_module("fb%d", fbidx);
1369 info = registered_fb[fbidx]; 1412 info = get_fb_info(fbidx);
1370 if (!info) 1413 if (!info)
1371 return -ENODEV; 1414 return -ENODEV;
1415 }
1416 if (IS_ERR(info))
1417 return PTR_ERR(info);
1418
1372 mutex_lock(&info->lock); 1419 mutex_lock(&info->lock);
1373 if (!try_module_get(info->fbops->owner)) { 1420 if (!try_module_get(info->fbops->owner)) {
1374 res = -ENODEV; 1421 res = -ENODEV;
@@ -1386,6 +1433,8 @@ __releases(&info->lock)
1386#endif 1433#endif
1387out: 1434out:
1388 mutex_unlock(&info->lock); 1435 mutex_unlock(&info->lock);
1436 if (res)
1437 put_fb_info(info);
1389 return res; 1438 return res;
1390} 1439}
1391 1440
@@ -1401,6 +1450,7 @@ __releases(&info->lock)
1401 info->fbops->fb_release(info,1); 1450 info->fbops->fb_release(info,1);
1402 module_put(info->fbops->owner); 1451 module_put(info->fbops->owner);
1403 mutex_unlock(&info->lock); 1452 mutex_unlock(&info->lock);
1453 put_fb_info(info);
1404 return 0; 1454 return 0;
1405} 1455}
1406 1456
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
1487 return false; 1537 return false;
1488} 1538}
1489 1539
1540static int do_unregister_framebuffer(struct fb_info *fb_info);
1541
1490#define VGA_FB_PHYS 0xA0000 1542#define VGA_FB_PHYS 0xA0000
1491void remove_conflicting_framebuffers(struct apertures_struct *a, 1543static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
1492 const char *name, bool primary) 1544 const char *name, bool primary)
1493{ 1545{
1494 int i; 1546 int i;
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a,
1510 printk(KERN_INFO "fb: conflicting fb hw usage " 1562 printk(KERN_INFO "fb: conflicting fb hw usage "
1511 "%s vs %s - removing generic driver\n", 1563 "%s vs %s - removing generic driver\n",
1512 name, registered_fb[i]->fix.id); 1564 name, registered_fb[i]->fix.id);
1513 unregister_framebuffer(registered_fb[i]); 1565 do_unregister_framebuffer(registered_fb[i]);
1514 } 1566 }
1515 } 1567 }
1516} 1568}
1517EXPORT_SYMBOL(remove_conflicting_framebuffers);
1518 1569
1519/** 1570static int do_register_framebuffer(struct fb_info *fb_info)
1520 * register_framebuffer - registers a frame buffer device
1521 * @fb_info: frame buffer info structure
1522 *
1523 * Registers a frame buffer device @fb_info.
1524 *
1525 * Returns negative errno on error, or zero for success.
1526 *
1527 */
1528
1529int
1530register_framebuffer(struct fb_info *fb_info)
1531{ 1571{
1532 int i; 1572 int i;
1533 struct fb_event event; 1573 struct fb_event event;
1534 struct fb_videomode mode; 1574 struct fb_videomode mode;
1535 1575
1536 if (num_registered_fb == FB_MAX)
1537 return -ENXIO;
1538
1539 if (fb_check_foreignness(fb_info)) 1576 if (fb_check_foreignness(fb_info))
1540 return -ENOSYS; 1577 return -ENOSYS;
1541 1578
1542 remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, 1579 do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
1543 fb_is_primary_device(fb_info)); 1580 fb_is_primary_device(fb_info));
1544 1581
1582 if (num_registered_fb == FB_MAX)
1583 return -ENXIO;
1584
1545 num_registered_fb++; 1585 num_registered_fb++;
1546 for (i = 0 ; i < FB_MAX; i++) 1586 for (i = 0 ; i < FB_MAX; i++)
1547 if (!registered_fb[i]) 1587 if (!registered_fb[i])
1548 break; 1588 break;
1549 fb_info->node = i; 1589 fb_info->node = i;
1590 atomic_set(&fb_info->count, 1);
1550 mutex_init(&fb_info->lock); 1591 mutex_init(&fb_info->lock);
1551 mutex_init(&fb_info->mm_lock); 1592 mutex_init(&fb_info->mm_lock);
1552 1593
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info)
1592 return 0; 1633 return 0;
1593} 1634}
1594 1635
1595 1636static int do_unregister_framebuffer(struct fb_info *fb_info)
1596/**
1597 * unregister_framebuffer - releases a frame buffer device
1598 * @fb_info: frame buffer info structure
1599 *
1600 * Unregisters a frame buffer device @fb_info.
1601 *
1602 * Returns negative errno on error, or zero for success.
1603 *
1604 * This function will also notify the framebuffer console
1605 * to release the driver.
1606 *
1607 * This is meant to be called within a driver's module_exit()
1608 * function. If this is called outside module_exit(), ensure
1609 * that the driver implements fb_open() and fb_release() to
1610 * check that no processes are using the device.
1611 */
1612
1613int
1614unregister_framebuffer(struct fb_info *fb_info)
1615{ 1637{
1616 struct fb_event event; 1638 struct fb_event event;
1617 int i, ret = 0; 1639 int i, ret = 0;
1618 1640
1619 i = fb_info->node; 1641 i = fb_info->node;
1620 if (!registered_fb[i]) { 1642 if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1621 ret = -EINVAL; 1643 return -EINVAL;
1622 goto done;
1623 }
1624
1625 1644
1626 if (!lock_fb_info(fb_info)) 1645 if (!lock_fb_info(fb_info))
1627 return -ENODEV; 1646 return -ENODEV;
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info)
1629 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); 1648 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1630 unlock_fb_info(fb_info); 1649 unlock_fb_info(fb_info);
1631 1650
1632 if (ret) { 1651 if (ret)
1633 ret = -EINVAL; 1652 return -EINVAL;
1634 goto done;
1635 }
1636 1653
1637 if (fb_info->pixmap.addr && 1654 if (fb_info->pixmap.addr &&
1638 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1655 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1639 kfree(fb_info->pixmap.addr); 1656 kfree(fb_info->pixmap.addr);
1640 fb_destroy_modelist(&fb_info->modelist); 1657 fb_destroy_modelist(&fb_info->modelist);
1641 registered_fb[i]=NULL; 1658 registered_fb[i] = NULL;
1642 num_registered_fb--; 1659 num_registered_fb--;
1643 fb_cleanup_device(fb_info); 1660 fb_cleanup_device(fb_info);
1644 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1661 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info)
1646 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1663 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1647 1664
1648 /* this may free fb info */ 1665 /* this may free fb info */
1649 if (fb_info->fbops->fb_destroy) 1666 put_fb_info(fb_info);
1650 fb_info->fbops->fb_destroy(fb_info); 1667 return 0;
1651done: 1668}
1669
1670void remove_conflicting_framebuffers(struct apertures_struct *a,
1671 const char *name, bool primary)
1672{
1673 mutex_lock(&registration_lock);
1674 do_remove_conflicting_framebuffers(a, name, primary);
1675 mutex_unlock(&registration_lock);
1676}
1677EXPORT_SYMBOL(remove_conflicting_framebuffers);
1678
1679/**
1680 * register_framebuffer - registers a frame buffer device
1681 * @fb_info: frame buffer info structure
1682 *
1683 * Registers a frame buffer device @fb_info.
1684 *
1685 * Returns negative errno on error, or zero for success.
1686 *
1687 */
1688int
1689register_framebuffer(struct fb_info *fb_info)
1690{
1691 int ret;
1692
1693 mutex_lock(&registration_lock);
1694 ret = do_register_framebuffer(fb_info);
1695 mutex_unlock(&registration_lock);
1696
1697 return ret;
1698}
1699
1700/**
1701 * unregister_framebuffer - releases a frame buffer device
1702 * @fb_info: frame buffer info structure
1703 *
1704 * Unregisters a frame buffer device @fb_info.
1705 *
1706 * Returns negative errno on error, or zero for success.
1707 *
1708 * This function will also notify the framebuffer console
1709 * to release the driver.
1710 *
1711 * This is meant to be called within a driver's module_exit()
1712 * function. If this is called outside module_exit(), ensure
1713 * that the driver implements fb_open() and fb_release() to
1714 * check that no processes are using the device.
1715 */
1716int
1717unregister_framebuffer(struct fb_info *fb_info)
1718{
1719 int ret;
1720
1721 mutex_lock(&registration_lock);
1722 ret = do_unregister_framebuffer(fb_info);
1723 mutex_unlock(&registration_lock);
1724
1652 return ret; 1725 return ret;
1653} 1726}
1654 1727
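
The fbmem.c hunks above stop dereferencing registered_fb[] directly at read/write/ioctl/mmap time and instead look the fb_info up under registration_lock, pin it with get_fb_info() and drop it with put_fb_info(), so fb_destroy only runs once the last opener is gone. A minimal standalone sketch of that get/put idiom, using C11 atomics in place of the kernel's atomic_t (the struct and function names below are illustrative, not the fbmem API):

#include <stdatomic.h>

struct object {
        atomic_int count;                       /* set to 1 when registered */
        void (*destroy)(struct object *);
};

static struct object *get_object(struct object *obj)
{
        if (obj)
                atomic_fetch_add(&obj->count, 1);
        return obj;
}

static void put_object(struct object *obj)
{
        /* tear down only when the last reference is dropped */
        if (atomic_fetch_sub(&obj->count, 1) == 1 && obj->destroy)
                obj->destroy(obj);
}

In the patch the lookup itself still happens with registration_lock held, so the count can only be raised on an fb_info that is still registered.
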
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 528bceb220fd..eed5436ffb51 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = {
185 .fops = &mpc8xxx_wdt_fops, 185 .fops = &mpc8xxx_wdt_fops,
186}; 186};
187 187
188static const struct of_device_id mpc8xxx_wdt_match[];
188static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) 189static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev)
189{ 190{
190 int ret; 191 int ret;
192 const struct of_device_id *match;
191 struct device_node *np = ofdev->dev.of_node; 193 struct device_node *np = ofdev->dev.of_node;
192 struct mpc8xxx_wdt_type *wdt_type; 194 struct mpc8xxx_wdt_type *wdt_type;
193 u32 freq = fsl_get_sys_freq(); 195 u32 freq = fsl_get_sys_freq();
194 bool enabled; 196 bool enabled;
195 197
196 if (!ofdev->dev.of_match) 198 match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
199 if (!match)
197 return -EINVAL; 200 return -EINVAL;
198 wdt_type = ofdev->dev.of_match->data; 201 wdt_type = match->data;
199 202
200 if (!freq || freq == -1) 203 if (!freq || freq == -1)
201 return -EINVAL; 204 return -EINVAL;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5147bdd3b8e1..257b00e98428 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1102 if (!bdev->bd_part) 1102 if (!bdev->bd_part)
1103 goto out_clear; 1103 goto out_clear;
1104 1104
1105 ret = 0;
1105 if (disk->fops->open) { 1106 if (disk->fops->open) {
1106 ret = disk->fops->open(bdev, mode); 1107 ret = disk->fops->open(bdev, mode);
1107 if (ret == -ERESTARTSYS) { 1108 if (ret == -ERESTARTSYS) {
@@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1118 put_disk(disk); 1119 put_disk(disk);
1119 goto restart; 1120 goto restart;
1120 } 1121 }
1121 if (ret)
1122 goto out_clear;
1123 } 1122 }
1123 /*
1124 * If the device is invalidated, rescan partition
1125 * if open succeeded or failed with -ENOMEDIUM.
1126 * The latter is necessary to prevent ghost
1127 * partitions on a removed medium.
1128 */
1129 if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
1130 rescan_partitions(disk, bdev);
1131 if (ret)
1132 goto out_clear;
1133
1124 if (!bdev->bd_openers) { 1134 if (!bdev->bd_openers) {
1125 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1135 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1126 bdi = blk_get_backing_dev_info(bdev); 1136 bdi = blk_get_backing_dev_info(bdev);
@@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1128 bdi = &default_backing_dev_info; 1138 bdi = &default_backing_dev_info;
1129 bdev_inode_switch_bdi(bdev->bd_inode, bdi); 1139 bdev_inode_switch_bdi(bdev->bd_inode, bdi);
1130 } 1140 }
1131 if (bdev->bd_invalidated)
1132 rescan_partitions(disk, bdev);
1133 } else { 1141 } else {
1134 struct block_device *whole; 1142 struct block_device *whole;
1135 whole = bdget_disk(disk, 0); 1143 whole = bdget_disk(disk, 0);
@@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1153 } 1161 }
1154 } else { 1162 } else {
1155 if (bdev->bd_contains == bdev) { 1163 if (bdev->bd_contains == bdev) {
1156 if (bdev->bd_disk->fops->open) { 1164 ret = 0;
1165 if (bdev->bd_disk->fops->open)
1157 ret = bdev->bd_disk->fops->open(bdev, mode); 1166 ret = bdev->bd_disk->fops->open(bdev, mode);
1158 if (ret) 1167 /* the same as first opener case, read comment there */
1159 goto out_unlock_bdev; 1168 if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
1160 }
1161 if (bdev->bd_invalidated)
1162 rescan_partitions(bdev->bd_disk, bdev); 1169 rescan_partitions(bdev->bd_disk, bdev);
1170 if (ret)
1171 goto out_unlock_bdev;
1163 } 1172 }
1164 /* only one opener holds refs to the module and disk */ 1173 /* only one opener holds refs to the module and disk */
1165 module_put(disk->fops->owner); 1174 module_put(disk->fops->owner);
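
The __blkdev_get() hunks reorder the error handling so a device that was invalidated by media removal is rescanned even when ->open() fails with -ENOMEDIUM, clearing ghost partitions, and only then is the open error propagated. A rough standalone sketch of that flow (ENOMEDIUM is a Linux errno value; the struct and helpers below are invented for the example):

#include <errno.h>
#include <stdbool.h>

struct dev {
        bool invalidated;
        int (*open)(struct dev *);
};

static void rescan_partitions(struct dev *d)
{
        d->invalidated = false;         /* stand-in for the real rescan */
}

static int example_get(struct dev *d)
{
        int ret = 0;                    /* open is optional, default success */

        if (d->open)
                ret = d->open(d);
        /* rescan on success or on -ENOMEDIUM, so a removed medium does
           not leave stale partitions behind */
        if (d->invalidated && (!ret || ret == -ENOMEDIUM))
                rescan_partitions(d);
        return ret;                     /* only now report the open error */
}
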
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 5d505aaa72fb..44ea5b92e1ba 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
178 178
179 if (value) { 179 if (value) {
180 acl = posix_acl_from_xattr(value, size); 180 acl = posix_acl_from_xattr(value, size);
181 if (IS_ERR(acl))
182 return PTR_ERR(acl);
183
181 if (acl) { 184 if (acl) {
182 ret = posix_acl_valid(acl); 185 ret = posix_acl_valid(acl);
183 if (ret) 186 if (ret)
184 goto out; 187 goto out;
185 } else if (IS_ERR(acl)) {
186 return PTR_ERR(acl);
187 } 188 }
188 } 189 }
189 190
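
The acl.c change reorders the checks on posix_acl_from_xattr()'s result: an error pointer is non-NULL, so with the old `if (acl) ... else if (IS_ERR(acl))` ordering the error branch was unreachable and malformed xattr data slipped through to validation. A standalone sketch of the corrected ordering, with cut-down stand-ins for the <linux/err.h> helpers:

#include <stdint.h>

#define MAX_ERRNO 4095

static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct acl;
static int acl_valid(const struct acl *acl) { (void)acl; return 0; }

static int example_set(const struct acl *acl)
{
        if (IS_ERR(acl))
                return (int)PTR_ERR(acl);   /* error pointer: reject first */
        if (acl)
                return acl_valid(acl);      /* real ACL: validate it */
        return 0;                           /* NULL: nothing to validate */
}
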
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cd52f7f556ef..9ee6bd55e16c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8856,23 +8856,38 @@ out:
8856int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 8856int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8857{ 8857{
8858 struct btrfs_space_info *space_info; 8858 struct btrfs_space_info *space_info;
8859 struct btrfs_super_block *disk_super;
8860 u64 features;
8861 u64 flags;
8862 int mixed = 0;
8859 int ret; 8863 int ret;
8860 8864
8861 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, 8865 disk_super = &fs_info->super_copy;
8862 &space_info); 8866 if (!btrfs_super_root(disk_super))
8863 if (ret) 8867 return 1;
8864 return ret;
8865 8868
8866 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, 8869 features = btrfs_super_incompat_flags(disk_super);
8867 &space_info); 8870 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8868 if (ret) 8871 mixed = 1;
8869 return ret;
8870 8872
8871 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, 8873 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8872 &space_info); 8874 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8873 if (ret) 8875 if (ret)
8874 return ret; 8876 goto out;
8875 8877
8878 if (mixed) {
8879 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8880 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8881 } else {
8882 flags = BTRFS_BLOCK_GROUP_METADATA;
8883 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8884 if (ret)
8885 goto out;
8886
8887 flags = BTRFS_BLOCK_GROUP_DATA;
8888 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8889 }
8890out:
8876 return ret; 8891 return ret;
8877} 8892}
8878 8893
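
btrfs_init_space_info() now consults the MIXED_GROUPS incompat bit: with mixed block groups, metadata and data share one space_info, otherwise they are set up separately after the system group. A simplified sketch of that branching (the flag values and register_space() helper below are made up for illustration):

#include <stdbool.h>

#define GROUP_SYSTEM   (1u << 0)
#define GROUP_METADATA (1u << 1)
#define GROUP_DATA     (1u << 2)

static int register_space(unsigned int flags) { (void)flags; return 0; }

static int init_space_info(bool mixed)
{
        int ret;

        ret = register_space(GROUP_SYSTEM);
        if (ret)
                return ret;
        if (mixed)                      /* one combined space_info */
                return register_space(GROUP_METADATA | GROUP_DATA);
        ret = register_space(GROUP_METADATA);
        if (ret)
                return ret;
        return register_space(GROUP_DATA);
}
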
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ffb48d6c5433..2616f7ed4799 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
81 iflags |= FS_NOATIME_FL; 81 iflags |= FS_NOATIME_FL;
82 if (flags & BTRFS_INODE_DIRSYNC) 82 if (flags & BTRFS_INODE_DIRSYNC)
83 iflags |= FS_DIRSYNC_FL; 83 iflags |= FS_DIRSYNC_FL;
84 if (flags & BTRFS_INODE_NODATACOW)
85 iflags |= FS_NOCOW_FL;
86
87 if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
88 iflags |= FS_COMPR_FL;
89 else if (flags & BTRFS_INODE_NOCOMPRESS)
90 iflags |= FS_NOCOMP_FL;
84 91
85 return iflags; 92 return iflags;
86} 93}
@@ -144,16 +151,13 @@ static int check_flags(unsigned int flags)
144 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ 151 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
145 FS_NOATIME_FL | FS_NODUMP_FL | \ 152 FS_NOATIME_FL | FS_NODUMP_FL | \
146 FS_SYNC_FL | FS_DIRSYNC_FL | \ 153 FS_SYNC_FL | FS_DIRSYNC_FL | \
147 FS_NOCOMP_FL | FS_COMPR_FL | \ 154 FS_NOCOMP_FL | FS_COMPR_FL |
148 FS_NOCOW_FL | FS_COW_FL)) 155 FS_NOCOW_FL))
149 return -EOPNOTSUPP; 156 return -EOPNOTSUPP;
150 157
151 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) 158 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
152 return -EINVAL; 159 return -EINVAL;
153 160
154 if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
155 return -EINVAL;
156
157 return 0; 161 return 0;
158} 162}
159 163
@@ -218,6 +222,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
218 ip->flags |= BTRFS_INODE_DIRSYNC; 222 ip->flags |= BTRFS_INODE_DIRSYNC;
219 else 223 else
220 ip->flags &= ~BTRFS_INODE_DIRSYNC; 224 ip->flags &= ~BTRFS_INODE_DIRSYNC;
225 if (flags & FS_NOCOW_FL)
226 ip->flags |= BTRFS_INODE_NODATACOW;
227 else
228 ip->flags &= ~BTRFS_INODE_NODATACOW;
221 229
222 /* 230 /*
223 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS 231 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
@@ -230,11 +238,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
230 } else if (flags & FS_COMPR_FL) { 238 } else if (flags & FS_COMPR_FL) {
231 ip->flags |= BTRFS_INODE_COMPRESS; 239 ip->flags |= BTRFS_INODE_COMPRESS;
232 ip->flags &= ~BTRFS_INODE_NOCOMPRESS; 240 ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
241 } else {
242 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
233 } 243 }
234 if (flags & FS_NOCOW_FL)
235 ip->flags |= BTRFS_INODE_NODATACOW;
236 else if (flags & FS_COW_FL)
237 ip->flags &= ~BTRFS_INODE_NODATACOW;
238 244
239 trans = btrfs_join_transaction(root, 1); 245 trans = btrfs_join_transaction(root, 1);
240 BUG_ON(IS_ERR(trans)); 246 BUG_ON(IS_ERR(trans));
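
The ioctl.c hunks drop the unsupported FS_COW_FL (clearing FS_NOCOW_FL now re-enables COW) and extend the flag translation so NOCOW and compression state are reported back to userspace, with COMPRESS only surfacing when NOCOMPRESS is not also set. A small sketch of that one-way mapping, with illustrative bit values rather than the real BTRFS_INODE_*/FS_*_FL constants:

#define IN_COMPRESS   (1u << 0)
#define IN_NOCOMPRESS (1u << 1)
#define IN_NODATACOW  (1u << 2)

#define OUT_COMPR  (1u << 0)
#define OUT_NOCOMP (1u << 1)
#define OUT_NOCOW  (1u << 2)

static unsigned int flags_to_ioctl(unsigned int flags)
{
        unsigned int out = 0;

        if (flags & IN_NODATACOW)
                out |= OUT_NOCOW;
        if ((flags & IN_COMPRESS) && !(flags & IN_NOCOMPRESS))
                out |= OUT_COMPR;       /* compression only if not vetoed */
        else if (flags & IN_NOCOMPRESS)
                out |= OUT_NOCOMP;
        return out;
}
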
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 9fa08662a88d..2a5404c1c42f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
819 used |= CEPH_CAP_FILE_CACHE; 819 used |= CEPH_CAP_FILE_CACHE;
820 if (ci->i_wr_ref) 820 if (ci->i_wr_ref)
821 used |= CEPH_CAP_FILE_WR; 821 used |= CEPH_CAP_FILE_WR;
822 if (ci->i_wrbuffer_ref) 822 if (ci->i_wb_ref || ci->i_wrbuffer_ref)
823 used |= CEPH_CAP_FILE_BUFFER; 823 used |= CEPH_CAP_FILE_BUFFER;
824 return used; 824 return used;
825} 825}
@@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1990 if (got & CEPH_CAP_FILE_WR) 1990 if (got & CEPH_CAP_FILE_WR)
1991 ci->i_wr_ref++; 1991 ci->i_wr_ref++;
1992 if (got & CEPH_CAP_FILE_BUFFER) { 1992 if (got & CEPH_CAP_FILE_BUFFER) {
1993 if (ci->i_wrbuffer_ref == 0) 1993 if (ci->i_wb_ref == 0)
1994 ihold(&ci->vfs_inode); 1994 ihold(&ci->vfs_inode);
1995 ci->i_wrbuffer_ref++; 1995 ci->i_wb_ref++;
1996 dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", 1996 dout("__take_cap_refs %p wb %d -> %d (?)\n",
1997 &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); 1997 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
1998 } 1998 }
1999} 1999}
2000 2000
@@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2169 if (--ci->i_rdcache_ref == 0) 2169 if (--ci->i_rdcache_ref == 0)
2170 last++; 2170 last++;
2171 if (had & CEPH_CAP_FILE_BUFFER) { 2171 if (had & CEPH_CAP_FILE_BUFFER) {
2172 if (--ci->i_wrbuffer_ref == 0) { 2172 if (--ci->i_wb_ref == 0) {
2173 last++; 2173 last++;
2174 put++; 2174 put++;
2175 } 2175 }
2176 dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", 2176 dout("put_cap_refs %p wb %d -> %d (?)\n",
2177 inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); 2177 inode, ci->i_wb_ref+1, ci->i_wb_ref);
2178 } 2178 }
2179 if (had & CEPH_CAP_FILE_WR) 2179 if (had & CEPH_CAP_FILE_WR)
2180 if (--ci->i_wr_ref == 0) { 2180 if (--ci->i_wr_ref == 0) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 03d6dafda61f..70b6a4839c38 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
355 ci->i_rd_ref = 0; 355 ci->i_rd_ref = 0;
356 ci->i_rdcache_ref = 0; 356 ci->i_rdcache_ref = 0;
357 ci->i_wr_ref = 0; 357 ci->i_wr_ref = 0;
358 ci->i_wb_ref = 0;
358 ci->i_wrbuffer_ref = 0; 359 ci->i_wrbuffer_ref = 0;
359 ci->i_wrbuffer_ref_head = 0; 360 ci->i_wrbuffer_ref_head = 0;
360 ci->i_shared_gen = 0; 361 ci->i_shared_gen = 0;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f60b07b0feb0..d0fae4ce9ba5 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con)
3304{ 3304{
3305 struct ceph_mds_session *s = con->private; 3305 struct ceph_mds_session *s = con->private;
3306 3306
3307 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3307 ceph_put_mds_session(s); 3308 ceph_put_mds_session(s);
3308 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
3309} 3309}
3310 3310
3311/* 3311/*
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e86ec1155f8f..24067d68a554 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
206 up_write(&mdsc->snap_rwsem); 206 up_write(&mdsc->snap_rwsem);
207 } else { 207 } else {
208 spin_lock(&mdsc->snap_empty_lock); 208 spin_lock(&mdsc->snap_empty_lock);
209 list_add(&mdsc->snap_empty, &realm->empty_item); 209 list_add(&realm->empty_item, &mdsc->snap_empty);
210 spin_unlock(&mdsc->snap_empty_lock); 210 spin_unlock(&mdsc->snap_empty_lock);
211 } 211 }
212} 212}
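
The snap.c fix is a plain list_add() argument swap: the entry being inserted comes first and the list head second, so the old call spliced the global snap_empty head into the realm instead of queueing the realm. A self-contained sketch with a trimmed-down doubly linked list so the call order is easy to see:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

struct realm { int id; struct list_head empty_item; };

int main(void)
{
        struct list_head snap_empty = LIST_HEAD_INIT(snap_empty);
        struct realm r = { .id = 1 };

        /* correct order: new entry first, list head second */
        list_add(&r.empty_item, &snap_empty);
        printf("snap_empty is %sempty\n",
               snap_empty.next == &snap_empty ? "" : "not ");
        return 0;
}
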
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b1f1b8bb1271..f5cabefa98dc 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -293,7 +293,7 @@ struct ceph_inode_info {
293 293
294 /* held references to caps */ 294 /* held references to caps */
295 int i_pin_ref; 295 int i_pin_ref;
296 int i_rd_ref, i_rdcache_ref, i_wr_ref; 296 int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
297 int i_wrbuffer_ref, i_wrbuffer_ref_head; 297 int i_wrbuffer_ref, i_wrbuffer_ref_head;
298 u32 i_shared_gen; /* increment each time we get FILE_SHARED */ 298 u32 i_shared_gen; /* increment each time we get FILE_SHARED */
299 u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ 299 u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 23d43cde4306..1b2e180b018d 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
277 277
278 for (i = 0, j = 0; i < srclen; j++) { 278 for (i = 0, j = 0; i < srclen; j++) {
279 src_char = source[i]; 279 src_char = source[i];
280 charlen = 1;
280 switch (src_char) { 281 switch (src_char) {
281 case 0: 282 case 0:
282 put_unaligned(0, &target[j]); 283 put_unaligned(0, &target[j]);
@@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
316 dst_char = cpu_to_le16(0x003f); 317 dst_char = cpu_to_le16(0x003f);
317 charlen = 1; 318 charlen = 1;
318 } 319 }
319 /*
320 * character may take more than one byte in the source
321 * string, but will take exactly two bytes in the
322 * target string
323 */
324 i += charlen;
325 continue;
326 } 320 }
321 /*
322 * character may take more than one byte in the source string,
323 * but will take exactly two bytes in the target string
324 */
325 i += charlen;
327 put_unaligned(dst_char, &target[j]); 326 put_unaligned(dst_char, &target[j]);
328 i++; /* move to next char in source string */
329 } 327 }
330 328
331ctoUCS_out: 329ctoUCS_out:
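
The cifsConvertToUCS() hunk initializes charlen to 1 at the top of every iteration and moves the `i += charlen` advance out of the multi-byte branch, so each pass through the switch advances the source index exactly once. A rough standalone sketch of that loop shape (the real character conversion is elided and the names are illustrative):

#include <stddef.h>

static size_t convert(unsigned short *dst, const char *src, size_t srclen)
{
        size_t i, j;

        for (i = 0, j = 0; i < srclen; j++) {
                size_t charlen = 1;             /* default: one source byte */
                unsigned char c = (unsigned char)src[i];

                if (c < 0x80)
                        dst[j] = c;             /* ASCII maps directly */
                else
                        dst[j] = 0x003f;        /* '?' placeholder; a real
                                                   converter would also set
                                                   charlen for the sequence */
                i += charlen;                   /* single advance point */
        }
        return j;                               /* output units written */
}
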
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4bc862a80efa..277262a8e82f 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -274,7 +274,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
274 char *data_area_of_target; 274 char *data_area_of_target;
275 char *data_area_of_buf2; 275 char *data_area_of_buf2;
276 int remaining; 276 int remaining;
277 __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; 277 unsigned int byte_count, total_in_buf;
278 __u16 total_data_size, total_in_buf2;
278 279
279 total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); 280 total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
280 281
@@ -287,7 +288,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
287 remaining = total_data_size - total_in_buf; 288 remaining = total_data_size - total_in_buf;
288 289
289 if (remaining < 0) 290 if (remaining < 0)
290 return -EINVAL; 291 return -EPROTO;
291 292
292 if (remaining == 0) /* nothing to do, ignore */ 293 if (remaining == 0) /* nothing to do, ignore */
293 return 0; 294 return 0;
@@ -308,20 +309,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
308 data_area_of_target += total_in_buf; 309 data_area_of_target += total_in_buf;
309 310
310 /* copy second buffer into end of first buffer */ 311 /* copy second buffer into end of first buffer */
311 memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
312 total_in_buf += total_in_buf2; 312 total_in_buf += total_in_buf2;
313 /* is the result too big for the field? */
314 if (total_in_buf > USHRT_MAX)
315 return -EPROTO;
313 put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); 316 put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
317
318 /* fix up the BCC */
314 byte_count = get_bcc_le(pTargetSMB); 319 byte_count = get_bcc_le(pTargetSMB);
315 byte_count += total_in_buf2; 320 byte_count += total_in_buf2;
321 /* is the result too big for the field? */
322 if (byte_count > USHRT_MAX)
323 return -EPROTO;
316 put_bcc_le(byte_count, pTargetSMB); 324 put_bcc_le(byte_count, pTargetSMB);
317 325
318 byte_count = pTargetSMB->smb_buf_length; 326 byte_count = pTargetSMB->smb_buf_length;
319 byte_count += total_in_buf2; 327 byte_count += total_in_buf2;
320 328 /* don't allow buffer to overflow */
321 /* BB also add check that we are not beyond maximum buffer size */ 329 if (byte_count > CIFSMaxBufSize)
322 330 return -ENOBUFS;
323 pTargetSMB->smb_buf_length = byte_count; 331 pTargetSMB->smb_buf_length = byte_count;
324 332
333 memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
334
325 if (remaining == total_in_buf2) { 335 if (remaining == total_in_buf2) {
326 cFYI(1, "found the last secondary response"); 336 cFYI(1, "found the last secondary response");
327 return 0; /* we are done */ 337 return 0; /* we are done */
@@ -607,59 +617,63 @@ incomplete_rcv:
607 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 617 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
608 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 618 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
609 619
610 if ((mid_entry->mid == smb_buffer->Mid) && 620 if (mid_entry->mid != smb_buffer->Mid ||
611 (mid_entry->midState == MID_REQUEST_SUBMITTED) && 621 mid_entry->midState != MID_REQUEST_SUBMITTED ||
612 (mid_entry->command == smb_buffer->Command)) { 622 mid_entry->command != smb_buffer->Command) {
613 if (length == 0 && 623 mid_entry = NULL;
614 check2ndT2(smb_buffer, server->maxBuf) > 0) { 624 continue;
615 /* We have a multipart transact2 resp */ 625 }
616 isMultiRsp = true; 626
617 if (mid_entry->resp_buf) { 627 if (length == 0 &&
618 /* merge response - fix up 1st*/ 628 check2ndT2(smb_buffer, server->maxBuf) > 0) {
619 if (coalesce_t2(smb_buffer, 629 /* We have a multipart transact2 resp */
620 mid_entry->resp_buf)) { 630 isMultiRsp = true;
621 mid_entry->multiRsp = 631 if (mid_entry->resp_buf) {
622 true; 632 /* merge response - fix up 1st*/
623 break; 633 length = coalesce_t2(smb_buffer,
624 } else { 634 mid_entry->resp_buf);
625 /* all parts received */ 635 if (length > 0) {
626 mid_entry->multiEnd = 636 length = 0;
627 true; 637 mid_entry->multiRsp = true;
628 goto multi_t2_fnd; 638 break;
629 }
630 } else { 639 } else {
631 if (!isLargeBuf) { 640 /* all parts received or
632 cERROR(1, "1st trans2 resp needs bigbuf"); 641 * packet is malformed
633 /* BB maybe we can fix this up, switch 642 */
634 to already allocated large buffer? */ 643 mid_entry->multiEnd = true;
635 } else { 644 goto multi_t2_fnd;
636 /* Have first buffer */ 645 }
637 mid_entry->resp_buf = 646 } else {
638 smb_buffer; 647 if (!isLargeBuf) {
639 mid_entry->largeBuf = 648 /*
640 true; 649 * FIXME: switch to already
641 bigbuf = NULL; 650 * allocated largebuf?
642 } 651 */
652 cERROR(1, "1st trans2 resp "
653 "needs bigbuf");
654 } else {
655 /* Have first buffer */
656 mid_entry->resp_buf =
657 smb_buffer;
658 mid_entry->largeBuf = true;
659 bigbuf = NULL;
643 } 660 }
644 break;
645 } 661 }
646 mid_entry->resp_buf = smb_buffer; 662 break;
647 mid_entry->largeBuf = isLargeBuf; 663 }
664 mid_entry->resp_buf = smb_buffer;
665 mid_entry->largeBuf = isLargeBuf;
648multi_t2_fnd: 666multi_t2_fnd:
649 if (length == 0) 667 if (length == 0)
650 mid_entry->midState = 668 mid_entry->midState = MID_RESPONSE_RECEIVED;
651 MID_RESPONSE_RECEIVED; 669 else
652 else 670 mid_entry->midState = MID_RESPONSE_MALFORMED;
653 mid_entry->midState =
654 MID_RESPONSE_MALFORMED;
655#ifdef CONFIG_CIFS_STATS2 671#ifdef CONFIG_CIFS_STATS2
656 mid_entry->when_received = jiffies; 672 mid_entry->when_received = jiffies;
657#endif 673#endif
658 list_del_init(&mid_entry->qhead); 674 list_del_init(&mid_entry->qhead);
659 mid_entry->callback(mid_entry); 675 mid_entry->callback(mid_entry);
660 break; 676 break;
661 }
662 mid_entry = NULL;
663 } 677 }
664 spin_unlock(&GlobalMid_Lock); 678 spin_unlock(&GlobalMid_Lock);
665 679
@@ -2659,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
2659 0 /* not legacy */, cifs_sb->local_nls, 2673 0 /* not legacy */, cifs_sb->local_nls,
2660 cifs_sb->mnt_cifs_flags & 2674 cifs_sb->mnt_cifs_flags &
2661 CIFS_MOUNT_MAP_SPECIAL_CHR); 2675 CIFS_MOUNT_MAP_SPECIAL_CHR);
2676
2677 if (rc == -EOPNOTSUPP || rc == -EINVAL)
2678 rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
2679 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
2680 CIFS_MOUNT_MAP_SPECIAL_CHR);
2662 kfree(pfile_info); 2681 kfree(pfile_info);
2663 return rc; 2682 return rc;
2664} 2683}
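
coalesce_t2() now accumulates the running byte counts in an unsigned int, rejects anything that no longer fits the 16-bit DataCount/BCC fields (-EPROTO) or the maximum SMB buffer (-ENOBUFS), and only copies the second buffer once every check has passed. A standalone sketch of that widen-check-then-copy pattern (the field names and buffer limit below are illustrative):

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>

#define MAX_BUF_SIZE 16384u             /* stand-in for CIFSMaxBufSize */

static int merge_chunk(uint8_t *target, uint16_t *total_in_buf,
                       const uint8_t *src, uint16_t src_len)
{
        unsigned int total = *total_in_buf;     /* widen before adding */

        total += src_len;
        if (total > USHRT_MAX)                  /* would overflow the field */
                return -EPROTO;
        if (total > MAX_BUF_SIZE)               /* would overflow the buffer */
                return -ENOBUFS;

        memcpy(target + *total_in_buf, src, src_len);
        *total_in_buf = (uint16_t)total;
        return 0;
}
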
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index f6728eb6f4b9..645114ad0a10 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
276} 276}
277 277
278static void 278static void
279decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, 279decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
280 const struct nls_table *nls_cp) 280 const struct nls_table *nls_cp)
281{ 281{
282 int len; 282 int len;
@@ -284,19 +284,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
284 284
285 cFYI(1, "bleft %d", bleft); 285 cFYI(1, "bleft %d", bleft);
286 286
287 /*
288 * Windows servers do not always double null terminate their final
289 * Unicode string. Check to see if there are an uneven number of bytes
290 * left. If so, then add an extra NULL pad byte to the end of the
291 * response.
292 *
293 * See section 2.7.2 in "Implementing CIFS" for details
294 */
295 if (bleft % 2) {
296 data[bleft] = 0;
297 ++bleft;
298 }
299
300 kfree(ses->serverOS); 287 kfree(ses->serverOS);
301 ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); 288 ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
302 cFYI(1, "serverOS=%s", ses->serverOS); 289 cFYI(1, "serverOS=%s", ses->serverOS);
@@ -929,7 +916,9 @@ ssetup_ntlmssp_authenticate:
929 } 916 }
930 917
931 /* BB check if Unicode and decode strings */ 918 /* BB check if Unicode and decode strings */
932 if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 919 if (bytes_remaining == 0) {
920 /* no string area to decode, do nothing */
921 } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
933 /* unicode string area must be word-aligned */ 922 /* unicode string area must be word-aligned */
934 if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { 923 if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
935 ++bcc_ptr; 924 ++bcc_ptr;
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 3313dd19f543..9a37a9b6de3a 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock);
53static void configfs_d_iput(struct dentry * dentry, 53static void configfs_d_iput(struct dentry * dentry,
54 struct inode * inode) 54 struct inode * inode)
55{ 55{
56 struct configfs_dirent * sd = dentry->d_fsdata; 56 struct configfs_dirent *sd = dentry->d_fsdata;
57 57
58 if (sd) { 58 if (sd) {
59 BUG_ON(sd->s_dentry != dentry); 59 BUG_ON(sd->s_dentry != dentry);
60 /* Coordinate with configfs_readdir */
61 spin_lock(&configfs_dirent_lock);
60 sd->s_dentry = NULL; 62 sd->s_dentry = NULL;
63 spin_unlock(&configfs_dirent_lock);
61 configfs_put(sd); 64 configfs_put(sd);
62 } 65 }
63 iput(inode); 66 iput(inode);
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group,
689 sd = child->d_fsdata; 692 sd = child->d_fsdata;
690 sd->s_type |= CONFIGFS_USET_DEFAULT; 693 sd->s_type |= CONFIGFS_USET_DEFAULT;
691 } else { 694 } else {
692 d_delete(child); 695 BUG_ON(child->d_inode);
696 d_drop(child);
693 dput(child); 697 dput(child);
694 } 698 }
695 } 699 }
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1545 struct configfs_dirent * parent_sd = dentry->d_fsdata; 1549 struct configfs_dirent * parent_sd = dentry->d_fsdata;
1546 struct configfs_dirent *cursor = filp->private_data; 1550 struct configfs_dirent *cursor = filp->private_data;
1547 struct list_head *p, *q = &cursor->s_sibling; 1551 struct list_head *p, *q = &cursor->s_sibling;
1548 ino_t ino; 1552 ino_t ino = 0;
1549 int i = filp->f_pos; 1553 int i = filp->f_pos;
1550 1554
1551 switch (i) { 1555 switch (i) {
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1573 struct configfs_dirent *next; 1577 struct configfs_dirent *next;
1574 const char * name; 1578 const char * name;
1575 int len; 1579 int len;
1580 struct inode *inode = NULL;
1576 1581
1577 next = list_entry(p, struct configfs_dirent, 1582 next = list_entry(p, struct configfs_dirent,
1578 s_sibling); 1583 s_sibling);
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1581 1586
1582 name = configfs_get_name(next); 1587 name = configfs_get_name(next);
1583 len = strlen(name); 1588 len = strlen(name);
1584 if (next->s_dentry) 1589
1585 ino = next->s_dentry->d_inode->i_ino; 1590 /*
1586 else 1591 * We'll have a dentry and an inode for
1592 * PINNED items and for open attribute
1593 * files. We lock here to prevent a race
1594 * with configfs_d_iput() clearing
1595 * s_dentry before calling iput().
1596 *
1597 * Why do we go to the trouble? If
1598 * someone has an attribute file open,
1599 * the inode number should match until
1600 * they close it. Beyond that, we don't
1601 * care.
1602 */
1603 spin_lock(&configfs_dirent_lock);
1604 dentry = next->s_dentry;
1605 if (dentry)
1606 inode = dentry->d_inode;
1607 if (inode)
1608 ino = inode->i_ino;
1609 spin_unlock(&configfs_dirent_lock);
1610 if (!inode)
1587 ino = iunique(configfs_sb, 2); 1611 ino = iunique(configfs_sb, 2);
1588 1612
1589 if (filldir(dirent, name, len, filp->f_pos, ino, 1613 if (filldir(dirent, name, len, filp->f_pos, ino,
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1683 err = configfs_attach_group(sd->s_element, &group->cg_item, 1707 err = configfs_attach_group(sd->s_element, &group->cg_item,
1684 dentry); 1708 dentry);
1685 if (err) { 1709 if (err) {
1686 d_delete(dentry); 1710 BUG_ON(dentry->d_inode);
1711 d_drop(dentry);
1687 dput(dentry); 1712 dput(dentry);
1688 } else { 1713 } else {
1689 spin_lock(&configfs_dirent_lock); 1714 spin_lock(&configfs_dirent_lock);
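
The configfs hunks make configfs_d_iput() clear s_dentry under configfs_dirent_lock and have configfs_readdir() sample the dentry and inode under the same lock, falling back to iunique() only when nothing is pinned, so an open attribute file keeps reporting a stable inode number. A userspace pthread sketch of that sample-under-lock pattern (types and names below are illustrative):

#include <pthread.h>

static pthread_mutex_t dirent_lock = PTHREAD_MUTEX_INITIALIZER;

struct inode { unsigned long i_ino; };

struct dir_entry {
        struct inode *inode;    /* cleared elsewhere with dirent_lock held */
};

static unsigned long entry_ino(struct dir_entry *e, unsigned long fallback)
{
        struct inode *inode;
        unsigned long ino = fallback;

        pthread_mutex_lock(&dirent_lock);
        inode = e->inode;       /* may be torn down concurrently */
        if (inode)
                ino = inode->i_ino;
        pthread_mutex_unlock(&dirent_lock);

        return ino;             /* stable while the entry stays pinned */
}
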
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c6ba49bd95b3..b32eb29a4e6f 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
174 if (!inode) 174 if (!inode)
175 return 0; 175 return 0;
176 176
177 if (nd->flags & LOOKUP_RCU) 177 if (nd && (nd->flags & LOOKUP_RCU))
178 return -ECHILD; 178 return -ECHILD;
179 179
180 fc = get_fuse_conn(inode); 180 fc = get_fuse_conn(inode);
diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig
index 0c39dc3ef7d7..56bd15c5bf6c 100644
--- a/fs/hpfs/Kconfig
+++ b/fs/hpfs/Kconfig
@@ -1,7 +1,6 @@
1config HPFS_FS 1config HPFS_FS
2 tristate "OS/2 HPFS file system support" 2 tristate "OS/2 HPFS file system support"
3 depends on BLOCK 3 depends on BLOCK
4 depends on BROKEN || !PREEMPT
5 help 4 help
6 OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS 5 OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
7 is the file system used for organizing files on OS/2 hard disk 6 is the file system used for organizing files on OS/2 hard disk
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index 5503e2c28910..7a5eb2c718c8 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -8,8 +8,6 @@
8 8
9#include "hpfs_fn.h" 9#include "hpfs_fn.h"
10 10
11static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
12
13/* 11/*
14 * Check if a sector is allocated in bitmap 12 * Check if a sector is allocated in bitmap
15 * This is really slow. Turned on only if chk==2 13 * This is really slow. Turned on only if chk==2
@@ -18,9 +16,9 @@ static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
18static int chk_if_allocated(struct super_block *s, secno sec, char *msg) 16static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
19{ 17{
20 struct quad_buffer_head qbh; 18 struct quad_buffer_head qbh;
21 unsigned *bmp; 19 u32 *bmp;
22 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; 20 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
23 if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) { 21 if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
24 hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); 22 hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
25 goto fail1; 23 goto fail1;
26 } 24 }
@@ -28,7 +26,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
28 if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { 26 if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
29 unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; 27 unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
30 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; 28 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
31 if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) { 29 if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
32 hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); 30 hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
33 goto fail1; 31 goto fail1;
34 } 32 }
@@ -75,7 +73,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
75 hpfs_error(s, "Bad allocation size: %d", n); 73 hpfs_error(s, "Bad allocation size: %d", n);
76 return 0; 74 return 0;
77 } 75 }
78 lock_super(s);
79 if (bs != ~0x3fff) { 76 if (bs != ~0x3fff) {
80 if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; 77 if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
81 } else { 78 } else {
@@ -85,10 +82,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
85 ret = bs + nr; 82 ret = bs + nr;
86 goto rt; 83 goto rt;
87 } 84 }
88 /*if (!tstbits(bmp, nr + n, n + forward)) {
89 ret = bs + nr + n;
90 goto rt;
91 }*/
92 q = nr + n; b = 0; 85 q = nr + n; b = 0;
93 while ((a = tstbits(bmp, q, n + forward)) != 0) { 86 while ((a = tstbits(bmp, q, n + forward)) != 0) {
94 q += a; 87 q += a;
@@ -105,14 +98,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
105 goto rt; 98 goto rt;
106 } 99 }
107 nr >>= 5; 100 nr >>= 5;
108 /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/ 101 /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
109 i = nr; 102 i = nr;
110 do { 103 do {
111 if (!bmp[i]) goto cont; 104 if (!le32_to_cpu(bmp[i])) goto cont;
112 if (n + forward >= 0x3f && bmp[i] != -1) goto cont; 105 if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
113 q = i<<5; 106 q = i<<5;
114 if (i > 0) { 107 if (i > 0) {
115 unsigned k = bmp[i-1]; 108 unsigned k = le32_to_cpu(bmp[i-1]);
116 while (k & 0x80000000) { 109 while (k & 0x80000000) {
117 q--; k <<= 1; 110 q--; k <<= 1;
118 } 111 }
@@ -132,18 +125,17 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
132 } while (i != nr); 125 } while (i != nr);
133 rt: 126 rt:
134 if (ret) { 127 if (ret) {
135 if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { 128 if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
136 hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); 129 hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
137 ret = 0; 130 ret = 0;
138 goto b; 131 goto b;
139 } 132 }
140 bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f)); 133 bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
141 hpfs_mark_4buffers_dirty(&qbh); 134 hpfs_mark_4buffers_dirty(&qbh);
142 } 135 }
143 b: 136 b:
144 hpfs_brelse4(&qbh); 137 hpfs_brelse4(&qbh);
145 uls: 138 uls:
146 unlock_super(s);
147 return ret; 139 return ret;
148} 140}
149 141
@@ -155,7 +147,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
155 * sectors 147 * sectors
156 */ 148 */
157 149
158secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock) 150secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
159{ 151{
160 secno sec; 152 secno sec;
161 int i; 153 int i;
@@ -167,7 +159,6 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
167 forward = -forward; 159 forward = -forward;
168 f_p = 1; 160 f_p = 1;
169 } 161 }
170 if (lock) hpfs_lock_creation(s);
171 n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; 162 n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
172 if (near && near < sbi->sb_fs_size) { 163 if (near && near < sbi->sb_fs_size) {
173 if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; 164 if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
@@ -214,18 +205,17 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
214 ret: 205 ret:
215 if (sec && f_p) { 206 if (sec && f_p) {
216 for (i = 0; i < forward; i++) { 207 for (i = 0; i < forward; i++) {
217 if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) { 208 if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
218 hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); 209 hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
219 sec = 0; 210 sec = 0;
220 break; 211 break;
221 } 212 }
222 } 213 }
223 } 214 }
224 if (lock) hpfs_unlock_creation(s);
225 return sec; 215 return sec;
226} 216}
227 217
228static secno alloc_in_dirband(struct super_block *s, secno near, int lock) 218static secno alloc_in_dirband(struct super_block *s, secno near)
229{ 219{
230 unsigned nr = near; 220 unsigned nr = near;
231 secno sec; 221 secno sec;
@@ -236,49 +226,35 @@ static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
236 nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; 226 nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
237 nr -= sbi->sb_dirband_start; 227 nr -= sbi->sb_dirband_start;
238 nr >>= 2; 228 nr >>= 2;
239 if (lock) hpfs_lock_creation(s);
240 sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); 229 sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
241 if (lock) hpfs_unlock_creation(s);
242 if (!sec) return 0; 230 if (!sec) return 0;
243 return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; 231 return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
244} 232}
245 233
246/* Alloc sector if it's free */ 234/* Alloc sector if it's free */
247 235
248static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec) 236int hpfs_alloc_if_possible(struct super_block *s, secno sec)
249{ 237{
250 struct quad_buffer_head qbh; 238 struct quad_buffer_head qbh;
251 unsigned *bmp; 239 u32 *bmp;
252 lock_super(s);
253 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; 240 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
254 if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) { 241 if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
255 bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f)); 242 bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
256 hpfs_mark_4buffers_dirty(&qbh); 243 hpfs_mark_4buffers_dirty(&qbh);
257 hpfs_brelse4(&qbh); 244 hpfs_brelse4(&qbh);
258 unlock_super(s);
259 return 1; 245 return 1;
260 } 246 }
261 hpfs_brelse4(&qbh); 247 hpfs_brelse4(&qbh);
262 end: 248 end:
263 unlock_super(s);
264 return 0; 249 return 0;
265} 250}
266 251
267int hpfs_alloc_if_possible(struct super_block *s, secno sec)
268{
269 int r;
270 hpfs_lock_creation(s);
271 r = hpfs_alloc_if_possible_nolock(s, sec);
272 hpfs_unlock_creation(s);
273 return r;
274}
275
276/* Free sectors in bitmaps */ 252/* Free sectors in bitmaps */
277 253
278void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) 254void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
279{ 255{
280 struct quad_buffer_head qbh; 256 struct quad_buffer_head qbh;
281 unsigned *bmp; 257 u32 *bmp;
282 struct hpfs_sb_info *sbi = hpfs_sb(s); 258 struct hpfs_sb_info *sbi = hpfs_sb(s);
283 /*printk("2 - ");*/ 259 /*printk("2 - ");*/
284 if (!n) return; 260 if (!n) return;
@@ -286,26 +262,22 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
286 hpfs_error(s, "Trying to free reserved sector %08x", sec); 262 hpfs_error(s, "Trying to free reserved sector %08x", sec);
287 return; 263 return;
288 } 264 }
289 lock_super(s);
290 sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n; 265 sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
291 if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; 266 if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
292 new_map: 267 new_map:
293 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { 268 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
294 unlock_super(s);
295 return; 269 return;
296 } 270 }
297 new_tst: 271 new_tst:
298 if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) { 272 if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
299 hpfs_error(s, "sector %08x not allocated", sec); 273 hpfs_error(s, "sector %08x not allocated", sec);
300 hpfs_brelse4(&qbh); 274 hpfs_brelse4(&qbh);
301 unlock_super(s);
302 return; 275 return;
303 } 276 }
304 bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f); 277 bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
305 if (!--n) { 278 if (!--n) {
306 hpfs_mark_4buffers_dirty(&qbh); 279 hpfs_mark_4buffers_dirty(&qbh);
307 hpfs_brelse4(&qbh); 280 hpfs_brelse4(&qbh);
308 unlock_super(s);
309 return; 281 return;
310 } 282 }
311 if (!(++sec & 0x3fff)) { 283 if (!(++sec & 0x3fff)) {
@@ -327,13 +299,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
327 int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; 299 int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
328 int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; 300 int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
329 int i, j; 301 int i, j;
330 unsigned *bmp; 302 u32 *bmp;
331 struct quad_buffer_head qbh; 303 struct quad_buffer_head qbh;
332 if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 304 if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
333 for (j = 0; j < 512; j++) { 305 for (j = 0; j < 512; j++) {
334 unsigned k; 306 unsigned k;
335 if (!bmp[j]) continue; 307 if (!le32_to_cpu(bmp[j])) continue;
336 for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) { 308 for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
337 hpfs_brelse4(&qbh); 309 hpfs_brelse4(&qbh);
338 return 0; 310 return 0;
339 } 311 }
@@ -352,10 +324,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
352 chk_bmp: 324 chk_bmp:
353 if (bmp) { 325 if (bmp) {
354 for (j = 0; j < 512; j++) { 326 for (j = 0; j < 512; j++) {
355 unsigned k; 327 u32 k;
356 if (!bmp[j]) continue; 328 if (!le32_to_cpu(bmp[j])) continue;
357 for (k = 0xf; k; k <<= 4) 329 for (k = 0xf; k; k <<= 4)
358 if ((bmp[j] & k) == k) { 330 if ((le32_to_cpu(bmp[j]) & k) == k) {
359 if (!--n) { 331 if (!--n) {
360 hpfs_brelse4(&qbh); 332 hpfs_brelse4(&qbh);
361 return 0; 333 return 0;
@@ -379,44 +351,40 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
379 hpfs_free_sectors(s, dno, 4); 351 hpfs_free_sectors(s, dno, 4);
380 } else { 352 } else {
381 struct quad_buffer_head qbh; 353 struct quad_buffer_head qbh;
382 unsigned *bmp; 354 u32 *bmp;
383 unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; 355 unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
384 lock_super(s);
385 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 356 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
386 unlock_super(s);
387 return; 357 return;
388 } 358 }
389 bmp[ssec >> 5] |= 1 << (ssec & 0x1f); 359 bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
390 hpfs_mark_4buffers_dirty(&qbh); 360 hpfs_mark_4buffers_dirty(&qbh);
391 hpfs_brelse4(&qbh); 361 hpfs_brelse4(&qbh);
392 unlock_super(s);
393 } 362 }
394} 363}
395 364
396struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, 365struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
397 dnode_secno *dno, struct quad_buffer_head *qbh, 366 dnode_secno *dno, struct quad_buffer_head *qbh)
398 int lock)
399{ 367{
400 struct dnode *d; 368 struct dnode *d;
401 if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) { 369 if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
402 if (!(*dno = alloc_in_dirband(s, near, lock))) 370 if (!(*dno = alloc_in_dirband(s, near)))
403 if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL; 371 if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
404 } else { 372 } else {
405 if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) 373 if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
406 if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL; 374 if (!(*dno = alloc_in_dirband(s, near))) return NULL;
407 } 375 }
408 if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { 376 if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
409 hpfs_free_dnode(s, *dno); 377 hpfs_free_dnode(s, *dno);
410 return NULL; 378 return NULL;
411 } 379 }
412 memset(d, 0, 2048); 380 memset(d, 0, 2048);
413 d->magic = DNODE_MAGIC; 381 d->magic = cpu_to_le32(DNODE_MAGIC);
414 d->first_free = 52; 382 d->first_free = cpu_to_le32(52);
415 d->dirent[0] = 32; 383 d->dirent[0] = 32;
416 d->dirent[2] = 8; 384 d->dirent[2] = 8;
417 d->dirent[30] = 1; 385 d->dirent[30] = 1;
418 d->dirent[31] = 255; 386 d->dirent[31] = 255;
419 d->self = *dno; 387 d->self = cpu_to_le32(*dno);
420 return d; 388 return d;
421} 389}
422 390
@@ -424,16 +392,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f
424 struct buffer_head **bh) 392 struct buffer_head **bh)
425{ 393{
426 struct fnode *f; 394 struct fnode *f;
427 if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL; 395 if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
428 if (!(f = hpfs_get_sector(s, *fno, bh))) { 396 if (!(f = hpfs_get_sector(s, *fno, bh))) {
429 hpfs_free_sectors(s, *fno, 1); 397 hpfs_free_sectors(s, *fno, 1);
430 return NULL; 398 return NULL;
431 } 399 }
432 memset(f, 0, 512); 400 memset(f, 0, 512);
433 f->magic = FNODE_MAGIC; 401 f->magic = cpu_to_le32(FNODE_MAGIC);
434 f->ea_offs = 0xc4; 402 f->ea_offs = cpu_to_le16(0xc4);
435 f->btree.n_free_nodes = 8; 403 f->btree.n_free_nodes = 8;
436 f->btree.first_free = 8; 404 f->btree.first_free = cpu_to_le16(8);
437 return f; 405 return f;
438} 406}
439 407
@@ -441,16 +409,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
441 struct buffer_head **bh) 409 struct buffer_head **bh)
442{ 410{
443 struct anode *a; 411 struct anode *a;
444 if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL; 412 if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
445 if (!(a = hpfs_get_sector(s, *ano, bh))) { 413 if (!(a = hpfs_get_sector(s, *ano, bh))) {
446 hpfs_free_sectors(s, *ano, 1); 414 hpfs_free_sectors(s, *ano, 1);
447 return NULL; 415 return NULL;
448 } 416 }
449 memset(a, 0, 512); 417 memset(a, 0, 512);
450 a->magic = ANODE_MAGIC; 418 a->magic = cpu_to_le32(ANODE_MAGIC);
451 a->self = *ano; 419 a->self = cpu_to_le32(*ano);
452 a->btree.n_free_nodes = 40; 420 a->btree.n_free_nodes = 40;
453 a->btree.n_used_nodes = 0; 421 a->btree.n_used_nodes = 0;
454 a->btree.first_free = 8; 422 a->btree.first_free = cpu_to_le16(8);
455 return a; 423 return a;
456} 424}
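
The hpfs/alloc.c hunks treat the bitmap words as explicitly little-endian on disk: every read goes through le32_to_cpu() before bits are tested and every write back through cpu_to_le32(), while the old lock_super()/creation-lock dance is dropped. A standalone sketch of that conversion discipline, with minimal stand-ins for the kernel helpers (GCC/Clang byte-swap builtin assumed); in the HPFS bitmaps a set bit means the sector is free:

#include <stdint.h>

static inline uint32_t le32_to_cpu(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
#else
        return v;
#endif
}
#define cpu_to_le32 le32_to_cpu         /* same swap in both directions */

/* is sector 'sec' marked free in a little-endian on-disk bitmap? */
static int sector_is_free(const uint32_t *bmp, unsigned int sec)
{
        return (le32_to_cpu(bmp[sec >> 5]) >> (sec & 0x1f)) & 1;
}

/* claim sector 'sec': clear its bit while keeping the word little-endian */
static void claim_sector(uint32_t *bmp, unsigned int sec)
{
        bmp[sec >> 5] &= cpu_to_le32(~(1u << (sec & 0x1f)));
}
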
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 6a2f04bf3df0..08b503e8ed29 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
22 if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; 22 if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
23 if (btree->internal) { 23 if (btree->internal) {
24 for (i = 0; i < btree->n_used_nodes; i++) 24 for (i = 0; i < btree->n_used_nodes; i++)
25 if (btree->u.internal[i].file_secno > sec) { 25 if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
26 a = btree->u.internal[i].down; 26 a = le32_to_cpu(btree->u.internal[i].down);
27 brelse(bh); 27 brelse(bh);
28 if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; 28 if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
29 btree = &anode->btree; 29 btree = &anode->btree;
@@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
34 return -1; 34 return -1;
35 } 35 }
36 for (i = 0; i < btree->n_used_nodes; i++) 36 for (i = 0; i < btree->n_used_nodes; i++)
37 if (btree->u.external[i].file_secno <= sec && 37 if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
38 btree->u.external[i].file_secno + btree->u.external[i].length > sec) { 38 le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
39 a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno; 39 a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
40 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { 40 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
41 brelse(bh); 41 brelse(bh);
42 return -1; 42 return -1;
43 } 43 }
44 if (inode) { 44 if (inode) {
45 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); 45 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
46 hpfs_inode->i_file_sec = btree->u.external[i].file_secno; 46 hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
47 hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno; 47 hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
48 hpfs_inode->i_n_secs = btree->u.external[i].length; 48 hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
49 } 49 }
50 brelse(bh); 50 brelse(bh);
51 return a; 51 return a;
@@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
83 return -1; 83 return -1;
84 } 84 }
85 if (btree->internal) { 85 if (btree->internal) {
86 a = btree->u.internal[n].down; 86 a = le32_to_cpu(btree->u.internal[n].down);
87 btree->u.internal[n].file_secno = -1; 87 btree->u.internal[n].file_secno = cpu_to_le32(-1);
88 mark_buffer_dirty(bh); 88 mark_buffer_dirty(bh);
89 brelse(bh); 89 brelse(bh);
90 if (hpfs_sb(s)->sb_chk) 90 if (hpfs_sb(s)->sb_chk)
@@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
94 goto go_down; 94 goto go_down;
95 } 95 }
96 if (n >= 0) { 96 if (n >= 0) {
97 if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) { 97 if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
98 hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", 98 hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
99 btree->u.external[n].file_secno + btree->u.external[n].length, fsecno, 99 le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
100 fnod?'f':'a', node); 100 fnod?'f':'a', node);
101 brelse(bh); 101 brelse(bh);
102 return -1; 102 return -1;
103 } 103 }
104 if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) { 104 if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
105 btree->u.external[n].length++; 105 btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
106 mark_buffer_dirty(bh); 106 mark_buffer_dirty(bh);
107 brelse(bh); 107 brelse(bh);
108 return se; 108 return se;
@@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
115 } 115 }
116 se = !fnod ? node : (node + 16384) & ~16383; 116 se = !fnod ? node : (node + 16384) & ~16383;
117 } 117 }
118 if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) { 118 if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
119 brelse(bh); 119 brelse(bh);
120 return -1; 120 return -1;
121 } 121 }
122 fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length; 122 fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
123 if (!btree->n_free_nodes) { 123 if (!btree->n_free_nodes) {
124 up = a != node ? anode->up : -1; 124 up = a != node ? le32_to_cpu(anode->up) : -1;
125 if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { 125 if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
126 brelse(bh); 126 brelse(bh);
127 hpfs_free_sectors(s, se, 1); 127 hpfs_free_sectors(s, se, 1);
128 return -1; 128 return -1;
129 } 129 }
130 if (a == node && fnod) { 130 if (a == node && fnod) {
131 anode->up = node; 131 anode->up = cpu_to_le32(node);
132 anode->btree.fnode_parent = 1; 132 anode->btree.fnode_parent = 1;
133 anode->btree.n_used_nodes = btree->n_used_nodes; 133 anode->btree.n_used_nodes = btree->n_used_nodes;
134 anode->btree.first_free = btree->first_free; 134 anode->btree.first_free = btree->first_free;
@@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
137 btree->internal = 1; 137 btree->internal = 1;
138 btree->n_free_nodes = 11; 138 btree->n_free_nodes = 11;
139 btree->n_used_nodes = 1; 139 btree->n_used_nodes = 1;
140 btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree; 140 btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
141 btree->u.internal[0].file_secno = -1; 141 btree->u.internal[0].file_secno = cpu_to_le32(-1);
142 btree->u.internal[0].down = na; 142 btree->u.internal[0].down = cpu_to_le32(na);
143 mark_buffer_dirty(bh); 143 mark_buffer_dirty(bh);
144 } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { 144 } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
145 brelse(bh); 145 brelse(bh);
@@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
153 btree = &anode->btree; 153 btree = &anode->btree;
154 } 154 }
155 btree->n_free_nodes--; n = btree->n_used_nodes++; 155 btree->n_free_nodes--; n = btree->n_used_nodes++;
156 btree->first_free += 12; 156 btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
157 btree->u.external[n].disk_secno = se; 157 btree->u.external[n].disk_secno = cpu_to_le32(se);
158 btree->u.external[n].file_secno = fs; 158 btree->u.external[n].file_secno = cpu_to_le32(fs);
159 btree->u.external[n].length = 1; 159 btree->u.external[n].length = cpu_to_le32(1);
160 mark_buffer_dirty(bh); 160 mark_buffer_dirty(bh);
161 brelse(bh); 161 brelse(bh);
162 if ((a == node && fnod) || na == -1) return se; 162 if ((a == node && fnod) || na == -1) return se;
163 c2 = 0; 163 c2 = 0;
164 while (up != -1) { 164 while (up != (anode_secno)-1) {
165 struct anode *new_anode; 165 struct anode *new_anode;
166 if (hpfs_sb(s)->sb_chk) 166 if (hpfs_sb(s)->sb_chk)
167 if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; 167 if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
@@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
174 } 174 }
175 if (btree->n_free_nodes) { 175 if (btree->n_free_nodes) {
176 btree->n_free_nodes--; n = btree->n_used_nodes++; 176 btree->n_free_nodes--; n = btree->n_used_nodes++;
177 btree->first_free += 8; 177 btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
178 btree->u.internal[n].file_secno = -1; 178 btree->u.internal[n].file_secno = cpu_to_le32(-1);
179 btree->u.internal[n].down = na; 179 btree->u.internal[n].down = cpu_to_le32(na);
180 btree->u.internal[n-1].file_secno = fs; 180 btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
181 mark_buffer_dirty(bh); 181 mark_buffer_dirty(bh);
182 brelse(bh); 182 brelse(bh);
183 brelse(bh2); 183 brelse(bh2);
184 hpfs_free_sectors(s, ra, 1); 184 hpfs_free_sectors(s, ra, 1);
185 if ((anode = hpfs_map_anode(s, na, &bh))) { 185 if ((anode = hpfs_map_anode(s, na, &bh))) {
186 anode->up = up; 186 anode->up = cpu_to_le32(up);
187 anode->btree.fnode_parent = up == node && fnod; 187 anode->btree.fnode_parent = up == node && fnod;
188 mark_buffer_dirty(bh); 188 mark_buffer_dirty(bh);
189 brelse(bh); 189 brelse(bh);
190 } 190 }
191 return se; 191 return se;
192 } 192 }
193 up = up != node ? anode->up : -1; 193 up = up != node ? le32_to_cpu(anode->up) : -1;
194 btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1; 194 btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
195 mark_buffer_dirty(bh); 195 mark_buffer_dirty(bh);
196 brelse(bh); 196 brelse(bh);
197 a = na; 197 a = na;
198 if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { 198 if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
199 anode = new_anode; 199 anode = new_anode;
200 /*anode->up = up != -1 ? up : ra;*/ 200 /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
201 anode->btree.internal = 1; 201 anode->btree.internal = 1;
202 anode->btree.n_used_nodes = 1; 202 anode->btree.n_used_nodes = 1;
203 anode->btree.n_free_nodes = 59; 203 anode->btree.n_free_nodes = 59;
204 anode->btree.first_free = 16; 204 anode->btree.first_free = cpu_to_le16(16);
205 anode->btree.u.internal[0].down = a; 205 anode->btree.u.internal[0].down = cpu_to_le32(a);
206 anode->btree.u.internal[0].file_secno = -1; 206 anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
207 mark_buffer_dirty(bh); 207 mark_buffer_dirty(bh);
208 brelse(bh); 208 brelse(bh);
209 if ((anode = hpfs_map_anode(s, a, &bh))) { 209 if ((anode = hpfs_map_anode(s, a, &bh))) {
210 anode->up = na; 210 anode->up = cpu_to_le32(na);
211 mark_buffer_dirty(bh); 211 mark_buffer_dirty(bh);
212 brelse(bh); 212 brelse(bh);
213 } 213 }
214 } else na = a; 214 } else na = a;
215 } 215 }
216 if ((anode = hpfs_map_anode(s, na, &bh))) { 216 if ((anode = hpfs_map_anode(s, na, &bh))) {
217 anode->up = node; 217 anode->up = cpu_to_le32(node);
218 if (fnod) anode->btree.fnode_parent = 1; 218 if (fnod) anode->btree.fnode_parent = 1;
219 mark_buffer_dirty(bh); 219 mark_buffer_dirty(bh);
220 brelse(bh); 220 brelse(bh);
@@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
232 } 232 }
233 btree = &fnode->btree; 233 btree = &fnode->btree;
234 } 234 }
235 ranode->up = node; 235 ranode->up = cpu_to_le32(node);
236 memcpy(&ranode->btree, btree, btree->first_free); 236 memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
237 if (fnod) ranode->btree.fnode_parent = 1; 237 if (fnod) ranode->btree.fnode_parent = 1;
238 ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; 238 ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
239 if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { 239 if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
240 struct anode *unode; 240 struct anode *unode;
241 if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) { 241 if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
242 unode->up = ra; 242 unode->up = cpu_to_le32(ra);
243 unode->btree.fnode_parent = 0; 243 unode->btree.fnode_parent = 0;
244 mark_buffer_dirty(bh1); 244 mark_buffer_dirty(bh1);
245 brelse(bh1); 245 brelse(bh1);
@@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
248 btree->internal = 1; 248 btree->internal = 1;
249 btree->n_free_nodes = fnod ? 10 : 58; 249 btree->n_free_nodes = fnod ? 10 : 58;
250 btree->n_used_nodes = 2; 250 btree->n_used_nodes = 2;
251 btree->first_free = (char *)&btree->u.internal[2] - (char *)btree; 251 btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
252 btree->u.internal[0].file_secno = fs; 252 btree->u.internal[0].file_secno = cpu_to_le32(fs);
253 btree->u.internal[0].down = ra; 253 btree->u.internal[0].down = cpu_to_le32(ra);
254 btree->u.internal[1].file_secno = -1; 254 btree->u.internal[1].file_secno = cpu_to_le32(-1);
255 btree->u.internal[1].down = na; 255 btree->u.internal[1].down = cpu_to_le32(na);
256 mark_buffer_dirty(bh); 256 mark_buffer_dirty(bh);
257 brelse(bh); 257 brelse(bh);
258 mark_buffer_dirty(bh2); 258 mark_buffer_dirty(bh2);
@@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
279 go_down: 279 go_down:
280 d2 = 0; 280 d2 = 0;
281 while (btree1->internal) { 281 while (btree1->internal) {
282 ano = btree1->u.internal[pos].down; 282 ano = le32_to_cpu(btree1->u.internal[pos].down);
283 if (level) brelse(bh); 283 if (level) brelse(bh);
284 if (hpfs_sb(s)->sb_chk) 284 if (hpfs_sb(s)->sb_chk)
285 if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) 285 if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
@@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
290 pos = 0; 290 pos = 0;
291 } 291 }
292 for (i = 0; i < btree1->n_used_nodes; i++) 292 for (i = 0; i < btree1->n_used_nodes; i++)
293 hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length); 293 hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
294 go_up: 294 go_up:
295 if (!level) return; 295 if (!level) return;
296 brelse(bh); 296 brelse(bh);
@@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
298 if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; 298 if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
299 hpfs_free_sectors(s, ano, 1); 299 hpfs_free_sectors(s, ano, 1);
300 oano = ano; 300 oano = ano;
301 ano = anode->up; 301 ano = le32_to_cpu(anode->up);
302 if (--level) { 302 if (--level) {
303 if (!(anode = hpfs_map_anode(s, ano, &bh))) return; 303 if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
304 btree1 = &anode->btree; 304 btree1 = &anode->btree;
305 } else btree1 = btree; 305 } else btree1 = btree;
306 for (i = 0; i < btree1->n_used_nodes; i++) { 306 for (i = 0; i < btree1->n_used_nodes; i++) {
307 if (btree1->u.internal[i].down == oano) { 307 if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
308 if ((pos = i + 1) < btree1->n_used_nodes) 308 if ((pos = i + 1) < btree1->n_used_nodes)
309 goto go_down; 309 goto go_down;
310 else 310 else
@@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
411 if (fno) { 411 if (fno) {
412 btree->n_free_nodes = 8; 412 btree->n_free_nodes = 8;
413 btree->n_used_nodes = 0; 413 btree->n_used_nodes = 0;
414 btree->first_free = 8; 414 btree->first_free = cpu_to_le16(8);
415 btree->internal = 0; 415 btree->internal = 0;
416 mark_buffer_dirty(bh); 416 mark_buffer_dirty(bh);
417 } else hpfs_free_sectors(s, f, 1); 417 } else hpfs_free_sectors(s, f, 1);
@@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
421 while (btree->internal) { 421 while (btree->internal) {
422 nodes = btree->n_used_nodes + btree->n_free_nodes; 422 nodes = btree->n_used_nodes + btree->n_free_nodes;
423 for (i = 0; i < btree->n_used_nodes; i++) 423 for (i = 0; i < btree->n_used_nodes; i++)
424 if (btree->u.internal[i].file_secno >= secs) goto f; 424 if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
425 brelse(bh); 425 brelse(bh);
426 hpfs_error(s, "internal btree %08x doesn't end with -1", node); 426 hpfs_error(s, "internal btree %08x doesn't end with -1", node);
427 return; 427 return;
428 f: 428 f:
429 for (j = i + 1; j < btree->n_used_nodes; j++) 429 for (j = i + 1; j < btree->n_used_nodes; j++)
430 hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0); 430 hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
431 btree->n_used_nodes = i + 1; 431 btree->n_used_nodes = i + 1;
432 btree->n_free_nodes = nodes - btree->n_used_nodes; 432 btree->n_free_nodes = nodes - btree->n_used_nodes;
433 btree->first_free = 8 + 8 * btree->n_used_nodes; 433 btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
434 mark_buffer_dirty(bh); 434 mark_buffer_dirty(bh);
435 if (btree->u.internal[i].file_secno == secs) { 435 if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
436 brelse(bh); 436 brelse(bh);
437 return; 437 return;
438 } 438 }
439 node = btree->u.internal[i].down; 439 node = le32_to_cpu(btree->u.internal[i].down);
440 brelse(bh); 440 brelse(bh);
441 if (hpfs_sb(s)->sb_chk) 441 if (hpfs_sb(s)->sb_chk)
442 if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) 442 if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
@@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
446 } 446 }
447 nodes = btree->n_used_nodes + btree->n_free_nodes; 447 nodes = btree->n_used_nodes + btree->n_free_nodes;
448 for (i = 0; i < btree->n_used_nodes; i++) 448 for (i = 0; i < btree->n_used_nodes; i++)
449 if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff; 449 if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
450 brelse(bh); 450 brelse(bh);
451 return; 451 return;
452 ff: 452 ff:
453 if (secs <= btree->u.external[i].file_secno) { 453 if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
454 hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); 454 hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
455 if (i) i--; 455 if (i) i--;
456 } 456 }
457 else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) { 457 else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
458 hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs - 458 hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
459 btree->u.external[i].file_secno, btree->u.external[i].length 459 le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
460 - secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */ 460 - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
461 btree->u.external[i].length = secs - btree->u.external[i].file_secno; 461 btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
462 } 462 }
463 for (j = i + 1; j < btree->n_used_nodes; j++) 463 for (j = i + 1; j < btree->n_used_nodes; j++)
464 hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length); 464 hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
465 btree->n_used_nodes = i + 1; 465 btree->n_used_nodes = i + 1;
466 btree->n_free_nodes = nodes - btree->n_used_nodes; 466 btree->n_free_nodes = nodes - btree->n_used_nodes;
467 btree->first_free = 8 + 12 * btree->n_used_nodes; 467 btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
468 mark_buffer_dirty(bh); 468 mark_buffer_dirty(bh);
469 brelse(bh); 469 brelse(bh);
470} 470}
@@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
480 struct extended_attribute *ea_end; 480 struct extended_attribute *ea_end;
481 if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; 481 if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
482 if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); 482 if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
483 else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno); 483 else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
484 ea_end = fnode_end_ea(fnode); 484 ea_end = fnode_end_ea(fnode);
485 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 485 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
486 if (ea->indirect) 486 if (ea->indirect)
487 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 487 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
488 hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l); 488 hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
489 brelse(bh); 489 brelse(bh);
490 hpfs_free_sectors(s, fno, 1); 490 hpfs_free_sectors(s, fno, 1);
491} 491}
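
A smaller change runs through the alloc.c and anode.c hunks above: every hpfs_alloc_sector() call drops its trailing argument, the old per-call lock flag, because allocation is now serialized by the filesystem-wide lock instead of the creation mutex removed in the buffer.c hunk that follows. The prototype (declared in fs/hpfs/hpfs_fn.h, not shown here) presumably changes along these lines:

    /* before: callers passed a lock flag as the last argument (assumed) */
    secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n,
                            int forward, int lock);

    /* after: the flag is gone; callers rely on the fs-wide lock being held */
    secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n,
                            int forward);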
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 793cb9d943d2..9ecde27d1e29 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -9,22 +9,6 @@
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include "hpfs_fn.h" 10#include "hpfs_fn.h"
11 11
12void hpfs_lock_creation(struct super_block *s)
13{
14#ifdef DEBUG_LOCKS
15 printk("lock creation\n");
16#endif
17 mutex_lock(&hpfs_sb(s)->hpfs_creation_de);
18}
19
20void hpfs_unlock_creation(struct super_block *s)
21{
22#ifdef DEBUG_LOCKS
23 printk("unlock creation\n");
24#endif
25 mutex_unlock(&hpfs_sb(s)->hpfs_creation_de);
26}
27
28/* Map a sector into a buffer and return pointers to it and to the buffer. */ 12/* Map a sector into a buffer and return pointers to it and to the buffer. */
29 13
30void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, 14void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
@@ -32,6 +16,8 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head
32{ 16{
33 struct buffer_head *bh; 17 struct buffer_head *bh;
34 18
19 hpfs_lock_assert(s);
20
35 cond_resched(); 21 cond_resched();
36 22
37 *bhp = bh = sb_bread(s, secno); 23 *bhp = bh = sb_bread(s, secno);
@@ -50,6 +36,8 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
50 struct buffer_head *bh; 36 struct buffer_head *bh;
51 /*return hpfs_map_sector(s, secno, bhp, 0);*/ 37 /*return hpfs_map_sector(s, secno, bhp, 0);*/
52 38
39 hpfs_lock_assert(s);
40
53 cond_resched(); 41 cond_resched();
54 42
55 if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { 43 if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
@@ -70,6 +58,8 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
70 struct buffer_head *bh; 58 struct buffer_head *bh;
71 char *data; 59 char *data;
72 60
61 hpfs_lock_assert(s);
62
73 cond_resched(); 63 cond_resched();
74 64
75 if (secno & 3) { 65 if (secno & 3) {
@@ -125,6 +115,8 @@ void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
125{ 115{
126 cond_resched(); 116 cond_resched();
127 117
118 hpfs_lock_assert(s);
119
128 if (secno & 3) { 120 if (secno & 3) {
129 printk("HPFS: hpfs_get_4sectors: unaligned read\n"); 121 printk("HPFS: hpfs_get_4sectors: unaligned read\n");
130 return NULL; 122 return NULL;
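
The buffer.c hunk above deletes the hpfs_lock_creation()/hpfs_unlock_creation() pair and instead has every buffer-mapping helper assert that the caller already holds the filesystem-wide lock. hpfs_lock_assert() itself is defined outside this excerpt (in hpfs_fn.h); a plausible sketch, assuming the per-superblock lock is a mutex named hpfs_mutex, is:

    static inline void hpfs_lock_assert(struct super_block *s)
    {
            /* field name hpfs_mutex is assumed; warn if the fs lock is not held */
            WARN_ON(!mutex_is_locked(&hpfs_sb(s)->hpfs_mutex));
    }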
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index b3d7c0ddb609..f46ae025bfb5 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -88,9 +88,9 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
88 hpfs_error(inode->i_sb, "not a directory, fnode %08lx", 88 hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
89 (unsigned long)inode->i_ino); 89 (unsigned long)inode->i_ino);
90 } 90 }
91 if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { 91 if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) {
92 e = 1; 92 e = 1;
93 hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno); 93 hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno));
94 } 94 }
95 brelse(bh); 95 brelse(bh);
96 if (e) { 96 if (e) {
@@ -156,7 +156,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
156 goto again; 156 goto again;
157 } 157 }
158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); 158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
159 if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { 159 if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) {
160 filp->f_pos = old_pos; 160 filp->f_pos = old_pos;
161 if (tempname != de->name) kfree(tempname); 161 if (tempname != de->name) kfree(tempname);
162 hpfs_brelse4(&qbh); 162 hpfs_brelse4(&qbh);
@@ -221,7 +221,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
221 * Get inode number, what we're after. 221 * Get inode number, what we're after.
222 */ 222 */
223 223
224 ino = de->fnode; 224 ino = le32_to_cpu(de->fnode);
225 225
226 /* 226 /*
227 * Go find or make an inode. 227 * Go find or make an inode.
@@ -236,7 +236,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
236 hpfs_init_inode(result); 236 hpfs_init_inode(result);
237 if (de->directory) 237 if (de->directory)
238 hpfs_read_inode(result); 238 hpfs_read_inode(result);
239 else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas) 239 else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas)
240 hpfs_read_inode(result); 240 hpfs_read_inode(result);
241 else { 241 else {
242 result->i_mode |= S_IFREG; 242 result->i_mode |= S_IFREG;
@@ -250,8 +250,6 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
250 hpfs_result = hpfs_i(result); 250 hpfs_result = hpfs_i(result);
251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; 251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
252 252
253 hpfs_decide_conv(result, name, len);
254
255 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { 253 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
256 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures"); 254 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
257 goto bail1; 255 goto bail1;
@@ -263,19 +261,19 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
263 */ 261 */
264 262
265 if (!result->i_ctime.tv_sec) { 263 if (!result->i_ctime.tv_sec) {
266 if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date))) 264 if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date))))
267 result->i_ctime.tv_sec = 1; 265 result->i_ctime.tv_sec = 1;
268 result->i_ctime.tv_nsec = 0; 266 result->i_ctime.tv_nsec = 0;
269 result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date); 267 result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
270 result->i_mtime.tv_nsec = 0; 268 result->i_mtime.tv_nsec = 0;
271 result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date); 269 result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
272 result->i_atime.tv_nsec = 0; 270 result->i_atime.tv_nsec = 0;
273 hpfs_result->i_ea_size = de->ea_size; 271 hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
274 if (!hpfs_result->i_ea_mode && de->read_only) 272 if (!hpfs_result->i_ea_mode && de->read_only)
275 result->i_mode &= ~0222; 273 result->i_mode &= ~0222;
276 if (!de->directory) { 274 if (!de->directory) {
277 if (result->i_size == -1) { 275 if (result->i_size == -1) {
278 result->i_size = de->file_size; 276 result->i_size = le32_to_cpu(de->file_size);
279 result->i_data.a_ops = &hpfs_aops; 277 result->i_data.a_ops = &hpfs_aops;
280 hpfs_i(result)->mmu_private = result->i_size; 278 hpfs_i(result)->mmu_private = result->i_size;
281 /* 279 /*
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 9b2ffadfc8c4..1e0e2ac30fd3 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -14,11 +14,11 @@ static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde)
14 struct hpfs_dirent *de_end = dnode_end_de(d); 14 struct hpfs_dirent *de_end = dnode_end_de(d);
15 int i = 1; 15 int i = 1;
16 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { 16 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
17 if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i; 17 if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i;
18 i++; 18 i++;
19 } 19 }
20 printk("HPFS: get_pos: not_found\n"); 20 printk("HPFS: get_pos: not_found\n");
21 return ((loff_t)d->self << 4) | (loff_t)1; 21 return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1;
22} 22}
23 23
24void hpfs_add_pos(struct inode *inode, loff_t *pos) 24void hpfs_add_pos(struct inode *inode, loff_t *pos)
@@ -130,29 +130,30 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
130{ 130{
131 struct hpfs_dirent *de; 131 struct hpfs_dirent *de;
132 if (!(de = dnode_last_de(d))) { 132 if (!(de = dnode_last_de(d))) {
133 hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self); 133 hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self));
134 return; 134 return;
135 } 135 }
136 if (hpfs_sb(s)->sb_chk) { 136 if (hpfs_sb(s)->sb_chk) {
137 if (de->down) { 137 if (de->down) {
138 hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", 138 hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x",
139 d->self, de_down_pointer(de)); 139 le32_to_cpu(d->self), de_down_pointer(de));
140 return; 140 return;
141 } 141 }
142 if (de->length != 32) { 142 if (le16_to_cpu(de->length) != 32) {
143 hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self); 143 hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self));
144 return; 144 return;
145 } 145 }
146 } 146 }
147 if (ptr) { 147 if (ptr) {
148 if ((d->first_free += 4) > 2048) { 148 d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4);
149 hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self); 149 if (le32_to_cpu(d->first_free) > 2048) {
150 d->first_free -= 4; 150 hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self));
151 d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4);
151 return; 152 return;
152 } 153 }
153 de->length = 36; 154 de->length = cpu_to_le16(36);
154 de->down = 1; 155 de->down = 1;
155 *(dnode_secno *)((char *)de + 32) = ptr; 156 *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr);
156 } 157 }
157} 158}
158 159
@@ -168,7 +169,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
168 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { 169 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
169 int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); 170 int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last);
170 if (!c) { 171 if (!c) {
171 hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, d->self); 172 hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self));
172 return NULL; 173 return NULL;
173 } 174 }
174 if (c < 0) break; 175 if (c < 0) break;
@@ -176,15 +177,14 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
176 memmove((char *)de + d_size, de, (char *)de_end - (char *)de); 177 memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
177 memset(de, 0, d_size); 178 memset(de, 0, d_size);
178 if (down_ptr) { 179 if (down_ptr) {
179 *(int *)((char *)de + d_size - 4) = down_ptr; 180 *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
180 de->down = 1; 181 de->down = 1;
181 } 182 }
182 de->length = d_size; 183 de->length = cpu_to_le16(d_size);
183 if (down_ptr) de->down = 1;
184 de->not_8x3 = hpfs_is_name_long(name, namelen); 184 de->not_8x3 = hpfs_is_name_long(name, namelen);
185 de->namelen = namelen; 185 de->namelen = namelen;
186 memcpy(de->name, name, namelen); 186 memcpy(de->name, name, namelen);
187 d->first_free += d_size; 187 d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size);
188 return de; 188 return de;
189} 189}
190 190
@@ -194,25 +194,25 @@ static void hpfs_delete_de(struct super_block *s, struct dnode *d,
194 struct hpfs_dirent *de) 194 struct hpfs_dirent *de)
195{ 195{
196 if (de->last) { 196 if (de->last) {
197 hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self); 197 hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self));
198 return; 198 return;
199 } 199 }
200 d->first_free -= de->length; 200 d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length));
201 memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de); 201 memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de);
202} 202}
203 203
204static void fix_up_ptrs(struct super_block *s, struct dnode *d) 204static void fix_up_ptrs(struct super_block *s, struct dnode *d)
205{ 205{
206 struct hpfs_dirent *de; 206 struct hpfs_dirent *de;
207 struct hpfs_dirent *de_end = dnode_end_de(d); 207 struct hpfs_dirent *de_end = dnode_end_de(d);
208 dnode_secno dno = d->self; 208 dnode_secno dno = le32_to_cpu(d->self);
209 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) 209 for (de = dnode_first_de(d); de < de_end; de = de_next_de(de))
210 if (de->down) { 210 if (de->down) {
211 struct quad_buffer_head qbh; 211 struct quad_buffer_head qbh;
212 struct dnode *dd; 212 struct dnode *dd;
213 if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { 213 if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) {
214 if (dd->up != dno || dd->root_dnode) { 214 if (le32_to_cpu(dd->up) != dno || dd->root_dnode) {
215 dd->up = dno; 215 dd->up = cpu_to_le32(dno);
216 dd->root_dnode = 0; 216 dd->root_dnode = 0;
217 hpfs_mark_4buffers_dirty(&qbh); 217 hpfs_mark_4buffers_dirty(&qbh);
218 } 218 }
@@ -262,7 +262,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
262 kfree(nname); 262 kfree(nname);
263 return 1; 263 return 1;
264 } 264 }
265 if (d->first_free + de_size(namelen, down_ptr) <= 2048) { 265 if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) {
266 loff_t t; 266 loff_t t;
267 copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); 267 copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de);
268 t = get_pos(d, de); 268 t = get_pos(d, de);
@@ -286,11 +286,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
286 kfree(nname); 286 kfree(nname);
287 return 1; 287 return 1;
288 } 288 }
289 memcpy(nd, d, d->first_free); 289 memcpy(nd, d, le32_to_cpu(d->first_free));
290 copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); 290 copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de);
291 for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); 291 for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1);
292 h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; 292 h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10;
293 if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) { 293 if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) {
294 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); 294 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
295 hpfs_brelse4(&qbh); 295 hpfs_brelse4(&qbh);
296 kfree(nd); 296 kfree(nd);
@@ -313,20 +313,21 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
313 down_ptr = adno; 313 down_ptr = adno;
314 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); 314 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
315 de = de_next_de(de); 315 de = de_next_de(de);
316 memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de); 316 memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de);
317 nd->first_free -= (char *)de - (char *)nd - 20; 317 nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20));
318 memcpy(d, nd, nd->first_free); 318 memcpy(d, nd, le32_to_cpu(nd->first_free));
319 for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); 319 for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos);
320 fix_up_ptrs(i->i_sb, ad); 320 fix_up_ptrs(i->i_sb, ad);
321 if (!d->root_dnode) { 321 if (!d->root_dnode) {
322 dno = ad->up = d->up; 322 ad->up = d->up;
323 dno = le32_to_cpu(ad->up);
323 hpfs_mark_4buffers_dirty(&qbh); 324 hpfs_mark_4buffers_dirty(&qbh);
324 hpfs_brelse4(&qbh); 325 hpfs_brelse4(&qbh);
325 hpfs_mark_4buffers_dirty(&qbh1); 326 hpfs_mark_4buffers_dirty(&qbh1);
326 hpfs_brelse4(&qbh1); 327 hpfs_brelse4(&qbh1);
327 goto go_up; 328 goto go_up;
328 } 329 }
329 if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) { 330 if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) {
330 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); 331 hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
331 hpfs_brelse4(&qbh); 332 hpfs_brelse4(&qbh);
332 hpfs_brelse4(&qbh1); 333 hpfs_brelse4(&qbh1);
@@ -338,7 +339,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
338 i->i_blocks += 4; 339 i->i_blocks += 4;
339 rd->root_dnode = 1; 340 rd->root_dnode = 1;
340 rd->up = d->up; 341 rd->up = d->up;
341 if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) { 342 if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) {
342 hpfs_free_dnode(i->i_sb, rdno); 343 hpfs_free_dnode(i->i_sb, rdno);
343 hpfs_brelse4(&qbh); 344 hpfs_brelse4(&qbh);
344 hpfs_brelse4(&qbh1); 345 hpfs_brelse4(&qbh1);
@@ -347,10 +348,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
347 kfree(nname); 348 kfree(nname);
348 return 1; 349 return 1;
349 } 350 }
350 fnode->u.external[0].disk_secno = rdno; 351 fnode->u.external[0].disk_secno = cpu_to_le32(rdno);
351 mark_buffer_dirty(bh); 352 mark_buffer_dirty(bh);
352 brelse(bh); 353 brelse(bh);
353 d->up = ad->up = hpfs_i(i)->i_dno = rdno; 354 hpfs_i(i)->i_dno = rdno;
355 d->up = ad->up = cpu_to_le32(rdno);
354 d->root_dnode = ad->root_dnode = 0; 356 d->root_dnode = ad->root_dnode = 0;
355 hpfs_mark_4buffers_dirty(&qbh); 357 hpfs_mark_4buffers_dirty(&qbh);
356 hpfs_brelse4(&qbh); 358 hpfs_brelse4(&qbh);
@@ -373,7 +375,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
373 375
374int hpfs_add_dirent(struct inode *i, 376int hpfs_add_dirent(struct inode *i,
375 const unsigned char *name, unsigned namelen, 377 const unsigned char *name, unsigned namelen,
376 struct hpfs_dirent *new_de, int cdepth) 378 struct hpfs_dirent *new_de)
377{ 379{
378 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 380 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
379 struct dnode *d; 381 struct dnode *d;
@@ -403,7 +405,6 @@ int hpfs_add_dirent(struct inode *i,
403 } 405 }
404 } 406 }
405 hpfs_brelse4(&qbh); 407 hpfs_brelse4(&qbh);
406 if (!cdepth) hpfs_lock_creation(i->i_sb);
407 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { 408 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) {
408 c = 1; 409 c = 1;
409 goto ret; 410 goto ret;
@@ -411,7 +412,6 @@ int hpfs_add_dirent(struct inode *i,
411 i->i_version++; 412 i->i_version++;
412 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); 413 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
413 ret: 414 ret:
414 if (!cdepth) hpfs_unlock_creation(i->i_sb);
415 return c; 415 return c;
416} 416}
417 417
@@ -437,9 +437,9 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
437 return 0; 437 return 0;
438 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; 438 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0;
439 if (hpfs_sb(i->i_sb)->sb_chk) { 439 if (hpfs_sb(i->i_sb)->sb_chk) {
440 if (dnode->up != chk_up) { 440 if (le32_to_cpu(dnode->up) != chk_up) {
441 hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", 441 hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x",
442 dno, chk_up, dnode->up); 442 dno, chk_up, le32_to_cpu(dnode->up));
443 hpfs_brelse4(&qbh); 443 hpfs_brelse4(&qbh);
444 return 0; 444 return 0;
445 } 445 }
@@ -455,7 +455,7 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
455 hpfs_brelse4(&qbh); 455 hpfs_brelse4(&qbh);
456 } 456 }
457 while (!(de = dnode_pre_last_de(dnode))) { 457 while (!(de = dnode_pre_last_de(dnode))) {
458 dnode_secno up = dnode->up; 458 dnode_secno up = le32_to_cpu(dnode->up);
459 hpfs_brelse4(&qbh); 459 hpfs_brelse4(&qbh);
460 hpfs_free_dnode(i->i_sb, dno); 460 hpfs_free_dnode(i->i_sb, dno);
461 i->i_size -= 2048; 461 i->i_size -= 2048;
@@ -474,8 +474,8 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
474 hpfs_brelse4(&qbh); 474 hpfs_brelse4(&qbh);
475 return 0; 475 return 0;
476 } 476 }
477 dnode->first_free -= 4; 477 dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
478 de->length -= 4; 478 de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
479 de->down = 0; 479 de->down = 0;
480 hpfs_mark_4buffers_dirty(&qbh); 480 hpfs_mark_4buffers_dirty(&qbh);
481 dno = up; 481 dno = up;
@@ -483,12 +483,12 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
483 t = get_pos(dnode, de); 483 t = get_pos(dnode, de);
484 for_all_poss(i, hpfs_pos_subst, t, 4); 484 for_all_poss(i, hpfs_pos_subst, t, 4);
485 for_all_poss(i, hpfs_pos_subst, t + 1, 5); 485 for_all_poss(i, hpfs_pos_subst, t + 1, 5);
486 if (!(nde = kmalloc(de->length, GFP_NOFS))) { 486 if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) {
487 hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); 487 hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted");
488 hpfs_brelse4(&qbh); 488 hpfs_brelse4(&qbh);
489 return 0; 489 return 0;
490 } 490 }
491 memcpy(nde, de, de->length); 491 memcpy(nde, de, le16_to_cpu(de->length));
492 ddno = de->down ? de_down_pointer(de) : 0; 492 ddno = de->down ? de_down_pointer(de) : 0;
493 hpfs_delete_de(i->i_sb, dnode, de); 493 hpfs_delete_de(i->i_sb, dnode, de);
494 set_last_pointer(i->i_sb, dnode, ddno); 494 set_last_pointer(i->i_sb, dnode, ddno);
@@ -517,11 +517,11 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
517 try_it_again: 517 try_it_again:
518 if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; 518 if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return;
519 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; 519 if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return;
520 if (dnode->first_free > 56) goto end; 520 if (le32_to_cpu(dnode->first_free) > 56) goto end;
521 if (dnode->first_free == 52 || dnode->first_free == 56) { 521 if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) {
522 struct hpfs_dirent *de_end; 522 struct hpfs_dirent *de_end;
523 int root = dnode->root_dnode; 523 int root = dnode->root_dnode;
524 up = dnode->up; 524 up = le32_to_cpu(dnode->up);
525 de = dnode_first_de(dnode); 525 de = dnode_first_de(dnode);
526 down = de->down ? de_down_pointer(de) : 0; 526 down = de->down ? de_down_pointer(de) : 0;
527 if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { 527 if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) {
@@ -545,13 +545,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
545 return; 545 return;
546 } 546 }
547 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { 547 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
548 d1->up = up; 548 d1->up = cpu_to_le32(up);
549 d1->root_dnode = 1; 549 d1->root_dnode = 1;
550 hpfs_mark_4buffers_dirty(&qbh1); 550 hpfs_mark_4buffers_dirty(&qbh1);
551 hpfs_brelse4(&qbh1); 551 hpfs_brelse4(&qbh1);
552 } 552 }
553 if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { 553 if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) {
554 fnode->u.external[0].disk_secno = down; 554 fnode->u.external[0].disk_secno = cpu_to_le32(down);
555 mark_buffer_dirty(bh); 555 mark_buffer_dirty(bh);
556 brelse(bh); 556 brelse(bh);
557 } 557 }
@@ -570,22 +570,22 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
570 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); 570 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p);
571 if (!down) { 571 if (!down) {
572 de->down = 0; 572 de->down = 0;
573 de->length -= 4; 573 de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
574 dnode->first_free -= 4; 574 dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
575 memmove(de_next_de(de), (char *)de_next_de(de) + 4, 575 memmove(de_next_de(de), (char *)de_next_de(de) + 4,
576 (char *)dnode + dnode->first_free - (char *)de_next_de(de)); 576 (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de));
577 } else { 577 } else {
578 struct dnode *d1; 578 struct dnode *d1;
579 struct quad_buffer_head qbh1; 579 struct quad_buffer_head qbh1;
580 *(dnode_secno *) ((void *) de + de->length - 4) = down; 580 *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down;
581 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { 581 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
582 d1->up = up; 582 d1->up = cpu_to_le32(up);
583 hpfs_mark_4buffers_dirty(&qbh1); 583 hpfs_mark_4buffers_dirty(&qbh1);
584 hpfs_brelse4(&qbh1); 584 hpfs_brelse4(&qbh1);
585 } 585 }
586 } 586 }
587 } else { 587 } else {
588 hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free); 588 hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free));
589 goto end; 589 goto end;
590 } 590 }
591 591
@@ -596,18 +596,18 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
596 struct quad_buffer_head qbh1; 596 struct quad_buffer_head qbh1;
597 if (!de_next->down) goto endm; 597 if (!de_next->down) goto endm;
598 ndown = de_down_pointer(de_next); 598 ndown = de_down_pointer(de_next);
599 if (!(de_cp = kmalloc(de->length, GFP_NOFS))) { 599 if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) {
600 printk("HPFS: out of memory for dtree balancing\n"); 600 printk("HPFS: out of memory for dtree balancing\n");
601 goto endm; 601 goto endm;
602 } 602 }
603 memcpy(de_cp, de, de->length); 603 memcpy(de_cp, de, le16_to_cpu(de->length));
604 hpfs_delete_de(i->i_sb, dnode, de); 604 hpfs_delete_de(i->i_sb, dnode, de);
605 hpfs_mark_4buffers_dirty(&qbh); 605 hpfs_mark_4buffers_dirty(&qbh);
606 hpfs_brelse4(&qbh); 606 hpfs_brelse4(&qbh);
607 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); 607 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4);
608 for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1); 608 for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1);
609 if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { 609 if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) {
610 d1->up = ndown; 610 d1->up = cpu_to_le32(ndown);
611 hpfs_mark_4buffers_dirty(&qbh1); 611 hpfs_mark_4buffers_dirty(&qbh1);
612 hpfs_brelse4(&qbh1); 612 hpfs_brelse4(&qbh1);
613 } 613 }
@@ -635,7 +635,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
635 struct hpfs_dirent *del = dnode_last_de(d1); 635 struct hpfs_dirent *del = dnode_last_de(d1);
636 dlp = del->down ? de_down_pointer(del) : 0; 636 dlp = del->down ? de_down_pointer(del) : 0;
637 if (!dlp && down) { 637 if (!dlp && down) {
638 if (d1->first_free > 2044) { 638 if (le32_to_cpu(d1->first_free) > 2044) {
639 if (hpfs_sb(i->i_sb)->sb_chk >= 2) { 639 if (hpfs_sb(i->i_sb)->sb_chk >= 2) {
640 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); 640 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
641 printk("HPFS: warning: terminating balancing operation\n"); 641 printk("HPFS: warning: terminating balancing operation\n");
@@ -647,38 +647,38 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
647 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); 647 printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
648 printk("HPFS: warning: goin'on\n"); 648 printk("HPFS: warning: goin'on\n");
649 } 649 }
650 del->length += 4; 650 del->length = cpu_to_le16(le16_to_cpu(del->length) + 4);
651 del->down = 1; 651 del->down = 1;
652 d1->first_free += 4; 652 d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4);
653 } 653 }
654 if (dlp && !down) { 654 if (dlp && !down) {
655 del->length -= 4; 655 del->length = cpu_to_le16(le16_to_cpu(del->length) - 4);
656 del->down = 0; 656 del->down = 0;
657 d1->first_free -= 4; 657 d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
658 } else if (down) 658 } else if (down)
659 *(dnode_secno *) ((void *) del + del->length - 4) = down; 659 *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
660 } else goto endm; 660 } else goto endm;
661 if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) { 661 if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
662 printk("HPFS: out of memory for dtree balancing\n"); 662 printk("HPFS: out of memory for dtree balancing\n");
663 hpfs_brelse4(&qbh1); 663 hpfs_brelse4(&qbh1);
664 goto endm; 664 goto endm;
665 } 665 }
666 hpfs_mark_4buffers_dirty(&qbh1); 666 hpfs_mark_4buffers_dirty(&qbh1);
667 hpfs_brelse4(&qbh1); 667 hpfs_brelse4(&qbh1);
668 memcpy(de_cp, de_prev, de_prev->length); 668 memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length));
669 hpfs_delete_de(i->i_sb, dnode, de_prev); 669 hpfs_delete_de(i->i_sb, dnode, de_prev);
670 if (!de_prev->down) { 670 if (!de_prev->down) {
671 de_prev->length += 4; 671 de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4);
672 de_prev->down = 1; 672 de_prev->down = 1;
673 dnode->first_free += 4; 673 dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
674 } 674 }
675 *(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown; 675 *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
676 hpfs_mark_4buffers_dirty(&qbh); 676 hpfs_mark_4buffers_dirty(&qbh);
677 hpfs_brelse4(&qbh); 677 hpfs_brelse4(&qbh);
678 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); 678 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
679 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); 679 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1));
680 if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { 680 if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) {
681 d1->up = ndown; 681 d1->up = cpu_to_le32(ndown);
682 hpfs_mark_4buffers_dirty(&qbh1); 682 hpfs_mark_4buffers_dirty(&qbh1);
683 hpfs_brelse4(&qbh1); 683 hpfs_brelse4(&qbh1);
684 } 684 }
@@ -701,7 +701,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
701{ 701{
702 struct dnode *dnode = qbh->data; 702 struct dnode *dnode = qbh->data;
703 dnode_secno down = 0; 703 dnode_secno down = 0;
704 int lock = 0;
705 loff_t t; 704 loff_t t;
706 if (de->first || de->last) { 705 if (de->first || de->last) {
707 hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); 706 hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno);
@@ -710,11 +709,8 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
710 } 709 }
711 if (de->down) down = de_down_pointer(de); 710 if (de->down) down = de_down_pointer(de);
712 if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { 711 if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) {
713 lock = 1;
714 hpfs_lock_creation(i->i_sb);
715 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { 712 if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) {
716 hpfs_brelse4(qbh); 713 hpfs_brelse4(qbh);
717 hpfs_unlock_creation(i->i_sb);
718 return 2; 714 return 2;
719 } 715 }
720 } 716 }
@@ -727,11 +723,9 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
727 dnode_secno a = move_to_top(i, down, dno); 723 dnode_secno a = move_to_top(i, down, dno);
728 for_all_poss(i, hpfs_pos_subst, 5, t); 724 for_all_poss(i, hpfs_pos_subst, 5, t);
729 if (a) delete_empty_dnode(i, a); 725 if (a) delete_empty_dnode(i, a);
730 if (lock) hpfs_unlock_creation(i->i_sb);
731 return !a; 726 return !a;
732 } 727 }
733 delete_empty_dnode(i, dno); 728 delete_empty_dnode(i, dno);
734 if (lock) hpfs_unlock_creation(i->i_sb);
735 return 0; 729 return 0;
736} 730}
737 731
@@ -751,8 +745,8 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes,
751 ptr = 0; 745 ptr = 0;
752 go_up: 746 go_up:
753 if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; 747 if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return;
754 if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno) 748 if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno)
755 hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up); 749 hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up));
756 de = dnode_first_de(dnode); 750 de = dnode_first_de(dnode);
757 if (ptr) while(1) { 751 if (ptr) while(1) {
758 if (de->down) if (de_down_pointer(de) == ptr) goto process_de; 752 if (de->down) if (de_down_pointer(de) == ptr) goto process_de;
@@ -776,7 +770,7 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes,
776 if (!de->first && !de->last && n_items) (*n_items)++; 770 if (!de->first && !de->last && n_items) (*n_items)++;
777 if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; 771 if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de;
778 ptr = dno; 772 ptr = dno;
779 dno = dnode->up; 773 dno = le32_to_cpu(dnode->up);
780 if (dnode->root_dnode) { 774 if (dnode->root_dnode) {
781 hpfs_brelse4(&qbh); 775 hpfs_brelse4(&qbh);
782 return; 776 return;
@@ -824,8 +818,8 @@ dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno)
824 return d; 818 return d;
825 if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; 819 if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno;
826 if (hpfs_sb(s)->sb_chk) 820 if (hpfs_sb(s)->sb_chk)
827 if (up && ((struct dnode *)qbh.data)->up != up) 821 if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up)
828 hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up); 822 hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up));
829 if (!de->down) { 823 if (!de->down) {
830 hpfs_brelse4(&qbh); 824 hpfs_brelse4(&qbh);
831 return d; 825 return d;
@@ -874,7 +868,7 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
874 /* Going up */ 868 /* Going up */
875 if (dnode->root_dnode) goto bail; 869 if (dnode->root_dnode) goto bail;
876 870
877 if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0))) 871 if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0)))
878 goto bail; 872 goto bail;
879 873
880 end_up_de = dnode_end_de(up_dnode); 874 end_up_de = dnode_end_de(up_dnode);
@@ -882,16 +876,16 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
882 for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; 876 for (up_de = dnode_first_de(up_dnode); up_de < end_up_de;
883 up_de = de_next_de(up_de)) { 877 up_de = de_next_de(up_de)) {
884 if (!(++c & 077)) hpfs_error(inode->i_sb, 878 if (!(++c & 077)) hpfs_error(inode->i_sb,
885 "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up); 879 "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up));
886 if (up_de->down && de_down_pointer(up_de) == dno) { 880 if (up_de->down && de_down_pointer(up_de) == dno) {
887 *posp = ((loff_t) dnode->up << 4) + c; 881 *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c;
888 hpfs_brelse4(&qbh0); 882 hpfs_brelse4(&qbh0);
889 return de; 883 return de;
890 } 884 }
891 } 885 }
892 886
893 hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", 887 hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x",
894 dno, dnode->up); 888 dno, le32_to_cpu(dnode->up));
895 hpfs_brelse4(&qbh0); 889 hpfs_brelse4(&qbh0);
896 890
897 bail: 891 bail:
@@ -1017,17 +1011,17 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
1017 /*name2[15] = 0xff;*/ 1011 /*name2[15] = 0xff;*/
1018 name1len = 15; name2len = 256; 1012 name1len = 15; name2len = 256;
1019 } 1013 }
1020 if (!(upf = hpfs_map_fnode(s, f->up, &bh))) { 1014 if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) {
1021 kfree(name2); 1015 kfree(name2);
1022 return NULL; 1016 return NULL;
1023 } 1017 }
1024 if (!upf->dirflag) { 1018 if (!upf->dirflag) {
1025 brelse(bh); 1019 brelse(bh);
1026 hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up); 1020 hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
1027 kfree(name2); 1021 kfree(name2);
1028 return NULL; 1022 return NULL;
1029 } 1023 }
1030 dno = upf->u.external[0].disk_secno; 1024 dno = le32_to_cpu(upf->u.external[0].disk_secno);
1031 brelse(bh); 1025 brelse(bh);
1032 go_down: 1026 go_down:
1033 downd = 0; 1027 downd = 0;
@@ -1049,7 +1043,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
1049 return NULL; 1043 return NULL;
1050 } 1044 }
1051 next_de: 1045 next_de:
1052 if (de->fnode == fno) { 1046 if (le32_to_cpu(de->fnode) == fno) {
1053 kfree(name2); 1047 kfree(name2);
1054 return de; 1048 return de;
1055 } 1049 }
@@ -1065,7 +1059,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
1065 goto go_down; 1059 goto go_down;
1066 } 1060 }
1067 f: 1061 f:
1068 if (de->fnode == fno) { 1062 if (le32_to_cpu(de->fnode) == fno) {
1069 kfree(name2); 1063 kfree(name2);
1070 return de; 1064 return de;
1071 } 1065 }
@@ -1074,7 +1068,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
1074 if ((de = de_next_de(de)) < de_end) goto next_de; 1068 if ((de = de_next_de(de)) < de_end) goto next_de;
1075 if (d->root_dnode) goto not_found; 1069 if (d->root_dnode) goto not_found;
1076 downd = dno; 1070 downd = dno;
1077 dno = d->up; 1071 dno = le32_to_cpu(d->up);
1078 hpfs_brelse4(qbh); 1072 hpfs_brelse4(qbh);
1079 if (hpfs_sb(s)->sb_chk) 1073 if (hpfs_sb(s)->sb_chk)
1080 if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { 1074 if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) {
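
The hunks above replace every direct use of an on-disk 32-bit field (dnode->up, de->fnode, the external disk_secno) with le32_to_cpu(), so comparisons and arithmetic happen in CPU byte order instead of raw disk byte order. A minimal userspace sketch of what such a conversion does, with a hand-rolled helper standing in for the kernel accessor (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_le32_to_cpu(const uint8_t b[4])
{
        /* assemble from explicit byte positions; result is independent of
         * the host's native endianness */
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        uint8_t on_disk_up[4] = { 0x34, 0x12, 0x00, 0x00 }; /* LE encoding of 0x1234 */
        uint32_t up = sketch_le32_to_cpu(on_disk_up);

        printf("parent dnode = %08x\n", up); /* 00001234 on both LE and BE hosts */
        return 0;
}

Without the conversion, a big-endian host reading the same bytes as a plain integer would see 0x34120000 and follow a bogus parent pointer.
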
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 45e53d972b42..d8b84d113c89 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -24,7 +24,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
24 } 24 }
25 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; 25 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
26 if (ea->indirect) { 26 if (ea->indirect) {
27 if (ea->valuelen != 8) { 27 if (ea_valuelen(ea) != 8) {
28 hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", 28 hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
29 ano ? "anode" : "sectors", a, pos); 29 ano ? "anode" : "sectors", a, pos);
30 return; 30 return;
@@ -33,7 +33,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
33 return; 33 return;
34 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 34 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
35 } 35 }
36 pos += ea->namelen + ea->valuelen + 5; 36 pos += ea->namelen + ea_valuelen(ea) + 5;
37 } 37 }
38 if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9); 38 if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9);
39 else { 39 else {
@@ -76,24 +76,24 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
76 unsigned pos; 76 unsigned pos;
77 int ano, len; 77 int ano, len;
78 secno a; 78 secno a;
79 char ex[4 + 255 + 1 + 8];
79 struct extended_attribute *ea; 80 struct extended_attribute *ea;
80 struct extended_attribute *ea_end = fnode_end_ea(fnode); 81 struct extended_attribute *ea_end = fnode_end_ea(fnode);
81 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 82 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
82 if (!strcmp(ea->name, key)) { 83 if (!strcmp(ea->name, key)) {
83 if (ea->indirect) 84 if (ea->indirect)
84 goto indirect; 85 goto indirect;
85 if (ea->valuelen >= size) 86 if (ea_valuelen(ea) >= size)
86 return -EINVAL; 87 return -EINVAL;
87 memcpy(buf, ea_data(ea), ea->valuelen); 88 memcpy(buf, ea_data(ea), ea_valuelen(ea));
88 buf[ea->valuelen] = 0; 89 buf[ea_valuelen(ea)] = 0;
89 return 0; 90 return 0;
90 } 91 }
91 a = fnode->ea_secno; 92 a = le32_to_cpu(fnode->ea_secno);
92 len = fnode->ea_size_l; 93 len = le32_to_cpu(fnode->ea_size_l);
93 ano = fnode->ea_anode; 94 ano = fnode->ea_anode;
94 pos = 0; 95 pos = 0;
95 while (pos < len) { 96 while (pos < len) {
96 char ex[4 + 255 + 1 + 8];
97 ea = (struct extended_attribute *)ex; 97 ea = (struct extended_attribute *)ex;
98 if (pos + 4 > len) { 98 if (pos + 4 > len) {
99 hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", 99 hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
@@ -106,14 +106,14 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
106 if (!strcmp(ea->name, key)) { 106 if (!strcmp(ea->name, key)) {
107 if (ea->indirect) 107 if (ea->indirect)
108 goto indirect; 108 goto indirect;
109 if (ea->valuelen >= size) 109 if (ea_valuelen(ea) >= size)
110 return -EINVAL; 110 return -EINVAL;
111 if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf)) 111 if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf))
112 return -EIO; 112 return -EIO;
113 buf[ea->valuelen] = 0; 113 buf[ea_valuelen(ea)] = 0;
114 return 0; 114 return 0;
115 } 115 }
116 pos += ea->namelen + ea->valuelen + 5; 116 pos += ea->namelen + ea_valuelen(ea) + 5;
117 } 117 }
118 return -ENOENT; 118 return -ENOENT;
119indirect: 119indirect:
@@ -138,16 +138,16 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
138 if (!strcmp(ea->name, key)) { 138 if (!strcmp(ea->name, key)) {
139 if (ea->indirect) 139 if (ea->indirect)
140 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 140 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
141 if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { 141 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
142 printk("HPFS: out of memory for EA\n"); 142 printk("HPFS: out of memory for EA\n");
143 return NULL; 143 return NULL;
144 } 144 }
145 memcpy(ret, ea_data(ea), ea->valuelen); 145 memcpy(ret, ea_data(ea), ea_valuelen(ea));
146 ret[ea->valuelen] = 0; 146 ret[ea_valuelen(ea)] = 0;
147 return ret; 147 return ret;
148 } 148 }
149 a = fnode->ea_secno; 149 a = le32_to_cpu(fnode->ea_secno);
150 len = fnode->ea_size_l; 150 len = le32_to_cpu(fnode->ea_size_l);
151 ano = fnode->ea_anode; 151 ano = fnode->ea_anode;
152 pos = 0; 152 pos = 0;
153 while (pos < len) { 153 while (pos < len) {
@@ -164,18 +164,18 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
164 if (!strcmp(ea->name, key)) { 164 if (!strcmp(ea->name, key)) {
165 if (ea->indirect) 165 if (ea->indirect)
166 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 166 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
167 if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { 167 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
168 printk("HPFS: out of memory for EA\n"); 168 printk("HPFS: out of memory for EA\n");
169 return NULL; 169 return NULL;
170 } 170 }
171 if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) { 171 if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) {
172 kfree(ret); 172 kfree(ret);
173 return NULL; 173 return NULL;
174 } 174 }
175 ret[ea->valuelen] = 0; 175 ret[ea_valuelen(ea)] = 0;
176 return ret; 176 return ret;
177 } 177 }
178 pos += ea->namelen + ea->valuelen + 5; 178 pos += ea->namelen + ea_valuelen(ea) + 5;
179 } 179 }
180 return NULL; 180 return NULL;
181} 181}
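
In hpfs_read_ea() and hpfs_get_ea() every use of ea->valuelen becomes ea_valuelen(ea); later in the patch the 16-bit field is split into two single bytes (valuelen_lo/valuelen_hi in hpfs.h), so the length is recomposed by byte arithmetic rather than loaded as a possibly misaligned, endian-dependent u16. A hedged userspace sketch of that helper; the struct below is an illustration, not the full on-disk definition:

#include <stdint.h>
#include <assert.h>

struct ea_sketch {
        uint8_t flags;
        uint8_t namelen;        /* length of name, bytes */
        uint8_t valuelen_lo;    /* low 8 bits of value length */
        uint8_t valuelen_hi;    /* high 8 bits of value length */
};

static unsigned ea_valuelen_sketch(const struct ea_sketch *ea)
{
        /* single-byte loads: no alignment requirement, no byte order */
        return ea->valuelen_lo + 256u * ea->valuelen_hi;
}

int main(void)
{
        struct ea_sketch ea = { 0, 3, 0x2c, 0x01 };
        assert(ea_valuelen_sketch(&ea) == 300);  /* 0x012c */
        return 0;
}
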
@@ -202,13 +202,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
202 if (ea->indirect) { 202 if (ea->indirect) {
203 if (ea_len(ea) == size) 203 if (ea_len(ea) == size)
204 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 204 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
205 } else if (ea->valuelen == size) { 205 } else if (ea_valuelen(ea) == size) {
206 memcpy(ea_data(ea), data, size); 206 memcpy(ea_data(ea), data, size);
207 } 207 }
208 return; 208 return;
209 } 209 }
210 a = fnode->ea_secno; 210 a = le32_to_cpu(fnode->ea_secno);
211 len = fnode->ea_size_l; 211 len = le32_to_cpu(fnode->ea_size_l);
212 ano = fnode->ea_anode; 212 ano = fnode->ea_anode;
213 pos = 0; 213 pos = 0;
214 while (pos < len) { 214 while (pos < len) {
@@ -228,68 +228,70 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
228 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 228 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
229 } 229 }
230 else { 230 else {
231 if (ea->valuelen == size) 231 if (ea_valuelen(ea) == size)
232 hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); 232 hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data);
233 } 233 }
234 return; 234 return;
235 } 235 }
236 pos += ea->namelen + ea->valuelen + 5; 236 pos += ea->namelen + ea_valuelen(ea) + 5;
237 } 237 }
238 if (!fnode->ea_offs) { 238 if (!le16_to_cpu(fnode->ea_offs)) {
239 /*if (fnode->ea_size_s) { 239 /*if (le16_to_cpu(fnode->ea_size_s)) {
240 hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0", 240 hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0",
241 inode->i_ino, fnode->ea_size_s); 241 inode->i_ino, le16_to_cpu(fnode->ea_size_s));
242 return; 242 return;
243 }*/ 243 }*/
244 fnode->ea_offs = 0xc4; 244 fnode->ea_offs = cpu_to_le16(0xc4);
245 } 245 }
246 if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { 246 if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
247 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", 247 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
248 (unsigned long)inode->i_ino, 248 (unsigned long)inode->i_ino,
249 fnode->ea_offs, fnode->ea_size_s); 249 le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
250 return; 250 return;
251 } 251 }
252 if ((fnode->ea_size_s || !fnode->ea_size_l) && 252 if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
253 fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) { 253 le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) {
254 ea = fnode_end_ea(fnode); 254 ea = fnode_end_ea(fnode);
255 *(char *)ea = 0; 255 *(char *)ea = 0;
256 ea->namelen = strlen(key); 256 ea->namelen = strlen(key);
257 ea->valuelen = size; 257 ea->valuelen_lo = size;
258 ea->valuelen_hi = size >> 8;
258 strcpy(ea->name, key); 259 strcpy(ea->name, key);
259 memcpy(ea_data(ea), data, size); 260 memcpy(ea_data(ea), data, size);
260 fnode->ea_size_s += strlen(key) + size + 5; 261 fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5);
261 goto ret; 262 goto ret;
262 } 263 }
263 /* Most the code here is 99.9993422% unused. I hope there are no bugs. 264 /* Most the code here is 99.9993422% unused. I hope there are no bugs.
264 But what .. HPFS.IFS has also bugs in ea management. */ 265 But what .. HPFS.IFS has also bugs in ea management. */
265 if (fnode->ea_size_s && !fnode->ea_size_l) { 266 if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) {
266 secno n; 267 secno n;
267 struct buffer_head *bh; 268 struct buffer_head *bh;
268 char *data; 269 char *data;
269 if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return; 270 if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return;
270 if (!(data = hpfs_get_sector(s, n, &bh))) { 271 if (!(data = hpfs_get_sector(s, n, &bh))) {
271 hpfs_free_sectors(s, n, 1); 272 hpfs_free_sectors(s, n, 1);
272 return; 273 return;
273 } 274 }
274 memcpy(data, fnode_ea(fnode), fnode->ea_size_s); 275 memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s));
275 fnode->ea_size_l = fnode->ea_size_s; 276 fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
276 fnode->ea_size_s = 0; 277 fnode->ea_size_s = cpu_to_le16(0);
277 fnode->ea_secno = n; 278 fnode->ea_secno = cpu_to_le32(n);
278 fnode->ea_anode = 0; 279 fnode->ea_anode = cpu_to_le32(0);
279 mark_buffer_dirty(bh); 280 mark_buffer_dirty(bh);
280 brelse(bh); 281 brelse(bh);
281 } 282 }
282 pos = fnode->ea_size_l + 5 + strlen(key) + size; 283 pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size;
283 len = (fnode->ea_size_l + 511) >> 9; 284 len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9;
284 if (pos >= 30000) goto bail; 285 if (pos >= 30000) goto bail;
285 while (((pos + 511) >> 9) > len) { 286 while (((pos + 511) >> 9) > len) {
286 if (!len) { 287 if (!len) {
287 if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1))) 288 secno q = hpfs_alloc_sector(s, fno, 1, 0);
288 goto bail; 289 if (!q) goto bail;
290 fnode->ea_secno = cpu_to_le32(q);
289 fnode->ea_anode = 0; 291 fnode->ea_anode = 0;
290 len++; 292 len++;
291 } else if (!fnode->ea_anode) { 293 } else if (!fnode->ea_anode) {
292 if (hpfs_alloc_if_possible(s, fnode->ea_secno + len)) { 294 if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
293 len++; 295 len++;
294 } else { 296 } else {
295 /* Aargh... don't know how to create ea anodes :-( */ 297 /* Aargh... don't know how to create ea anodes :-( */
@@ -298,26 +300,26 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
298 anode_secno a_s; 300 anode_secno a_s;
299 if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh))) 301 if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh)))
300 goto bail; 302 goto bail;
301 anode->up = fno; 303 anode->up = cpu_to_le32(fno);
302 anode->btree.fnode_parent = 1; 304 anode->btree.fnode_parent = 1;
303 anode->btree.n_free_nodes--; 305 anode->btree.n_free_nodes--;
304 anode->btree.n_used_nodes++; 306 anode->btree.n_used_nodes++;
305 anode->btree.first_free += 12; 307 anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12);
306 anode->u.external[0].disk_secno = fnode->ea_secno; 308 anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno));
307 anode->u.external[0].file_secno = 0; 309 anode->u.external[0].file_secno = cpu_to_le32(0);
308 anode->u.external[0].length = len; 310 anode->u.external[0].length = cpu_to_le32(len);
309 mark_buffer_dirty(bh); 311 mark_buffer_dirty(bh);
310 brelse(bh); 312 brelse(bh);
311 fnode->ea_anode = 1; 313 fnode->ea_anode = 1;
312 fnode->ea_secno = a_s;*/ 314 fnode->ea_secno = cpu_to_le32(a_s);*/
313 secno new_sec; 315 secno new_sec;
314 int i; 316 int i;
315 if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1))) 317 if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9))))
316 goto bail; 318 goto bail;
317 for (i = 0; i < len; i++) { 319 for (i = 0; i < len; i++) {
318 struct buffer_head *bh1, *bh2; 320 struct buffer_head *bh1, *bh2;
319 void *b1, *b2; 321 void *b1, *b2;
320 if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) { 322 if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) {
321 hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); 323 hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
322 goto bail; 324 goto bail;
323 } 325 }
@@ -331,13 +333,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
331 mark_buffer_dirty(bh2); 333 mark_buffer_dirty(bh2);
332 brelse(bh2); 334 brelse(bh2);
333 } 335 }
334 hpfs_free_sectors(s, fnode->ea_secno, len); 336 hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len);
335 fnode->ea_secno = new_sec; 337 fnode->ea_secno = cpu_to_le32(new_sec);
336 len = (pos + 511) >> 9; 338 len = (pos + 511) >> 9;
337 } 339 }
338 } 340 }
339 if (fnode->ea_anode) { 341 if (fnode->ea_anode) {
340 if (hpfs_add_sector_to_btree(s, fnode->ea_secno, 342 if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
341 0, len) != -1) { 343 0, len) != -1) {
342 len++; 344 len++;
343 } else { 345 } else {
@@ -349,17 +351,17 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
349 h[1] = strlen(key); 351 h[1] = strlen(key);
350 h[2] = size & 0xff; 352 h[2] = size & 0xff;
351 h[3] = size >> 8; 353 h[3] = size >> 8;
352 if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail; 354 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
353 if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail; 355 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
354 if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail; 356 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
355 fnode->ea_size_l = pos; 357 fnode->ea_size_l = cpu_to_le32(pos);
356 ret: 358 ret:
357 hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; 359 hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
358 return; 360 return;
359 bail: 361 bail:
360 if (fnode->ea_secno) 362 if (le32_to_cpu(fnode->ea_secno))
361 if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9); 363 if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
362 else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9)); 364 else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
363 else fnode->ea_secno = fnode->ea_size_l = 0; 365 else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
364} 366}
365 367
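
Note the pattern used throughout hpfs_set_ea() for counters stored little-endian on disk, e.g. fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5): convert to CPU order, do the arithmetic, convert back before storing. A userspace sketch of that read-modify-write shape, with stand-in helpers rather than the kernel's accessors:

#include <stdint.h>
#include <stdio.h>

static uint16_t sk_le16_to_cpu(uint16_t v)
{
        const uint8_t *b = (const uint8_t *)&v;
        return (uint16_t)(b[0] | (b[1] << 8));
}

static uint16_t sk_cpu_to_le16(uint16_t v)
{
        uint16_t out;
        uint8_t *b = (uint8_t *)&out;
        b[0] = v & 0xff;
        b[1] = v >> 8;
        return out;
}

int main(void)
{
        uint16_t ea_size_s = sk_cpu_to_le16(0x10);  /* on-disk representation */
        unsigned delta = 5 + 3 + 4;                 /* record header + key + value */

        /* never do arithmetic on the raw on-disk value: convert, add, convert back */
        ea_size_s = sk_cpu_to_le16(sk_le16_to_cpu(ea_size_s) + delta);
        printf("new ea_size_s (cpu order) = %u\n", sk_le16_to_cpu(ea_size_s));
        return 0;
}
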
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 9b9eb6933e43..89c500ee5213 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -20,8 +20,8 @@ static int hpfs_file_release(struct inode *inode, struct file *file)
20 20
21int hpfs_file_fsync(struct file *file, int datasync) 21int hpfs_file_fsync(struct file *file, int datasync)
22{ 22{
23 /*return file_fsync(file, datasync);*/ 23 struct inode *inode = file->f_mapping->host;
24 return 0; /* Don't fsync :-) */ 24 return sync_blockdev(inode->i_sb->s_bdev);
25} 25}
26 26
27/* 27/*
@@ -48,38 +48,46 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno)
48static void hpfs_truncate(struct inode *i) 48static void hpfs_truncate(struct inode *i)
49{ 49{
50 if (IS_IMMUTABLE(i)) return /*-EPERM*/; 50 if (IS_IMMUTABLE(i)) return /*-EPERM*/;
51 hpfs_lock(i->i_sb); 51 hpfs_lock_assert(i->i_sb);
52
52 hpfs_i(i)->i_n_secs = 0; 53 hpfs_i(i)->i_n_secs = 0;
53 i->i_blocks = 1 + ((i->i_size + 511) >> 9); 54 i->i_blocks = 1 + ((i->i_size + 511) >> 9);
54 hpfs_i(i)->mmu_private = i->i_size; 55 hpfs_i(i)->mmu_private = i->i_size;
55 hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); 56 hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
56 hpfs_write_inode(i); 57 hpfs_write_inode(i);
57 hpfs_i(i)->i_n_secs = 0; 58 hpfs_i(i)->i_n_secs = 0;
58 hpfs_unlock(i->i_sb);
59} 59}
60 60
61static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) 61static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
62{ 62{
63 int r;
63 secno s; 64 secno s;
65 hpfs_lock(inode->i_sb);
64 s = hpfs_bmap(inode, iblock); 66 s = hpfs_bmap(inode, iblock);
65 if (s) { 67 if (s) {
66 map_bh(bh_result, inode->i_sb, s); 68 map_bh(bh_result, inode->i_sb, s);
67 return 0; 69 goto ret_0;
68 } 70 }
69 if (!create) return 0; 71 if (!create) goto ret_0;
70 if (iblock<<9 != hpfs_i(inode)->mmu_private) { 72 if (iblock<<9 != hpfs_i(inode)->mmu_private) {
71 BUG(); 73 BUG();
72 return -EIO; 74 r = -EIO;
75 goto ret_r;
73 } 76 }
74 if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) { 77 if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
75 hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1); 78 hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
76 return -ENOSPC; 79 r = -ENOSPC;
80 goto ret_r;
77 } 81 }
78 inode->i_blocks++; 82 inode->i_blocks++;
79 hpfs_i(inode)->mmu_private += 512; 83 hpfs_i(inode)->mmu_private += 512;
80 set_buffer_new(bh_result); 84 set_buffer_new(bh_result);
81 map_bh(bh_result, inode->i_sb, s); 85 map_bh(bh_result, inode->i_sb, s);
82 return 0; 86 ret_0:
87 r = 0;
88 ret_r:
89 hpfs_unlock(inode->i_sb);
90 return r;
83} 91}
84 92
85static int hpfs_writepage(struct page *page, struct writeback_control *wbc) 93static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -130,8 +138,11 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
130 ssize_t retval; 138 ssize_t retval;
131 139
132 retval = do_sync_write(file, buf, count, ppos); 140 retval = do_sync_write(file, buf, count, ppos);
133 if (retval > 0) 141 if (retval > 0) {
142 hpfs_lock(file->f_path.dentry->d_sb);
134 hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1; 143 hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1;
144 hpfs_unlock(file->f_path.dentry->d_sb);
145 }
135 return retval; 146 return retval;
136} 147}
137 148
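
In file.c, hpfs_file_fsync() now flushes the block device, hpfs_truncate() only asserts that the lock is already held, and hpfs_get_block() takes the filesystem lock itself and funnels every exit through the ret_0/ret_r labels so the unlock happens exactly once. A userspace sketch of that single-exit locking pattern, with a pthread mutex standing in for the hpfs mutex; names and the error paths are illustrative:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t fs_mutex = PTHREAD_MUTEX_INITIALIZER;

static int get_block_sketch(unsigned long iblock, unsigned long mapped_blocks)
{
        int r;

        pthread_mutex_lock(&fs_mutex);
        if (iblock < mapped_blocks) {           /* already mapped: success path */
                r = 0;
                goto out;
        }
        if (iblock != mapped_blocks) {          /* hole in the middle: refuse */
                r = -EIO;
                goto out;
        }
        /* ...allocation of a new block would happen here... */
        r = 0;
out:
        pthread_mutex_unlock(&fs_mutex);        /* every path unlocks exactly once */
        return r;
}

int main(void)
{
        printf("%d %d\n", get_block_sketch(2, 4), get_block_sketch(9, 4));
        return 0;
}
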
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 0e84c73cd9c4..8b0650aae328 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -19,9 +19,13 @@
19 For definitive information on HPFS, ask somebody else -- this is guesswork. 19 For definitive information on HPFS, ask somebody else -- this is guesswork.
20 There are certain to be many mistakes. */ 20 There are certain to be many mistakes. */
21 21
22#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
23#error unknown endian
24#endif
25
22/* Notation */ 26/* Notation */
23 27
24typedef unsigned secno; /* sector number, partition relative */ 28typedef u32 secno; /* sector number, partition relative */
25 29
26typedef secno dnode_secno; /* sector number of a dnode */ 30typedef secno dnode_secno; /* sector number of a dnode */
27typedef secno fnode_secno; /* sector number of an fnode */ 31typedef secno fnode_secno; /* sector number of an fnode */
@@ -38,28 +42,28 @@ typedef u32 time32_t; /* 32-bit time_t type */
38 42
39struct hpfs_boot_block 43struct hpfs_boot_block
40{ 44{
41 unsigned char jmp[3]; 45 u8 jmp[3];
42 unsigned char oem_id[8]; 46 u8 oem_id[8];
43 unsigned char bytes_per_sector[2]; /* 512 */ 47 u8 bytes_per_sector[2]; /* 512 */
44 unsigned char sectors_per_cluster; 48 u8 sectors_per_cluster;
45 unsigned char n_reserved_sectors[2]; 49 u8 n_reserved_sectors[2];
46 unsigned char n_fats; 50 u8 n_fats;
47 unsigned char n_rootdir_entries[2]; 51 u8 n_rootdir_entries[2];
48 unsigned char n_sectors_s[2]; 52 u8 n_sectors_s[2];
49 unsigned char media_byte; 53 u8 media_byte;
50 unsigned short sectors_per_fat; 54 u16 sectors_per_fat;
51 unsigned short sectors_per_track; 55 u16 sectors_per_track;
52 unsigned short heads_per_cyl; 56 u16 heads_per_cyl;
53 unsigned int n_hidden_sectors; 57 u32 n_hidden_sectors;
54 unsigned int n_sectors_l; /* size of partition */ 58 u32 n_sectors_l; /* size of partition */
55 unsigned char drive_number; 59 u8 drive_number;
56 unsigned char mbz; 60 u8 mbz;
57 unsigned char sig_28h; /* 28h */ 61 u8 sig_28h; /* 28h */
58 unsigned char vol_serno[4]; 62 u8 vol_serno[4];
59 unsigned char vol_label[11]; 63 u8 vol_label[11];
60 unsigned char sig_hpfs[8]; /* "HPFS " */ 64 u8 sig_hpfs[8]; /* "HPFS " */
61 unsigned char pad[448]; 65 u8 pad[448];
62 unsigned short magic; /* aa55 */ 66 u16 magic; /* aa55 */
63}; 67};
64 68
65 69
@@ -71,31 +75,29 @@ struct hpfs_boot_block
71 75
72struct hpfs_super_block 76struct hpfs_super_block
73{ 77{
74 unsigned magic; /* f995 e849 */ 78 u32 magic; /* f995 e849 */
75 unsigned magic1; /* fa53 e9c5, more magic? */ 79 u32 magic1; /* fa53 e9c5, more magic? */
76 /*unsigned huh202;*/ /* ?? 202 = N. of B. in 1.00390625 S.*/ 80 u8 version; /* version of a filesystem usually 2 */
77 char version; /* version of a filesystem usually 2 */ 81 u8 funcversion; /* functional version - oldest version
78 char funcversion; /* functional version - oldest version
79 of filesystem that can understand 82 of filesystem that can understand
80 this disk */ 83 this disk */
81 unsigned short int zero; /* 0 */ 84 u16 zero; /* 0 */
82 fnode_secno root; /* fnode of root directory */ 85 fnode_secno root; /* fnode of root directory */
83 secno n_sectors; /* size of filesystem */ 86 secno n_sectors; /* size of filesystem */
84 unsigned n_badblocks; /* number of bad blocks */ 87 u32 n_badblocks; /* number of bad blocks */
85 secno bitmaps; /* pointers to free space bit maps */ 88 secno bitmaps; /* pointers to free space bit maps */
86 unsigned zero1; /* 0 */ 89 u32 zero1; /* 0 */
87 secno badblocks; /* bad block list */ 90 secno badblocks; /* bad block list */
88 unsigned zero3; /* 0 */ 91 u32 zero3; /* 0 */
89 time32_t last_chkdsk; /* date last checked, 0 if never */ 92 time32_t last_chkdsk; /* date last checked, 0 if never */
90 /*unsigned zero4;*/ /* 0 */ 93 time32_t last_optimize; /* date last optimized, 0 if never */
91 time32_t last_optimize; /* date last optimized, 0 if never */
92 secno n_dir_band; /* number of sectors in dir band */ 94 secno n_dir_band; /* number of sectors in dir band */
93 secno dir_band_start; /* first sector in dir band */ 95 secno dir_band_start; /* first sector in dir band */
94 secno dir_band_end; /* last sector in dir band */ 96 secno dir_band_end; /* last sector in dir band */
95 secno dir_band_bitmap; /* free space map, 1 dnode per bit */ 97 secno dir_band_bitmap; /* free space map, 1 dnode per bit */
96 char volume_name[32]; /* not used */ 98 u8 volume_name[32]; /* not used */
97 secno user_id_table; /* 8 preallocated sectors - user id */ 99 secno user_id_table; /* 8 preallocated sectors - user id */
98 unsigned zero6[103]; /* 0 */ 100 u32 zero6[103]; /* 0 */
99}; 101};
100 102
101 103
@@ -107,44 +109,65 @@ struct hpfs_super_block
107 109
108struct hpfs_spare_block 110struct hpfs_spare_block
109{ 111{
110 unsigned magic; /* f991 1849 */ 112 u32 magic; /* f991 1849 */
111 unsigned magic1; /* fa52 29c5, more magic? */ 113 u32 magic1; /* fa52 29c5, more magic? */
112 114
113 unsigned dirty: 1; /* 0 clean, 1 "improperly stopped" */ 115#ifdef __LITTLE_ENDIAN
114 /*unsigned flag1234: 4;*/ /* unknown flags */ 116 u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */
115 unsigned sparedir_used: 1; /* spare dirblks used */ 117 u8 sparedir_used: 1; /* spare dirblks used */
116 unsigned hotfixes_used: 1; /* hotfixes used */ 118 u8 hotfixes_used: 1; /* hotfixes used */
117 unsigned bad_sector: 1; /* bad sector, corrupted disk (???) */ 119 u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
118 unsigned bad_bitmap: 1; /* bad bitmap */ 120 u8 bad_bitmap: 1; /* bad bitmap */
119 unsigned fast: 1; /* partition was fast formatted */ 121 u8 fast: 1; /* partition was fast formatted */
120 unsigned old_wrote: 1; /* old version wrote to partion */ 122 u8 old_wrote: 1; /* old version wrote to partion */
121 unsigned old_wrote_1: 1; /* old version wrote to partion (?) */ 123 u8 old_wrote_1: 1; /* old version wrote to partion (?) */
122 unsigned install_dasd_limits: 1; /* HPFS386 flags */ 124#else
123 unsigned resynch_dasd_limits: 1; 125 u8 old_wrote_1: 1; /* old version wrote to partion (?) */
124 unsigned dasd_limits_operational: 1; 126 u8 old_wrote: 1; /* old version wrote to partion */
125 unsigned multimedia_active: 1; 127 u8 fast: 1; /* partition was fast formatted */
126 unsigned dce_acls_active: 1; 128 u8 bad_bitmap: 1; /* bad bitmap */
127 unsigned dasd_limits_dirty: 1; 129 u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
128 unsigned flag67: 2; 130 u8 hotfixes_used: 1; /* hotfixes used */
129 unsigned char mm_contlgulty; 131 u8 sparedir_used: 1; /* spare dirblks used */
130 unsigned char unused; 132 u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */
133#endif
134
135#ifdef __LITTLE_ENDIAN
136 u8 install_dasd_limits: 1; /* HPFS386 flags */
137 u8 resynch_dasd_limits: 1;
138 u8 dasd_limits_operational: 1;
139 u8 multimedia_active: 1;
140 u8 dce_acls_active: 1;
141 u8 dasd_limits_dirty: 1;
142 u8 flag67: 2;
143#else
144 u8 flag67: 2;
145 u8 dasd_limits_dirty: 1;
146 u8 dce_acls_active: 1;
147 u8 multimedia_active: 1;
148 u8 dasd_limits_operational: 1;
149 u8 resynch_dasd_limits: 1;
150 u8 install_dasd_limits: 1; /* HPFS386 flags */
151#endif
152
153 u8 mm_contlgulty;
154 u8 unused;
131 155
132 secno hotfix_map; /* info about remapped bad sectors */ 156 secno hotfix_map; /* info about remapped bad sectors */
133 unsigned n_spares_used; /* number of hotfixes */ 157 u32 n_spares_used; /* number of hotfixes */
134 unsigned n_spares; /* number of spares in hotfix map */ 158 u32 n_spares; /* number of spares in hotfix map */
135 unsigned n_dnode_spares_free; /* spare dnodes unused */ 159 u32 n_dnode_spares_free; /* spare dnodes unused */
136 unsigned n_dnode_spares; /* length of spare_dnodes[] list, 160 u32 n_dnode_spares; /* length of spare_dnodes[] list,
137 follows in this block*/ 161 follows in this block*/
138 secno code_page_dir; /* code page directory block */ 162 secno code_page_dir; /* code page directory block */
139 unsigned n_code_pages; /* number of code pages */ 163 u32 n_code_pages; /* number of code pages */
140 /*unsigned large_numbers[2];*/ /* ?? */ 164 u32 super_crc; /* on HPFS386 and LAN Server this is
141 unsigned super_crc; /* on HPFS386 and LAN Server this is
142 checksum of superblock, on normal 165 checksum of superblock, on normal
143 OS/2 unused */ 166 OS/2 unused */
144 unsigned spare_crc; /* on HPFS386 checksum of spareblock */ 167 u32 spare_crc; /* on HPFS386 checksum of spareblock */
145 unsigned zero1[15]; /* unused */ 168 u32 zero1[15]; /* unused */
146 dnode_secno spare_dnodes[100]; /* emergency free dnode list */ 169 dnode_secno spare_dnodes[100]; /* emergency free dnode list */
147 unsigned zero2[1]; /* room for more? */ 170 u32 zero2[1]; /* room for more? */
148}; 171};
149 172
150/* The bad block list is 4 sectors long. The first word must be zero, 173/* The bad block list is 4 sectors long. The first word must be zero,
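
The spare-block flags show why the patch mirrors every bit-field declaration under #ifdef __LITTLE_ENDIAN / #else: C leaves bit-field ordering within a byte up to the ABI, so a single declaration would read different bits on big-endian machines. An alternative (not what the patch does) is to read the flag byte and mask it explicitly; the bit positions below follow the little-endian declaration, with dirty assumed to be bit 0:

#include <stdint.h>
#include <stdio.h>

#define SPARE_DIRTY          0x01u  /* 0 clean, 1 "improperly stopped" */
#define SPARE_SPAREDIR_USED  0x02u  /* spare dirblks used */
#define SPARE_HOTFIXES_USED  0x04u  /* hotfixes used */

int main(void)
{
        uint8_t flags = 0x05;        /* as read from the spare block on disk */

        printf("dirty=%u hotfixes_used=%u\n",
               !!(flags & SPARE_DIRTY), !!(flags & SPARE_HOTFIXES_USED));
        return 0;
}
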
@@ -179,18 +202,18 @@ struct hpfs_spare_block
179 202
180struct code_page_directory 203struct code_page_directory
181{ 204{
182 unsigned magic; /* 4945 21f7 */ 205 u32 magic; /* 4945 21f7 */
183 unsigned n_code_pages; /* number of pointers following */ 206 u32 n_code_pages; /* number of pointers following */
184 unsigned zero1[2]; 207 u32 zero1[2];
185 struct { 208 struct {
186 unsigned short ix; /* index */ 209 u16 ix; /* index */
187 unsigned short code_page_number; /* code page number */ 210 u16 code_page_number; /* code page number */
188 unsigned bounds; /* matches corresponding word 211 u32 bounds; /* matches corresponding word
189 in data block */ 212 in data block */
190 secno code_page_data; /* sector number of a code_page_data 213 secno code_page_data; /* sector number of a code_page_data
191 containing c.p. array */ 214 containing c.p. array */
192 unsigned short index; /* index in c.p. array in that sector*/ 215 u16 index; /* index in c.p. array in that sector*/
193 unsigned short unknown; /* some unknown value; usually 0; 216 u16 unknown; /* some unknown value; usually 0;
194 2 in Japanese version */ 217 2 in Japanese version */
195 } array[31]; /* unknown length */ 218 } array[31]; /* unknown length */
196}; 219};
@@ -201,21 +224,21 @@ struct code_page_directory
201 224
202struct code_page_data 225struct code_page_data
203{ 226{
204 unsigned magic; /* 8945 21f7 */ 227 u32 magic; /* 8945 21f7 */
205 unsigned n_used; /* # elements used in c_p_data[] */ 228 u32 n_used; /* # elements used in c_p_data[] */
206 unsigned bounds[3]; /* looks a bit like 229 u32 bounds[3]; /* looks a bit like
207 (beg1,end1), (beg2,end2) 230 (beg1,end1), (beg2,end2)
208 one byte each */ 231 one byte each */
209 unsigned short offs[3]; /* offsets from start of sector 232 u16 offs[3]; /* offsets from start of sector
210 to start of c_p_data[ix] */ 233 to start of c_p_data[ix] */
211 struct { 234 struct {
212 unsigned short ix; /* index */ 235 u16 ix; /* index */
213 unsigned short code_page_number; /* code page number */ 236 u16 code_page_number; /* code page number */
214 unsigned short unknown; /* the same as in cp directory */ 237 u16 unknown; /* the same as in cp directory */
215 unsigned char map[128]; /* upcase table for chars 80..ff */ 238 u8 map[128]; /* upcase table for chars 80..ff */
216 unsigned short zero2; 239 u16 zero2;
217 } code_page[3]; 240 } code_page[3];
218 unsigned char incognita[78]; 241 u8 incognita[78];
219}; 242};
220 243
221 244
@@ -255,50 +278,84 @@ struct code_page_data
255#define DNODE_MAGIC 0x77e40aae 278#define DNODE_MAGIC 0x77e40aae
256 279
257struct dnode { 280struct dnode {
258 unsigned magic; /* 77e4 0aae */ 281 u32 magic; /* 77e4 0aae */
259 unsigned first_free; /* offset from start of dnode to 282 u32 first_free; /* offset from start of dnode to
260 first free dir entry */ 283 first free dir entry */
261 unsigned root_dnode:1; /* Is it root dnode? */ 284#ifdef __LITTLE_ENDIAN
262 unsigned increment_me:31; /* some kind of activity counter? 285 u8 root_dnode: 1; /* Is it root dnode? */
263 Neither HPFS.IFS nor CHKDSK cares 286 u8 increment_me: 7; /* some kind of activity counter? */
287 /* Neither HPFS.IFS nor CHKDSK cares
288 if you change this word */
289#else
290 u8 increment_me: 7; /* some kind of activity counter? */
291 /* Neither HPFS.IFS nor CHKDSK cares
264 if you change this word */ 292 if you change this word */
293 u8 root_dnode: 1; /* Is it root dnode? */
294#endif
295 u8 increment_me2[3];
265 secno up; /* (root dnode) directory's fnode 296 secno up; /* (root dnode) directory's fnode
266 (nonroot) parent dnode */ 297 (nonroot) parent dnode */
267 dnode_secno self; /* pointer to this dnode */ 298 dnode_secno self; /* pointer to this dnode */
268 unsigned char dirent[2028]; /* one or more dirents */ 299 u8 dirent[2028]; /* one or more dirents */
269}; 300};
270 301
271struct hpfs_dirent { 302struct hpfs_dirent {
272 unsigned short length; /* offset to next dirent */ 303 u16 length; /* offset to next dirent */
273 unsigned first: 1; /* set on phony ^A^A (".") entry */ 304
274 unsigned has_acl: 1; 305#ifdef __LITTLE_ENDIAN
275 unsigned down: 1; /* down pointer present (after name) */ 306 u8 first: 1; /* set on phony ^A^A (".") entry */
276 unsigned last: 1; /* set on phony \377 entry */ 307 u8 has_acl: 1;
277 unsigned has_ea: 1; /* entry has EA */ 308 u8 down: 1; /* down pointer present (after name) */
278 unsigned has_xtd_perm: 1; /* has extended perm list (???) */ 309 u8 last: 1; /* set on phony \377 entry */
279 unsigned has_explicit_acl: 1; 310 u8 has_ea: 1; /* entry has EA */
280 unsigned has_needea: 1; /* ?? some EA has NEEDEA set 311 u8 has_xtd_perm: 1; /* has extended perm list (???) */
312 u8 has_explicit_acl: 1;
313 u8 has_needea: 1; /* ?? some EA has NEEDEA set
314 I have no idea why this is
315 interesting in a dir entry */
316#else
317 u8 has_needea: 1; /* ?? some EA has NEEDEA set
281 I have no idea why this is 318 I have no idea why this is
282 interesting in a dir entry */ 319 interesting in a dir entry */
283 unsigned read_only: 1; /* dos attrib */ 320 u8 has_explicit_acl: 1;
284 unsigned hidden: 1; /* dos attrib */ 321 u8 has_xtd_perm: 1; /* has extended perm list (???) */
285 unsigned system: 1; /* dos attrib */ 322 u8 has_ea: 1; /* entry has EA */
286 unsigned flag11: 1; /* would be volume label dos attrib */ 323 u8 last: 1; /* set on phony \377 entry */
287 unsigned directory: 1; /* dos attrib */ 324 u8 down: 1; /* down pointer present (after name) */
288 unsigned archive: 1; /* dos attrib */ 325 u8 has_acl: 1;
289 unsigned not_8x3: 1; /* name is not 8.3 */ 326 u8 first: 1; /* set on phony ^A^A (".") entry */
290 unsigned flag15: 1; 327#endif
328
329#ifdef __LITTLE_ENDIAN
330 u8 read_only: 1; /* dos attrib */
331 u8 hidden: 1; /* dos attrib */
332 u8 system: 1; /* dos attrib */
333 u8 flag11: 1; /* would be volume label dos attrib */
334 u8 directory: 1; /* dos attrib */
335 u8 archive: 1; /* dos attrib */
336 u8 not_8x3: 1; /* name is not 8.3 */
337 u8 flag15: 1;
338#else
339 u8 flag15: 1;
340 u8 not_8x3: 1; /* name is not 8.3 */
341 u8 archive: 1; /* dos attrib */
342 u8 directory: 1; /* dos attrib */
343 u8 flag11: 1; /* would be volume label dos attrib */
344 u8 system: 1; /* dos attrib */
345 u8 hidden: 1; /* dos attrib */
346 u8 read_only: 1; /* dos attrib */
347#endif
348
291 fnode_secno fnode; /* fnode giving allocation info */ 349 fnode_secno fnode; /* fnode giving allocation info */
292 time32_t write_date; /* mtime */ 350 time32_t write_date; /* mtime */
293 unsigned file_size; /* file length, bytes */ 351 u32 file_size; /* file length, bytes */
294 time32_t read_date; /* atime */ 352 time32_t read_date; /* atime */
295 time32_t creation_date; /* ctime */ 353 time32_t creation_date; /* ctime */
296 unsigned ea_size; /* total EA length, bytes */ 354 u32 ea_size; /* total EA length, bytes */
297 unsigned char no_of_acls : 3; /* number of ACL's */ 355 u8 no_of_acls; /* number of ACL's (low 3 bits) */
298 unsigned char reserver : 5; 356 u8 ix; /* code page index (of filename), see
299 unsigned char ix; /* code page index (of filename), see
300 struct code_page_data */ 357 struct code_page_data */
301 unsigned char namelen, name[1]; /* file name */ 358 u8 namelen, name[1]; /* file name */
302 /* dnode_secno down; btree down pointer, if present, 359 /* dnode_secno down; btree down pointer, if present,
303 follows name on next word boundary, or maybe it 360 follows name on next word boundary, or maybe it
304 precedes next dirent, which is on a word boundary. */ 361 precedes next dirent, which is on a word boundary. */
@@ -318,38 +375,50 @@ struct hpfs_dirent {
318 375
319struct bplus_leaf_node 376struct bplus_leaf_node
320{ 377{
321 unsigned file_secno; /* first file sector in extent */ 378 u32 file_secno; /* first file sector in extent */
322 unsigned length; /* length, sectors */ 379 u32 length; /* length, sectors */
323 secno disk_secno; /* first corresponding disk sector */ 380 secno disk_secno; /* first corresponding disk sector */
324}; 381};
325 382
326struct bplus_internal_node 383struct bplus_internal_node
327{ 384{
328 unsigned file_secno; /* subtree maps sectors < this */ 385 u32 file_secno; /* subtree maps sectors < this */
329 anode_secno down; /* pointer to subtree */ 386 anode_secno down; /* pointer to subtree */
330}; 387};
331 388
332struct bplus_header 389struct bplus_header
333{ 390{
334 unsigned hbff: 1; /* high bit of first free entry offset */ 391#ifdef __LITTLE_ENDIAN
335 unsigned flag1: 1; 392 u8 hbff: 1; /* high bit of first free entry offset */
336 unsigned flag2: 1; 393 u8 flag1234: 4;
337 unsigned flag3: 1; 394 u8 fnode_parent: 1; /* ? we're pointed to by an fnode,
338 unsigned flag4: 1;
339 unsigned fnode_parent: 1; /* ? we're pointed to by an fnode,
340 the data btree or some ea or the 395 the data btree or some ea or the
341 main ea bootage pointer ea_secno */ 396 main ea bootage pointer ea_secno */
342 /* also can get set in fnodes, which 397 /* also can get set in fnodes, which
343 may be a chkdsk glitch or may mean 398 may be a chkdsk glitch or may mean
344 this bit is irrelevant in fnodes, 399 this bit is irrelevant in fnodes,
345 or this interpretation is all wet */ 400 or this interpretation is all wet */
346 unsigned binary_search: 1; /* suggest binary search (unused) */ 401 u8 binary_search: 1; /* suggest binary search (unused) */
347 unsigned internal: 1; /* 1 -> (internal) tree of anodes 402 u8 internal: 1; /* 1 -> (internal) tree of anodes
403 0 -> (leaf) list of extents */
404#else
405 u8 internal: 1; /* 1 -> (internal) tree of anodes
348 0 -> (leaf) list of extents */ 406 0 -> (leaf) list of extents */
349 unsigned char fill[3]; 407 u8 binary_search: 1; /* suggest binary search (unused) */
350 unsigned char n_free_nodes; /* free nodes in following array */ 408 u8 fnode_parent: 1; /* ? we're pointed to by an fnode,
351 unsigned char n_used_nodes; /* used nodes in following array */ 409 the data btree or some ea or the
352 unsigned short first_free; /* offset from start of header to 410 main ea bootage pointer ea_secno */
411 /* also can get set in fnodes, which
412 may be a chkdsk glitch or may mean
413 this bit is irrelevant in fnodes,
414 or this interpretation is all wet */
415 u8 flag1234: 4;
416 u8 hbff: 1; /* high bit of first free entry offset */
417#endif
418 u8 fill[3];
419 u8 n_free_nodes; /* free nodes in following array */
420 u8 n_used_nodes; /* used nodes in following array */
421 u16 first_free; /* offset from start of header to
353 first free node in array */ 422 first free node in array */
354 union { 423 union {
355 struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving 424 struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -369,37 +438,38 @@ struct bplus_header
369 438
370struct fnode 439struct fnode
371{ 440{
372 unsigned magic; /* f7e4 0aae */ 441 u32 magic; /* f7e4 0aae */
373 unsigned zero1[2]; /* read history */ 442 u32 zero1[2]; /* read history */
374 unsigned char len, name[15]; /* true length, truncated name */ 443 u8 len, name[15]; /* true length, truncated name */
375 fnode_secno up; /* pointer to file's directory fnode */ 444 fnode_secno up; /* pointer to file's directory fnode */
376 /*unsigned zero2[3];*/
377 secno acl_size_l; 445 secno acl_size_l;
378 secno acl_secno; 446 secno acl_secno;
379 unsigned short acl_size_s; 447 u16 acl_size_s;
380 char acl_anode; 448 u8 acl_anode;
381 char zero2; /* history bit count */ 449 u8 zero2; /* history bit count */
382 unsigned ea_size_l; /* length of disk-resident ea's */ 450 u32 ea_size_l; /* length of disk-resident ea's */
383 secno ea_secno; /* first sector of disk-resident ea's*/ 451 secno ea_secno; /* first sector of disk-resident ea's*/
384 unsigned short ea_size_s; /* length of fnode-resident ea's */ 452 u16 ea_size_s; /* length of fnode-resident ea's */
385 453
386 unsigned flag0: 1; 454#ifdef __LITTLE_ENDIAN
387 unsigned ea_anode: 1; /* 1 -> ea_secno is an anode */ 455 u8 flag0: 1;
388 unsigned flag2: 1; 456 u8 ea_anode: 1; /* 1 -> ea_secno is an anode */
389 unsigned flag3: 1; 457 u8 flag234567: 6;
390 unsigned flag4: 1; 458#else
391 unsigned flag5: 1; 459 u8 flag234567: 6;
392 unsigned flag6: 1; 460 u8 ea_anode: 1; /* 1 -> ea_secno is an anode */
393 unsigned flag7: 1; 461 u8 flag0: 1;
394 unsigned dirflag: 1; /* 1 -> directory. first & only extent 462#endif
463
464#ifdef __LITTLE_ENDIAN
465 u8 dirflag: 1; /* 1 -> directory. first & only extent
395 points to dnode. */ 466 points to dnode. */
396 unsigned flag9: 1; 467 u8 flag9012345: 7;
397 unsigned flag10: 1; 468#else
398 unsigned flag11: 1; 469 u8 flag9012345: 7;
399 unsigned flag12: 1; 470 u8 dirflag: 1; /* 1 -> directory. first & only extent
400 unsigned flag13: 1; 471 points to dnode. */
401 unsigned flag14: 1; 472#endif
402 unsigned flag15: 1;
403 473
404 struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ 474 struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */
405 union { 475 union {
@@ -407,17 +477,16 @@ struct fnode
407 struct bplus_internal_node internal[12]; 477 struct bplus_internal_node internal[12];
408 } u; 478 } u;
409 479
410 unsigned file_size; /* file length, bytes */ 480 u32 file_size; /* file length, bytes */
411 unsigned n_needea; /* number of EA's with NEEDEA set */ 481 u32 n_needea; /* number of EA's with NEEDEA set */
412 char user_id[16]; /* unused */ 482 u8 user_id[16]; /* unused */
413 unsigned short ea_offs; /* offset from start of fnode 483 u16 ea_offs; /* offset from start of fnode
414 to first fnode-resident ea */ 484 to first fnode-resident ea */
415 char dasd_limit_treshhold; 485 u8 dasd_limit_treshhold;
416 char dasd_limit_delta; 486 u8 dasd_limit_delta;
417 unsigned dasd_limit; 487 u32 dasd_limit;
418 unsigned dasd_usage; 488 u32 dasd_usage;
419 /*unsigned zero5[2];*/ 489 u8 ea[316]; /* zero or more EA's, packed together
420 unsigned char ea[316]; /* zero or more EA's, packed together
421 with no alignment padding. 490 with no alignment padding.
422 (Do not use this name, get here 491 (Do not use this name, get here
423 via fnode + ea_offs. I think.) */ 492 via fnode + ea_offs. I think.) */
@@ -430,7 +499,7 @@ struct fnode
430 499
431struct anode 500struct anode
432{ 501{
433 unsigned magic; /* 37e4 0aae */ 502 u32 magic; /* 37e4 0aae */
434 anode_secno self; /* pointer to this anode */ 503 anode_secno self; /* pointer to this anode */
435 secno up; /* parent anode or fnode */ 504 secno up; /* parent anode or fnode */
436 505
@@ -440,7 +509,7 @@ struct anode
440 struct bplus_internal_node internal[60]; 509 struct bplus_internal_node internal[60];
441 } u; 510 } u;
442 511
443 unsigned fill[3]; /* unused */ 512 u32 fill[3]; /* unused */
444}; 513};
445 514
446 515
@@ -461,25 +530,31 @@ struct anode
461 530
462struct extended_attribute 531struct extended_attribute
463{ 532{
464 unsigned indirect: 1; /* 1 -> value gives sector number 533#ifdef __LITTLE_ENDIAN
534 u8 indirect: 1; /* 1 -> value gives sector number
465 where real value starts */ 535 where real value starts */
466 unsigned anode: 1; /* 1 -> sector is an anode 536 u8 anode: 1; /* 1 -> sector is an anode
537 that points to fragmented value */
538 u8 flag23456: 5;
539 u8 needea: 1; /* required ea */
540#else
541 u8 needea: 1; /* required ea */
542 u8 flag23456: 5;
543 u8 anode: 1; /* 1 -> sector is an anode
467 that points to fragmented value */ 544 that points to fragmented value */
468 unsigned flag2: 1; 545 u8 indirect: 1; /* 1 -> value gives sector number
469 unsigned flag3: 1; 546 where real value starts */
470 unsigned flag4: 1; 547#endif
471 unsigned flag5: 1; 548 u8 namelen; /* length of name, bytes */
472 unsigned flag6: 1; 549 u8 valuelen_lo; /* length of value, bytes */
473 unsigned needea: 1; /* required ea */ 550 u8 valuelen_hi; /* length of value, bytes */
474 unsigned char namelen; /* length of name, bytes */ 551 u8 name[0];
475 unsigned short valuelen; /* length of value, bytes */
476 unsigned char name[0];
477 /* 552 /*
478 unsigned char name[namelen]; ascii attrib name 553 u8 name[namelen]; ascii attrib name
479 unsigned char nul; terminating '\0', not counted 554 u8 nul; terminating '\0', not counted
480 unsigned char value[valuelen]; value, arbitrary 555 u8 value[valuelen]; value, arbitrary
481 if this.indirect, valuelen is 8 and the value is 556 if this.indirect, valuelen is 8 and the value is
482 unsigned length; real length of value, bytes 557 u32 length; real length of value, bytes
483 secno secno; sector address where it starts 558 secno secno; sector address where it starts
484 if this.anode, the above sector number is the root of an anode tree 559 if this.anode, the above sector number is the root of an anode tree
485 which points to the value. 560 which points to the value.
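
The trailing comment in struct extended_attribute describes the on-disk EA record: one flag byte, namelen, the two valuelen bytes, the NUL-terminated name, then the value; when indirect is set the 8-byte value is a u32 real length followed by a u32 sector number, which is exactly where ea_len() (+5+namelen) and ea_sec() (+9+namelen) read. A hedged userspace sketch of parsing such a record from a raw byte buffer; the record contents are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t rd_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* flags=indirect(0x01), namelen=2, valuelen=8, name "CP"+NUL,
         * then length=0x200 and secno=0x1b3, both little-endian */
        uint8_t rec[] = { 0x01, 2, 8, 0,  'C', 'P', 0,
                          0x00, 0x02, 0x00, 0x00,  0xb3, 0x01, 0x00, 0x00 };
        unsigned namelen   = rec[1];
        unsigned valuelen  = rec[2] + 256u * rec[3];
        const uint8_t *val = rec + 5 + namelen;   /* flags + lengths + name + NUL */

        printf("name=%.*s valuelen=%u\n", (int)namelen, (const char *)(rec + 4), valuelen);
        if ((rec[0] & 0x01) && valuelen == 8)     /* indirect EA */
                printf("real length=%u at sector %#x\n",
                       rd_le32(val), rd_le32(val + 4));
        return 0;
}
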
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c15adbca07ff..dd552f862c8f 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -13,6 +13,7 @@
13#include <linux/pagemap.h> 13#include <linux/pagemap.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <asm/unaligned.h>
16 17
17#include "hpfs.h" 18#include "hpfs.h"
18 19
@@ -51,18 +52,16 @@ struct hpfs_inode_info {
51 unsigned i_disk_sec; /* (files) minimalist cache of alloc info */ 52 unsigned i_disk_sec; /* (files) minimalist cache of alloc info */
52 unsigned i_n_secs; /* (files) minimalist cache of alloc info */ 53 unsigned i_n_secs; /* (files) minimalist cache of alloc info */
53 unsigned i_ea_size; /* size of extended attributes */ 54 unsigned i_ea_size; /* size of extended attributes */
54 unsigned i_conv : 2; /* (files) crlf->newline hackery */
55 unsigned i_ea_mode : 1; /* file's permission is stored in ea */ 55 unsigned i_ea_mode : 1; /* file's permission is stored in ea */
56 unsigned i_ea_uid : 1; /* file's uid is stored in ea */ 56 unsigned i_ea_uid : 1; /* file's uid is stored in ea */
57 unsigned i_ea_gid : 1; /* file's gid is stored in ea */ 57 unsigned i_ea_gid : 1; /* file's gid is stored in ea */
58 unsigned i_dirty : 1; 58 unsigned i_dirty : 1;
59 struct mutex i_mutex;
60 struct mutex i_parent_mutex;
61 loff_t **i_rddir_off; 59 loff_t **i_rddir_off;
62 struct inode vfs_inode; 60 struct inode vfs_inode;
63}; 61};
64 62
65struct hpfs_sb_info { 63struct hpfs_sb_info {
64 struct mutex hpfs_mutex; /* global hpfs lock */
66 ino_t sb_root; /* inode number of root dir */ 65 ino_t sb_root; /* inode number of root dir */
67 unsigned sb_fs_size; /* file system size, sectors */ 66 unsigned sb_fs_size; /* file system size, sectors */
68 unsigned sb_bitmaps; /* sector number of bitmap list */ 67 unsigned sb_bitmaps; /* sector number of bitmap list */
@@ -74,7 +73,6 @@ struct hpfs_sb_info {
74 uid_t sb_uid; /* uid from mount options */ 73 uid_t sb_uid; /* uid from mount options */
75 gid_t sb_gid; /* gid from mount options */ 74 gid_t sb_gid; /* gid from mount options */
76 umode_t sb_mode; /* mode from mount options */ 75 umode_t sb_mode; /* mode from mount options */
77 unsigned sb_conv : 2; /* crlf->newline hackery */
78 unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */ 76 unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */
79 unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */ 77 unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */
80 unsigned sb_chk : 2; /* checks: 0-no, 1-normal, 2-strict */ 78 unsigned sb_chk : 2; /* checks: 0-no, 1-normal, 2-strict */
@@ -87,20 +85,9 @@ struct hpfs_sb_info {
87 unsigned *sb_bmp_dir; /* main bitmap directory */ 85 unsigned *sb_bmp_dir; /* main bitmap directory */
88 unsigned sb_c_bitmap; /* current bitmap */ 86 unsigned sb_c_bitmap; /* current bitmap */
89 unsigned sb_max_fwd_alloc; /* max forwad allocation */ 87 unsigned sb_max_fwd_alloc; /* max forwad allocation */
90 struct mutex hpfs_creation_de; /* when creating dirents, nobody else
91 can alloc blocks */
92 /*unsigned sb_mounting : 1;*/
93 int sb_timeshift; 88 int sb_timeshift;
94}; 89};
95 90
96/*
97 * conv= options
98 */
99
100#define CONV_BINARY 0 /* no conversion */
101#define CONV_TEXT 1 /* crlf->newline */
102#define CONV_AUTO 2 /* decide based on file contents */
103
104/* Four 512-byte buffers and the 2k block obtained by concatenating them */ 91/* Four 512-byte buffers and the 2k block obtained by concatenating them */
105 92
106struct quad_buffer_head { 93struct quad_buffer_head {
@@ -113,7 +100,7 @@ struct quad_buffer_head {
113static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) 100static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
114{ 101{
115 CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); 102 CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
116 return *(dnode_secno *) ((void *) de + de->length - 4); 103 return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4));
117} 104}
118 105
119/* The first dir entry in a dnode */ 106/* The first dir entry in a dnode */
@@ -127,41 +114,46 @@ static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode)
127 114
128static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode) 115static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode)
129{ 116{
130 CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free)); 117 CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free)));
131 return (void *) dnode + dnode->first_free; 118 return (void *) dnode + le32_to_cpu(dnode->first_free);
132} 119}
133 120
134/* The dir entry after dir entry de */ 121/* The dir entry after dir entry de */
135 122
136static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de) 123static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de)
137{ 124{
138 CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length)); 125 CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length)));
139 return (void *) de + de->length; 126 return (void *) de + le16_to_cpu(de->length);
140} 127}
141 128
142static inline struct extended_attribute *fnode_ea(struct fnode *fnode) 129static inline struct extended_attribute *fnode_ea(struct fnode *fnode)
143{ 130{
144 return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s); 131 return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s));
145} 132}
146 133
147static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode) 134static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode)
148{ 135{
149 return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s); 136 return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s));
137}
138
139static unsigned ea_valuelen(struct extended_attribute *ea)
140{
141 return ea->valuelen_lo + 256 * ea->valuelen_hi;
150} 142}
151 143
152static inline struct extended_attribute *next_ea(struct extended_attribute *ea) 144static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
153{ 145{
154 return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen); 146 return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea));
155} 147}
156 148
157static inline secno ea_sec(struct extended_attribute *ea) 149static inline secno ea_sec(struct extended_attribute *ea)
158{ 150{
159 return *(secno *)((char *)ea + 9 + ea->namelen); 151 return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen)));
160} 152}
161 153
162static inline secno ea_len(struct extended_attribute *ea) 154static inline secno ea_len(struct extended_attribute *ea)
163{ 155{
164 return *(secno *)((char *)ea + 5 + ea->namelen); 156 return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen)));
165} 157}
166 158
167static inline char *ea_data(struct extended_attribute *ea) 159static inline char *ea_data(struct extended_attribute *ea)
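
The inline helpers above (dnode_end_de, de_next_de) now convert the stored offsets with le32_to_cpu/le16_to_cpu and sanity-check them via CHKCOND before use, since a corrupted length field would otherwise let the dirent walk run off the end of the dnode buffer. A userspace sketch of that kind of walk over a flat buffer, with a comparable bounds check; the record layout is reduced to just a 16-bit little-endian length plus payload:

#include <stdint.h>
#include <stdio.h>

static unsigned rd_le16(const uint8_t *p)
{
        return (unsigned)p[0] | ((unsigned)p[1] << 8);
}

int main(void)
{
        /* two records: total lengths 6 and 4 */
        uint8_t buf[] = { 6, 0, 'a', 'b', 'c', 'd',
                          4, 0, 'x', 'y' };
        unsigned pos = 0, end = sizeof(buf);

        while (pos + 2 <= end) {
                unsigned len = rd_le16(buf + pos);
                if (len < 2 || pos + len > end) {       /* corrupt length: stop */
                        fprintf(stderr, "bad record length %u at %u\n", len, pos);
                        break;
                }
                printf("record at %u, %u payload bytes\n", pos, len - 2);
                pos += len;                             /* step to the next record */
        }
        return 0;
}
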
@@ -186,13 +178,13 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
186 dst->not_8x3 = n; 178 dst->not_8x3 = n;
187} 179}
188 180
189static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n) 181static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n)
190{ 182{
191 int i; 183 int i;
192 if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; 184 if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
193 if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1; 185 if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1;
194 for (i = 1; i < n; i++) 186 for (i = 1; i < n; i++)
195 if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1)) 187 if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1))
196 return i + 1; 188 return i + 1;
197 return 0; 189 return 0;
198} 190}
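
tstbits() now takes a u32 * bitmap and runs each word through le32_to_cpu before testing, because the free-space bitmaps are kept little-endian on disk. A userspace sketch of testing whether n consecutive bits starting at b are set in such a bitmap; the word/bit indexing mirrors the helper, while the 0x4000 bound check of the real function is omitted here:

#include <stdint.h>
#include <stdio.h>

static uint32_t rd_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* returns 0 if all n bits starting at b are set (free), like tstbits() */
static unsigned tstbits_sketch(const uint8_t *bmp, unsigned b, unsigned n)
{
        unsigned i;
        for (i = 0; i < n; i++) {
                uint32_t word = rd_le32(bmp + (((b + i) >> 5) * 4));
                if (!((word >> ((b + i) & 0x1f)) & 1))
                        return i + 1;          /* bit b+i is not free */
        }
        return 0;
}

int main(void)
{
        /* one 32-bit word, LE bytes: bits 0..3 set, bit 4 clear */
        uint8_t bmp[4] = { 0x0f, 0x00, 0x00, 0x00 };
        printf("%u %u\n", tstbits_sketch(bmp, 0, 4), tstbits_sketch(bmp, 0, 5));
        return 0;
}
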
@@ -200,12 +192,12 @@ static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n)
200/* alloc.c */ 192/* alloc.c */
201 193
202int hpfs_chk_sectors(struct super_block *, secno, int, char *); 194int hpfs_chk_sectors(struct super_block *, secno, int, char *);
203secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int); 195secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int);
204int hpfs_alloc_if_possible(struct super_block *, secno); 196int hpfs_alloc_if_possible(struct super_block *, secno);
205void hpfs_free_sectors(struct super_block *, secno, unsigned); 197void hpfs_free_sectors(struct super_block *, secno, unsigned);
206int hpfs_check_free_dnodes(struct super_block *, int); 198int hpfs_check_free_dnodes(struct super_block *, int);
207void hpfs_free_dnode(struct super_block *, secno); 199void hpfs_free_dnode(struct super_block *, secno);
208struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int); 200struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
209struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); 201struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
210struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); 202struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
211 203
@@ -222,8 +214,6 @@ void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
222 214
223/* buffer.c */ 215/* buffer.c */
224 216
225void hpfs_lock_creation(struct super_block *);
226void hpfs_unlock_creation(struct super_block *);
227void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int); 217void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
228void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **); 218void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
229void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int); 219void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int);
@@ -247,7 +237,7 @@ void hpfs_del_pos(struct inode *, loff_t *);
247struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, 237struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
248 const unsigned char *, unsigned, secno); 238 const unsigned char *, unsigned, secno);
249int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned, 239int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
250 struct hpfs_dirent *, int); 240 struct hpfs_dirent *);
251int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); 241int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
252void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); 242void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
253dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); 243dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
@@ -303,7 +293,6 @@ int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
303 const unsigned char *, unsigned, int); 293 const unsigned char *, unsigned, int);
304int hpfs_is_name_long(const unsigned char *, unsigned); 294int hpfs_is_name_long(const unsigned char *, unsigned);
305void hpfs_adjust_length(const unsigned char *, unsigned *); 295void hpfs_adjust_length(const unsigned char *, unsigned *);
306void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
307 296
308/* namei.c */ 297/* namei.c */
309 298
@@ -346,21 +335,26 @@ static inline time32_t gmt_to_local(struct super_block *s, time_t t)
346/* 335/*
347 * Locking: 336 * Locking:
348 * 337 *
349 * hpfs_lock() is a leftover from the big kernel lock. 338 * hpfs_lock() locks the whole filesystem. It must be taken
350 * Right now, these functions are empty and only left 339 * on any method called by the VFS.
351 * for documentation purposes. The file system no longer
352 * works on SMP systems, so the lock is not needed
353 * any more.
354 * 340 *
355 * If someone is interested in making it work again, this 341 * We don't do any per-file locking anymore, it is hard to
356 * would be the place to start by adding a per-superblock 342 * review and HPFS is not performance-sensitive anyway.
357 * mutex and fixing all the bugs and performance issues
358 * caused by that.
359 */ 343 */
360static inline void hpfs_lock(struct super_block *s) 344static inline void hpfs_lock(struct super_block *s)
361{ 345{
346 struct hpfs_sb_info *sbi = hpfs_sb(s);
347 mutex_lock(&sbi->hpfs_mutex);
362} 348}
363 349
364static inline void hpfs_unlock(struct super_block *s) 350static inline void hpfs_unlock(struct super_block *s)
365{ 351{
352 struct hpfs_sb_info *sbi = hpfs_sb(s);
353 mutex_unlock(&sbi->hpfs_mutex);
354}
355
356static inline void hpfs_lock_assert(struct super_block *s)
357{
358 struct hpfs_sb_info *sbi = hpfs_sb(s);
359 WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex));
366} 360}
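The comment and helpers above replace the old per-inode mutexes with a single per-superblock mutex (sbi->hpfs_mutex, added to struct hpfs_sb_info elsewhere in this patch). The intended usage pattern is roughly the sketch below; the function names are invented for illustration and it is not buildable outside the kernel tree, only the lock/unlock/assert calls mirror the real driver.

/* Sketch only: every VFS-facing method brackets its body with
 * hpfs_lock()/hpfs_unlock(); internal helpers assert the lock. */
static int hpfs_example_worker(struct inode *inode)
{
	hpfs_lock_assert(inode->i_sb);   /* WARN_ON() fires if hpfs_mutex is not held */
	/* ... safe to read and modify on-disk structures here ... */
	return 0;
}

static int hpfs_example_op(struct inode *inode)
{
	int err;

	hpfs_lock(inode->i_sb);          /* one mutex serializes the whole filesystem */
	err = hpfs_example_worker(inode);
	hpfs_unlock(inode->i_sb);
	return err;
}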
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 87f1f787e767..338cd8368451 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -17,7 +17,6 @@ void hpfs_init_inode(struct inode *i)
17 i->i_uid = hpfs_sb(sb)->sb_uid; 17 i->i_uid = hpfs_sb(sb)->sb_uid;
18 i->i_gid = hpfs_sb(sb)->sb_gid; 18 i->i_gid = hpfs_sb(sb)->sb_gid;
19 i->i_mode = hpfs_sb(sb)->sb_mode; 19 i->i_mode = hpfs_sb(sb)->sb_mode;
20 hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
21 i->i_size = -1; 20 i->i_size = -1;
22 i->i_blocks = -1; 21 i->i_blocks = -1;
23 22
@@ -116,8 +115,8 @@ void hpfs_read_inode(struct inode *i)
116 i->i_mode |= S_IFDIR; 115 i->i_mode |= S_IFDIR;
117 i->i_op = &hpfs_dir_iops; 116 i->i_op = &hpfs_dir_iops;
118 i->i_fop = &hpfs_dir_ops; 117 i->i_fop = &hpfs_dir_ops;
119 hpfs_inode->i_parent_dir = fnode->up; 118 hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up);
120 hpfs_inode->i_dno = fnode->u.external[0].disk_secno; 119 hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno);
121 if (hpfs_sb(sb)->sb_chk >= 2) { 120 if (hpfs_sb(sb)->sb_chk >= 2) {
122 struct buffer_head *bh0; 121 struct buffer_head *bh0;
123 if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0); 122 if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
@@ -133,7 +132,7 @@ void hpfs_read_inode(struct inode *i)
133 i->i_op = &hpfs_file_iops; 132 i->i_op = &hpfs_file_iops;
134 i->i_fop = &hpfs_file_ops; 133 i->i_fop = &hpfs_file_ops;
135 i->i_nlink = 1; 134 i->i_nlink = 1;
136 i->i_size = fnode->file_size; 135 i->i_size = le32_to_cpu(fnode->file_size);
137 i->i_blocks = ((i->i_size + 511) >> 9) + 1; 136 i->i_blocks = ((i->i_size + 511) >> 9) + 1;
138 i->i_data.a_ops = &hpfs_aops; 137 i->i_data.a_ops = &hpfs_aops;
139 hpfs_i(i)->mmu_private = i->i_size; 138 hpfs_i(i)->mmu_private = i->i_size;
@@ -144,7 +143,7 @@ void hpfs_read_inode(struct inode *i)
144static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode) 143static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
145{ 144{
146 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 145 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
147 /*if (fnode->acl_size_l || fnode->acl_size_s) { 146 /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
148 Some unknown structures like ACL may be in fnode, 147 Some unknown structures like ACL may be in fnode,
149 we'd better not overwrite them 148 we'd better not overwrite them
150 hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino); 149 hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino);
@@ -187,9 +186,7 @@ void hpfs_write_inode(struct inode *i)
187 kfree(hpfs_inode->i_rddir_off); 186 kfree(hpfs_inode->i_rddir_off);
188 hpfs_inode->i_rddir_off = NULL; 187 hpfs_inode->i_rddir_off = NULL;
189 } 188 }
190 mutex_lock(&hpfs_inode->i_parent_mutex);
191 if (!i->i_nlink) { 189 if (!i->i_nlink) {
192 mutex_unlock(&hpfs_inode->i_parent_mutex);
193 return; 190 return;
194 } 191 }
195 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); 192 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
@@ -200,14 +197,9 @@ void hpfs_write_inode(struct inode *i)
200 hpfs_read_inode(parent); 197 hpfs_read_inode(parent);
201 unlock_new_inode(parent); 198 unlock_new_inode(parent);
202 } 199 }
203 mutex_lock(&hpfs_inode->i_mutex);
204 hpfs_write_inode_nolock(i); 200 hpfs_write_inode_nolock(i);
205 mutex_unlock(&hpfs_inode->i_mutex);
206 iput(parent); 201 iput(parent);
207 } else {
208 mark_inode_dirty(i);
209 } 202 }
210 mutex_unlock(&hpfs_inode->i_parent_mutex);
211} 203}
212 204
213void hpfs_write_inode_nolock(struct inode *i) 205void hpfs_write_inode_nolock(struct inode *i)
@@ -226,30 +218,30 @@ void hpfs_write_inode_nolock(struct inode *i)
226 } 218 }
227 } else de = NULL; 219 } else de = NULL;
228 if (S_ISREG(i->i_mode)) { 220 if (S_ISREG(i->i_mode)) {
229 fnode->file_size = i->i_size; 221 fnode->file_size = cpu_to_le32(i->i_size);
230 if (de) de->file_size = i->i_size; 222 if (de) de->file_size = cpu_to_le32(i->i_size);
231 } else if (S_ISDIR(i->i_mode)) { 223 } else if (S_ISDIR(i->i_mode)) {
232 fnode->file_size = 0; 224 fnode->file_size = cpu_to_le32(0);
233 if (de) de->file_size = 0; 225 if (de) de->file_size = cpu_to_le32(0);
234 } 226 }
235 hpfs_write_inode_ea(i, fnode); 227 hpfs_write_inode_ea(i, fnode);
236 if (de) { 228 if (de) {
237 de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); 229 de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
238 de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); 230 de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
239 de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); 231 de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
240 de->read_only = !(i->i_mode & 0222); 232 de->read_only = !(i->i_mode & 0222);
241 de->ea_size = hpfs_inode->i_ea_size; 233 de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
242 hpfs_mark_4buffers_dirty(&qbh); 234 hpfs_mark_4buffers_dirty(&qbh);
243 hpfs_brelse4(&qbh); 235 hpfs_brelse4(&qbh);
244 } 236 }
245 if (S_ISDIR(i->i_mode)) { 237 if (S_ISDIR(i->i_mode)) {
246 if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) { 238 if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
247 de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); 239 de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
248 de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); 240 de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
249 de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); 241 de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
250 de->read_only = !(i->i_mode & 0222); 242 de->read_only = !(i->i_mode & 0222);
251 de->ea_size = /*hpfs_inode->i_ea_size*/0; 243 de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
252 de->file_size = 0; 244 de->file_size = cpu_to_le32(0);
253 hpfs_mark_4buffers_dirty(&qbh); 245 hpfs_mark_4buffers_dirty(&qbh);
254 hpfs_brelse4(&qbh); 246 hpfs_brelse4(&qbh);
255 } else 247 } else
@@ -269,6 +261,10 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
269 hpfs_lock(inode->i_sb); 261 hpfs_lock(inode->i_sb);
270 if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) 262 if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
271 goto out_unlock; 263 goto out_unlock;
264 if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000)
265 goto out_unlock;
266 if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000)
267 goto out_unlock;
272 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) 268 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
273 goto out_unlock; 269 goto out_unlock;
274 270
@@ -284,7 +280,6 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
284 } 280 }
285 281
286 setattr_copy(inode, attr); 282 setattr_copy(inode, attr);
287 mark_inode_dirty(inode);
288 283
289 hpfs_write_inode(inode); 284 hpfs_write_inode(inode);
290 285
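The inode.c changes above all follow one rule: fields of on-disk structures are little-endian, so every read goes through le16_to_cpu()/le32_to_cpu() and every write through cpu_to_le16()/cpu_to_le32(). Below is a tiny standalone userspace illustration of that discipline, with htole32()/le32toh() from <endian.h> playing the role of the kernel conversions; struct fake_fnode and its fields are invented for the example and are not the real HPFS layout.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_fnode {                  /* invented, cut-down "on-disk" record */
	uint32_t file_size;          /* stored little-endian on disk */
	uint32_t up;                 /* stored little-endian on disk */
};

int main(void)
{
	unsigned char disk[sizeof(struct fake_fnode)];
	struct fake_fnode f;

	/* write path: convert CPU values before they reach the buffer */
	f.file_size = htole32(4096);
	f.up = htole32(0x1234);
	memcpy(disk, &f, sizeof f);

	/* read path: convert byte order back before using the values */
	memcpy(&f, disk, sizeof f);
	printf("size=%u up=%#x\n",
	       (unsigned)le32toh(f.file_size), (unsigned)le32toh(f.up));
	return 0;
}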
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 840d033ecee8..a790821366a7 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -21,7 +21,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
21 hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); 21 hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
22 return NULL; 22 return NULL;
23 } 23 }
24 sec = hpfs_sb(s)->sb_bmp_dir[bmp_block]; 24 sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
25 if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) { 25 if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
26 hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); 26 hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
27 return NULL; 27 return NULL;
@@ -46,18 +46,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
46 struct code_page_data *cpd; 46 struct code_page_data *cpd;
47 struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); 47 struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
48 if (!cp) return NULL; 48 if (!cp) return NULL;
49 if (cp->magic != CP_DIR_MAGIC) { 49 if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
50 printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic); 50 printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic));
51 brelse(bh); 51 brelse(bh);
52 return NULL; 52 return NULL;
53 } 53 }
54 if (!cp->n_code_pages) { 54 if (!le32_to_cpu(cp->n_code_pages)) {
55 printk("HPFS: n_code_pages == 0\n"); 55 printk("HPFS: n_code_pages == 0\n");
56 brelse(bh); 56 brelse(bh);
57 return NULL; 57 return NULL;
58 } 58 }
59 cpds = cp->array[0].code_page_data; 59 cpds = le32_to_cpu(cp->array[0].code_page_data);
60 cpi = cp->array[0].index; 60 cpi = le16_to_cpu(cp->array[0].index);
61 brelse(bh); 61 brelse(bh);
62 62
63 if (cpi >= 3) { 63 if (cpi >= 3) {
@@ -66,12 +66,12 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
66 } 66 }
67 67
68 if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; 68 if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
69 if ((unsigned)cpd->offs[cpi] > 0x178) { 69 if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
70 printk("HPFS: Code page index out of sector\n"); 70 printk("HPFS: Code page index out of sector\n");
71 brelse(bh); 71 brelse(bh);
72 return NULL; 72 return NULL;
73 } 73 }
74 ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6; 74 ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) { 75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
76 printk("HPFS: out of memory for code page table\n"); 76 printk("HPFS: out of memory for code page table\n");
77 brelse(bh); 77 brelse(bh);
@@ -125,7 +125,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
125 if (hpfs_sb(s)->sb_chk) { 125 if (hpfs_sb(s)->sb_chk) {
126 struct extended_attribute *ea; 126 struct extended_attribute *ea;
127 struct extended_attribute *ea_end; 127 struct extended_attribute *ea_end;
128 if (fnode->magic != FNODE_MAGIC) { 128 if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
129 hpfs_error(s, "bad magic on fnode %08lx", 129 hpfs_error(s, "bad magic on fnode %08lx",
130 (unsigned long)ino); 130 (unsigned long)ino);
131 goto bail; 131 goto bail;
@@ -138,7 +138,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
138 (unsigned long)ino); 138 (unsigned long)ino);
139 goto bail; 139 goto bail;
140 } 140 }
141 if (fnode->btree.first_free != 141 if (le16_to_cpu(fnode->btree.first_free) !=
142 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { 142 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
143 hpfs_error(s, 143 hpfs_error(s,
144 "bad first_free pointer in fnode %08lx", 144 "bad first_free pointer in fnode %08lx",
@@ -146,12 +146,12 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
146 goto bail; 146 goto bail;
147 } 147 }
148 } 148 }
149 if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || 149 if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
150 (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { 150 le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
151 hpfs_error(s, 151 hpfs_error(s,
152 "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", 152 "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
153 (unsigned long)ino, 153 (unsigned long)ino,
154 fnode->ea_offs, fnode->ea_size_s); 154 le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
155 goto bail; 155 goto bail;
156 } 156 }
157 ea = fnode_ea(fnode); 157 ea = fnode_ea(fnode);
@@ -178,16 +178,20 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
178 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; 178 if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
179 if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) 179 if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
180 if (hpfs_sb(s)->sb_chk) { 180 if (hpfs_sb(s)->sb_chk) {
181 if (anode->magic != ANODE_MAGIC || anode->self != ano) { 181 if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
182 hpfs_error(s, "bad magic on anode %08x", ano); 182 hpfs_error(s, "bad magic on anode %08x", ano);
183 goto bail; 183 goto bail;
184 } 184 }
185 if (le32_to_cpu(anode->self) != ano) {
186 hpfs_error(s, "self pointer invalid on anode %08x", ano);
187 goto bail;
188 }
185 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != 189 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
186 (anode->btree.internal ? 60 : 40)) { 190 (anode->btree.internal ? 60 : 40)) {
187 hpfs_error(s, "bad number of nodes in anode %08x", ano); 191 hpfs_error(s, "bad number of nodes in anode %08x", ano);
188 goto bail; 192 goto bail;
189 } 193 }
190 if (anode->btree.first_free != 194 if (le16_to_cpu(anode->btree.first_free) !=
191 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) { 195 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
192 hpfs_error(s, "bad first_free pointer in anode %08x", ano); 196 hpfs_error(s, "bad first_free pointer in anode %08x", ano);
193 goto bail; 197 goto bail;
@@ -219,26 +223,26 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
219 unsigned p, pp = 0; 223 unsigned p, pp = 0;
220 unsigned char *d = (unsigned char *)dnode; 224 unsigned char *d = (unsigned char *)dnode;
221 int b = 0; 225 int b = 0;
222 if (dnode->magic != DNODE_MAGIC) { 226 if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
223 hpfs_error(s, "bad magic on dnode %08x", secno); 227 hpfs_error(s, "bad magic on dnode %08x", secno);
224 goto bail; 228 goto bail;
225 } 229 }
226 if (dnode->self != secno) 230 if (le32_to_cpu(dnode->self) != secno)
227 hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self); 231 hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
228 /* Check dirents - bad dirents would cause infinite 232 /* Check dirents - bad dirents would cause infinite
229 loops or shooting to memory */ 233 loops or shooting to memory */
230 if (dnode->first_free > 2048/* || dnode->first_free < 84*/) { 234 if (le32_to_cpu(dnode->first_free) > 2048) {
231 hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free); 235 hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
232 goto bail; 236 goto bail;
233 } 237 }
234 for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) { 238 for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
235 struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); 239 struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
236 if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) { 240 if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
237 hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); 241 hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
238 goto bail; 242 goto bail;
239 } 243 }
240 if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) { 244 if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
241 if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok; 245 if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
242 hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); 246 hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
243 goto bail; 247 goto bail;
244 } 248 }
@@ -251,7 +255,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
251 pp = p; 255 pp = p;
252 256
253 } 257 }
254 if (p != dnode->first_free) { 258 if (p != le32_to_cpu(dnode->first_free)) {
255 hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); 259 hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
256 goto bail; 260 goto bail;
257 } 261 }
@@ -277,7 +281,7 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
277 if (!fnode) 281 if (!fnode)
278 return 0; 282 return 0;
279 283
280 dno = fnode->u.external[0].disk_secno; 284 dno = le32_to_cpu(fnode->u.external[0].disk_secno);
281 brelse(bh); 285 brelse(bh);
282 return dno; 286 return dno;
283} 287}
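The map.c hunks above tighten the sanity checks applied to structures as they are read from disk (magic, self pointer, first_free, per-dirent length), now comparing against properly converted little-endian values. The standalone sketch below reproduces only the dirent-walk bounds check in userspace; the record layout and the sample node are simplified and hypothetical, only the checking pattern matches hpfs_map_dnode().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NODE_SIZE 2048

/* Walk variable-length entries starting at offset 20 and refuse anything
 * whose length field could cause an infinite loop or run past the node. */
static int check_node(const unsigned char *d, unsigned first_free)
{
	unsigned p;

	if (first_free > NODE_SIZE)
		return -1;                       /* header already bogus */
	for (p = 20; p < first_free; ) {
		unsigned len;
		if (p + 2 > NODE_SIZE)
			return -1;               /* length field would overrun */
		len = d[p] | (d[p + 1] << 8);    /* little-endian u16 */
		if (len < 32 || len > 292 || (len & 3) || p + len > NODE_SIZE)
			return -1;               /* entry would loop or overrun */
		p += len;
	}
	return p == first_free ? 0 : -1;         /* last entry must line up */
}

int main(void)
{
	unsigned char node[NODE_SIZE] = { 0 };

	node[20] = 32;                           /* one minimal 32-byte entry */
	printf("%s\n", check_node(node, 52) == 0 ? "ok" : "corrupt");
	return 0;
}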
diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c
index f24736d7a439..9acdf338def0 100644
--- a/fs/hpfs/name.c
+++ b/fs/hpfs/name.c
@@ -8,39 +8,6 @@
8 8
9#include "hpfs_fn.h" 9#include "hpfs_fn.h"
10 10
11static const char *text_postfix[]={
12".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
13".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
14".RC", ".TEX", ".TXT", ".Y", ""};
15
16static const char *text_prefix[]={
17"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
18"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
19
20void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
21{
22 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
23 int i;
24 if (hpfs_inode->i_conv != CONV_AUTO) return;
25 for (i = 0; *text_postfix[i]; i++) {
26 int l = strlen(text_postfix[i]);
27 if (l <= len)
28 if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0))
29 goto text;
30 }
31 for (i = 0; *text_prefix[i]; i++) {
32 int l = strlen(text_prefix[i]);
33 if (l <= len)
34 if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0))
35 goto text;
36 }
37 hpfs_inode->i_conv = CONV_BINARY;
38 return;
39 text:
40 hpfs_inode->i_conv = CONV_TEXT;
41 return;
42}
43
44static inline int not_allowed_char(unsigned char c) 11static inline int not_allowed_char(unsigned char c)
45{ 12{
46 return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || 13 return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' ||
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index d5f8c8a19023..1f05839c27a7 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -29,7 +29,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
29 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); 29 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
30 if (!fnode) 30 if (!fnode)
31 goto bail; 31 goto bail;
32 dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1); 32 dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0);
33 if (!dnode) 33 if (!dnode)
34 goto bail1; 34 goto bail1;
35 memset(&dee, 0, sizeof dee); 35 memset(&dee, 0, sizeof dee);
@@ -37,8 +37,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
37 if (!(mode & 0222)) dee.read_only = 1; 37 if (!(mode & 0222)) dee.read_only = 1;
38 /*dee.archive = 0;*/ 38 /*dee.archive = 0;*/
39 dee.hidden = name[0] == '.'; 39 dee.hidden = name[0] == '.';
40 dee.fnode = fno; 40 dee.fnode = cpu_to_le32(fno);
41 dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 41 dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
42 result = new_inode(dir->i_sb); 42 result = new_inode(dir->i_sb);
43 if (!result) 43 if (!result)
44 goto bail2; 44 goto bail2;
@@ -46,7 +46,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
46 result->i_ino = fno; 46 result->i_ino = fno;
47 hpfs_i(result)->i_parent_dir = dir->i_ino; 47 hpfs_i(result)->i_parent_dir = dir->i_ino;
48 hpfs_i(result)->i_dno = dno; 48 hpfs_i(result)->i_dno = dno;
49 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 49 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
50 result->i_ctime.tv_nsec = 0; 50 result->i_ctime.tv_nsec = 0;
51 result->i_mtime.tv_nsec = 0; 51 result->i_mtime.tv_nsec = 0;
52 result->i_atime.tv_nsec = 0; 52 result->i_atime.tv_nsec = 0;
@@ -60,8 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
60 if (dee.read_only) 60 if (dee.read_only)
61 result->i_mode &= ~0222; 61 result->i_mode &= ~0222;
62 62
63 mutex_lock(&hpfs_i(dir)->i_mutex); 63 r = hpfs_add_dirent(dir, name, len, &dee);
64 r = hpfs_add_dirent(dir, name, len, &dee, 0);
65 if (r == 1) 64 if (r == 1)
66 goto bail3; 65 goto bail3;
67 if (r == -1) { 66 if (r == -1) {
@@ -70,21 +69,21 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70 } 69 }
71 fnode->len = len; 70 fnode->len = len;
72 memcpy(fnode->name, name, len > 15 ? 15 : len); 71 memcpy(fnode->name, name, len > 15 ? 15 : len);
73 fnode->up = dir->i_ino; 72 fnode->up = cpu_to_le32(dir->i_ino);
74 fnode->dirflag = 1; 73 fnode->dirflag = 1;
75 fnode->btree.n_free_nodes = 7; 74 fnode->btree.n_free_nodes = 7;
76 fnode->btree.n_used_nodes = 1; 75 fnode->btree.n_used_nodes = 1;
77 fnode->btree.first_free = 0x14; 76 fnode->btree.first_free = cpu_to_le16(0x14);
78 fnode->u.external[0].disk_secno = dno; 77 fnode->u.external[0].disk_secno = cpu_to_le32(dno);
79 fnode->u.external[0].file_secno = -1; 78 fnode->u.external[0].file_secno = cpu_to_le32(-1);
80 dnode->root_dnode = 1; 79 dnode->root_dnode = 1;
81 dnode->up = fno; 80 dnode->up = cpu_to_le32(fno);
82 de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0); 81 de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0);
83 de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds()); 82 de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
84 if (!(mode & 0222)) de->read_only = 1; 83 if (!(mode & 0222)) de->read_only = 1;
85 de->first = de->directory = 1; 84 de->first = de->directory = 1;
86 /*de->hidden = de->system = 0;*/ 85 /*de->hidden = de->system = 0;*/
87 de->fnode = fno; 86 de->fnode = cpu_to_le32(fno);
88 mark_buffer_dirty(bh); 87 mark_buffer_dirty(bh);
89 brelse(bh); 88 brelse(bh);
90 hpfs_mark_4buffers_dirty(&qbh0); 89 hpfs_mark_4buffers_dirty(&qbh0);
@@ -101,11 +100,9 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
101 hpfs_write_inode_nolock(result); 100 hpfs_write_inode_nolock(result);
102 } 101 }
103 d_instantiate(dentry, result); 102 d_instantiate(dentry, result);
104 mutex_unlock(&hpfs_i(dir)->i_mutex);
105 hpfs_unlock(dir->i_sb); 103 hpfs_unlock(dir->i_sb);
106 return 0; 104 return 0;
107bail3: 105bail3:
108 mutex_unlock(&hpfs_i(dir)->i_mutex);
109 iput(result); 106 iput(result);
110bail2: 107bail2:
111 hpfs_brelse4(&qbh0); 108 hpfs_brelse4(&qbh0);
@@ -140,8 +137,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
140 if (!(mode & 0222)) dee.read_only = 1; 137 if (!(mode & 0222)) dee.read_only = 1;
141 dee.archive = 1; 138 dee.archive = 1;
142 dee.hidden = name[0] == '.'; 139 dee.hidden = name[0] == '.';
143 dee.fnode = fno; 140 dee.fnode = cpu_to_le32(fno);
144 dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 141 dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
145 142
146 result = new_inode(dir->i_sb); 143 result = new_inode(dir->i_sb);
147 if (!result) 144 if (!result)
@@ -154,9 +151,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
154 result->i_op = &hpfs_file_iops; 151 result->i_op = &hpfs_file_iops;
155 result->i_fop = &hpfs_file_ops; 152 result->i_fop = &hpfs_file_ops;
156 result->i_nlink = 1; 153 result->i_nlink = 1;
157 hpfs_decide_conv(result, name, len);
158 hpfs_i(result)->i_parent_dir = dir->i_ino; 154 hpfs_i(result)->i_parent_dir = dir->i_ino;
159 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 155 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
160 result->i_ctime.tv_nsec = 0; 156 result->i_ctime.tv_nsec = 0;
161 result->i_mtime.tv_nsec = 0; 157 result->i_mtime.tv_nsec = 0;
162 result->i_atime.tv_nsec = 0; 158 result->i_atime.tv_nsec = 0;
@@ -168,8 +164,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
168 result->i_data.a_ops = &hpfs_aops; 164 result->i_data.a_ops = &hpfs_aops;
169 hpfs_i(result)->mmu_private = 0; 165 hpfs_i(result)->mmu_private = 0;
170 166
171 mutex_lock(&hpfs_i(dir)->i_mutex); 167 r = hpfs_add_dirent(dir, name, len, &dee);
172 r = hpfs_add_dirent(dir, name, len, &dee, 0);
173 if (r == 1) 168 if (r == 1)
174 goto bail2; 169 goto bail2;
175 if (r == -1) { 170 if (r == -1) {
@@ -178,7 +173,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
178 } 173 }
179 fnode->len = len; 174 fnode->len = len;
180 memcpy(fnode->name, name, len > 15 ? 15 : len); 175 memcpy(fnode->name, name, len > 15 ? 15 : len);
181 fnode->up = dir->i_ino; 176 fnode->up = cpu_to_le32(dir->i_ino);
182 mark_buffer_dirty(bh); 177 mark_buffer_dirty(bh);
183 brelse(bh); 178 brelse(bh);
184 179
@@ -193,12 +188,10 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
193 hpfs_write_inode_nolock(result); 188 hpfs_write_inode_nolock(result);
194 } 189 }
195 d_instantiate(dentry, result); 190 d_instantiate(dentry, result);
196 mutex_unlock(&hpfs_i(dir)->i_mutex);
197 hpfs_unlock(dir->i_sb); 191 hpfs_unlock(dir->i_sb);
198 return 0; 192 return 0;
199 193
200bail2: 194bail2:
201 mutex_unlock(&hpfs_i(dir)->i_mutex);
202 iput(result); 195 iput(result);
203bail1: 196bail1:
204 brelse(bh); 197 brelse(bh);
@@ -232,8 +225,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
232 if (!(mode & 0222)) dee.read_only = 1; 225 if (!(mode & 0222)) dee.read_only = 1;
233 dee.archive = 1; 226 dee.archive = 1;
234 dee.hidden = name[0] == '.'; 227 dee.hidden = name[0] == '.';
235 dee.fnode = fno; 228 dee.fnode = cpu_to_le32(fno);
236 dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 229 dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
237 230
238 result = new_inode(dir->i_sb); 231 result = new_inode(dir->i_sb);
239 if (!result) 232 if (!result)
@@ -242,7 +235,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
242 hpfs_init_inode(result); 235 hpfs_init_inode(result);
243 result->i_ino = fno; 236 result->i_ino = fno;
244 hpfs_i(result)->i_parent_dir = dir->i_ino; 237 hpfs_i(result)->i_parent_dir = dir->i_ino;
245 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 238 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
246 result->i_ctime.tv_nsec = 0; 239 result->i_ctime.tv_nsec = 0;
247 result->i_mtime.tv_nsec = 0; 240 result->i_mtime.tv_nsec = 0;
248 result->i_atime.tv_nsec = 0; 241 result->i_atime.tv_nsec = 0;
@@ -254,8 +247,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
254 result->i_blocks = 1; 247 result->i_blocks = 1;
255 init_special_inode(result, mode, rdev); 248 init_special_inode(result, mode, rdev);
256 249
257 mutex_lock(&hpfs_i(dir)->i_mutex); 250 r = hpfs_add_dirent(dir, name, len, &dee);
258 r = hpfs_add_dirent(dir, name, len, &dee, 0);
259 if (r == 1) 251 if (r == 1)
260 goto bail2; 252 goto bail2;
261 if (r == -1) { 253 if (r == -1) {
@@ -264,19 +256,17 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
264 } 256 }
265 fnode->len = len; 257 fnode->len = len;
266 memcpy(fnode->name, name, len > 15 ? 15 : len); 258 memcpy(fnode->name, name, len > 15 ? 15 : len);
267 fnode->up = dir->i_ino; 259 fnode->up = cpu_to_le32(dir->i_ino);
268 mark_buffer_dirty(bh); 260 mark_buffer_dirty(bh);
269 261
270 insert_inode_hash(result); 262 insert_inode_hash(result);
271 263
272 hpfs_write_inode_nolock(result); 264 hpfs_write_inode_nolock(result);
273 d_instantiate(dentry, result); 265 d_instantiate(dentry, result);
274 mutex_unlock(&hpfs_i(dir)->i_mutex);
275 brelse(bh); 266 brelse(bh);
276 hpfs_unlock(dir->i_sb); 267 hpfs_unlock(dir->i_sb);
277 return 0; 268 return 0;
278bail2: 269bail2:
279 mutex_unlock(&hpfs_i(dir)->i_mutex);
280 iput(result); 270 iput(result);
281bail1: 271bail1:
282 brelse(bh); 272 brelse(bh);
@@ -310,8 +300,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
310 memset(&dee, 0, sizeof dee); 300 memset(&dee, 0, sizeof dee);
311 dee.archive = 1; 301 dee.archive = 1;
312 dee.hidden = name[0] == '.'; 302 dee.hidden = name[0] == '.';
313 dee.fnode = fno; 303 dee.fnode = cpu_to_le32(fno);
314 dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); 304 dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
315 305
316 result = new_inode(dir->i_sb); 306 result = new_inode(dir->i_sb);
317 if (!result) 307 if (!result)
@@ -319,7 +309,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
319 result->i_ino = fno; 309 result->i_ino = fno;
320 hpfs_init_inode(result); 310 hpfs_init_inode(result);
321 hpfs_i(result)->i_parent_dir = dir->i_ino; 311 hpfs_i(result)->i_parent_dir = dir->i_ino;
322 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 312 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
323 result->i_ctime.tv_nsec = 0; 313 result->i_ctime.tv_nsec = 0;
324 result->i_mtime.tv_nsec = 0; 314 result->i_mtime.tv_nsec = 0;
325 result->i_atime.tv_nsec = 0; 315 result->i_atime.tv_nsec = 0;
@@ -333,8 +323,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
333 result->i_op = &page_symlink_inode_operations; 323 result->i_op = &page_symlink_inode_operations;
334 result->i_data.a_ops = &hpfs_symlink_aops; 324 result->i_data.a_ops = &hpfs_symlink_aops;
335 325
336 mutex_lock(&hpfs_i(dir)->i_mutex); 326 r = hpfs_add_dirent(dir, name, len, &dee);
337 r = hpfs_add_dirent(dir, name, len, &dee, 0);
338 if (r == 1) 327 if (r == 1)
339 goto bail2; 328 goto bail2;
340 if (r == -1) { 329 if (r == -1) {
@@ -343,7 +332,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
343 } 332 }
344 fnode->len = len; 333 fnode->len = len;
345 memcpy(fnode->name, name, len > 15 ? 15 : len); 334 memcpy(fnode->name, name, len > 15 ? 15 : len);
346 fnode->up = dir->i_ino; 335 fnode->up = cpu_to_le32(dir->i_ino);
347 hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); 336 hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
348 mark_buffer_dirty(bh); 337 mark_buffer_dirty(bh);
349 brelse(bh); 338 brelse(bh);
@@ -352,11 +341,9 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
352 341
353 hpfs_write_inode_nolock(result); 342 hpfs_write_inode_nolock(result);
354 d_instantiate(dentry, result); 343 d_instantiate(dentry, result);
355 mutex_unlock(&hpfs_i(dir)->i_mutex);
356 hpfs_unlock(dir->i_sb); 344 hpfs_unlock(dir->i_sb);
357 return 0; 345 return 0;
358bail2: 346bail2:
359 mutex_unlock(&hpfs_i(dir)->i_mutex);
360 iput(result); 347 iput(result);
361bail1: 348bail1:
362 brelse(bh); 349 brelse(bh);
@@ -374,7 +361,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
374 struct hpfs_dirent *de; 361 struct hpfs_dirent *de;
375 struct inode *inode = dentry->d_inode; 362 struct inode *inode = dentry->d_inode;
376 dnode_secno dno; 363 dnode_secno dno;
377 fnode_secno fno;
378 int r; 364 int r;
379 int rep = 0; 365 int rep = 0;
380 int err; 366 int err;
@@ -382,8 +368,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
382 hpfs_lock(dir->i_sb); 368 hpfs_lock(dir->i_sb);
383 hpfs_adjust_length(name, &len); 369 hpfs_adjust_length(name, &len);
384again: 370again:
385 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
386 mutex_lock(&hpfs_i(dir)->i_mutex);
387 err = -ENOENT; 371 err = -ENOENT;
388 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 372 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
389 if (!de) 373 if (!de)
@@ -397,7 +381,6 @@ again:
397 if (de->directory) 381 if (de->directory)
398 goto out1; 382 goto out1;
399 383
400 fno = de->fnode;
401 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); 384 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
402 switch (r) { 385 switch (r) {
403 case 1: 386 case 1:
@@ -410,8 +393,6 @@ again:
410 if (rep++) 393 if (rep++)
411 break; 394 break;
412 395
413 mutex_unlock(&hpfs_i(dir)->i_mutex);
414 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
415 dentry_unhash(dentry); 396 dentry_unhash(dentry);
416 if (!d_unhashed(dentry)) { 397 if (!d_unhashed(dentry)) {
417 dput(dentry); 398 dput(dentry);
@@ -445,8 +426,6 @@ again:
445out1: 426out1:
446 hpfs_brelse4(&qbh); 427 hpfs_brelse4(&qbh);
447out: 428out:
448 mutex_unlock(&hpfs_i(dir)->i_mutex);
449 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
450 hpfs_unlock(dir->i_sb); 429 hpfs_unlock(dir->i_sb);
451 return err; 430 return err;
452} 431}
@@ -459,15 +438,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
459 struct hpfs_dirent *de; 438 struct hpfs_dirent *de;
460 struct inode *inode = dentry->d_inode; 439 struct inode *inode = dentry->d_inode;
461 dnode_secno dno; 440 dnode_secno dno;
462 fnode_secno fno;
463 int n_items = 0; 441 int n_items = 0;
464 int err; 442 int err;
465 int r; 443 int r;
466 444
467 hpfs_adjust_length(name, &len); 445 hpfs_adjust_length(name, &len);
468 hpfs_lock(dir->i_sb); 446 hpfs_lock(dir->i_sb);
469 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
470 mutex_lock(&hpfs_i(dir)->i_mutex);
471 err = -ENOENT; 447 err = -ENOENT;
472 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 448 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
473 if (!de) 449 if (!de)
@@ -486,7 +462,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
486 if (n_items) 462 if (n_items)
487 goto out1; 463 goto out1;
488 464
489 fno = de->fnode;
490 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); 465 r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
491 switch (r) { 466 switch (r) {
492 case 1: 467 case 1:
@@ -505,8 +480,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
505out1: 480out1:
506 hpfs_brelse4(&qbh); 481 hpfs_brelse4(&qbh);
507out: 482out:
508 mutex_unlock(&hpfs_i(dir)->i_mutex);
509 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
510 hpfs_unlock(dir->i_sb); 483 hpfs_unlock(dir->i_sb);
511 return err; 484 return err;
512} 485}
@@ -568,12 +541,6 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
568 541
569 hpfs_lock(i->i_sb); 542 hpfs_lock(i->i_sb);
570 /* order doesn't matter, due to VFS exclusion */ 543 /* order doesn't matter, due to VFS exclusion */
571 mutex_lock(&hpfs_i(i)->i_parent_mutex);
572 if (new_inode)
573 mutex_lock(&hpfs_i(new_inode)->i_parent_mutex);
574 mutex_lock(&hpfs_i(old_dir)->i_mutex);
575 if (new_dir != old_dir)
576 mutex_lock(&hpfs_i(new_dir)->i_mutex);
577 544
578 /* Erm? Moving over the empty non-busy directory is perfectly legal */ 545 /* Erm? Moving over the empty non-busy directory is perfectly legal */
579 if (new_inode && S_ISDIR(new_inode->i_mode)) { 546 if (new_inode && S_ISDIR(new_inode->i_mode)) {
@@ -610,9 +577,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
610 577
611 if (new_dir == old_dir) hpfs_brelse4(&qbh); 578 if (new_dir == old_dir) hpfs_brelse4(&qbh);
612 579
613 hpfs_lock_creation(i->i_sb); 580 if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) {
614 if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) {
615 hpfs_unlock_creation(i->i_sb);
616 if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!"); 581 if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!");
617 err = r == 1 ? -ENOSPC : -EFSERROR; 582 err = r == 1 ? -ENOSPC : -EFSERROR;
618 if (new_dir != old_dir) hpfs_brelse4(&qbh); 583 if (new_dir != old_dir) hpfs_brelse4(&qbh);
@@ -621,20 +586,17 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
621 586
622 if (new_dir == old_dir) 587 if (new_dir == old_dir)
623 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { 588 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
624 hpfs_unlock_creation(i->i_sb);
625 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); 589 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
626 err = -ENOENT; 590 err = -ENOENT;
627 goto end1; 591 goto end1;
628 } 592 }
629 593
630 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) { 594 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) {
631 hpfs_unlock_creation(i->i_sb);
632 hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent"); 595 hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent");
633 err = r == 2 ? -ENOSPC : -EFSERROR; 596 err = r == 2 ? -ENOSPC : -EFSERROR;
634 goto end1; 597 goto end1;
635 } 598 }
636 hpfs_unlock_creation(i->i_sb); 599
637
638 end: 600 end:
639 hpfs_i(i)->i_parent_dir = new_dir->i_ino; 601 hpfs_i(i)->i_parent_dir = new_dir->i_ino;
640 if (S_ISDIR(i->i_mode)) { 602 if (S_ISDIR(i->i_mode)) {
@@ -642,22 +604,14 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
642 drop_nlink(old_dir); 604 drop_nlink(old_dir);
643 } 605 }
644 if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) { 606 if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) {
645 fnode->up = new_dir->i_ino; 607 fnode->up = cpu_to_le32(new_dir->i_ino);
646 fnode->len = new_len; 608 fnode->len = new_len;
647 memcpy(fnode->name, new_name, new_len>15?15:new_len); 609 memcpy(fnode->name, new_name, new_len>15?15:new_len);
648 if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len); 610 if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len);
649 mark_buffer_dirty(bh); 611 mark_buffer_dirty(bh);
650 brelse(bh); 612 brelse(bh);
651 } 613 }
652 hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
653 hpfs_decide_conv(i, new_name, new_len);
654end1: 614end1:
655 if (old_dir != new_dir)
656 mutex_unlock(&hpfs_i(new_dir)->i_mutex);
657 mutex_unlock(&hpfs_i(old_dir)->i_mutex);
658 mutex_unlock(&hpfs_i(i)->i_parent_mutex);
659 if (new_inode)
660 mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex);
661 hpfs_unlock(i->i_sb); 615 hpfs_unlock(i->i_sb);
662 return err; 616 return err;
663} 617}
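Beyond dropping the per-inode locking (now covered by hpfs_lock()), the namei.c changes above route every on-disk date through cpu_to_le32(gmt_to_local(...)) on the way out and local_to_gmt(..., le32_to_cpu(...)) on the way in. The userspace round trip below models that with a plain timeshift; gmt_to_local()/local_to_gmt() here are simplified stand-ins for the real helpers in hpfs_fn.h and the timeshift value is invented for the example.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int timeshift = 3600;                       /* stand-in for sbi->sb_timeshift */

static time_t gmt_to_local(time_t t) { return t + timeshift; }   /* model only */
static time_t local_to_gmt(time_t t) { return t - timeshift; }   /* model only */

int main(void)
{
	time_t now = time(NULL);

	/* write path: local time, stored little-endian in the dirent */
	uint32_t on_disk = htole32((uint32_t)gmt_to_local(now));

	/* read path: fix the byte order first, then convert back to GMT */
	time_t back = local_to_gmt((time_t)le32toh(on_disk));

	printf("round trip %s\n", back == now ? "ok" : "broken");
	return 0;
}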
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index c89b40808587..98580a3b5005 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -18,15 +18,16 @@
18 18
19/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ 19/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
20 20
21static void mark_dirty(struct super_block *s) 21static void mark_dirty(struct super_block *s, int remount)
22{ 22{
23 if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) { 23 if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) {
24 struct buffer_head *bh; 24 struct buffer_head *bh;
25 struct hpfs_spare_block *sb; 25 struct hpfs_spare_block *sb;
26 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { 26 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
27 sb->dirty = 1; 27 sb->dirty = 1;
28 sb->old_wrote = 0; 28 sb->old_wrote = 0;
29 mark_buffer_dirty(bh); 29 mark_buffer_dirty(bh);
30 sync_dirty_buffer(bh);
30 brelse(bh); 31 brelse(bh);
31 } 32 }
32 } 33 }
@@ -40,10 +41,12 @@ static void unmark_dirty(struct super_block *s)
40 struct buffer_head *bh; 41 struct buffer_head *bh;
41 struct hpfs_spare_block *sb; 42 struct hpfs_spare_block *sb;
42 if (s->s_flags & MS_RDONLY) return; 43 if (s->s_flags & MS_RDONLY) return;
44 sync_blockdev(s->s_bdev);
43 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { 45 if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
44 sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; 46 sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error;
45 sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; 47 sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error;
46 mark_buffer_dirty(bh); 48 mark_buffer_dirty(bh);
49 sync_dirty_buffer(bh);
47 brelse(bh); 50 brelse(bh);
48 } 51 }
49} 52}
@@ -63,13 +66,13 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
63 if (!hpfs_sb(s)->sb_was_error) { 66 if (!hpfs_sb(s)->sb_was_error) {
64 if (hpfs_sb(s)->sb_err == 2) { 67 if (hpfs_sb(s)->sb_err == 2) {
65 printk("; crashing the system because you wanted it\n"); 68 printk("; crashing the system because you wanted it\n");
66 mark_dirty(s); 69 mark_dirty(s, 0);
67 panic("HPFS panic"); 70 panic("HPFS panic");
68 } else if (hpfs_sb(s)->sb_err == 1) { 71 } else if (hpfs_sb(s)->sb_err == 1) {
69 if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n"); 72 if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n");
70 else { 73 else {
71 printk("; remounting read-only\n"); 74 printk("; remounting read-only\n");
72 mark_dirty(s); 75 mark_dirty(s, 0);
73 s->s_flags |= MS_RDONLY; 76 s->s_flags |= MS_RDONLY;
74 } 77 }
75 } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); 78 } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
@@ -102,9 +105,12 @@ static void hpfs_put_super(struct super_block *s)
102{ 105{
103 struct hpfs_sb_info *sbi = hpfs_sb(s); 106 struct hpfs_sb_info *sbi = hpfs_sb(s);
104 107
108 hpfs_lock(s);
109 unmark_dirty(s);
110 hpfs_unlock(s);
111
105 kfree(sbi->sb_cp_table); 112 kfree(sbi->sb_cp_table);
106 kfree(sbi->sb_bmp_dir); 113 kfree(sbi->sb_bmp_dir);
107 unmark_dirty(s);
108 s->s_fs_info = NULL; 114 s->s_fs_info = NULL;
109 kfree(sbi); 115 kfree(sbi);
110} 116}
@@ -129,7 +135,7 @@ static unsigned count_bitmaps(struct super_block *s)
129 n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; 135 n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
130 count = 0; 136 count = 0;
131 for (n = 0; n < n_bands; n++) 137 for (n = 0; n < n_bands; n++)
132 count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]); 138 count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
133 return count; 139 return count;
134} 140}
135 141
@@ -188,8 +194,6 @@ static void init_once(void *foo)
188{ 194{
189 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 195 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
190 196
191 mutex_init(&ei->i_mutex);
192 mutex_init(&ei->i_parent_mutex);
193 inode_init_once(&ei->vfs_inode); 197 inode_init_once(&ei->vfs_inode);
194} 198}
195 199
@@ -218,7 +222,6 @@ static void destroy_inodecache(void)
218 222
219enum { 223enum {
220 Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis, 224 Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis,
221 Opt_conv_binary, Opt_conv_text, Opt_conv_auto,
222 Opt_check_none, Opt_check_normal, Opt_check_strict, 225 Opt_check_none, Opt_check_normal, Opt_check_strict,
223 Opt_err_cont, Opt_err_ro, Opt_err_panic, 226 Opt_err_cont, Opt_err_ro, Opt_err_panic,
224 Opt_eas_no, Opt_eas_ro, Opt_eas_rw, 227 Opt_eas_no, Opt_eas_ro, Opt_eas_rw,
@@ -233,9 +236,6 @@ static const match_table_t tokens = {
233 {Opt_umask, "umask=%o"}, 236 {Opt_umask, "umask=%o"},
234 {Opt_case_lower, "case=lower"}, 237 {Opt_case_lower, "case=lower"},
235 {Opt_case_asis, "case=asis"}, 238 {Opt_case_asis, "case=asis"},
236 {Opt_conv_binary, "conv=binary"},
237 {Opt_conv_text, "conv=text"},
238 {Opt_conv_auto, "conv=auto"},
239 {Opt_check_none, "check=none"}, 239 {Opt_check_none, "check=none"},
240 {Opt_check_normal, "check=normal"}, 240 {Opt_check_normal, "check=normal"},
241 {Opt_check_strict, "check=strict"}, 241 {Opt_check_strict, "check=strict"},
@@ -253,7 +253,7 @@ static const match_table_t tokens = {
253}; 253};
254 254
255static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, 255static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
256 int *lowercase, int *conv, int *eas, int *chk, int *errs, 256 int *lowercase, int *eas, int *chk, int *errs,
257 int *chkdsk, int *timeshift) 257 int *chkdsk, int *timeshift)
258{ 258{
259 char *p; 259 char *p;
@@ -295,15 +295,6 @@ static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
295 case Opt_case_asis: 295 case Opt_case_asis:
296 *lowercase = 0; 296 *lowercase = 0;
297 break; 297 break;
298 case Opt_conv_binary:
299 *conv = CONV_BINARY;
300 break;
301 case Opt_conv_text:
302 *conv = CONV_TEXT;
303 break;
304 case Opt_conv_auto:
305 *conv = CONV_AUTO;
306 break;
307 case Opt_check_none: 298 case Opt_check_none:
308 *chk = 0; 299 *chk = 0;
309 break; 300 break;
@@ -370,9 +361,6 @@ HPFS filesystem options:\n\
370 umask=xxx set mode of files that don't have mode specified in eas\n\ 361 umask=xxx set mode of files that don't have mode specified in eas\n\
371 case=lower lowercase all files\n\ 362 case=lower lowercase all files\n\
372 case=asis do not lowercase files (default)\n\ 363 case=asis do not lowercase files (default)\n\
373 conv=binary do not convert CR/LF -> LF (default)\n\
374 conv=auto convert only files with known text extensions\n\
375 conv=text convert all files\n\
376 check=none no fs checks - kernel may crash on corrupted filesystem\n\ 364 check=none no fs checks - kernel may crash on corrupted filesystem\n\
377 check=normal do some checks - it should not crash (default)\n\ 365 check=normal do some checks - it should not crash (default)\n\
378 check=strict do extra time-consuming checks, used for debugging\n\ 366 check=strict do extra time-consuming checks, used for debugging\n\
@@ -394,7 +382,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
394 uid_t uid; 382 uid_t uid;
395 gid_t gid; 383 gid_t gid;
396 umode_t umask; 384 umode_t umask;
397 int lowercase, conv, eas, chk, errs, chkdsk, timeshift; 385 int lowercase, eas, chk, errs, chkdsk, timeshift;
398 int o; 386 int o;
399 struct hpfs_sb_info *sbi = hpfs_sb(s); 387 struct hpfs_sb_info *sbi = hpfs_sb(s);
400 char *new_opts = kstrdup(data, GFP_KERNEL); 388 char *new_opts = kstrdup(data, GFP_KERNEL);
@@ -405,11 +393,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
405 lock_super(s); 393 lock_super(s);
406 uid = sbi->sb_uid; gid = sbi->sb_gid; 394 uid = sbi->sb_uid; gid = sbi->sb_gid;
407 umask = 0777 & ~sbi->sb_mode; 395 umask = 0777 & ~sbi->sb_mode;
408 lowercase = sbi->sb_lowercase; conv = sbi->sb_conv; 396 lowercase = sbi->sb_lowercase;
409 eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk; 397 eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk;
410 errs = sbi->sb_err; timeshift = sbi->sb_timeshift; 398 errs = sbi->sb_err; timeshift = sbi->sb_timeshift;
411 399
412 if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, 400 if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase,
413 &eas, &chk, &errs, &chkdsk, &timeshift))) { 401 &eas, &chk, &errs, &chkdsk, &timeshift))) {
414 printk("HPFS: bad mount options.\n"); 402 printk("HPFS: bad mount options.\n");
415 goto out_err; 403 goto out_err;
@@ -427,11 +415,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
427 415
428 sbi->sb_uid = uid; sbi->sb_gid = gid; 416 sbi->sb_uid = uid; sbi->sb_gid = gid;
429 sbi->sb_mode = 0777 & ~umask; 417 sbi->sb_mode = 0777 & ~umask;
430 sbi->sb_lowercase = lowercase; sbi->sb_conv = conv; 418 sbi->sb_lowercase = lowercase;
431 sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; 419 sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
432 sbi->sb_err = errs; sbi->sb_timeshift = timeshift; 420 sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
433 421
434 if (!(*flags & MS_RDONLY)) mark_dirty(s); 422 if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
435 423
436 replace_mount_options(s, new_opts); 424 replace_mount_options(s, new_opts);
437 425
@@ -471,7 +459,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
471 uid_t uid; 459 uid_t uid;
472 gid_t gid; 460 gid_t gid;
473 umode_t umask; 461 umode_t umask;
474 int lowercase, conv, eas, chk, errs, chkdsk, timeshift; 462 int lowercase, eas, chk, errs, chkdsk, timeshift;
475 463
476 dnode_secno root_dno; 464 dnode_secno root_dno;
477 struct hpfs_dirent *de = NULL; 465 struct hpfs_dirent *de = NULL;
@@ -479,11 +467,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
479 467
480 int o; 468 int o;
481 469
482 if (num_possible_cpus() > 1) {
483 printk(KERN_ERR "HPFS is not SMP safe\n");
484 return -EINVAL;
485 }
486
487 save_mount_options(s, options); 470 save_mount_options(s, options);
488 471
489 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 472 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
@@ -495,20 +478,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
495 sbi->sb_bmp_dir = NULL; 478 sbi->sb_bmp_dir = NULL;
496 sbi->sb_cp_table = NULL; 479 sbi->sb_cp_table = NULL;
497 480
498 mutex_init(&sbi->hpfs_creation_de); 481 mutex_init(&sbi->hpfs_mutex);
482 hpfs_lock(s);
499 483
500 uid = current_uid(); 484 uid = current_uid();
501 gid = current_gid(); 485 gid = current_gid();
502 umask = current_umask(); 486 umask = current_umask();
503 lowercase = 0; 487 lowercase = 0;
504 conv = CONV_BINARY;
505 eas = 2; 488 eas = 2;
506 chk = 1; 489 chk = 1;
507 errs = 1; 490 errs = 1;
508 chkdsk = 1; 491 chkdsk = 1;
509 timeshift = 0; 492 timeshift = 0;
510 493
511 if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv, 494 if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase,
512 &eas, &chk, &errs, &chkdsk, &timeshift))) { 495 &eas, &chk, &errs, &chkdsk, &timeshift))) {
513 printk("HPFS: bad mount options.\n"); 496 printk("HPFS: bad mount options.\n");
514 goto bail0; 497 goto bail0;
@@ -526,9 +509,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
526 if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; 509 if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3;
527 510
528 /* Check magics */ 511 /* Check magics */
529 if (/*bootblock->magic != BB_MAGIC 512 if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC
530 ||*/ superblock->magic != SB_MAGIC 513 ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC
531 || spareblock->magic != SP_MAGIC) { 514 || le32_to_cpu(spareblock->magic) != SP_MAGIC) {
532 if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n"); 515 if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n");
533 goto bail4; 516 goto bail4;
534 } 517 }
@@ -549,19 +532,18 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
549 s->s_op = &hpfs_sops; 532 s->s_op = &hpfs_sops;
550 s->s_d_op = &hpfs_dentry_operations; 533 s->s_d_op = &hpfs_dentry_operations;
551 534
552 sbi->sb_root = superblock->root; 535 sbi->sb_root = le32_to_cpu(superblock->root);
553 sbi->sb_fs_size = superblock->n_sectors; 536 sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors);
554 sbi->sb_bitmaps = superblock->bitmaps; 537 sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps);
555 sbi->sb_dirband_start = superblock->dir_band_start; 538 sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start);
556 sbi->sb_dirband_size = superblock->n_dir_band; 539 sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band);
557 sbi->sb_dmap = superblock->dir_band_bitmap; 540 sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap);
558 sbi->sb_uid = uid; 541 sbi->sb_uid = uid;
559 sbi->sb_gid = gid; 542 sbi->sb_gid = gid;
560 sbi->sb_mode = 0777 & ~umask; 543 sbi->sb_mode = 0777 & ~umask;
561 sbi->sb_n_free = -1; 544 sbi->sb_n_free = -1;
562 sbi->sb_n_free_dnodes = -1; 545 sbi->sb_n_free_dnodes = -1;
563 sbi->sb_lowercase = lowercase; 546 sbi->sb_lowercase = lowercase;
564 sbi->sb_conv = conv;
565 sbi->sb_eas = eas; 547 sbi->sb_eas = eas;
566 sbi->sb_chk = chk; 548 sbi->sb_chk = chk;
567 sbi->sb_chkdsk = chkdsk; 549 sbi->sb_chkdsk = chkdsk;
@@ -573,7 +555,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
573 sbi->sb_max_fwd_alloc = 0xffffff; 555 sbi->sb_max_fwd_alloc = 0xffffff;
574 556
575 /* Load bitmap directory */ 557 /* Load bitmap directory */
576 if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps))) 558 if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
577 goto bail4; 559 goto bail4;
578 560
579 /* Check for general fs errors*/ 561 /* Check for general fs errors*/
@@ -591,20 +573,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
591 mark_buffer_dirty(bh2); 573 mark_buffer_dirty(bh2);
592 } 574 }
593 575
594 if (spareblock->hotfixes_used || spareblock->n_spares_used) { 576 if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) {
595 if (errs >= 2) { 577 if (errs >= 2) {
596 printk("HPFS: Hotfixes not supported here, try chkdsk\n"); 578 printk("HPFS: Hotfixes not supported here, try chkdsk\n");
597 mark_dirty(s); 579 mark_dirty(s, 0);
598 goto bail4; 580 goto bail4;
599 } 581 }
600 hpfs_error(s, "hotfixes not supported here, try chkdsk"); 582 hpfs_error(s, "hotfixes not supported here, try chkdsk");
601 if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n"); 583 if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n");
602 else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n"); 584 else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n");
603 } 585 }
604 if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) { 586 if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
605 if (errs >= 2) { 587 if (errs >= 2) {
606 printk("HPFS: Spare dnodes used, try chkdsk\n"); 588 printk("HPFS: Spare dnodes used, try chkdsk\n");
607 mark_dirty(s); 589 mark_dirty(s, 0);
608 goto bail4; 590 goto bail4;
609 } 591 }
610 hpfs_error(s, "warning: spare dnodes used, try chkdsk"); 592 hpfs_error(s, "warning: spare dnodes used, try chkdsk");
@@ -612,26 +594,26 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
612 } 594 }
613 if (chk) { 595 if (chk) {
614 unsigned a; 596 unsigned a;
615 if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band || 597 if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) ||
616 superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) { 598 le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) {
617 hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", 599 hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x",
618 superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band); 600 le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band));
619 goto bail4; 601 goto bail4;
620 } 602 }
621 a = sbi->sb_dirband_size; 603 a = sbi->sb_dirband_size;
622 sbi->sb_dirband_size = 0; 604 sbi->sb_dirband_size = 0;
623 if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") || 605 if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") ||
624 hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") || 606 hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") ||
625 hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) { 607 hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) {
626 mark_dirty(s); 608 mark_dirty(s, 0);
627 goto bail4; 609 goto bail4;
628 } 610 }
629 sbi->sb_dirband_size = a; 611 sbi->sb_dirband_size = a;
630 } else printk("HPFS: You really don't want any checks? You are crazy...\n"); 612 } else printk("HPFS: You really don't want any checks? You are crazy...\n");
631 613
632 /* Load code page table */ 614 /* Load code page table */
633 if (spareblock->n_code_pages) 615 if (le32_to_cpu(spareblock->n_code_pages))
634 if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir))) 616 if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir))))
635 printk("HPFS: Warning: code page support is disabled\n"); 617 printk("HPFS: Warning: code page support is disabled\n");
636 618
637 brelse(bh2); 619 brelse(bh2);
@@ -660,13 +642,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
660 if (!de) 642 if (!de)
661 hpfs_error(s, "unable to find root dir"); 643 hpfs_error(s, "unable to find root dir");
662 else { 644 else {
663 root->i_atime.tv_sec = local_to_gmt(s, de->read_date); 645 root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
664 root->i_atime.tv_nsec = 0; 646 root->i_atime.tv_nsec = 0;
665 root->i_mtime.tv_sec = local_to_gmt(s, de->write_date); 647 root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
666 root->i_mtime.tv_nsec = 0; 648 root->i_mtime.tv_nsec = 0;
667 root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date); 649 root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
668 root->i_ctime.tv_nsec = 0; 650 root->i_ctime.tv_nsec = 0;
669 hpfs_i(root)->i_ea_size = de->ea_size; 651 hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size);
670 hpfs_i(root)->i_parent_dir = root->i_ino; 652 hpfs_i(root)->i_parent_dir = root->i_ino;
671 if (root->i_size == -1) 653 if (root->i_size == -1)
672 root->i_size = 2048; 654 root->i_size = 2048;
@@ -674,6 +656,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
674 root->i_blocks = 5; 656 root->i_blocks = 5;
675 hpfs_brelse4(&qbh); 657 hpfs_brelse4(&qbh);
676 } 658 }
659 hpfs_unlock(s);
677 return 0; 660 return 0;
678 661
679bail4: brelse(bh2); 662bail4: brelse(bh2);
@@ -681,6 +664,7 @@ bail3: brelse(bh1);
681bail2: brelse(bh0); 664bail2: brelse(bh0);
682bail1: 665bail1:
683bail0: 666bail0:
667 hpfs_unlock(s);
684 kfree(sbi->sb_bmp_dir); 668 kfree(sbi->sb_bmp_dir);
685 kfree(sbi->sb_cp_table); 669 kfree(sbi->sb_cp_table);
686 s->s_fs_info = NULL; 670 s->s_fs_info = NULL;
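
The HPFS hunks above follow one pattern: the on-disk fields are little-endian, so each raw access is wrapped in le16_to_cpu()/le32_to_cpu() before the value lands in the in-core superblock, and hpfs_unlock(s) is now taken on both the normal return and the bail-out path. Below is a rough userspace sketch of the same convert-once-at-load idiom; the struct and field names are invented, and it leans on the glibc le16toh()/le32toh() helpers rather than the kernel's le*_to_cpu():

/* Minimal userspace analogue of the on-disk -> in-core conversion
 * pattern above.  The structs are made up for illustration; only the
 * le16toh()/le32toh() idiom mirrors the patch. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_super {            /* hypothetical on-disk layout, little-endian */
    uint32_t magic;
    uint32_t root;
    uint16_t ea_size;
};

struct incore_super {          /* hypothetical native-endian in-core copy */
    uint32_t magic;
    uint32_t root;
    uint16_t ea_size;
};

static void load_super(const struct disk_super *d, struct incore_super *s)
{
    /* convert once at load time, so the rest of the code never sees
     * raw little-endian values */
    s->magic   = le32toh(d->magic);
    s->root    = le32toh(d->root);
    s->ea_size = le16toh(d->ea_size);
}

int main(void)
{
    /* arbitrary example values, stored in little-endian byte order */
    struct disk_super d = { htole32(0x4a3f21c8), htole32(12), htole16(0) };
    struct incore_super s;

    load_super(&d, &s);
    printf("magic=%#x root=%u\n", (unsigned)s.magic, (unsigned)s.root);
    return 0;
}
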
diff --git a/fs/namei.c b/fs/namei.c
index 54fc993e3027..e3c4f112ebf7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname);
179static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, 179static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
180 int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) 180 int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
181{ 181{
182 umode_t mode = inode->i_mode; 182 unsigned int mode = inode->i_mode;
183 183
184 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 184 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
185 185
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 6f8192f4cfc7..be79dc9f386d 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
117 case -EKEYEXPIRED: 117 case -EKEYEXPIRED:
118 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); 118 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
119 break; 119 break;
120 case -NFS4ERR_RETRY_UNCACHED_REP:
121 break;
120 default: 122 default:
121 dprintk("%s DS error. Retry through MDS %d\n", __func__, 123 dprintk("%s DS error. Retry through MDS %d\n", __func__,
122 task->tk_status); 124 task->tk_status);
@@ -416,7 +418,8 @@ static int
416filelayout_check_layout(struct pnfs_layout_hdr *lo, 418filelayout_check_layout(struct pnfs_layout_hdr *lo,
417 struct nfs4_filelayout_segment *fl, 419 struct nfs4_filelayout_segment *fl,
418 struct nfs4_layoutget_res *lgr, 420 struct nfs4_layoutget_res *lgr,
419 struct nfs4_deviceid *id) 421 struct nfs4_deviceid *id,
422 gfp_t gfp_flags)
420{ 423{
421 struct nfs4_file_layout_dsaddr *dsaddr; 424 struct nfs4_file_layout_dsaddr *dsaddr;
422 int status = -EINVAL; 425 int status = -EINVAL;
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
439 /* find and reference the deviceid */ 442 /* find and reference the deviceid */
440 dsaddr = nfs4_fl_find_get_deviceid(id); 443 dsaddr = nfs4_fl_find_get_deviceid(id);
441 if (dsaddr == NULL) { 444 if (dsaddr == NULL) {
442 dsaddr = get_device_info(lo->plh_inode, id); 445 dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
443 if (dsaddr == NULL) 446 if (dsaddr == NULL)
444 goto out; 447 goto out;
445 } 448 }
@@ -500,7 +503,8 @@ static int
500filelayout_decode_layout(struct pnfs_layout_hdr *flo, 503filelayout_decode_layout(struct pnfs_layout_hdr *flo,
501 struct nfs4_filelayout_segment *fl, 504 struct nfs4_filelayout_segment *fl,
502 struct nfs4_layoutget_res *lgr, 505 struct nfs4_layoutget_res *lgr,
503 struct nfs4_deviceid *id) 506 struct nfs4_deviceid *id,
507 gfp_t gfp_flags)
504{ 508{
505 struct xdr_stream stream; 509 struct xdr_stream stream;
506 struct xdr_buf buf = { 510 struct xdr_buf buf = {
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
516 520
517 dprintk("%s: set_layout_map Begin\n", __func__); 521 dprintk("%s: set_layout_map Begin\n", __func__);
518 522
519 scratch = alloc_page(GFP_KERNEL); 523 scratch = alloc_page(gfp_flags);
520 if (!scratch) 524 if (!scratch)
521 return -ENOMEM; 525 return -ENOMEM;
522 526
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
554 goto out_err; 558 goto out_err;
555 559
556 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 560 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
557 GFP_KERNEL); 561 gfp_flags);
558 if (!fl->fh_array) 562 if (!fl->fh_array)
559 goto out_err; 563 goto out_err;
560 564
561 for (i = 0; i < fl->num_fh; i++) { 565 for (i = 0; i < fl->num_fh; i++) {
562 /* Do we want to use a mempool here? */ 566 /* Do we want to use a mempool here? */
563 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); 567 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
564 if (!fl->fh_array[i]) 568 if (!fl->fh_array[i])
565 goto out_err_free; 569 goto out_err_free;
566 570
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
605 609
606static struct pnfs_layout_segment * 610static struct pnfs_layout_segment *
607filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, 611filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
608 struct nfs4_layoutget_res *lgr) 612 struct nfs4_layoutget_res *lgr,
613 gfp_t gfp_flags)
609{ 614{
610 struct nfs4_filelayout_segment *fl; 615 struct nfs4_filelayout_segment *fl;
611 int rc; 616 int rc;
612 struct nfs4_deviceid id; 617 struct nfs4_deviceid id;
613 618
614 dprintk("--> %s\n", __func__); 619 dprintk("--> %s\n", __func__);
615 fl = kzalloc(sizeof(*fl), GFP_KERNEL); 620 fl = kzalloc(sizeof(*fl), gfp_flags);
616 if (!fl) 621 if (!fl)
617 return NULL; 622 return NULL;
618 623
619 rc = filelayout_decode_layout(layoutid, fl, lgr, &id); 624 rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
620 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { 625 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
621 _filelayout_free_lseg(fl); 626 _filelayout_free_lseg(fl);
622 return NULL; 627 return NULL;
623 } 628 }
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
633 int size = (fl->stripe_type == STRIPE_SPARSE) ? 638 int size = (fl->stripe_type == STRIPE_SPARSE) ?
634 fl->dsaddr->ds_num : fl->dsaddr->stripe_count; 639 fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
635 640
636 fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); 641 fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags);
637 if (!fl->commit_buckets) { 642 if (!fl->commit_buckets) {
638 filelayout_free_lseg(&fl->generic_hdr); 643 filelayout_free_lseg(&fl->generic_hdr);
639 return NULL; 644 return NULL;
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 7c44579f5832..2b461d77b43a 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr *
104nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); 104nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
105extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 105extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
106struct nfs4_file_layout_dsaddr * 106struct nfs4_file_layout_dsaddr *
107get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); 107get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
108 108
109#endif /* FS_NFS_NFS4FILELAYOUT_H */ 109#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index de5350f2b249..db07c7af1395 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
225} 225}
226 226
227static struct nfs4_pnfs_ds * 227static struct nfs4_pnfs_ds *
228nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) 228nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
229{ 229{
230 struct nfs4_pnfs_ds *tmp_ds, *ds; 230 struct nfs4_pnfs_ds *tmp_ds, *ds;
231 231
232 ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); 232 ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
233 if (!ds) 233 if (!ds)
234 goto out; 234 goto out;
235 235
@@ -261,7 +261,7 @@ out:
261 * Currently only support ipv4, and one multi-path address. 261 * Currently only support ipv4, and one multi-path address.
262 */ 262 */
263static struct nfs4_pnfs_ds * 263static struct nfs4_pnfs_ds *
264decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) 264decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
265{ 265{
266 struct nfs4_pnfs_ds *ds = NULL; 266 struct nfs4_pnfs_ds *ds = NULL;
267 char *buf; 267 char *buf;
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
303 rlen); 303 rlen);
304 goto out_err; 304 goto out_err;
305 } 305 }
306 buf = kmalloc(rlen + 1, GFP_KERNEL); 306 buf = kmalloc(rlen + 1, gfp_flags);
307 if (!buf) { 307 if (!buf) {
308 dprintk("%s: Not enough memory\n", __func__); 308 dprintk("%s: Not enough memory\n", __func__);
309 goto out_err; 309 goto out_err;
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
333 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); 333 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
334 port = htons((tmp[0] << 8) | (tmp[1])); 334 port = htons((tmp[0] << 8) | (tmp[1]));
335 335
336 ds = nfs4_pnfs_ds_add(inode, ip_addr, port); 336 ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
337 dprintk("%s: Decoded address and port %s\n", __func__, buf); 337 dprintk("%s: Decoded address and port %s\n", __func__, buf);
338out_free: 338out_free:
339 kfree(buf); 339 kfree(buf);
@@ -343,7 +343,7 @@ out_err:
343 343
344/* Decode opaque device data and return the result */ 344/* Decode opaque device data and return the result */
345static struct nfs4_file_layout_dsaddr* 345static struct nfs4_file_layout_dsaddr*
346decode_device(struct inode *ino, struct pnfs_device *pdev) 346decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
347{ 347{
348 int i; 348 int i;
349 u32 cnt, num; 349 u32 cnt, num;
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
362 struct page *scratch; 362 struct page *scratch;
363 363
364 /* set up xdr stream */ 364 /* set up xdr stream */
365 scratch = alloc_page(GFP_KERNEL); 365 scratch = alloc_page(gfp_flags);
366 if (!scratch) 366 if (!scratch)
367 goto out_err; 367 goto out_err;
368 368
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
384 } 384 }
385 385
386 /* read stripe indices */ 386 /* read stripe indices */
387 stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); 387 stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
388 if (!stripe_indices) 388 if (!stripe_indices)
389 goto out_err_free_scratch; 389 goto out_err_free_scratch;
390 390
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
423 423
424 dsaddr = kzalloc(sizeof(*dsaddr) + 424 dsaddr = kzalloc(sizeof(*dsaddr) +
425 (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), 425 (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
426 GFP_KERNEL); 426 gfp_flags);
427 if (!dsaddr) 427 if (!dsaddr)
428 goto out_err_free_stripe_indices; 428 goto out_err_free_stripe_indices;
429 429
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
452 for (j = 0; j < mp_count; j++) { 452 for (j = 0; j < mp_count; j++) {
453 if (j == 0) { 453 if (j == 0) {
454 dsaddr->ds_list[i] = decode_and_add_ds(&stream, 454 dsaddr->ds_list[i] = decode_and_add_ds(&stream,
455 ino); 455 ino, gfp_flags);
456 if (dsaddr->ds_list[i] == NULL) 456 if (dsaddr->ds_list[i] == NULL)
457 goto out_err_free_deviceid; 457 goto out_err_free_deviceid;
458 } else { 458 } else {
@@ -503,12 +503,12 @@ out_err:
503 * available devices. 503 * available devices.
504 */ 504 */
505static struct nfs4_file_layout_dsaddr * 505static struct nfs4_file_layout_dsaddr *
506decode_and_add_device(struct inode *inode, struct pnfs_device *dev) 506decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
507{ 507{
508 struct nfs4_file_layout_dsaddr *d, *new; 508 struct nfs4_file_layout_dsaddr *d, *new;
509 long hash; 509 long hash;
510 510
511 new = decode_device(inode, dev); 511 new = decode_device(inode, dev, gfp_flags);
512 if (!new) { 512 if (!new) {
513 printk(KERN_WARNING "%s: Could not decode or add device\n", 513 printk(KERN_WARNING "%s: Could not decode or add device\n",
514 __func__); 514 __func__);
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
537 * of available devices, and return it. 537 * of available devices, and return it.
538 */ 538 */
539struct nfs4_file_layout_dsaddr * 539struct nfs4_file_layout_dsaddr *
540get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) 540get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
541{ 541{
542 struct pnfs_device *pdev = NULL; 542 struct pnfs_device *pdev = NULL;
543 u32 max_resp_sz; 543 u32 max_resp_sz;
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
556 dprintk("%s inode %p max_resp_sz %u max_pages %d\n", 556 dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
557 __func__, inode, max_resp_sz, max_pages); 557 __func__, inode, max_resp_sz, max_pages);
558 558
559 pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); 559 pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
560 if (pdev == NULL) 560 if (pdev == NULL)
561 return NULL; 561 return NULL;
562 562
563 pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 563 pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
564 if (pages == NULL) { 564 if (pages == NULL) {
565 kfree(pdev); 565 kfree(pdev);
566 return NULL; 566 return NULL;
567 } 567 }
568 for (i = 0; i < max_pages; i++) { 568 for (i = 0; i < max_pages; i++) {
569 pages[i] = alloc_page(GFP_KERNEL); 569 pages[i] = alloc_page(gfp_flags);
570 if (!pages[i]) 570 if (!pages[i])
571 goto out_free; 571 goto out_free;
572 } 572 }
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
587 * Found new device, need to decode it and then add it to the 587 * Found new device, need to decode it and then add it to the
588 * list of known devices for this mountpoint. 588 * list of known devices for this mountpoint.
589 */ 589 */
590 dsaddr = decode_and_add_device(inode, pdev); 590 dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
591out_free: 591out_free:
592 for (i = 0; i < max_pages; i++) 592 for (i = 0; i < max_pages; i++)
593 __free_page(pages[i]); 593 __free_page(pages[i]);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 69c0f3c5ee7a..cf1b339c3937 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
300 ret = nfs4_delay(server->client, &exception->timeout); 300 ret = nfs4_delay(server->client, &exception->timeout);
301 if (ret != 0) 301 if (ret != 0)
302 break; 302 break;
303 case -NFS4ERR_RETRY_UNCACHED_REP:
303 case -NFS4ERR_OLD_STATEID: 304 case -NFS4ERR_OLD_STATEID:
304 exception->retry = 1; 305 exception->retry = 1;
305 break; 306 break;
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3695 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3696 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3696 task->tk_status = 0; 3697 task->tk_status = 0;
3697 return -EAGAIN; 3698 return -EAGAIN;
3699 case -NFS4ERR_RETRY_UNCACHED_REP:
3698 case -NFS4ERR_OLD_STATEID: 3700 case -NFS4ERR_OLD_STATEID:
3699 task->tk_status = 0; 3701 task->tk_status = 0;
3700 return -EAGAIN; 3702 return -EAGAIN;
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4844 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4846 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4845 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4847 rpc_delay(task, NFS4_POLL_RETRY_MIN);
4846 task->tk_status = 0; 4848 task->tk_status = 0;
4849 /* fall through */
4850 case -NFS4ERR_RETRY_UNCACHED_REP:
4847 nfs_restart_rpc(task, data->clp); 4851 nfs_restart_rpc(task, data->clp);
4848 return; 4852 return;
4849 } 4853 }
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
5479 break; 5483 break;
5480 case -NFS4ERR_DELAY: 5484 case -NFS4ERR_DELAY:
5481 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5485 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5486 /* fall through */
5487 case -NFS4ERR_RETRY_UNCACHED_REP:
5482 return -EAGAIN; 5488 return -EAGAIN;
5483 default: 5489 default:
5484 nfs4_schedule_lease_recovery(clp); 5490 nfs4_schedule_lease_recovery(clp);
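
The nfs4proc.c hunks add -NFS4ERR_RETRY_UNCACHED_REP as an extra case label in front of existing retry cases and mark the deliberate fall-through from the delay handling with /* fall through */ comments, so the new error shares the existing retry action. A small sketch of that case-grouping idiom, with invented error names standing in for the NFSv4 codes:

/* Sketch of grouping a new error code with existing retry cases and
 * documenting intentional switch fall-through.  Names are placeholders. */
#include <stdio.h>

enum { ERR_DELAY = 1, ERR_RETRY_UNCACHED = 2, ERR_OLD_STATE = 3 };

static int handle_error(int err)
{
    switch (err) {
    case ERR_DELAY:
        printf("sleeping before retry\n");
        /* fall through */
    case ERR_RETRY_UNCACHED:     /* newly grouped with the retry cases */
    case ERR_OLD_STATE:
        return -1;               /* caller should retry, like -EAGAIN */
    default:
        return 0;                /* give up */
    }
}

int main(void)
{
    printf("%d %d %d\n", handle_error(ERR_DELAY),
           handle_error(ERR_RETRY_UNCACHED), handle_error(0));
    return 0;
}
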
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index ff681ab65d31..f57f5281a520 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
383 plh_layouts); 383 plh_layouts);
384 dprintk("%s freeing layout for inode %lu\n", __func__, 384 dprintk("%s freeing layout for inode %lu\n", __func__,
385 lo->plh_inode->i_ino); 385 lo->plh_inode->i_ino);
386 list_del_init(&lo->plh_layouts);
386 pnfs_destroy_layout(NFS_I(lo->plh_inode)); 387 pnfs_destroy_layout(NFS_I(lo->plh_inode));
387 } 388 }
388} 389}
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
466static struct pnfs_layout_segment * 467static struct pnfs_layout_segment *
467send_layoutget(struct pnfs_layout_hdr *lo, 468send_layoutget(struct pnfs_layout_hdr *lo,
468 struct nfs_open_context *ctx, 469 struct nfs_open_context *ctx,
469 u32 iomode) 470 u32 iomode,
471 gfp_t gfp_flags)
470{ 472{
471 struct inode *ino = lo->plh_inode; 473 struct inode *ino = lo->plh_inode;
472 struct nfs_server *server = NFS_SERVER(ino); 474 struct nfs_server *server = NFS_SERVER(ino);
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
479 dprintk("--> %s\n", __func__); 481 dprintk("--> %s\n", __func__);
480 482
481 BUG_ON(ctx == NULL); 483 BUG_ON(ctx == NULL);
482 lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); 484 lgp = kzalloc(sizeof(*lgp), gfp_flags);
483 if (lgp == NULL) 485 if (lgp == NULL)
484 return NULL; 486 return NULL;
485 487
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo,
487 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 489 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
488 max_pages = max_resp_sz >> PAGE_SHIFT; 490 max_pages = max_resp_sz >> PAGE_SHIFT;
489 491
490 pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 492 pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
491 if (!pages) 493 if (!pages)
492 goto out_err_free; 494 goto out_err_free;
493 495
494 for (i = 0; i < max_pages; i++) { 496 for (i = 0; i < max_pages; i++) {
495 pages[i] = alloc_page(GFP_KERNEL); 497 pages[i] = alloc_page(gfp_flags);
496 if (!pages[i]) 498 if (!pages[i])
497 goto out_err_free; 499 goto out_err_free;
498 } 500 }
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
508 lgp->args.layout.pages = pages; 510 lgp->args.layout.pages = pages;
509 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 511 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
510 lgp->lsegpp = &lseg; 512 lgp->lsegpp = &lseg;
513 lgp->gfp_flags = gfp_flags;
511 514
512 /* Synchronously retrieve layout information from server and 515 /* Synchronously retrieve layout information from server and
513 * store in lseg. 516 * store in lseg.
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
665} 668}
666 669
667static struct pnfs_layout_hdr * 670static struct pnfs_layout_hdr *
668alloc_init_layout_hdr(struct inode *ino) 671alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
669{ 672{
670 struct pnfs_layout_hdr *lo; 673 struct pnfs_layout_hdr *lo;
671 674
672 lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); 675 lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
673 if (!lo) 676 if (!lo)
674 return NULL; 677 return NULL;
675 atomic_set(&lo->plh_refcount, 1); 678 atomic_set(&lo->plh_refcount, 1);
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino)
681} 684}
682 685
683static struct pnfs_layout_hdr * 686static struct pnfs_layout_hdr *
684pnfs_find_alloc_layout(struct inode *ino) 687pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
685{ 688{
686 struct nfs_inode *nfsi = NFS_I(ino); 689 struct nfs_inode *nfsi = NFS_I(ino);
687 struct pnfs_layout_hdr *new = NULL; 690 struct pnfs_layout_hdr *new = NULL;
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino)
696 return nfsi->layout; 699 return nfsi->layout;
697 } 700 }
698 spin_unlock(&ino->i_lock); 701 spin_unlock(&ino->i_lock);
699 new = alloc_init_layout_hdr(ino); 702 new = alloc_init_layout_hdr(ino, gfp_flags);
700 spin_lock(&ino->i_lock); 703 spin_lock(&ino->i_lock);
701 704
702 if (likely(nfsi->layout == NULL)) /* Won the race? */ 705 if (likely(nfsi->layout == NULL)) /* Won the race? */
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
756struct pnfs_layout_segment * 759struct pnfs_layout_segment *
757pnfs_update_layout(struct inode *ino, 760pnfs_update_layout(struct inode *ino,
758 struct nfs_open_context *ctx, 761 struct nfs_open_context *ctx,
759 enum pnfs_iomode iomode) 762 enum pnfs_iomode iomode,
763 gfp_t gfp_flags)
760{ 764{
761 struct nfs_inode *nfsi = NFS_I(ino); 765 struct nfs_inode *nfsi = NFS_I(ino);
762 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; 766 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino,
767 if (!pnfs_enabled_sb(NFS_SERVER(ino))) 771 if (!pnfs_enabled_sb(NFS_SERVER(ino)))
768 return NULL; 772 return NULL;
769 spin_lock(&ino->i_lock); 773 spin_lock(&ino->i_lock);
770 lo = pnfs_find_alloc_layout(ino); 774 lo = pnfs_find_alloc_layout(ino, gfp_flags);
771 if (lo == NULL) { 775 if (lo == NULL) {
772 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); 776 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
773 goto out_unlock; 777 goto out_unlock;
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino,
807 spin_unlock(&clp->cl_lock); 811 spin_unlock(&clp->cl_lock);
808 } 812 }
809 813
810 lseg = send_layoutget(lo, ctx, iomode); 814 lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
811 if (!lseg && first) { 815 if (!lseg && first) {
812 spin_lock(&clp->cl_lock); 816 spin_lock(&clp->cl_lock);
813 list_del_init(&lo->plh_layouts); 817 list_del_init(&lo->plh_layouts);
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
846 goto out; 850 goto out;
847 } 851 }
848 /* Inject layout blob into I/O device driver */ 852 /* Inject layout blob into I/O device driver */
849 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); 853 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
850 if (!lseg || IS_ERR(lseg)) { 854 if (!lseg || IS_ERR(lseg)) {
851 if (!lseg) 855 if (!lseg)
852 status = -ENOMEM; 856 status = -ENOMEM;
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
899 /* This is first coelesce call for a series of nfs_pages */ 903 /* This is first coelesce call for a series of nfs_pages */
900 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 904 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
901 prev->wb_context, 905 prev->wb_context,
902 IOMODE_READ); 906 IOMODE_READ,
907 GFP_KERNEL);
903 } 908 }
904 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 909 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
905} 910}
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
921 /* This is first coelesce call for a series of nfs_pages */ 926 /* This is first coelesce call for a series of nfs_pages */
922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 927 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
923 prev->wb_context, 928 prev->wb_context,
924 IOMODE_RW); 929 IOMODE_RW,
930 GFP_NOFS);
925 } 931 }
926 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 932 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
927} 933}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index bc4827202e7a..0c015bad9e7a 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type {
70 const u32 id; 70 const u32 id;
71 const char *name; 71 const char *name;
72 struct module *owner; 72 struct module *owner;
73 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); 73 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
74 void (*free_lseg) (struct pnfs_layout_segment *lseg); 74 void (*free_lseg) (struct pnfs_layout_segment *lseg);
75 75
76 /* test for nfs page cache coalescing */ 76 /* test for nfs page cache coalescing */
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo);
126void put_lseg(struct pnfs_layout_segment *lseg); 126void put_lseg(struct pnfs_layout_segment *lseg);
127struct pnfs_layout_segment * 127struct pnfs_layout_segment *
128pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 128pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
129 enum pnfs_iomode access_type); 129 enum pnfs_iomode access_type, gfp_t gfp_flags);
130void set_pnfs_layoutdriver(struct nfs_server *, u32 id); 130void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
131void unset_pnfs_layoutdriver(struct nfs_server *); 131void unset_pnfs_layoutdriver(struct nfs_server *);
132enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, 132enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
245 245
246static inline struct pnfs_layout_segment * 246static inline struct pnfs_layout_segment *
247pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 247pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
248 enum pnfs_iomode access_type) 248 enum pnfs_iomode access_type, gfp_t gfp_flags)
249{ 249{
250 return NULL; 250 return NULL;
251} 251}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 7cded2b12a05..2bcf0dc306a1 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
288 atomic_set(&req->wb_complete, requests); 288 atomic_set(&req->wb_complete, requests);
289 289
290 BUG_ON(desc->pg_lseg != NULL); 290 BUG_ON(desc->pg_lseg != NULL);
291 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 291 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
292 ClearPageError(page); 292 ClearPageError(page);
293 offset = 0; 293 offset = 0;
294 nbytes = desc->pg_count; 294 nbytes = desc->pg_count;
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
351 } 351 }
352 req = nfs_list_entry(data->pages.next); 352 req = nfs_list_entry(data->pages.next);
353 if ((!lseg) && list_is_singular(&data->pages)) 353 if ((!lseg) && list_is_singular(&data->pages))
354 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 354 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
355 355
356 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 356 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
357 0, lseg); 357 0, lseg);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3bd5d7e80f6c..49c715b4ac92 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
939 atomic_set(&req->wb_complete, requests); 939 atomic_set(&req->wb_complete, requests);
940 940
941 BUG_ON(desc->pg_lseg); 941 BUG_ON(desc->pg_lseg);
942 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 942 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
943 ClearPageError(page); 943 ClearPageError(page);
944 offset = 0; 944 offset = 0;
945 nbytes = desc->pg_count; 945 nbytes = desc->pg_count;
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
1013 } 1013 }
1014 req = nfs_list_entry(data->pages.next); 1014 req = nfs_list_entry(data->pages.next);
1015 if ((!lseg) && list_is_singular(&data->pages)) 1015 if ((!lseg) && list_is_singular(&data->pages))
1016 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 1016 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
1017 1017
1018 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1018 if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1019 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) 1019 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
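
The common thread through the nfs4filelayout, pnfs, read and write hunks is that every allocation on the layoutget path now takes a gfp_t supplied by the caller instead of a hard-coded GFP_KERNEL: the read-side callers pass GFP_KERNEL, while the write-back callers pass GFP_NOFS so reclaim cannot recurse back into the filesystem under memory pressure. A minimal userspace sketch of that plumbing, with made-up names; only the shape of passing the allocation context down mirrors the patch:

/* Userspace sketch of the "thread the allocation context down from the
 * caller" pattern; names are invented stand-ins. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_like_t;          /* stand-in for gfp_t */
#define CTX_MAY_RECLAIM_FS  0x1u          /* analogue of GFP_KERNEL vs GFP_NOFS */

/* leaf helper no longer hard-codes its allocation context */
static void *alloc_buf(size_t len, gfp_like_t flags)
{
    (void)flags;                          /* a real allocator would honour this */
    return malloc(len);
}

/* mid-layer passes the caller's context straight through */
static void *build_segment(size_t len, gfp_like_t flags)
{
    return alloc_buf(len, flags);
}

int main(void)
{
    /* read path: reclaim may touch the fs; write-back path: it must not */
    void *rd = build_segment(64, CTX_MAY_RECLAIM_FS);
    void *wr = build_segment(64, 0);

    printf("rd=%p wr=%p\n", rd, wr);
    free(rd);
    free(wr);
    return 0;
}
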
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 0a0a66d98cce..f7684483785e 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
646 unsigned long group, group_offset; 646 unsigned long group, group_offset;
647 int i, j, n, ret; 647 int i, j, n, ret;
648 648
649 for (i = 0; i < nitems; i += n) { 649 for (i = 0; i < nitems; i = j) {
650 group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); 650 group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
651 ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); 651 ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
652 if (ret < 0) 652 if (ret < 0)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 643720209a98..9a3e6bbff27b 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
539 539
540/* We want to make sure that nobody is heartbeating on top of us -- 540/* We want to make sure that nobody is heartbeating on top of us --
541 * this will help detect an invalid configuration. */ 541 * this will help detect an invalid configuration. */
542static int o2hb_check_last_timestamp(struct o2hb_region *reg) 542static void o2hb_check_last_timestamp(struct o2hb_region *reg)
543{ 543{
544 int node_num, ret;
545 struct o2hb_disk_slot *slot; 544 struct o2hb_disk_slot *slot;
546 struct o2hb_disk_heartbeat_block *hb_block; 545 struct o2hb_disk_heartbeat_block *hb_block;
546 char *errstr;
547 547
548 node_num = o2nm_this_node(); 548 slot = &reg->hr_slots[o2nm_this_node()];
549
550 ret = 1;
551 slot = &reg->hr_slots[node_num];
552 /* Don't check on our 1st timestamp */ 549 /* Don't check on our 1st timestamp */
553 if (slot->ds_last_time) { 550 if (!slot->ds_last_time)
554 hb_block = slot->ds_raw_block; 551 return;
555 552
556 if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) 553 hb_block = slot->ds_raw_block;
557 ret = 0; 554 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
558 } 555 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
556 hb_block->hb_node == slot->ds_node_num)
557 return;
559 558
560 return ret; 559#define ERRSTR1 "Another node is heartbeating on device"
560#define ERRSTR2 "Heartbeat generation mismatch on device"
561#define ERRSTR3 "Heartbeat sequence mismatch on device"
562
563 if (hb_block->hb_node != slot->ds_node_num)
564 errstr = ERRSTR1;
565 else if (le64_to_cpu(hb_block->hb_generation) !=
566 slot->ds_last_generation)
567 errstr = ERRSTR2;
568 else
569 errstr = ERRSTR3;
570
571 mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
572 "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
573 slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
574 (unsigned long long)slot->ds_last_time, hb_block->hb_node,
575 (unsigned long long)le64_to_cpu(hb_block->hb_generation),
576 (unsigned long long)le64_to_cpu(hb_block->hb_seq));
561} 577}
562 578
563static inline void o2hb_prepare_block(struct o2hb_region *reg, 579static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
983 /* With an up to date view of the slots, we can check that no 999 /* With an up to date view of the slots, we can check that no
984 * other node has been improperly configured to heartbeat in 1000 * other node has been improperly configured to heartbeat in
985 * our slot. */ 1001 * our slot. */
986 if (!o2hb_check_last_timestamp(reg)) 1002 o2hb_check_last_timestamp(reg);
987 mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
988 "in our slot!\n", reg->hr_dev_name);
989 1003
990 /* fill in the proper info for our next heartbeat */ 1004 /* fill in the proper info for our next heartbeat */
991 o2hb_prepare_block(reg, reg->hr_generation); 1005 o2hb_prepare_block(reg, reg->hr_generation);
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
999 } 1013 }
1000 1014
1001 i = -1; 1015 i = -1;
1002 while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1016 while((i = find_next_bit(configured_nodes,
1003 1017 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
1004 change |= o2hb_check_slot(reg, &reg->hr_slots[i]); 1018 change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
1005 } 1019 }
1006 1020
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1690 struct file *filp = NULL; 1704 struct file *filp = NULL;
1691 struct inode *inode = NULL; 1705 struct inode *inode = NULL;
1692 ssize_t ret = -EINVAL; 1706 ssize_t ret = -EINVAL;
1707 int live_threshold;
1693 1708
1694 if (reg->hr_bdev) 1709 if (reg->hr_bdev)
1695 goto out; 1710 goto out;
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1766 * A node is considered live after it has beat LIVE_THRESHOLD 1781 * A node is considered live after it has beat LIVE_THRESHOLD
1767 * times. We're not steady until we've given them a chance 1782 * times. We're not steady until we've given them a chance
1768 * _after_ our first read. 1783 * _after_ our first read.
1784 * The default threshold is bare minimum so as to limit the delay
1785 * during mounts. For global heartbeat, the threshold doubled for the
1786 * first region.
1769 */ 1787 */
1770 atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); 1788 live_threshold = O2HB_LIVE_THRESHOLD;
1789 if (o2hb_global_heartbeat_active()) {
1790 spin_lock(&o2hb_live_lock);
1791 if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
1792 live_threshold <<= 1;
1793 spin_unlock(&o2hb_live_lock);
1794 }
1795 atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
1771 1796
1772 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1797 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
1773 reg->hr_item.ci_name); 1798 reg->hr_item.ci_name);
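
In the heartbeat.c hunks, o2hb_check_last_timestamp() changes from returning a pass/fail int to a void function that picks the most specific explanation (foreign node, generation mismatch, or sequence mismatch) and logs it together with the expected and on-disk values. A userspace sketch of that classify-then-report shape, with simplified stand-in fields:

/* Sketch of reporting which heartbeat field mismatched; the struct and
 * message strings here are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct slot_state {
    uint64_t last_seq;
    uint64_t last_gen;
    uint8_t  node;
};

static const char *mismatch_reason(const struct slot_state *expect,
                                   uint64_t seq, uint64_t gen, uint8_t node)
{
    if (node != expect->node)
        return "another node is heartbeating in our slot";
    if (gen != expect->last_gen)
        return "heartbeat generation mismatch";
    if (seq != expect->last_seq)
        return "heartbeat sequence mismatch";
    return NULL;                              /* everything matches */
}

int main(void)
{
    struct slot_state mine = { .last_seq = 42, .last_gen = 7, .node = 3 };
    const char *why = mismatch_reason(&mine, 42, 8, 3);

    printf("%s\n", why ? why : "slot looks clean");
    return 0;
}
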
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 9fe5b8fd658f..8582e3f4f120 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2868 bytes = blocks_wanted << sb->s_blocksize_bits; 2868 bytes = blocks_wanted << sb->s_blocksize_bits;
2869 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); 2869 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2870 struct ocfs2_inode_info *oi = OCFS2_I(dir); 2870 struct ocfs2_inode_info *oi = OCFS2_I(dir);
2871 struct ocfs2_alloc_context *data_ac; 2871 struct ocfs2_alloc_context *data_ac = NULL;
2872 struct ocfs2_alloc_context *meta_ac = NULL; 2872 struct ocfs2_alloc_context *meta_ac = NULL;
2873 struct buffer_head *dirdata_bh = NULL; 2873 struct buffer_head *dirdata_bh = NULL;
2874 struct buffer_head *dx_root_bh = NULL; 2874 struct buffer_head *dx_root_bh = NULL;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7540a492eaba..3b179d6cbde0 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1614 spin_unlock(&dlm->spinlock); 1614 spin_unlock(&dlm->spinlock);
1615 1615
1616 /* Support for global heartbeat and node info was added in 1.1 */ 1616 /* Support for global heartbeat and node info was added in 1.1 */
1617 if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { 1617 if (dlm->dlm_locking_proto.pv_major > 1 ||
1618 dlm->dlm_locking_proto.pv_minor > 0) {
1618 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); 1619 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
1619 if (status) { 1620 if (status) {
1620 mlog_errno(status); 1621 mlog_errno(status);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fede57ed005f..84d166328cf7 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2574,6 +2574,9 @@ fail:
2574 res->state &= ~DLM_LOCK_RES_MIGRATING; 2574 res->state &= ~DLM_LOCK_RES_MIGRATING;
2575 wake = 1; 2575 wake = 1;
2576 spin_unlock(&res->spinlock); 2576 spin_unlock(&res->spinlock);
2577 if (dlm_is_host_down(ret))
2578 dlm_wait_for_node_death(dlm, target,
2579 DLM_NODE_DEATH_WAIT_MAX);
2577 goto leave; 2580 goto leave;
2578 } 2581 }
2579 2582
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41565ae52856..89659d6dc206 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
1607 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); 1607 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1608 1608
1609 if (le32_to_cpu(rec->e_cpos) >= trunc_start) { 1609 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1610 /*
1611 * remove an entire extent record.
1612 */
1610 *trunc_cpos = le32_to_cpu(rec->e_cpos); 1613 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1611 /* 1614 /*
1612 * Skip holes if any. 1615 * Skip holes if any.
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
1617 *blkno = le64_to_cpu(rec->e_blkno); 1620 *blkno = le64_to_cpu(rec->e_blkno);
1618 *trunc_end = le32_to_cpu(rec->e_cpos); 1621 *trunc_end = le32_to_cpu(rec->e_cpos);
1619 } else if (range > trunc_start) { 1622 } else if (range > trunc_start) {
1623 /*
1624 * remove a partial extent record, which means we're
1625 * removing the last extent record.
1626 */
1620 *trunc_cpos = trunc_start; 1627 *trunc_cpos = trunc_start;
1628 /*
1629 * skip hole if any.
1630 */
1631 if (range < *trunc_end)
1632 *trunc_end = range;
1621 *trunc_len = *trunc_end - trunc_start; 1633 *trunc_len = *trunc_end - trunc_start;
1622 coff = trunc_start - le32_to_cpu(rec->e_cpos); 1634 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1623 *blkno = le64_to_cpu(rec->e_blkno) + 1635 *blkno = le64_to_cpu(rec->e_blkno) +
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index b141a44605ca..295d56454e8b 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
1260{ 1260{
1261 struct ocfs2_journal *journal = osb->journal; 1261 struct ocfs2_journal *journal = osb->journal;
1262 1262
1263 if (ocfs2_is_hard_readonly(osb))
1264 return;
1265
1263 /* No need to queue up our truncate_log as regular cleanup will catch 1266 /* No need to queue up our truncate_log as regular cleanup will catch
1264 * that */ 1267 * that */
1265 ocfs2_queue_recovery_completion(journal, osb->slot_num, 1268 ocfs2_queue_recovery_completion(journal, osb->slot_num,
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index ac0ccb5026a2..19d6750d1d6c 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -348,6 +348,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
348 goto fail; 348 goto fail;
349 } 349 }
350 350
351 /* Check that sizeof_partition_entry has the correct value */
352 if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
353 pr_debug("GUID Partitition Entry Size check failed.\n");
354 goto fail;
355 }
356
351 if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) 357 if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
352 goto fail; 358 goto fail;
353 359
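
The efi.c hunk rejects a GPT whose sizeof_partition_entry field does not match sizeof(gpt_entry), so the parser never walks the entry array with an untrusted stride. A userspace sketch of validating such an on-disk size field before use; the struct names here are invented:

/* Sketch of the "validate an on-disk size field before trusting it"
 * check; the fake structs only illustrate the shape of the test. */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_gpt_entry {                 /* stand-in for a fixed-size entry */
    uint8_t bytes[128];
};

struct fake_gpt_header {
    uint32_t sizeof_partition_entry;    /* little-endian on disk */
};

static bool entry_size_ok(const struct fake_gpt_header *h)
{
    /* refuse to index into the entry array with an unexpected stride */
    return le32toh(h->sizeof_partition_entry) == sizeof(struct fake_gpt_entry);
}

int main(void)
{
    struct fake_gpt_header good = { htole32(sizeof(struct fake_gpt_entry)) };
    struct fake_gpt_header bad  = { htole32(4096) };

    printf("good: %d, bad: %d\n", entry_size_ok(&good), entry_size_ok(&bad));
    return 0;
}
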
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2e7addfd9803..318d8654989b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -214,7 +214,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
214 int flags = vma->vm_flags; 214 int flags = vma->vm_flags;
215 unsigned long ino = 0; 215 unsigned long ino = 0;
216 unsigned long long pgoff = 0; 216 unsigned long long pgoff = 0;
217 unsigned long start; 217 unsigned long start, end;
218 dev_t dev = 0; 218 dev_t dev = 0;
219 int len; 219 int len;
220 220
@@ -227,13 +227,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
227 227
228 /* We don't show the stack guard page in /proc/maps */ 228 /* We don't show the stack guard page in /proc/maps */
229 start = vma->vm_start; 229 start = vma->vm_start;
230 if (vma->vm_flags & VM_GROWSDOWN) 230 if (stack_guard_page_start(vma, start))
231 if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) 231 start += PAGE_SIZE;
232 start += PAGE_SIZE; 232 end = vma->vm_end;
233 if (stack_guard_page_end(vma, end))
234 end -= PAGE_SIZE;
233 235
234 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 236 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
235 start, 237 start,
236 vma->vm_end, 238 end,
237 flags & VM_READ ? 'r' : '-', 239 flags & VM_READ ? 'r' : '-',
238 flags & VM_WRITE ? 'w' : '-', 240 flags & VM_WRITE ? 'w' : '-',
239 flags & VM_EXEC ? 'x' : '-', 241 flags & VM_EXEC ? 'x' : '-',
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index e4f9c1b0836c..3e898a48122d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -926,6 +926,7 @@ restart:
926 XFS_LOOKUP_BATCH, 926 XFS_LOOKUP_BATCH,
927 XFS_ICI_RECLAIM_TAG); 927 XFS_ICI_RECLAIM_TAG);
928 if (!nr_found) { 928 if (!nr_found) {
929 done = 1;
929 rcu_read_unlock(); 930 rcu_read_unlock();
930 break; 931 break;
931 } 932 }
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index acdb92f14d51..5fc2380092c8 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -346,20 +346,23 @@ xfs_ail_delete(
346 */ 346 */
347STATIC void 347STATIC void
348xfs_ail_worker( 348xfs_ail_worker(
349 struct work_struct *work) 349 struct work_struct *work)
350{ 350{
351 struct xfs_ail *ailp = container_of(to_delayed_work(work), 351 struct xfs_ail *ailp = container_of(to_delayed_work(work),
352 struct xfs_ail, xa_work); 352 struct xfs_ail, xa_work);
353 long tout; 353 xfs_mount_t *mp = ailp->xa_mount;
354 xfs_lsn_t target = ailp->xa_target;
355 xfs_lsn_t lsn;
356 xfs_log_item_t *lip;
357 int flush_log, count, stuck;
358 xfs_mount_t *mp = ailp->xa_mount;
359 struct xfs_ail_cursor *cur = &ailp->xa_cursors; 354 struct xfs_ail_cursor *cur = &ailp->xa_cursors;
360 int push_xfsbufd = 0; 355 xfs_log_item_t *lip;
356 xfs_lsn_t lsn;
357 xfs_lsn_t target;
358 long tout = 10;
359 int flush_log = 0;
360 int stuck = 0;
361 int count = 0;
362 int push_xfsbufd = 0;
361 363
362 spin_lock(&ailp->xa_lock); 364 spin_lock(&ailp->xa_lock);
365 target = ailp->xa_target;
363 xfs_trans_ail_cursor_init(ailp, cur); 366 xfs_trans_ail_cursor_init(ailp, cur);
364 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); 367 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
365 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 368 if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
@@ -368,8 +371,7 @@ xfs_ail_worker(
368 */ 371 */
369 xfs_trans_ail_cursor_done(ailp, cur); 372 xfs_trans_ail_cursor_done(ailp, cur);
370 spin_unlock(&ailp->xa_lock); 373 spin_unlock(&ailp->xa_lock);
371 ailp->xa_last_pushed_lsn = 0; 374 goto out_done;
372 return;
373 } 375 }
374 376
375 XFS_STATS_INC(xs_push_ail); 377 XFS_STATS_INC(xs_push_ail);
@@ -386,8 +388,7 @@ xfs_ail_worker(
386 * lots of contention on the AIL lists. 388 * lots of contention on the AIL lists.
387 */ 389 */
388 lsn = lip->li_lsn; 390 lsn = lip->li_lsn;
389 flush_log = stuck = count = 0; 391 while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
390 while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
391 int lock_result; 392 int lock_result;
392 /* 393 /*
393 * If we can lock the item without sleeping, unlock the AIL 394 * If we can lock the item without sleeping, unlock the AIL
@@ -480,21 +481,25 @@ xfs_ail_worker(
480 } 481 }
481 482
482 /* assume we have more work to do in a short while */ 483 /* assume we have more work to do in a short while */
483 tout = 10; 484out_done:
484 if (!count) { 485 if (!count) {
485 /* We're past our target or empty, so idle */ 486 /* We're past our target or empty, so idle */
486 ailp->xa_last_pushed_lsn = 0; 487 ailp->xa_last_pushed_lsn = 0;
487 488
488 /* 489 /*
489 * Check for an updated push target before clearing the 490 * We clear the XFS_AIL_PUSHING_BIT first before checking
490 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more 491 * whether the target has changed. If the target has changed,
491 * work to do. Wait a bit longer before starting that work. 492 * this pushes the requeue race directly onto the result of the
493 * atomic test/set bit, so we are guaranteed that either the
494 * the pusher that changed the target or ourselves will requeue
495 * the work (but not both).
492 */ 496 */
497 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
493 smp_rmb(); 498 smp_rmb();
494 if (ailp->xa_target == target) { 499 if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
495 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 500 test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
496 return; 501 return;
497 } 502
498 tout = 50; 503 tout = 50;
499 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 504 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
500 /* 505 /*
@@ -553,7 +558,7 @@ xfs_ail_push(
553 * the XFS_AIL_PUSHING_BIT. 558 * the XFS_AIL_PUSHING_BIT.
554 */ 559 */
555 smp_wmb(); 560 smp_wmb();
556 ailp->xa_target = threshold_lsn; 561 xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
557 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 562 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
558 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 563 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
559} 564}
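
The interesting part of the xfs_trans_ail.c rework is the idle path: the worker clears XFS_AIL_PUSHING_BIT first, then re-checks the target, and only requeues itself if test_and_set_bit() shows no concurrent xfs_ail_push() has already claimed the flag, so exactly one side requeues the work. A userspace sketch of that clear-then-test_and_set handshake using C11 atomics; the names are invented and the ordering is simplified to sequential consistency:

/* Sketch of the handshake that lets exactly one side requeue the work. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pushing;      /* analogue of XFS_AIL_PUSHING_BIT */
static _Atomic long target;      /* analogue of ailp->xa_target */

/* worker going idle: drop the flag first, then re-check the target */
static bool worker_should_requeue(long seen_target)
{
    atomic_store(&pushing, false);
    if (atomic_load(&target) == seen_target)
        return false;                     /* nothing new, stay idle */
    /* target moved: requeue only if no pusher already grabbed the flag */
    return !atomic_exchange(&pushing, true);
}

/* pusher side: publish a new target, then try to take the flag */
static bool pusher_should_queue(long new_target)
{
    atomic_store(&target, new_target);
    return !atomic_exchange(&pushing, true);
}

int main(void)
{
    atomic_store(&pushing, true);         /* worker currently owns the flag */
    atomic_store(&target, 100);

    printf("worker requeues: %d\n", worker_should_requeue(100));
    printf("pusher queues:   %d\n", pusher_should_queue(200));
    return 0;
}
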
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ade09d7b4271..c99c3d3e7811 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
127 127
128int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 128int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
129 129
130bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 130int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
131bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); 131bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
132int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); 132int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
133int drm_fb_helper_debug_enter(struct fb_info *info); 133int drm_fb_helper_debug_enter(struct fb_info *info);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index c2f93a8ae2e1..564b14aa7e16 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
86} 86}
87#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 87#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
88 &(mm)->head_node.node_list, \ 88 &(mm)->head_node.node_list, \
89 node_list); 89 node_list)
90#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ 90#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
91 for (entry = (mm)->prev_scanned_node, \ 91 for (entry = (mm)->prev_scanned_node, \
92 next = entry ? list_entry(entry->node_list.next, \ 92 next = entry ? list_entry(entry->node_list.next, \
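
The one-character drm_mm.h change drops a stray semicolon from drm_mm_for_each_node(); with it in place, a body written after the macro runs once outside the loop instead of once per node. A tiny standalone illustration of why a for-each style macro must not end in a semicolon (macro names invented):

/* Why a for-each macro must not end in ';': with the stray semicolon the
 * "body" is detached from the loop and runs exactly once. */
#include <stdio.h>

#define for_each_upto_bad(i, n)  for ((i) = 0; (i) < (n); (i)++);   /* BUG */
#define for_each_upto_good(i, n) for ((i) = 0; (i) < (n); (i)++)

int main(void)
{
    int i, hits = 0;

    for_each_upto_bad(i, 3)
        hits++;              /* body is NOT inside the loop: runs once */
    printf("bad macro:  hits=%d (i ended at %d)\n", hits, i);

    hits = 0;
    for_each_upto_good(i, 3)
        hits++;              /* body really iterates three times */
    printf("good macro: hits=%d\n", hits);
    return 0;
}
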
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b8613e806aa9..01eca1794e14 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
111 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 111 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
112#define alloc_bootmem_node(pgdat, x) \ 112#define alloc_bootmem_node(pgdat, x) \
113 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 113 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
114#define alloc_bootmem_node_nopanic(pgdat, x) \
115 __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
114#define alloc_bootmem_pages_node(pgdat, x) \ 116#define alloc_bootmem_pages_node(pgdat, x) \
115 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 117 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
116#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ 118#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 16ee8b49a200..d4675af963fa 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
546extern bool capable(int cap); 546extern bool capable(int cap);
547extern bool ns_capable(struct user_namespace *ns, int cap); 547extern bool ns_capable(struct user_namespace *ns, int cap);
548extern bool task_ns_capable(struct task_struct *t, int cap); 548extern bool task_ns_capable(struct task_struct *t, int cap);
549 549extern bool nsown_capable(int cap);
550/**
551 * nsown_capable - Check superior capability to one's own user_ns
552 * @cap: The capability in question
553 *
554 * Return true if the current task has the given superior capability
555 * targeted at its own user namespace.
556 */
557static inline bool nsown_capable(int cap)
558{
559 return ns_capable(current_user_ns(), cap);
560}
561 550
562/* audit system wants to get cap info from files as well */ 551/* audit system wants to get cap info from files as well */
563extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); 552extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9aeeb0ba2003..82607992f308 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
1/* Credentials management - see Documentation/credentials.txt 1/* Credentials management - see Documentation/security/credentials.txt
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -146,6 +146,7 @@ struct cred {
146 void *security; /* subjective LSM security */ 146 void *security; /* subjective LSM security */
147#endif 147#endif
148 struct user_struct *user; /* real user ID subscription */ 148 struct user_struct *user; /* real user ID subscription */
149 struct user_namespace *user_ns; /* cached user->user_ns */
149 struct group_info *group_info; /* supplementary groups for euid/fsgid */ 150 struct group_info *group_info; /* supplementary groups for euid/fsgid */
150 struct rcu_head rcu; /* RCU deletion hook */ 151 struct rcu_head rcu; /* RCU deletion hook */
151}; 152};
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
354#define current_fsgid() (current_cred_xxx(fsgid)) 355#define current_fsgid() (current_cred_xxx(fsgid))
355#define current_cap() (current_cred_xxx(cap_effective)) 356#define current_cap() (current_cred_xxx(cap_effective))
356#define current_user() (current_cred_xxx(user)) 357#define current_user() (current_cred_xxx(user))
357#define _current_user_ns() (current_cred_xxx(user)->user_ns)
358#define current_security() (current_cred_xxx(security)) 358#define current_security() (current_cred_xxx(security))
359 359
360extern struct user_namespace *current_user_ns(void); 360#ifdef CONFIG_USER_NS
361#define current_user_ns() (current_cred_xxx(user_ns))
362#else
363extern struct user_namespace init_user_ns;
364#define current_user_ns() (&init_user_ns)
365#endif
366
361 367
362#define current_uid_gid(_uid, _gid) \ 368#define current_uid_gid(_uid, _gid) \
363do { \ 369do { \
diff --git a/include/linux/device.h b/include/linux/device.h
index ab8dfc095709..d08399db6e2c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -442,7 +442,6 @@ struct device {
442 struct dev_archdata archdata; 442 struct dev_archdata archdata;
443 443
444 struct device_node *of_node; /* associated device tree node */ 444 struct device_node *of_node; /* associated device tree node */
445 const struct of_device_id *of_match; /* matching of_device_id from driver */
446 445
447 dev_t devt; /* dev_t, creates the sysfs "dev" */ 446 dev_t devt; /* dev_t, creates the sysfs "dev" */
448 447
diff --git a/include/linux/fb.h b/include/linux/fb.h
index df728c1c29ed..6a8274877171 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -832,6 +832,7 @@ struct fb_tile_ops {
832#define FBINFO_CAN_FORCE_OUTPUT 0x200000 832#define FBINFO_CAN_FORCE_OUTPUT 0x200000
833 833
834struct fb_info { 834struct fb_info {
835 atomic_t count;
835 int node; 836 int node;
836 int flags; 837 int flags;
837 struct mutex lock; /* Lock for open/release/ioctl funcs */ 838 struct mutex lock; /* Lock for open/release/ioctl funcs */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dbd860af0804..cdf9495df204 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -358,7 +358,6 @@ struct inodes_stat_t {
358#define FS_EXTENT_FL 0x00080000 /* Extents */ 358#define FS_EXTENT_FL 0x00080000 /* Extents */
359#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ 359#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
360#define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 360#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
361#define FS_COW_FL 0x02000000 /* Cow file */
362#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 361#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
363 362
364#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ 363#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 22b32af1b5ec..b5a550a39a70 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -37,6 +37,7 @@ struct trace_entry {
37 unsigned char flags; 37 unsigned char flags;
38 unsigned char preempt_count; 38 unsigned char preempt_count;
39 int pid; 39 int pid;
40 int padding;
40}; 41};
41 42
42#define FTRACE_MAX_EVENT \ 43#define FTRACE_MAX_EVENT \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bfb8f934521e..56d8fc87fbbc 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
353 353
354void *alloc_pages_exact(size_t size, gfp_t gfp_mask); 354void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
355void free_pages_exact(void *virt, size_t size); 355void free_pages_exact(void *virt, size_t size);
356/* This is different from alloc_pages_exact_node !!! */
357void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
356 358
357#define __get_free_page(gfp_mask) \ 359#define __get_free_page(gfp_mask) \
358 __get_free_pages((gfp_mask), 0) 360 __get_free_pages((gfp_mask), 0)
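
The declaration above only hints at the distinction, so here is a hedged usage sketch: alloc_pages_exact_nid() prefers node nid but hands back an exactly PAGE_ALIGN(size)-sized region, whereas alloc_pages_exact_node() is node-exact but not size-exact. The helper name demo_exact_nid() and the size value are illustrative only, not part of the patch.

#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_exact_nid(int nid)
{
	size_t size = 3 * PAGE_SIZE + 512;	/* not a whole number of pages */
	void *buf;

	/* prefers node nid, may fall back to other nodes */
	buf = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return;

	/* ... use the exactly PAGE_ALIGN(size)-sized buffer ... */

	/* free_pages_exact() pairs with both alloc_pages_exact() variants */
	free_pages_exact(buf, size);
}

The mm/page_cgroup.c hunk further down switches to essentially this allocation pattern.
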
diff --git a/include/linux/key.h b/include/linux/key.h
index b2bb01719561..303982a69933 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * 11 *
12 * See Documentation/keys.txt for information on keys/keyrings. 12 * See Documentation/security/keys.txt for information on keys/keyrings.
13 */ 13 */
14 14
15#ifndef _LINUX_KEY_H 15#ifndef _LINUX_KEY_H
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2348db26bc3d..6507dde38b16 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page);
1011int clear_page_dirty_for_io(struct page *page); 1011int clear_page_dirty_for_io(struct page *page);
1012 1012
1013/* Is the vma a continuation of the stack vma above it? */ 1013/* Is the vma a continuation of the stack vma above it? */
1014static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) 1014static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1015{ 1015{
1016 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1016 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1017} 1017}
1018 1018
1019static inline int stack_guard_page_start(struct vm_area_struct *vma,
1020 unsigned long addr)
1021{
1022 return (vma->vm_flags & VM_GROWSDOWN) &&
1023 (vma->vm_start == addr) &&
1024 !vma_growsdown(vma->vm_prev, addr);
1025}
1026
1027/* Is the vma a continuation of the stack vma below it? */
1028static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1029{
1030 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1031}
1032
1033static inline int stack_guard_page_end(struct vm_area_struct *vma,
1034 unsigned long addr)
1035{
1036 return (vma->vm_flags & VM_GROWSUP) &&
1037 (vma->vm_end == addr) &&
1038 !vma_growsup(vma->vm_next, addr);
1039}
1040
1019extern unsigned long move_page_tables(struct vm_area_struct *vma, 1041extern unsigned long move_page_tables(struct vm_area_struct *vma,
1020 unsigned long old_addr, struct vm_area_struct *new_vma, 1042 unsigned long old_addr, struct vm_area_struct *new_vma,
1021 unsigned long new_addr, unsigned long len); 1043 unsigned long new_addr, unsigned long len);
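
To make the intent of the two new helpers above concrete, here is a hedged sketch of a combined check; the wrapper name addr_is_stack_guard() is hypothetical, and the stack_guard_page() rewrite in the mm/memory.c hunk further down performs the same combination.

#include <linux/mm.h>

/* Does addr land on the guard page of a growing stack vma? */
static int addr_is_stack_guard(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long page_addr = addr & PAGE_MASK;

	/* first page of a VM_GROWSDOWN stack, unless the vma above it
	 * is merely a continuation of the same stack */
	if (stack_guard_page_start(vma, page_addr))
		return 1;

	/* mirrored check for the last page of a VM_GROWSUP stack */
	return stack_guard_page_end(vma, page_addr + PAGE_SIZE);
}
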
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index eb792cb6d745..bcb793ec7374 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -183,6 +183,7 @@ struct mmc_host {
183 struct work_struct clk_gate_work; /* delayed clock gate */ 183 struct work_struct clk_gate_work; /* delayed clock gate */
184 unsigned int clk_old; /* old clock value cache */ 184 unsigned int clk_old; /* old clock value cache */
185 spinlock_t clk_lock; /* lock for clk fields */ 185 spinlock_t clk_lock; /* lock for clk fields */
186 struct mutex clk_gate_mutex; /* mutex for clock gating */
186#endif 187#endif
187 188
188 /* host specific block data */ 189 /* host specific block data */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 890dce242639..7e371f7df9c4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,6 +233,7 @@ struct nfs4_layoutget {
233 struct nfs4_layoutget_args args; 233 struct nfs4_layoutget_args args;
234 struct nfs4_layoutget_res res; 234 struct nfs4_layoutget_res res;
235 struct pnfs_layout_segment **lsegpp; 235 struct pnfs_layout_segment **lsegpp;
236 gfp_t gfp_flags;
236}; 237};
237 238
238struct nfs4_getdeviceinfo_args { 239struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8bfe6c1d4365..ae5638480ef2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev);
21static inline int of_driver_match_device(struct device *dev, 21static inline int of_driver_match_device(struct device *dev,
22 const struct device_driver *drv) 22 const struct device_driver *drv)
23{ 23{
24 dev->of_match = of_match_device(drv->of_match_table, dev); 24 return of_match_device(drv->of_match_table, dev) != NULL;
25 return dev->of_match != NULL;
26} 25}
27 26
28extern struct platform_device *of_dev_get(struct platform_device *dev); 27extern struct platform_device *of_dev_get(struct platform_device *dev);
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev,
58 57
59static inline void of_device_node_put(struct device *dev) { } 58static inline void of_device_node_put(struct device *dev) { }
60 59
60static inline const struct of_device_id *of_match_device(
61 const struct of_device_id *matches, const struct device *dev)
62{
63 return NULL;
64}
61#endif /* CONFIG_OF_DEVICE */ 65#endif /* CONFIG_OF_DEVICE */
62 66
63#endif /* _LINUX_OF_DEVICE_H */ 67#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 838c1149251a..eaf4350c0f90 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
208 struct proc_dir_entry *parent,const char *dest) {return NULL;} 208 struct proc_dir_entry *parent,const char *dest) {return NULL;}
209static inline struct proc_dir_entry *proc_mkdir(const char *name, 209static inline struct proc_dir_entry *proc_mkdir(const char *name,
210 struct proc_dir_entry *parent) {return NULL;} 210 struct proc_dir_entry *parent) {return NULL;}
211static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
212 mode_t mode, struct proc_dir_entry *parent) { return NULL; }
211 213
212static inline struct proc_dir_entry *create_proc_read_entry(const char *name, 214static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
213 mode_t mode, struct proc_dir_entry *base, 215 mode_t mode, struct proc_dir_entry *base,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index a1147e5dd245..9178d5cc0b01 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
189 child->ptrace = current->ptrace; 189 child->ptrace = current->ptrace;
190 __ptrace_link(child, current->parent); 190 __ptrace_link(child, current->parent);
191 } 191 }
192
193#ifdef CONFIG_HAVE_HW_BREAKPOINT
194 atomic_set(&child->ptrace_bp_refcnt, 1);
195#endif
192} 196}
193 197
194/** 198/**
@@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
350 unsigned long args[6], unsigned int maxargs, 354 unsigned long args[6], unsigned int maxargs,
351 unsigned long *sp, unsigned long *pc); 355 unsigned long *sp, unsigned long *pc);
352 356
353#endif 357#ifdef CONFIG_HAVE_HW_BREAKPOINT
358extern int ptrace_get_breakpoints(struct task_struct *tsk);
359extern void ptrace_put_breakpoints(struct task_struct *tsk);
360#else
361static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
362#endif /* CONFIG_HAVE_HW_BREAKPOINT */
363
364#endif /* __KERNEL */
354 365
355#endif 366#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 18d63cea2848..781abd137673 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1537,6 +1537,9 @@ struct task_struct {
1537 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ 1537 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1538 } memcg_batch; 1538 } memcg_batch;
1539#endif 1539#endif
1540#ifdef CONFIG_HAVE_HW_BREAKPOINT
1541 atomic_t ptrace_bp_refcnt;
1542#endif
1540}; 1543};
1541 1544
1542/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1545/* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 88bdd010d65d..2fa8d1341a0a 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
38 return outer; 38 return outer;
39} 39}
40 40
41#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) 41static inline void INET_ECN_xmit(struct sock *sk)
42#define INET_ECN_dontxmit(sk) \ 42{
43 do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) 43 inet_sk(sk)->tos |= INET_ECN_ECT_0;
44 if (inet6_sk(sk) != NULL)
45 inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
46}
47
48static inline void INET_ECN_dontxmit(struct sock *sk)
49{
50 inet_sk(sk)->tos &= ~INET_ECN_MASK;
51 if (inet6_sk(sk) != NULL)
52 inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
53}
44 54
45#define IP6_ECN_flow_init(label) do { \ 55#define IP6_ECN_flow_init(label) do { \
46 (label) &= ~htonl(INET_ECN_MASK << 20); \ 56 (label) &= ~htonl(INET_ECN_MASK << 20); \
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index d516f00c8e0f..86aefed6140b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -791,6 +791,7 @@ struct ip_vs_app {
791/* IPVS in network namespace */ 791/* IPVS in network namespace */
792struct netns_ipvs { 792struct netns_ipvs {
793 int gen; /* Generation */ 793 int gen; /* Generation */
794 int enable; /* enable like nf_hooks do */
794 /* 795 /*
795 * Hash table: for real service lookups 796 * Hash table: for real service lookups
796 */ 797 */
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1089 atomic_inc(&ctl_cp->n_control); 1090 atomic_inc(&ctl_cp->n_control);
1090} 1091}
1091 1092
1093/*
1094 * IPVS netns init & cleanup functions
1095 */
1096extern int __ip_vs_estimator_init(struct net *net);
1097extern int __ip_vs_control_init(struct net *net);
1098extern int __ip_vs_protocol_init(struct net *net);
1099extern int __ip_vs_app_init(struct net *net);
1100extern int __ip_vs_conn_init(struct net *net);
1101extern int __ip_vs_sync_init(struct net *net);
1102extern void __ip_vs_conn_cleanup(struct net *net);
1103extern void __ip_vs_app_cleanup(struct net *net);
1104extern void __ip_vs_protocol_cleanup(struct net *net);
1105extern void __ip_vs_control_cleanup(struct net *net);
1106extern void __ip_vs_estimator_cleanup(struct net *net);
1107extern void __ip_vs_sync_cleanup(struct net *net);
1108extern void __ip_vs_service_cleanup(struct net *net);
1092 1109
1093/* 1110/*
1094 * IPVS application functions 1111 * IPVS application functions
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 75b8e2968c9b..f57e7d46a453 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -199,7 +199,7 @@ struct llc_pdu_sn {
199 u8 ssap; 199 u8 ssap;
200 u8 ctrl_1; 200 u8 ctrl_1;
201 u8 ctrl_2; 201 u8 ctrl_2;
202}; 202} __packed;
203 203
204static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) 204static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
205{ 205{
@@ -211,7 +211,7 @@ struct llc_pdu_un {
211 u8 dsap; 211 u8 dsap;
212 u8 ssap; 212 u8 ssap;
213 u8 ctrl_1; 213 u8 ctrl_1;
214}; 214} __packed;
215 215
216static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) 216static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
217{ 217{
@@ -359,7 +359,7 @@ struct llc_xid_info {
359 u8 fmt_id; /* always 0x81 for LLC */ 359 u8 fmt_id; /* always 0x81 for LLC */
360 u8 type; /* different if NULL/non-NULL LSAP */ 360 u8 type; /* different if NULL/non-NULL LSAP */
361 u8 rw; /* sender receive window */ 361 u8 rw; /* sender receive window */
362}; 362} __packed;
363 363
364/** 364/**
365 * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID 365 * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,7 +415,7 @@ struct llc_frmr_info {
415 u8 curr_ssv; /* current send state variable val */ 415 u8 curr_ssv; /* current send state variable val */
416 u8 curr_rsv; /* current receive state variable */ 416 u8 curr_rsv; /* current receive state variable */
417 u8 ind_bits; /* indicator bits set with macro */ 417 u8 ind_bits; /* indicator bits set with macro */
418}; 418} __packed;
419 419
420extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); 420extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
421extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); 421extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6ae4bc5ce8a7..20afeaa39395 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo {
324 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 324 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
325 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 325 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
326 int (*output)(struct sk_buff *skb); 326 int (*output)(struct sk_buff *skb);
327 int (*output_finish)(struct sk_buff *skb);
327 int (*extract_input)(struct xfrm_state *x, 328 int (*extract_input)(struct xfrm_state *x,
328 struct sk_buff *skb); 329 struct sk_buff *skb);
329 int (*extract_output)(struct xfrm_state *x, 330 int (*extract_output)(struct xfrm_state *x,
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1454extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1455extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1455extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1456extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1456extern int xfrm4_output(struct sk_buff *skb); 1457extern int xfrm4_output(struct sk_buff *skb);
1458extern int xfrm4_output_finish(struct sk_buff *skb);
1457extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); 1459extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1458extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1460extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1459extern int xfrm6_extract_header(struct sk_buff *skb); 1461extern int xfrm6_extract_header(struct sk_buff *skb);
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
1470extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1472extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1471extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1473extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1472extern int xfrm6_output(struct sk_buff *skb); 1474extern int xfrm6_output(struct sk_buff *skb);
1475extern int xfrm6_output_finish(struct sk_buff *skb);
1473extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1476extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1474 u8 **prevhdr); 1477 u8 **prevhdr);
1475 1478
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2d3ec5094685..dd82e02ddde3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -169,6 +169,7 @@ struct scsi_device {
169 sdev_dev; 169 sdev_dev;
170 170
171 struct execute_work ew; /* used to get process context on put */ 171 struct execute_work ew; /* used to get process context on put */
172 struct work_struct requeue_work;
172 173
173 struct scsi_dh_data *scsi_dh_data; 174 struct scsi_dh_data *scsi_dh_data;
174 enum scsi_device_state sdev_state; 175 enum scsi_device_state sdev_state;
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index e3615c093741..9fe3a36646e9 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -10,6 +10,7 @@
10 */ 10 */
11#define show_gfp_flags(flags) \ 11#define show_gfp_flags(flags) \
12 (flags) ? __print_flags(flags, "|", \ 12 (flags) ? __print_flags(flags, "|", \
13 {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
13 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ 14 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
14 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ 15 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
15 {(unsigned long)GFP_USER, "GFP_USER"}, \ 16 {(unsigned long)GFP_USER, "GFP_USER"}, \
@@ -32,6 +33,9 @@
32 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ 33 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
33 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ 34 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
34 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ 35 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
35 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ 36 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
37 {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
38 {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
39 {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
36 ) : "GFP_NOWAIT" 40 ) : "GFP_NOWAIT"
37 41
diff --git a/init/Kconfig b/init/Kconfig
index d886b1e9278e..7a71e0a9992a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1226,7 +1226,6 @@ config SLAB
1226 per cpu and per node queues. 1226 per cpu and per node queues.
1227 1227
1228config SLUB 1228config SLUB
1229 depends on BROKEN || NUMA || !DISCONTIGMEM
1230 bool "SLUB (Unqueued Allocator)" 1229 bool "SLUB (Unqueued Allocator)"
1231 help 1230 help
1232 SLUB is a slab allocator that minimizes cache line usage 1231 SLUB is a slab allocator that minimizes cache line usage
diff --git a/kernel/capability.c b/kernel/capability.c
index bf0c734d0c12..32a80e08ff4b 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap)
399 return ns_capable(task_cred_xxx(t, user)->user_ns, cap); 399 return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
400} 400}
401EXPORT_SYMBOL(task_ns_capable); 401EXPORT_SYMBOL(task_ns_capable);
402
403/**
404 * nsown_capable - Check superior capability to one's own user_ns
405 * @cap: The capability in question
406 *
407 * Return true if the current task has the given superior capability
408 * targeted at its own user namespace.
409 */
410bool nsown_capable(int cap)
411{
412 return ns_capable(current_user_ns(), cap);
413}
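
As a hedged illustration of the now out-of-line nsown_capable() above: a typical caller checks a privilege against its own user namespace rather than against init_user_ns. The function set_some_id() is hypothetical; only the check itself reflects the interface added here.

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/errno.h>

static int set_some_id(uid_t uid)
{
	/* privileged within the caller's own user namespace is enough */
	if (uid != current_uid() && !nsown_capable(CAP_SETUID))
		return -EPERM;

	/* ... perform the id change ... */
	return 0;
}
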
diff --git a/kernel/cred.c b/kernel/cred.c
index 5557b55048df..004e3679624d 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -1,4 +1,4 @@
1/* Task credentials management - see Documentation/credentials.txt 1/* Task credentials management - see Documentation/security/credentials.txt
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -54,6 +54,7 @@ struct cred init_cred = {
54 .cap_effective = CAP_INIT_EFF_SET, 54 .cap_effective = CAP_INIT_EFF_SET,
55 .cap_bset = CAP_INIT_BSET, 55 .cap_bset = CAP_INIT_BSET,
56 .user = INIT_USER, 56 .user = INIT_USER,
57 .user_ns = &init_user_ns,
57 .group_info = &init_groups, 58 .group_info = &init_groups,
58#ifdef CONFIG_KEYS 59#ifdef CONFIG_KEYS
59 .tgcred = &init_tgcred, 60 .tgcred = &init_tgcred,
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
410 goto error_put; 411 goto error_put;
411 } 412 }
412 413
414 /* cache user_ns in cred. Doesn't need a refcount because it will
415 * stay pinned by cred->user
416 */
417 new->user_ns = new->user->user_ns;
418
413#ifdef CONFIG_KEYS 419#ifdef CONFIG_KEYS
414 /* new threads get their own thread keyrings if their parent already 420 /* new threads get their own thread keyrings if their parent already
415 * had one */ 421 * had one */
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode)
741} 747}
742EXPORT_SYMBOL(set_create_files_as); 748EXPORT_SYMBOL(set_create_files_as);
743 749
744struct user_namespace *current_user_ns(void)
745{
746 return _current_user_ns();
747}
748EXPORT_SYMBOL(current_user_ns);
749
750#ifdef CONFIG_DEBUG_CREDENTIALS 750#ifdef CONFIG_DEBUG_CREDENTIALS
751 751
752bool creds_are_invalid(const struct cred *cred) 752bool creds_are_invalid(const struct cred *cred)
diff --git a/kernel/exit.c b/kernel/exit.c
index f5d2f63bae0b..8dd874181542 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1016,7 +1016,7 @@ NORET_TYPE void do_exit(long code)
1016 /* 1016 /*
1017 * FIXME: do that only when needed, using sched_exit tracepoint 1017 * FIXME: do that only when needed, using sched_exit tracepoint
1018 */ 1018 */
1019 flush_ptrace_hw_breakpoint(tsk); 1019 ptrace_put_breakpoints(tsk);
1020 1020
1021 exit_notify(tsk, group_dead); 1021 exit_notify(tsk, group_dead);
1022#ifdef CONFIG_NUMA 1022#ifdef CONFIG_NUMA
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 8935369d503a..6275970b2189 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -216,7 +216,6 @@ int suspend_devices_and_enter(suspend_state_t state)
216 goto Close; 216 goto Close;
217 } 217 }
218 suspend_console(); 218 suspend_console();
219 pm_restrict_gfp_mask();
220 suspend_test_start(); 219 suspend_test_start();
221 error = dpm_suspend_start(PMSG_SUSPEND); 220 error = dpm_suspend_start(PMSG_SUSPEND);
222 if (error) { 221 if (error) {
@@ -233,7 +232,6 @@ int suspend_devices_and_enter(suspend_state_t state)
233 suspend_test_start(); 232 suspend_test_start();
234 dpm_resume_end(PMSG_RESUME); 233 dpm_resume_end(PMSG_RESUME);
235 suspend_test_finish("resume devices"); 234 suspend_test_finish("resume devices");
236 pm_restore_gfp_mask();
237 resume_console(); 235 resume_console();
238 Close: 236 Close:
239 if (suspend_ops->end) 237 if (suspend_ops->end)
@@ -294,7 +292,9 @@ int enter_state(suspend_state_t state)
294 goto Finish; 292 goto Finish;
295 293
296 pr_debug("PM: Entering %s sleep\n", pm_states[state]); 294 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
295 pm_restrict_gfp_mask();
297 error = suspend_devices_and_enter(state); 296 error = suspend_devices_and_enter(state);
297 pm_restore_gfp_mask();
298 298
299 Finish: 299 Finish:
300 pr_debug("PM: Finishing wakeup.\n"); 300 pr_debug("PM: Finishing wakeup.\n");
diff --git a/kernel/power/user.c b/kernel/power/user.c
index c36c3b9e8a84..7d02d33be699 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp)
135 free_basic_memory_bitmaps(); 135 free_basic_memory_bitmaps();
136 data = filp->private_data; 136 data = filp->private_data;
137 free_all_swap_pages(data->swap); 137 free_all_swap_pages(data->swap);
138 if (data->frozen) 138 if (data->frozen) {
139 pm_restore_gfp_mask();
139 thaw_processes(); 140 thaw_processes();
141 }
140 pm_notifier_call_chain(data->mode == O_RDONLY ? 142 pm_notifier_call_chain(data->mode == O_RDONLY ?
141 PM_POST_HIBERNATION : PM_POST_RESTORE); 143 PM_POST_HIBERNATION : PM_POST_RESTORE);
142 atomic_inc(&snapshot_device_available); 144 atomic_inc(&snapshot_device_available);
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
379 * PM_HIBERNATION_PREPARE 381 * PM_HIBERNATION_PREPARE
380 */ 382 */
381 error = suspend_devices_and_enter(PM_SUSPEND_MEM); 383 error = suspend_devices_and_enter(PM_SUSPEND_MEM);
384 data->ready = 0;
382 break; 385 break;
383 386
384 case SNAPSHOT_PLATFORM_SUPPORT: 387 case SNAPSHOT_PLATFORM_SUPPORT:
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0fc1eed28d27..dc7ab65f3b36 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -22,6 +22,7 @@
22#include <linux/syscalls.h> 22#include <linux/syscalls.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/hw_breakpoint.h>
25 26
26 27
27/* 28/*
@@ -879,3 +880,19 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
879 return ret; 880 return ret;
880} 881}
881#endif /* CONFIG_COMPAT */ 882#endif /* CONFIG_COMPAT */
883
884#ifdef CONFIG_HAVE_HW_BREAKPOINT
885int ptrace_get_breakpoints(struct task_struct *tsk)
886{
887 if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
888 return 0;
889
890 return -1;
891}
892
893void ptrace_put_breakpoints(struct task_struct *tsk)
894{
895 if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
896 flush_ptrace_hw_breakpoint(tsk);
897}
898#endif /* CONFIG_HAVE_HW_BREAKPOINT */
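
A hedged sketch of the calling convention the two helpers above establish: architecture ptrace code is expected to pin the tracee's breakpoint state around any hardware-breakpoint access. The handler name arch_ptrace_poke_hw_breakpoint() and its arguments are hypothetical.

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

static int arch_ptrace_poke_hw_breakpoint(struct task_struct *child,
					  unsigned long addr, unsigned long data)
{
	int ret;

	/* fails (-1) once do_exit() has dropped the last reference and
	 * flushed the breakpoints, so the tracee state is gone */
	if (ptrace_get_breakpoints(child) < 0)
		return -ESRCH;

	ret = 0;
	/* ... install or modify the hardware breakpoint for child ... */

	/* the last put flushes the tracee's hardware breakpoints */
	ptrace_put_breakpoints(child);
	return ret;
}
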
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6519cf62d9cd..0e17c10f8a9d 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
685 /* Add clocksource to the clcoksource list */ 685 /* Add clocksource to the clcoksource list */
686 mutex_lock(&clocksource_mutex); 686 mutex_lock(&clocksource_mutex);
687 clocksource_enqueue(cs); 687 clocksource_enqueue(cs);
688 clocksource_select();
689 clocksource_enqueue_watchdog(cs); 688 clocksource_enqueue_watchdog(cs);
689 clocksource_select();
690 mutex_unlock(&clocksource_mutex); 690 mutex_unlock(&clocksource_mutex);
691 return 0; 691 return 0;
692} 692}
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs)
706 706
707 mutex_lock(&clocksource_mutex); 707 mutex_lock(&clocksource_mutex);
708 clocksource_enqueue(cs); 708 clocksource_enqueue(cs);
709 clocksource_select();
710 clocksource_enqueue_watchdog(cs); 709 clocksource_enqueue_watchdog(cs);
710 clocksource_select();
711 mutex_unlock(&clocksource_mutex); 711 mutex_unlock(&clocksource_mutex);
712 return 0; 712 return 0;
713} 713}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index da800ffa810c..723c7637e55a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
522 */ 522 */
523void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 523void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
524{ 524{
525 int cpu = smp_processor_id();
526
525 /* Set it up only once ! */ 527 /* Set it up only once ! */
526 if (bc->event_handler != tick_handle_oneshot_broadcast) { 528 if (bc->event_handler != tick_handle_oneshot_broadcast) {
527 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 529 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
528 int cpu = smp_processor_id();
529 530
530 bc->event_handler = tick_handle_oneshot_broadcast; 531 bc->event_handler = tick_handle_oneshot_broadcast;
531 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); 532 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
551 tick_broadcast_set_event(tick_next_period, 1); 552 tick_broadcast_set_event(tick_next_period, 1);
552 } else 553 } else
553 bc->next_event.tv64 = KTIME_MAX; 554 bc->next_event.tv64 = KTIME_MAX;
555 } else {
556 /*
557 * The first cpu which switches to oneshot mode sets
558 * the bit for all other cpus which are in the general
559 * (periodic) broadcast mask. So the bit is set and
560 * would prevent the first broadcast enter after this
561 * to program the bc device.
562 */
563 tick_broadcast_clear_oneshot(cpu);
554 } 564 }
555} 565}
556 566
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d38c16a06a6f..1cb49be7c7fb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1110,6 +1110,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1110 1110
1111 entry->preempt_count = pc & 0xff; 1111 entry->preempt_count = pc & 0xff;
1112 entry->pid = (tsk) ? tsk->pid : 0; 1112 entry->pid = (tsk) ? tsk->pid : 0;
1113 entry->padding = 0;
1113 entry->flags = 1114 entry->flags =
1114#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 1115#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1115 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 1116 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e88f74fe1d4c..2fe110341359 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
116 __common_field(unsigned char, flags); 116 __common_field(unsigned char, flags);
117 __common_field(unsigned char, preempt_count); 117 __common_field(unsigned char, preempt_count);
118 __common_field(int, pid); 118 __common_field(int, pid);
119 __common_field(int, padding);
119 120
120 return ret; 121 return ret;
121} 122}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index bc0ac6b333dc..dfd60192bc2e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
797 return string(buf, end, uuid, spec); 797 return string(buf, end, uuid, spec);
798} 798}
799 799
800int kptr_restrict = 1; 800int kptr_restrict __read_mostly;
801 801
802/* 802/*
803 * Show a '%p' thing. A kernel extension is that the '%p' is followed 803 * Show a '%p' thing. A kernel extension is that the '%p' is followed
diff --git a/mm/memory.c b/mm/memory.c
index 27f425378112..61e66f026563 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1412,9 +1412,8 @@ no_page_table:
1412 1412
1413static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) 1413static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
1414{ 1414{
1415 return (vma->vm_flags & VM_GROWSDOWN) && 1415 return stack_guard_page_start(vma, addr) ||
1416 (vma->vm_start == addr) && 1416 stack_guard_page_end(vma, addr+PAGE_SIZE);
1417 !vma_stack_continue(vma->vm_prev, addr);
1418} 1417}
1419 1418
1420/** 1419/**
@@ -1551,12 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1551 continue; 1550 continue;
1552 } 1551 }
1553 1552
1554 /*
1555 * For mlock, just skip the stack guard page.
1556 */
1557 if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start))
1558 goto next_page;
1559
1560 do { 1553 do {
1561 struct page *page; 1554 struct page *page;
1562 unsigned int foll_flags = gup_flags; 1555 unsigned int foll_flags = gup_flags;
@@ -1573,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1573 int ret; 1566 int ret;
1574 unsigned int fault_flags = 0; 1567 unsigned int fault_flags = 0;
1575 1568
1569 /* For mlock, just skip the stack guard page. */
1570 if (foll_flags & FOLL_MLOCK) {
1571 if (stack_guard_page(vma, start))
1572 goto next_page;
1573 }
1576 if (foll_flags & FOLL_WRITE) 1574 if (foll_flags & FOLL_WRITE)
1577 fault_flags |= FAULT_FLAG_WRITE; 1575 fault_flags |= FAULT_FLAG_WRITE;
1578 if (nonblocking) 1576 if (nonblocking)
diff --git a/mm/mmap.c b/mm/mmap.c
index e27e0cf0de03..772140c53ab1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1767,10 +1767,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1767 size = address - vma->vm_start; 1767 size = address - vma->vm_start;
1768 grow = (address - vma->vm_end) >> PAGE_SHIFT; 1768 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1769 1769
1770 error = acct_stack_growth(vma, size, grow); 1770 error = -ENOMEM;
1771 if (!error) { 1771 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1772 vma->vm_end = address; 1772 error = acct_stack_growth(vma, size, grow);
1773 perf_event_mmap(vma); 1773 if (!error) {
1774 vma->vm_end = address;
1775 perf_event_mmap(vma);
1776 }
1774 } 1777 }
1775 } 1778 }
1776 vma_unlock_anon_vma(vma); 1779 vma_unlock_anon_vma(vma);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f8a97b9a350..3f8bce264df6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order)
2317 2317
2318EXPORT_SYMBOL(free_pages); 2318EXPORT_SYMBOL(free_pages);
2319 2319
2320static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2321{
2322 if (addr) {
2323 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2324 unsigned long used = addr + PAGE_ALIGN(size);
2325
2326 split_page(virt_to_page((void *)addr), order);
2327 while (used < alloc_end) {
2328 free_page(used);
2329 used += PAGE_SIZE;
2330 }
2331 }
2332 return (void *)addr;
2333}
2334
2320/** 2335/**
2321 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 2336 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2322 * @size: the number of bytes to allocate 2337 * @size: the number of bytes to allocate
@@ -2336,22 +2351,33 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2336 unsigned long addr; 2351 unsigned long addr;
2337 2352
2338 addr = __get_free_pages(gfp_mask, order); 2353 addr = __get_free_pages(gfp_mask, order);
2339 if (addr) { 2354 return make_alloc_exact(addr, order, size);
2340 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2341 unsigned long used = addr + PAGE_ALIGN(size);
2342
2343 split_page(virt_to_page((void *)addr), order);
2344 while (used < alloc_end) {
2345 free_page(used);
2346 used += PAGE_SIZE;
2347 }
2348 }
2349
2350 return (void *)addr;
2351} 2355}
2352EXPORT_SYMBOL(alloc_pages_exact); 2356EXPORT_SYMBOL(alloc_pages_exact);
2353 2357
2354/** 2358/**
2359 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2360 * pages on a node.
2361 * @nid: the preferred node ID where memory should be allocated
2362 * @size: the number of bytes to allocate
2363 * @gfp_mask: GFP flags for the allocation
2364 *
2365 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2366 * back.
2367 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2368 * but is not exact.
2369 */
2370void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2371{
2372 unsigned order = get_order(size);
2373 struct page *p = alloc_pages_node(nid, gfp_mask, order);
2374 if (!p)
2375 return NULL;
2376 return make_alloc_exact((unsigned long)page_address(p), order, size);
2377}
2378EXPORT_SYMBOL(alloc_pages_exact_nid);
2379
2380/**
2355 * free_pages_exact - release memory allocated via alloc_pages_exact() 2381 * free_pages_exact - release memory allocated via alloc_pages_exact()
2356 * @virt: the value returned by alloc_pages_exact. 2382 * @virt: the value returned by alloc_pages_exact.
2357 * @size: size of allocation, same value as passed to alloc_pages_exact(). 2383 * @size: size of allocation, same value as passed to alloc_pages_exact().
@@ -3564,7 +3590,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3564 3590
3565 if (!slab_is_available()) { 3591 if (!slab_is_available()) {
3566 zone->wait_table = (wait_queue_head_t *) 3592 zone->wait_table = (wait_queue_head_t *)
3567 alloc_bootmem_node(pgdat, alloc_size); 3593 alloc_bootmem_node_nopanic(pgdat, alloc_size);
3568 } else { 3594 } else {
3569 /* 3595 /*
3570 * This case means that a zone whose size was 0 gets new memory 3596 * This case means that a zone whose size was 0 gets new memory
@@ -4141,7 +4167,8 @@ static void __init setup_usemap(struct pglist_data *pgdat,
4141 unsigned long usemapsize = usemap_size(zonesize); 4167 unsigned long usemapsize = usemap_size(zonesize);
4142 zone->pageblock_flags = NULL; 4168 zone->pageblock_flags = NULL;
4143 if (usemapsize) 4169 if (usemapsize)
4144 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); 4170 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4171 usemapsize);
4145} 4172}
4146#else 4173#else
4147static inline void setup_usemap(struct pglist_data *pgdat, 4174static inline void setup_usemap(struct pglist_data *pgdat,
@@ -4307,7 +4334,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4307 size = (end - start) * sizeof(struct page); 4334 size = (end - start) * sizeof(struct page);
4308 map = alloc_remap(pgdat->node_id, size); 4335 map = alloc_remap(pgdat->node_id, size);
4309 if (!map) 4336 if (!map)
4310 map = alloc_bootmem_node(pgdat, size); 4337 map = alloc_bootmem_node_nopanic(pgdat, size);
4311 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 4338 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4312 } 4339 }
4313#ifndef CONFIG_NEED_MULTIPLE_NODES 4340#ifndef CONFIG_NEED_MULTIPLE_NODES
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 99055010cece..2daadc322ba6 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid)
134{ 134{
135 void *addr = NULL; 135 void *addr = NULL;
136 136
137 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); 137 addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
138 if (addr) 138 if (addr)
139 return addr; 139 return addr;
140 140
diff --git a/mm/shmem.c b/mm/shmem.c
index 8fa27e4e582a..dfc7069102ee 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_
852 852
853static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) 853static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
854{ 854{
855 struct inode *inode; 855 struct address_space *mapping;
856 unsigned long idx; 856 unsigned long idx;
857 unsigned long size; 857 unsigned long size;
858 unsigned long limit; 858 unsigned long limit;
@@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
875 if (size > SHMEM_NR_DIRECT) 875 if (size > SHMEM_NR_DIRECT)
876 size = SHMEM_NR_DIRECT; 876 size = SHMEM_NR_DIRECT;
877 offset = shmem_find_swp(entry, ptr, ptr+size); 877 offset = shmem_find_swp(entry, ptr, ptr+size);
878 if (offset >= 0) 878 if (offset >= 0) {
879 shmem_swp_balance_unmap();
879 goto found; 880 goto found;
881 }
880 if (!info->i_indirect) 882 if (!info->i_indirect)
881 goto lost2; 883 goto lost2;
882 884
@@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
914 if (size > ENTRIES_PER_PAGE) 916 if (size > ENTRIES_PER_PAGE)
915 size = ENTRIES_PER_PAGE; 917 size = ENTRIES_PER_PAGE;
916 offset = shmem_find_swp(entry, ptr, ptr+size); 918 offset = shmem_find_swp(entry, ptr, ptr+size);
917 shmem_swp_unmap(ptr);
918 if (offset >= 0) { 919 if (offset >= 0) {
919 shmem_dir_unmap(dir); 920 shmem_dir_unmap(dir);
920 goto found; 921 goto found;
921 } 922 }
923 shmem_swp_unmap(ptr);
922 } 924 }
923 } 925 }
924lost1: 926lost1:
@@ -928,8 +930,7 @@ lost2:
928 return 0; 930 return 0;
929found: 931found:
930 idx += offset; 932 idx += offset;
931 inode = igrab(&info->vfs_inode); 933 ptr += offset;
932 spin_unlock(&info->lock);
933 934
934 /* 935 /*
935 * Move _head_ to start search for next from here. 936 * Move _head_ to start search for next from here.
@@ -940,37 +941,18 @@ found:
940 */ 941 */
941 if (shmem_swaplist.next != &info->swaplist) 942 if (shmem_swaplist.next != &info->swaplist)
942 list_move_tail(&shmem_swaplist, &info->swaplist); 943 list_move_tail(&shmem_swaplist, &info->swaplist);
943 mutex_unlock(&shmem_swaplist_mutex);
944 944
945 error = 1;
946 if (!inode)
947 goto out;
948 /* 945 /*
949 * Charge page using GFP_KERNEL while we can wait. 946 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
950 * Charged back to the user(not to caller) when swap account is used. 947 * but also to hold up shmem_evict_inode(): so inode cannot be freed
951 * add_to_page_cache() will be called with GFP_NOWAIT. 948 * beneath us (pagelock doesn't help until the page is in pagecache).
952 */ 949 */
953 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 950 mapping = info->vfs_inode.i_mapping;
954 if (error) 951 error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
955 goto out; 952 /* which does mem_cgroup_uncharge_cache_page on error */
956 error = radix_tree_preload(GFP_KERNEL);
957 if (error) {
958 mem_cgroup_uncharge_cache_page(page);
959 goto out;
960 }
961 error = 1;
962
963 spin_lock(&info->lock);
964 ptr = shmem_swp_entry(info, idx, NULL);
965 if (ptr && ptr->val == entry.val) {
966 error = add_to_page_cache_locked(page, inode->i_mapping,
967 idx, GFP_NOWAIT);
968 /* does mem_cgroup_uncharge_cache_page on error */
969 } else /* we must compensate for our precharge above */
970 mem_cgroup_uncharge_cache_page(page);
971 953
972 if (error == -EEXIST) { 954 if (error == -EEXIST) {
973 struct page *filepage = find_get_page(inode->i_mapping, idx); 955 struct page *filepage = find_get_page(mapping, idx);
974 error = 1; 956 error = 1;
975 if (filepage) { 957 if (filepage) {
976 /* 958 /*
@@ -990,14 +972,8 @@ found:
990 swap_free(entry); 972 swap_free(entry);
991 error = 1; /* not an error, but entry was found */ 973 error = 1; /* not an error, but entry was found */
992 } 974 }
993 if (ptr) 975 shmem_swp_unmap(ptr);
994 shmem_swp_unmap(ptr);
995 spin_unlock(&info->lock); 976 spin_unlock(&info->lock);
996 radix_tree_preload_end();
997out:
998 unlock_page(page);
999 page_cache_release(page);
1000 iput(inode); /* allows for NULL */
1001 return error; 977 return error;
1002} 978}
1003 979
@@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
1009 struct list_head *p, *next; 985 struct list_head *p, *next;
1010 struct shmem_inode_info *info; 986 struct shmem_inode_info *info;
1011 int found = 0; 987 int found = 0;
988 int error;
989
990 /*
991 * Charge page using GFP_KERNEL while we can wait, before taking
992 * the shmem_swaplist_mutex which might hold up shmem_writepage().
993 * Charged back to the user (not to caller) when swap account is used.
994 * add_to_page_cache() will be called with GFP_NOWAIT.
995 */
996 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
997 if (error)
998 goto out;
999 /*
1000 * Try to preload while we can wait, to not make a habit of
1001 * draining atomic reserves; but don't latch on to this cpu,
1002 * it's okay if sometimes we get rescheduled after this.
1003 */
1004 error = radix_tree_preload(GFP_KERNEL);
1005 if (error)
1006 goto uncharge;
1007 radix_tree_preload_end();
1012 1008
1013 mutex_lock(&shmem_swaplist_mutex); 1009 mutex_lock(&shmem_swaplist_mutex);
1014 list_for_each_safe(p, next, &shmem_swaplist) { 1010 list_for_each_safe(p, next, &shmem_swaplist) {
@@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
1016 found = shmem_unuse_inode(info, entry, page); 1012 found = shmem_unuse_inode(info, entry, page);
1017 cond_resched(); 1013 cond_resched();
1018 if (found) 1014 if (found)
1019 goto out; 1015 break;
1020 } 1016 }
1021 mutex_unlock(&shmem_swaplist_mutex); 1017 mutex_unlock(&shmem_swaplist_mutex);
1022 /* 1018
1023 * Can some race bring us here? We've been holding page lock, 1019uncharge:
1024 * so I think not; but would rather try again later than BUG() 1020 if (!found)
1025 */ 1021 mem_cgroup_uncharge_cache_page(page);
1022 if (found < 0)
1023 error = found;
1024out:
1026 unlock_page(page); 1025 unlock_page(page);
1027 page_cache_release(page); 1026 page_cache_release(page);
1028out: 1027 return error;
1029 return (found < 0) ? found : 0;
1030} 1028}
1031 1029
1032/* 1030/*
@@ -1064,7 +1062,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1064 else 1062 else
1065 swap.val = 0; 1063 swap.val = 0;
1066 1064
1065 /*
1066 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1067 * if it's not already there. Do it now because we cannot take
1068 * mutex while holding spinlock, and must do so before the page
1069 * is moved to swap cache, when its pagelock no longer protects
1070 * the inode from eviction. But don't unlock the mutex until
1071 * we've taken the spinlock, because shmem_unuse_inode() will
1072 * prune a !swapped inode from the swaplist under both locks.
1073 */
1074 if (swap.val) {
1075 mutex_lock(&shmem_swaplist_mutex);
1076 if (list_empty(&info->swaplist))
1077 list_add_tail(&info->swaplist, &shmem_swaplist);
1078 }
1079
1067 spin_lock(&info->lock); 1080 spin_lock(&info->lock);
1081 if (swap.val)
1082 mutex_unlock(&shmem_swaplist_mutex);
1083
1068 if (index >= info->next_index) { 1084 if (index >= info->next_index) {
1069 BUG_ON(!(info->flags & SHMEM_TRUNCATE)); 1085 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
1070 goto unlock; 1086 goto unlock;
@@ -1084,21 +1100,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1084 delete_from_page_cache(page); 1100 delete_from_page_cache(page);
1085 shmem_swp_set(info, entry, swap.val); 1101 shmem_swp_set(info, entry, swap.val);
1086 shmem_swp_unmap(entry); 1102 shmem_swp_unmap(entry);
1087 if (list_empty(&info->swaplist))
1088 inode = igrab(inode);
1089 else
1090 inode = NULL;
1091 spin_unlock(&info->lock); 1103 spin_unlock(&info->lock);
1092 swap_shmem_alloc(swap); 1104 swap_shmem_alloc(swap);
1093 BUG_ON(page_mapped(page)); 1105 BUG_ON(page_mapped(page));
1094 swap_writepage(page, wbc); 1106 swap_writepage(page, wbc);
1095 if (inode) {
1096 mutex_lock(&shmem_swaplist_mutex);
1097 /* move instead of add in case we're racing */
1098 list_move_tail(&info->swaplist, &shmem_swaplist);
1099 mutex_unlock(&shmem_swaplist_mutex);
1100 iput(inode);
1101 }
1102 return 0; 1107 return 0;
1103 } 1108 }
1104 1109
@@ -1400,20 +1405,14 @@ repeat:
1400 if (sbinfo->max_blocks) { 1405 if (sbinfo->max_blocks) {
1401 if (percpu_counter_compare(&sbinfo->used_blocks, 1406 if (percpu_counter_compare(&sbinfo->used_blocks,
1402 sbinfo->max_blocks) >= 0 || 1407 sbinfo->max_blocks) >= 0 ||
1403 shmem_acct_block(info->flags)) { 1408 shmem_acct_block(info->flags))
1404 spin_unlock(&info->lock); 1409 goto nospace;
1405 error = -ENOSPC;
1406 goto failed;
1407 }
1408 percpu_counter_inc(&sbinfo->used_blocks); 1410 percpu_counter_inc(&sbinfo->used_blocks);
1409 spin_lock(&inode->i_lock); 1411 spin_lock(&inode->i_lock);
1410 inode->i_blocks += BLOCKS_PER_PAGE; 1412 inode->i_blocks += BLOCKS_PER_PAGE;
1411 spin_unlock(&inode->i_lock); 1413 spin_unlock(&inode->i_lock);
1412 } else if (shmem_acct_block(info->flags)) { 1414 } else if (shmem_acct_block(info->flags))
1413 spin_unlock(&info->lock); 1415 goto nospace;
1414 error = -ENOSPC;
1415 goto failed;
1416 }
1417 1416
1418 if (!filepage) { 1417 if (!filepage) {
1419 int ret; 1418 int ret;
@@ -1493,6 +1492,24 @@ done:
1493 error = 0; 1492 error = 0;
1494 goto out; 1493 goto out;
1495 1494
1495nospace:
1496 /*
1497 * Perhaps the page was brought in from swap between find_lock_page
1498 * and taking info->lock? We allow for that at add_to_page_cache_lru,
1499 * but must also avoid reporting a spurious ENOSPC while working on a
1500 * full tmpfs. (When filepage has been passed in to shmem_getpage, it
1501 * is already in page cache, which prevents this race from occurring.)
1502 */
1503 if (!filepage) {
1504 struct page *page = find_get_page(mapping, idx);
1505 if (page) {
1506 spin_unlock(&info->lock);
1507 page_cache_release(page);
1508 goto repeat;
1509 }
1510 }
1511 spin_unlock(&info->lock);
1512 error = -ENOSPC;
1496failed: 1513failed:
1497 if (*pagep != filepage) { 1514 if (*pagep != filepage) {
1498 unlock_page(filepage); 1515 unlock_page(filepage);
diff --git a/mm/swap.c b/mm/swap.c
index a448db377cb0..5602f1a1b1e7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg)
396 if (!PageLRU(page)) 396 if (!PageLRU(page))
397 return; 397 return;
398 398
399 if (PageUnevictable(page))
400 return;
401
399 /* Some processes are using the page */ 402 /* Some processes are using the page */
400 if (page_mapped(page)) 403 if (page_mapped(page))
401 return; 404 return;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f6b435c80079..8bfd45050a61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -937,7 +937,7 @@ keep_lumpy:
937 * back off and wait for congestion to clear because further reclaim 937 * back off and wait for congestion to clear because further reclaim
938 * will encounter the same problem 938 * will encounter the same problem
939 */ 939 */
940 if (nr_dirty == nr_congested && nr_dirty != 0) 940 if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
941 zone_set_flag(zone, ZONE_CONGESTED); 941 zone_set_flag(zone, ZONE_CONGESTED);
942 942
943 free_page_list(&free_pages); 943 free_page_list(&free_pages);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 7850412f52b7..0eb1a886b370 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
124 124
125 grp->nr_vlans--; 125 grp->nr_vlans--;
126 126
127 if (vlan->flags & VLAN_FLAG_GVRP)
128 vlan_gvrp_request_leave(dev);
129
127 vlan_group_set_device(grp, vlan_id, NULL); 130 vlan_group_set_device(grp, vlan_id, NULL);
128 if (!grp->killall) 131 if (!grp->killall)
129 synchronize_net(); 132 synchronize_net();
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e34ea9e5e28b..b2ff6c8d3603 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev)
487 struct vlan_dev_info *vlan = vlan_dev_info(dev); 487 struct vlan_dev_info *vlan = vlan_dev_info(dev);
488 struct net_device *real_dev = vlan->real_dev; 488 struct net_device *real_dev = vlan->real_dev;
489 489
490 if (vlan->flags & VLAN_FLAG_GVRP)
491 vlan_gvrp_request_leave(dev);
492
493 dev_mc_unsync(real_dev, dev); 490 dev_mc_unsync(real_dev, dev);
494 dev_uc_unsync(real_dev, dev); 491 dev_uc_unsync(real_dev, dev);
495 if (dev->flags & IFF_ALLMULTI) 492 if (dev->flags & IFF_ALLMULTI)
diff --git a/net/9p/client.c b/net/9p/client.c
index 77367745be9b..a9aa2dd66482 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
614 614
615 err = c->trans_mod->request(c, req); 615 err = c->trans_mod->request(c, req);
616 if (err < 0) { 616 if (err < 0) {
617 if (err != -ERESTARTSYS) 617 if (err != -ERESTARTSYS && err != -EFAULT)
618 c->status = Disconnected; 618 c->status = Disconnected;
619 goto reterr; 619 goto reterr;
620 } 620 }
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index b58a501cf3d1..a873277cb996 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
674 } 674 }
675 675
676 strcpy(dirent->d_name, nameptr); 676 strcpy(dirent->d_name, nameptr);
677 kfree(nameptr);
677 678
678out: 679out:
679 return fake_pdu.offset; 680 return fake_pdu.offset;
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index e883172f9aa2..9a70ebdec56e 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
63 int nr_pages, u8 rw) 63 int nr_pages, u8 rw)
64{ 64{
65 uint32_t first_page_bytes = 0; 65 uint32_t first_page_bytes = 0;
66 uint32_t pdata_mapped_pages; 66 int32_t pdata_mapped_pages;
67 struct trans_rpage_info *rpinfo; 67 struct trans_rpage_info *rpinfo;
68 68
69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); 69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
@@ -75,14 +75,9 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
75 rpinfo = req->tc->private; 75 rpinfo = req->tc->private;
76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, 76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
77 nr_pages, rw, &rpinfo->rp_data[0]); 77 nr_pages, rw, &rpinfo->rp_data[0]);
78 if (pdata_mapped_pages <= 0)
79 return pdata_mapped_pages;
78 80
79 if (pdata_mapped_pages < 0) {
80 printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p"
81 "nr_pages:%d\n", pdata_mapped_pages,
82 req->tc->pubuf, nr_pages);
83 pdata_mapped_pages = 0;
84 return -EIO;
85 }
86 rpinfo->rp_nr_pages = pdata_mapped_pages; 81 rpinfo->rp_nr_pages = pdata_mapped_pages;
87 if (*pdata_off) { 82 if (*pdata_off) {
88 *pdata_len = first_page_bytes; 83 *pdata_len = first_page_bytes;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 94954c74f6ae..42fdffd1d76c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk)
369 369
370 case BT_CONNECTED: 370 case BT_CONNECTED:
371 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
381 case BT_CONNECT: 372 case BT_CONNECT:
382 case BT_DISCONN: 373 case BT_DISCONN:
383 sco_chan_del(sk, ECONNRESET); 374 sco_chan_del(sk, ECONNRESET);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f3bc322c5891..74ef4d4846a4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
737 nf_bridge->mask |= BRNF_PKT_TYPE; 737 nf_bridge->mask |= BRNF_PKT_TYPE;
738 } 738 }
739 739
740 if (br_parse_ip_options(skb)) 740 if (pf == PF_INET && br_parse_ip_options(skb))
741 return NF_DROP; 741 return NF_DROP;
742 742
743 /* The physdev module checks on this */ 743 /* The physdev module checks on this */
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 893669caa8de..1a92b369c820 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
1766 1766
1767 newinfo->entries_size = size; 1767 newinfo->entries_size = size;
1768 1768
1769 xt_compat_init_offsets(AF_INET, info->nentries); 1769 xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1770 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1770 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1771 entries, newinfo); 1771 entries, newinfo);
1772} 1772}
@@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1882 struct xt_match *match; 1882 struct xt_match *match;
1883 struct xt_target *wt; 1883 struct xt_target *wt;
1884 void *dst = NULL; 1884 void *dst = NULL;
1885 int off, pad = 0, ret = 0; 1885 int off, pad = 0;
1886 unsigned int size_kern, entry_offset, match_size = mwt->match_size; 1886 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1887 1887
1888 strlcpy(name, mwt->u.name, sizeof(name)); 1888 strlcpy(name, mwt->u.name, sizeof(name));
@@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1935 break; 1935 break;
1936 } 1936 }
1937 1937
1938 if (!dst) {
1939 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1940 off + ebt_compat_entry_padsize());
1941 if (ret < 0)
1942 return ret;
1943 }
1944
1945 state->buf_kern_offset += match_size + off; 1938 state->buf_kern_offset += match_size + off;
1946 state->buf_user_offset += match_size; 1939 state->buf_user_offset += match_size;
1947 pad = XT_ALIGN(size_kern) - size_kern; 1940 pad = XT_ALIGN(size_kern) - size_kern;
@@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2016 return growth; 2009 return growth;
2017} 2010}
2018 2011
2019#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2020({ \
2021 unsigned int __i; \
2022 int __ret = 0; \
2023 struct compat_ebt_entry_mwt *__watcher; \
2024 \
2025 for (__i = e->watchers_offset; \
2026 __i < (e)->target_offset; \
2027 __i += __watcher->watcher_size + \
2028 sizeof(struct compat_ebt_entry_mwt)) { \
2029 __watcher = (void *)(e) + __i; \
2030 __ret = fn(__watcher , ## args); \
2031 if (__ret != 0) \
2032 break; \
2033 } \
2034 if (__ret == 0) { \
2035 if (__i != (e)->target_offset) \
2036 __ret = -EINVAL; \
2037 } \
2038 __ret; \
2039})
2040
2041#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2042({ \
2043 unsigned int __i; \
2044 int __ret = 0; \
2045 struct compat_ebt_entry_mwt *__match; \
2046 \
2047 for (__i = sizeof(struct ebt_entry); \
2048 __i < (e)->watchers_offset; \
2049 __i += __match->match_size + \
2050 sizeof(struct compat_ebt_entry_mwt)) { \
2051 __match = (void *)(e) + __i; \
2052 __ret = fn(__match , ## args); \
2053 if (__ret != 0) \
2054 break; \
2055 } \
2056 if (__ret == 0) { \
2057 if (__i != (e)->watchers_offset) \
2058 __ret = -EINVAL; \
2059 } \
2060 __ret; \
2061})
2062
2063/* called for all ebt_entry structures. */ 2012/* called for all ebt_entry structures. */
2064static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, 2013static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2065 unsigned int *total, 2014 unsigned int *total,
@@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2132 } 2081 }
2133 } 2082 }
2134 2083
2084 if (state->buf_kern_start == NULL) {
2085 unsigned int offset = buf_start - (char *) base;
2086
2087 ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2088 if (ret < 0)
2089 return ret;
2090 }
2091
2135 startoff = state->buf_user_offset - startoff; 2092 startoff = state->buf_user_offset - startoff;
2136 2093
2137 BUG_ON(*total < startoff); 2094 BUG_ON(*total < startoff);
@@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user,
2240 2197
2241 xt_compat_lock(NFPROTO_BRIDGE); 2198 xt_compat_lock(NFPROTO_BRIDGE);
2242 2199
2200 xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2243 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2201 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2244 if (ret < 0) 2202 if (ret < 0)
2245 goto out_unlock; 2203 goto out_unlock;
diff --git a/net/core/dev.c b/net/core/dev.c
index 856b6ee9a1d5..b624fe4d9bd7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head)
1284 */ 1284 */
1285int dev_close(struct net_device *dev) 1285int dev_close(struct net_device *dev)
1286{ 1286{
1287 LIST_HEAD(single); 1287 if (dev->flags & IFF_UP) {
1288 LIST_HEAD(single);
1288 1289
1289 list_add(&dev->unreg_list, &single); 1290 list_add(&dev->unreg_list, &single);
1290 dev_close_many(&single); 1291 dev_close_many(&single);
1291 list_del(&single); 1292 list_del(&single);
1293 }
1292 return 0; 1294 return 0;
1293} 1295}
1294EXPORT_SYMBOL(dev_close); 1296EXPORT_SYMBOL(dev_close);
@@ -5184,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5184 /* Fix illegal checksum combinations */ 5186 /* Fix illegal checksum combinations */
5185 if ((features & NETIF_F_HW_CSUM) && 5187 if ((features & NETIF_F_HW_CSUM) &&
5186 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5188 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5187 netdev_info(dev, "mixed HW and IP checksum settings.\n"); 5189 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5188 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5190 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5189 } 5191 }
5190 5192
5191 if ((features & NETIF_F_NO_CSUM) && 5193 if ((features & NETIF_F_NO_CSUM) &&
5192 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5194 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5193 netdev_info(dev, "mixed no checksumming and other settings.\n"); 5195 netdev_warn(dev, "mixed no checksumming and other settings.\n");
5194 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); 5196 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5195 } 5197 }
5196 5198
5197 /* Fix illegal SG+CSUM combinations. */ 5199 /* Fix illegal SG+CSUM combinations. */
5198 if ((features & NETIF_F_SG) && 5200 if ((features & NETIF_F_SG) &&
5199 !(features & NETIF_F_ALL_CSUM)) { 5201 !(features & NETIF_F_ALL_CSUM)) {
5200 netdev_info(dev, 5202 netdev_dbg(dev,
5201 "Dropping NETIF_F_SG since no checksum feature.\n"); 5203 "Dropping NETIF_F_SG since no checksum feature.\n");
5202 features &= ~NETIF_F_SG; 5204 features &= ~NETIF_F_SG;
5203 } 5205 }
5204 5206
5205 /* TSO requires that SG is present as well. */ 5207 /* TSO requires that SG is present as well. */
5206 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5208 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5207 netdev_info(dev, "Dropping TSO features since no SG feature.\n"); 5209 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5208 features &= ~NETIF_F_ALL_TSO; 5210 features &= ~NETIF_F_ALL_TSO;
5209 } 5211 }
5210 5212
@@ -5214,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5214 5216
5215 /* Software GSO depends on SG. */ 5217 /* Software GSO depends on SG. */
5216 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5218 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5217 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5219 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5218 features &= ~NETIF_F_GSO; 5220 features &= ~NETIF_F_GSO;
5219 } 5221 }
5220 5222
@@ -5224,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5224 if (!((features & NETIF_F_GEN_CSUM) || 5226 if (!((features & NETIF_F_GEN_CSUM) ||
5225 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5227 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5226 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5228 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5227 netdev_info(dev, 5229 netdev_dbg(dev,
5228 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5230 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5229 features &= ~NETIF_F_UFO; 5231 features &= ~NETIF_F_UFO;
5230 } 5232 }
5231 5233
5232 if (!(features & NETIF_F_SG)) { 5234 if (!(features & NETIF_F_SG)) {
5233 netdev_info(dev, 5235 netdev_dbg(dev,
5234 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5236 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5235 features &= ~NETIF_F_UFO; 5237 features &= ~NETIF_F_UFO;
5236 } 5238 }
@@ -5412,12 +5414,6 @@ int register_netdevice(struct net_device *dev)
5412 dev->features |= NETIF_F_SOFT_FEATURES; 5414 dev->features |= NETIF_F_SOFT_FEATURES;
5413 dev->wanted_features = dev->features & dev->hw_features; 5415 dev->wanted_features = dev->features & dev->hw_features;
5414 5416
5415 /* Avoid warning from netdev_fix_features() for GSO without SG */
5416 if (!(dev->wanted_features & NETIF_F_SG)) {
5417 dev->wanted_features &= ~NETIF_F_GSO;
5418 dev->features &= ~NETIF_F_GSO;
5419 }
5420
5421 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5422 * vlan_dev_init() will do the dev->features check, so these features 5418 * vlan_dev_init() will do the dev->features check, so these features
5423 * are enabled only if supported by underlying device. 5419 * are enabled only if supported by underlying device.
diff --git a/net/dccp/options.c b/net/dccp/options.c
index f06ffcfc8d71..4b2ab657ac8e 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
123 case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: 123 case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
124 if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ 124 if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */
125 break; 125 break;
126 if (len == 0)
127 goto out_invalid_option;
126 rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, 128 rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
127 *value, value + 1, len - 1); 129 *value, value + 1, len - 1);
128 if (rc) 130 if (rc)
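For the feature-negotiation options (DCCPO_CHANGE_L ... DCCPO_CONFIRM_R) the first value byte is the feature number and the remaining len - 1 bytes are its values, so an empty option would make the parser read past the option and hand on a bogus length; the added check rejects it up front. A toy, userspace-only TLV walk showing where such a guard sits (the layout and the 0x20 type are made up for illustration):

#include <stdio.h>

/* Every option that carries a value must have at least one byte of it,
 * otherwise len - 1 underflows when handed to the next parser. */
static int parse_options(const unsigned char *opt, size_t total)
{
        size_t off = 0;

        while (off + 2 <= total) {
                unsigned char type = opt[off];
                unsigned char len  = opt[off + 1];   /* length of the value */

                if (off + 2 + len > total)
                        return -1;                   /* truncated option */

                if (type == 0x20) {                  /* hypothetical feature option */
                        if (len == 0)
                                return -1;           /* the added sanity check */
                        printf("feature %u, %u value byte(s)\n",
                               (unsigned)opt[off + 2], (unsigned)(len - 1));
                }
                off += 2 + len;
        }
        return 0;
}

int main(void)
{
        const unsigned char good[] = { 0x20, 0x02, 0x01, 0x2a };
        const unsigned char bad[]  = { 0x20, 0x00 };

        printf("good: %d\n", parse_options(good, sizeof(good)));
        printf("bad:  %d\n", parse_options(bad, sizeof(bad)));
        return 0;
}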
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a1151b8adf3c..b1d282f11be7 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg)
223 223
224 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 224 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
225 struct sk_buff *head = qp->q.fragments; 225 struct sk_buff *head = qp->q.fragments;
226 const struct iphdr *iph;
227 int err;
226 228
227 rcu_read_lock(); 229 rcu_read_lock();
228 head->dev = dev_get_by_index_rcu(net, qp->iif); 230 head->dev = dev_get_by_index_rcu(net, qp->iif);
229 if (!head->dev) 231 if (!head->dev)
230 goto out_rcu_unlock; 232 goto out_rcu_unlock;
231 233
234 /* skb dst is stale, drop it, and perform route lookup again */
235 skb_dst_drop(head);
236 iph = ip_hdr(head);
237 err = ip_route_input_noref(head, iph->daddr, iph->saddr,
238 iph->tos, head->dev);
239 if (err)
240 goto out_rcu_unlock;
241
232 /* 242 /*
233 * Only search router table for the head fragment, 243 * Only an end host needs to send an ICMP
234 * when defraging timeout at PRE_ROUTING HOOK. 244 * "Fragment Reassembly Timeout" message, per RFC792.
235 */ 245 */
236 if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { 246 if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
237 const struct iphdr *iph = ip_hdr(head); 247 skb_rtable(head)->rt_type != RTN_LOCAL)
238 int err = ip_route_input(head, iph->daddr, iph->saddr, 248 goto out_rcu_unlock;
239 iph->tos, head->dev);
240 if (unlikely(err))
241 goto out_rcu_unlock;
242
243 /*
244 * Only an end host needs to send an ICMP
245 * "Fragment Reassembly Timeout" message, per RFC792.
246 */
247 if (skb_rtable(head)->rt_type != RTN_LOCAL)
248 goto out_rcu_unlock;
249 249
250 }
251 250
252 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 251 /* Send an ICMP "Fragment Reassembly Timeout" message. */
253 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 252 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 34340c9c95fa..f376b05cca81 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -93,6 +93,7 @@ struct bictcp {
93 u32 ack_cnt; /* number of acks */ 93 u32 ack_cnt; /* number of acks */
94 u32 tcp_cwnd; /* estimated tcp cwnd */ 94 u32 tcp_cwnd; /* estimated tcp cwnd */
95#define ACK_RATIO_SHIFT 4 95#define ACK_RATIO_SHIFT 4
96#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
96 u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ 97 u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
97 u8 sample_cnt; /* number of samples to decide curr_rtt */ 98 u8 sample_cnt; /* number of samples to decide curr_rtt */
98 u8 found; /* the exit point is found? */ 99 u8 found; /* the exit point is found? */
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
398 u32 delay; 399 u32 delay;
399 400
400 if (icsk->icsk_ca_state == TCP_CA_Open) { 401 if (icsk->icsk_ca_state == TCP_CA_Open) {
401 cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; 402 u32 ratio = ca->delayed_ack;
402 ca->delayed_ack += cnt; 403
404 ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
405 ratio += cnt;
406
407 ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
403 } 408 }
404 409
405 /* Some calls are for duplicates without timetamps */ 410 /* Some calls are for duplicates without timetamps */
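ca->delayed_ack is a 16-bit packets-per-ACK estimate scaled by ACK_RATIO_SHIFT, and letting the accumulation grow unchecked could wrap the field (which feeds a division elsewhere in the module); the new ACK_RATIO_LIMIT caps it at 32 packets per ACK. A standalone sketch of that saturating update, illustrative rather than the CUBIC code itself:

#include <stdint.h>
#include <stdio.h>

#define ACK_RATIO_SHIFT 4
#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)

/* Exponentially-weighted packets-per-ACK estimate kept in 16 bits,
 * clamped so a burst of ACKed packets cannot wrap the counter. */
static uint16_t update_delayed_ack(uint16_t delayed_ack, uint32_t cnt)
{
        uint32_t ratio = delayed_ack;

        ratio -= delayed_ack >> ACK_RATIO_SHIFT;  /* decay the old estimate */
        ratio += cnt;                             /* add newly ACKed packets */

        return ratio < ACK_RATIO_LIMIT ? ratio : ACK_RATIO_LIMIT;
}

int main(void)
{
        uint16_t d = 16 << ACK_RATIO_SHIFT;        /* start around 16 pkts/ACK */

        for (int i = 0; i < 10; i++)
                d = update_delayed_ack(d, 70000);  /* pathological burst */
        printf("%u\n", (unsigned)(d >> ACK_RATIO_SHIFT));  /* stays at 32 */
        return 0;
}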
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 571aa96a175c..2d51840e53a1 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
69} 69}
70EXPORT_SYMBOL(xfrm4_prepare_output); 70EXPORT_SYMBOL(xfrm4_prepare_output);
71 71
72static int xfrm4_output_finish(struct sk_buff *skb) 72int xfrm4_output_finish(struct sk_buff *skb)
73{ 73{
74#ifdef CONFIG_NETFILTER 74#ifdef CONFIG_NETFILTER
75 if (!skb_dst(skb)->xfrm) { 75 if (!skb_dst(skb)->xfrm) {
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb)
86 86
87int xfrm4_output(struct sk_buff *skb) 87int xfrm4_output(struct sk_buff *skb)
88{ 88{
89 struct dst_entry *dst = skb_dst(skb);
90 struct xfrm_state *x = dst->xfrm;
91
89 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, 92 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
90 NULL, skb_dst(skb)->dev, xfrm4_output_finish, 93 NULL, dst->dev,
94 x->outer_mode->afinfo->output_finish,
91 !(IPCB(skb)->flags & IPSKB_REROUTED)); 95 !(IPCB(skb)->flags & IPSKB_REROUTED));
92} 96}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1717c64628d1..805d63ef4340 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
78 .init_tempsel = __xfrm4_init_tempsel, 78 .init_tempsel = __xfrm4_init_tempsel,
79 .init_temprop = xfrm4_init_temprop, 79 .init_temprop = xfrm4_init_temprop,
80 .output = xfrm4_output, 80 .output = xfrm4_output,
81 .output_finish = xfrm4_output_finish,
81 .extract_input = xfrm4_extract_input, 82 .extract_input = xfrm4_extract_input,
82 .extract_output = xfrm4_extract_output, 83 .extract_output = xfrm4_extract_output,
83 .transport_finish = xfrm4_transport_finish, 84 .transport_finish = xfrm4_transport_finish,
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 28e74488a329..a5a4c5dd5396 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
45 int tcphoff, needs_ack; 45 int tcphoff, needs_ack;
46 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); 46 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
47 struct ipv6hdr *ip6h; 47 struct ipv6hdr *ip6h;
48#define DEFAULT_TOS_VALUE 0x0U
49 const __u8 tclass = DEFAULT_TOS_VALUE;
48 struct dst_entry *dst = NULL; 50 struct dst_entry *dst = NULL;
49 u8 proto; 51 u8 proto;
50 struct flowi6 fl6; 52 struct flowi6 fl6;
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
124 skb_put(nskb, sizeof(struct ipv6hdr)); 126 skb_put(nskb, sizeof(struct ipv6hdr));
125 skb_reset_network_header(nskb); 127 skb_reset_network_header(nskb);
126 ip6h = ipv6_hdr(nskb); 128 ip6h = ipv6_hdr(nskb);
127 ip6h->version = 6; 129 *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
128 ip6h->hop_limit = ip6_dst_hoplimit(dst); 130 ip6h->hop_limit = ip6_dst_hoplimit(dst);
129 ip6h->nexthdr = IPPROTO_TCP; 131 ip6h->nexthdr = IPPROTO_TCP;
130 ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); 132 ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
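ip6h->version only covers the top four bits of the first header word; writing the whole word at once also initialises the traffic class (DEFAULT_TOS_VALUE here) and the flow label instead of leaving them uninitialised in the freshly built reset packet. A small standalone sketch of how that first 32-bit word packs together (a hypothetical helper, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* First 32 bits of an IPv6 header: 4-bit version, 8-bit traffic class,
 * 20-bit flow label.  Building it as one word keeps all three fields
 * defined, not just the version nibble. */
static uint32_t ipv6_word0(uint8_t tclass, uint32_t flowlabel)
{
        return htonl(0x60000000u | ((uint32_t)tclass << 20) |
                     (flowlabel & 0xFFFFFu));
}

int main(void)
{
        uint32_t w = ipv6_word0(0x00, 0);   /* DEFAULT_TOS_VALUE, no label */
        uint32_t h = ntohl(w);

        printf("version %u tclass %u flowlabel %u\n",
               h >> 28, (h >> 20) & 0xFF, h & 0xFFFFF);
        return 0;
}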
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8e688b3de9ab..49a91c5f5623 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
79} 79}
80EXPORT_SYMBOL(xfrm6_prepare_output); 80EXPORT_SYMBOL(xfrm6_prepare_output);
81 81
82static int xfrm6_output_finish(struct sk_buff *skb) 82int xfrm6_output_finish(struct sk_buff *skb)
83{ 83{
84#ifdef CONFIG_NETFILTER 84#ifdef CONFIG_NETFILTER
85 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 85 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb)
97 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 97 if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
98 ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 98 ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
99 dst_allfrag(skb_dst(skb)))) { 99 dst_allfrag(skb_dst(skb)))) {
100 return ip6_fragment(skb, xfrm6_output_finish); 100 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
101 } 101 }
102 return xfrm6_output_finish(skb); 102 return x->outer_mode->afinfo->output_finish(skb);
103} 103}
104 104
105int xfrm6_output(struct sk_buff *skb) 105int xfrm6_output(struct sk_buff *skb)
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index afe941e9415c..248f0b2a7ee9 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
178 .tmpl_sort = __xfrm6_tmpl_sort, 178 .tmpl_sort = __xfrm6_tmpl_sort,
179 .state_sort = __xfrm6_state_sort, 179 .state_sort = __xfrm6_state_sort,
180 .output = xfrm6_output, 180 .output = xfrm6_output,
181 .output_finish = xfrm6_output_finish,
181 .extract_input = xfrm6_extract_input, 182 .extract_input = xfrm6_extract_input,
182 .extract_output = xfrm6_extract_output, 183 .extract_output = xfrm6_extract_output,
183 .transport_finish = xfrm6_transport_finish, 184 .transport_finish = xfrm6_transport_finish,
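Together with the xfrm4_*/xfrm6_* hunks above, exporting output_finish through xfrm_state_afinfo lets the output path pick the finish routine from the state's outer-family ops rather than hard-coding the local family's function, which is what makes inter-family (IPv4-in-IPv6 and vice versa) tunnels come out with the right finish hook. A tiny sketch of dispatching through such a per-family ops table (names are hypothetical):

#include <stdio.h>

/* Per-family ops table in the spirit of xfrm_state_afinfo: the output path
 * calls through the table of the outer family instead of a fixed function. */
struct af_ops {
        const char *name;
        int (*output_finish)(int pkt);
};

static int v4_finish(int pkt) { printf("v4 finish %d\n", pkt); return 0; }
static int v6_finish(int pkt) { printf("v6 finish %d\n", pkt); return 0; }

static const struct af_ops afs[] = {
        { "ipv4", v4_finish },
        { "ipv6", v6_finish },
};

int main(void)
{
        /* an IPv6-in-IPv4 tunnel must run the IPv4 (outer) family's hook */
        const struct af_ops *outer = &afs[0];

        printf("outer family: %s\n", outer->name);
        return outer->output_finish(42);
}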
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce4596ed1268..bd1224fd216a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
237 &local->dynamic_ps_disable_work); 237 &local->dynamic_ps_disable_work);
238 } 238 }
239 239
240 /* Don't restart the timer if we're not disassociated */
241 if (!ifmgd->associated)
242 return TX_CONTINUE;
243
240 mod_timer(&local->dynamic_ps_timer, jiffies + 244 mod_timer(&local->dynamic_ps_timer, jiffies +
241 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 245 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
242 246
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 2dc6de13ac18..059af3120be7 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = {
572 .open = ip_vs_app_open, 572 .open = ip_vs_app_open,
573 .read = seq_read, 573 .read = seq_read,
574 .llseek = seq_lseek, 574 .llseek = seq_lseek,
575 .release = seq_release, 575 .release = seq_release_net,
576}; 576};
577#endif 577#endif
578 578
579static int __net_init __ip_vs_app_init(struct net *net) 579int __net_init __ip_vs_app_init(struct net *net)
580{ 580{
581 struct netns_ipvs *ipvs = net_ipvs(net); 581 struct netns_ipvs *ipvs = net_ipvs(net);
582 582
@@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net)
585 return 0; 585 return 0;
586} 586}
587 587
588static void __net_exit __ip_vs_app_cleanup(struct net *net) 588void __net_exit __ip_vs_app_cleanup(struct net *net)
589{ 589{
590 proc_net_remove(net, "ip_vs_app"); 590 proc_net_remove(net, "ip_vs_app");
591} 591}
592 592
593static struct pernet_operations ip_vs_app_ops = {
594 .init = __ip_vs_app_init,
595 .exit = __ip_vs_app_cleanup,
596};
597
598int __init ip_vs_app_init(void) 593int __init ip_vs_app_init(void)
599{ 594{
600 int rv; 595 return 0;
601
602 rv = register_pernet_subsys(&ip_vs_app_ops);
603 return rv;
604} 596}
605 597
606 598
607void ip_vs_app_cleanup(void) 599void ip_vs_app_cleanup(void)
608{ 600{
609 unregister_pernet_subsys(&ip_vs_app_ops);
610} 601}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index c97bd45975be..bf28ac2fc99b 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = {
1046 .open = ip_vs_conn_open, 1046 .open = ip_vs_conn_open,
1047 .read = seq_read, 1047 .read = seq_read,
1048 .llseek = seq_lseek, 1048 .llseek = seq_lseek,
1049 .release = seq_release, 1049 .release = seq_release_net,
1050}; 1050};
1051 1051
1052static const char *ip_vs_origin_name(unsigned flags) 1052static const char *ip_vs_origin_name(unsigned flags)
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
1114 .open = ip_vs_conn_sync_open, 1114 .open = ip_vs_conn_sync_open,
1115 .read = seq_read, 1115 .read = seq_read,
1116 .llseek = seq_lseek, 1116 .llseek = seq_lseek,
1117 .release = seq_release, 1117 .release = seq_release_net,
1118}; 1118};
1119 1119
1120#endif 1120#endif
@@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net)
1258 return 0; 1258 return 0;
1259} 1259}
1260 1260
1261static void __net_exit __ip_vs_conn_cleanup(struct net *net) 1261void __net_exit __ip_vs_conn_cleanup(struct net *net)
1262{ 1262{
1263 /* flush all the connection entries first */ 1263 /* flush all the connection entries first */
1264 ip_vs_conn_flush(net); 1264 ip_vs_conn_flush(net);
1265 proc_net_remove(net, "ip_vs_conn"); 1265 proc_net_remove(net, "ip_vs_conn");
1266 proc_net_remove(net, "ip_vs_conn_sync"); 1266 proc_net_remove(net, "ip_vs_conn_sync");
1267} 1267}
1268static struct pernet_operations ipvs_conn_ops = {
1269 .init = __ip_vs_conn_init,
1270 .exit = __ip_vs_conn_cleanup,
1271};
1272 1268
1273int __init ip_vs_conn_init(void) 1269int __init ip_vs_conn_init(void)
1274{ 1270{
1275 int idx; 1271 int idx;
1276 int retc;
1277 1272
1278 /* Compute size and mask */ 1273 /* Compute size and mask */
1279 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; 1274 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void)
1309 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); 1304 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
1310 } 1305 }
1311 1306
1312 retc = register_pernet_subsys(&ipvs_conn_ops);
1313
1314 /* calculate the random value for connection hash */ 1307 /* calculate the random value for connection hash */
1315 get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); 1308 get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
1316 1309
1317 return retc; 1310 return 0;
1318} 1311}
1319 1312
1320void ip_vs_conn_cleanup(void) 1313void ip_vs_conn_cleanup(void)
1321{ 1314{
1322 unregister_pernet_subsys(&ipvs_conn_ops);
1323 /* Release the empty cache */ 1315 /* Release the empty cache */
1324 kmem_cache_destroy(ip_vs_conn_cachep); 1316 kmem_cache_destroy(ip_vs_conn_cachep);
1325 vfree(ip_vs_conn_tab); 1317 vfree(ip_vs_conn_tab);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 07accf6b2401..a74dae6c5dbc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1113 return NF_ACCEPT; 1113 return NF_ACCEPT;
1114 1114
1115 net = skb_net(skb); 1115 net = skb_net(skb);
1116 if (!net_ipvs(net)->enable)
1117 return NF_ACCEPT;
1118
1116 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1119 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1117#ifdef CONFIG_IP_VS_IPV6 1120#ifdef CONFIG_IP_VS_IPV6
1118 if (af == AF_INET6) { 1121 if (af == AF_INET6) {
@@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1343 return NF_ACCEPT; /* The packet looks wrong, ignore */ 1346 return NF_ACCEPT; /* The packet looks wrong, ignore */
1344 1347
1345 net = skb_net(skb); 1348 net = skb_net(skb);
1349
1346 pd = ip_vs_proto_data_get(net, cih->protocol); 1350 pd = ip_vs_proto_data_get(net, cih->protocol);
1347 if (!pd) 1351 if (!pd)
1348 return NF_ACCEPT; 1352 return NF_ACCEPT;
@@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1529 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); 1533 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1530 return NF_ACCEPT; 1534 return NF_ACCEPT;
1531 } 1535 }
1536 /* ipvs enabled in this netns ? */
1537 net = skb_net(skb);
1538 if (!net_ipvs(net)->enable)
1539 return NF_ACCEPT;
1540
1532 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1541 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1533 1542
1534 /* Bad... Do not break raw sockets */ 1543 /* Bad... Do not break raw sockets */
@@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1562 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1571 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1563 } 1572 }
1564 1573
1565 net = skb_net(skb);
1566 /* Protocol supported? */ 1574 /* Protocol supported? */
1567 pd = ip_vs_proto_data_get(net, iph.protocol); 1575 pd = ip_vs_proto_data_get(net, iph.protocol);
1568 if (unlikely(!pd)) 1576 if (unlikely(!pd))
@@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1588 } 1596 }
1589 1597
1590 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); 1598 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1591 net = skb_net(skb);
1592 ipvs = net_ipvs(net); 1599 ipvs = net_ipvs(net);
1593 /* Check the server status */ 1600 /* Check the server status */
1594 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { 1601 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1743 int (*okfn)(struct sk_buff *)) 1750 int (*okfn)(struct sk_buff *))
1744{ 1751{
1745 int r; 1752 int r;
1753 struct net *net;
1746 1754
1747 if (ip_hdr(skb)->protocol != IPPROTO_ICMP) 1755 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1748 return NF_ACCEPT; 1756 return NF_ACCEPT;
1749 1757
1758 /* ipvs enabled in this netns ? */
1759 net = skb_net(skb);
1760 if (!net_ipvs(net)->enable)
1761 return NF_ACCEPT;
1762
1750 return ip_vs_in_icmp(skb, &r, hooknum); 1763 return ip_vs_in_icmp(skb, &r, hooknum);
1751} 1764}
1752 1765
@@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1757 int (*okfn)(struct sk_buff *)) 1770 int (*okfn)(struct sk_buff *))
1758{ 1771{
1759 int r; 1772 int r;
1773 struct net *net;
1760 1774
1761 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) 1775 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1762 return NF_ACCEPT; 1776 return NF_ACCEPT;
1763 1777
1778 /* ipvs enabled in this netns ? */
1779 net = skb_net(skb);
1780 if (!net_ipvs(net)->enable)
1781 return NF_ACCEPT;
1782
1764 return ip_vs_in_icmp_v6(skb, &r, hooknum); 1783 return ip_vs_in_icmp_v6(skb, &r, hooknum);
1765} 1784}
1766#endif 1785#endif
@@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net)
1884 pr_err("%s(): no memory.\n", __func__); 1903 pr_err("%s(): no memory.\n", __func__);
1885 return -ENOMEM; 1904 return -ENOMEM;
1886 } 1905 }
 1906 /* Hold the beast until a service is registered */

1907 ipvs->enable = 0;
1887 ipvs->net = net; 1908 ipvs->net = net;
1888 /* Counters used for creating unique names */ 1909 /* Counters used for creating unique names */
1889 ipvs->gen = atomic_read(&ipvs_netns_cnt); 1910 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1890 atomic_inc(&ipvs_netns_cnt); 1911 atomic_inc(&ipvs_netns_cnt);
1891 net->ipvs = ipvs; 1912 net->ipvs = ipvs;
1913
1914 if (__ip_vs_estimator_init(net) < 0)
1915 goto estimator_fail;
1916
1917 if (__ip_vs_control_init(net) < 0)
1918 goto control_fail;
1919
1920 if (__ip_vs_protocol_init(net) < 0)
1921 goto protocol_fail;
1922
1923 if (__ip_vs_app_init(net) < 0)
1924 goto app_fail;
1925
1926 if (__ip_vs_conn_init(net) < 0)
1927 goto conn_fail;
1928
1929 if (__ip_vs_sync_init(net) < 0)
1930 goto sync_fail;
1931
1892 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", 1932 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
1893 sizeof(struct netns_ipvs), ipvs->gen); 1933 sizeof(struct netns_ipvs), ipvs->gen);
1894 return 0; 1934 return 0;
1935/*
1936 * Error handling
1937 */
1938
1939sync_fail:
1940 __ip_vs_conn_cleanup(net);
1941conn_fail:
1942 __ip_vs_app_cleanup(net);
1943app_fail:
1944 __ip_vs_protocol_cleanup(net);
1945protocol_fail:
1946 __ip_vs_control_cleanup(net);
1947control_fail:
1948 __ip_vs_estimator_cleanup(net);
1949estimator_fail:
1950 return -ENOMEM;
1895} 1951}
1896 1952
1897static void __net_exit __ip_vs_cleanup(struct net *net) 1953static void __net_exit __ip_vs_cleanup(struct net *net)
1898{ 1954{
1899 IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); 1955 __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */
1956 __ip_vs_conn_cleanup(net);
1957 __ip_vs_app_cleanup(net);
1958 __ip_vs_protocol_cleanup(net);
1959 __ip_vs_control_cleanup(net);
1960 __ip_vs_estimator_cleanup(net);
1961 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1962}
1963
1964static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1965{
1966 EnterFunction(2);
1967 net_ipvs(net)->enable = 0; /* Disable packet reception */
1968 __ip_vs_sync_cleanup(net);
1969 LeaveFunction(2);
1900} 1970}
1901 1971
1902static struct pernet_operations ipvs_core_ops = { 1972static struct pernet_operations ipvs_core_ops = {
@@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = {
1906 .size = sizeof(struct netns_ipvs), 1976 .size = sizeof(struct netns_ipvs),
1907}; 1977};
1908 1978
1979static struct pernet_operations ipvs_core_dev_ops = {
1980 .exit = __ip_vs_dev_cleanup,
1981};
1982
1909/* 1983/*
1910 * Initialize IP Virtual Server 1984 * Initialize IP Virtual Server
1911 */ 1985 */
@@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void)
1913{ 1987{
1914 int ret; 1988 int ret;
1915 1989
1916 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
1917 if (ret < 0)
1918 return ret;
1919
1920 ip_vs_estimator_init(); 1990 ip_vs_estimator_init();
1921 ret = ip_vs_control_init(); 1991 ret = ip_vs_control_init();
1922 if (ret < 0) { 1992 if (ret < 0) {
@@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void)
1944 goto cleanup_conn; 2014 goto cleanup_conn;
1945 } 2015 }
1946 2016
2017 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2018 if (ret < 0)
2019 goto cleanup_sync;
2020
2021 ret = register_pernet_device(&ipvs_core_dev_ops);
2022 if (ret < 0)
2023 goto cleanup_sub;
2024
1947 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2025 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1948 if (ret < 0) { 2026 if (ret < 0) {
1949 pr_err("can't register hooks.\n"); 2027 pr_err("can't register hooks.\n");
1950 goto cleanup_sync; 2028 goto cleanup_dev;
1951 } 2029 }
1952 2030
1953 pr_info("ipvs loaded.\n"); 2031 pr_info("ipvs loaded.\n");
2032
1954 return ret; 2033 return ret;
1955 2034
2035cleanup_dev:
2036 unregister_pernet_device(&ipvs_core_dev_ops);
2037cleanup_sub:
2038 unregister_pernet_subsys(&ipvs_core_ops);
1956cleanup_sync: 2039cleanup_sync:
1957 ip_vs_sync_cleanup(); 2040 ip_vs_sync_cleanup();
1958 cleanup_conn: 2041 cleanup_conn:
@@ -1964,20 +2047,20 @@ cleanup_sync:
1964 ip_vs_control_cleanup(); 2047 ip_vs_control_cleanup();
1965 cleanup_estimator: 2048 cleanup_estimator:
1966 ip_vs_estimator_cleanup(); 2049 ip_vs_estimator_cleanup();
1967 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
1968 return ret; 2050 return ret;
1969} 2051}
1970 2052
1971static void __exit ip_vs_cleanup(void) 2053static void __exit ip_vs_cleanup(void)
1972{ 2054{
1973 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2055 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2056 unregister_pernet_device(&ipvs_core_dev_ops);
2057 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
1974 ip_vs_sync_cleanup(); 2058 ip_vs_sync_cleanup();
1975 ip_vs_conn_cleanup(); 2059 ip_vs_conn_cleanup();
1976 ip_vs_app_cleanup(); 2060 ip_vs_app_cleanup();
1977 ip_vs_protocol_cleanup(); 2061 ip_vs_protocol_cleanup();
1978 ip_vs_control_cleanup(); 2062 ip_vs_control_cleanup();
1979 ip_vs_estimator_cleanup(); 2063 ip_vs_estimator_cleanup();
1980 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
1981 pr_info("ipvs unloaded.\n"); 2064 pr_info("ipvs unloaded.\n");
1982} 2065}
1983 2066
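The reworked __ip_vs_init() above brings up estimator, control, protocol, app, conn and sync state for a netns in one place and unwinds them in reverse order on failure, replacing the per-subsystem pernet_operations removed in the surrounding files. A compact sketch of that goto-unwind shape, with made-up subsystem names:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }      /* pretend this one fails */
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

/* Bring subsystems up in order; on failure, tear down only what already
 * succeeded, in reverse order. */
static int subsys_init(void)
{
        if (init_a() < 0)
                goto a_fail;
        if (init_b() < 0)
                goto b_fail;
        if (init_c() < 0)
                goto c_fail;
        return 0;

c_fail:
        cleanup_b();
b_fail:
        cleanup_a();
a_fail:
        return -1;
}

int main(void)
{
        printf("init: %d\n", subsys_init());
        return 0;
}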
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ae47090bf45f..37890f228b19 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void)
69} 69}
70#endif 70#endif
71 71
72
73/* Protos */
74static void __ip_vs_del_service(struct ip_vs_service *svc);
75
76
72#ifdef CONFIG_IP_VS_IPV6 77#ifdef CONFIG_IP_VS_IPV6
73/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ 78/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
74static int __ip_vs_addr_is_local_v6(struct net *net, 79static int __ip_vs_addr_is_local_v6(struct net *net,
@@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1214 write_unlock_bh(&__ip_vs_svc_lock); 1219 write_unlock_bh(&__ip_vs_svc_lock);
1215 1220
1216 *svc_p = svc; 1221 *svc_p = svc;
1222 /* Now there is a service - full throttle */
1223 ipvs->enable = 1;
1217 return 0; 1224 return 0;
1218 1225
1219 1226
@@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net)
1472 return 0; 1479 return 0;
1473} 1480}
1474 1481
1482/*
1483 * Delete service by {netns} in the service table.
1484 * Called by __ip_vs_cleanup()
1485 */
1486void __ip_vs_service_cleanup(struct net *net)
1487{
1488 EnterFunction(2);
1489 /* Check for "full" addressed entries */
1490 mutex_lock(&__ip_vs_mutex);
1491 ip_vs_flush(net);
1492 mutex_unlock(&__ip_vs_mutex);
1493 LeaveFunction(2);
1494}
1495/*
1496 * Release dst hold by dst_cache
1497 */
1498static inline void
1499__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev)
1500{
1501 spin_lock_bh(&dest->dst_lock);
1502 if (dest->dst_cache && dest->dst_cache->dev == dev) {
1503 IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
1504 dev->name,
1505 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1506 ntohs(dest->port),
1507 atomic_read(&dest->refcnt));
1508 ip_vs_dst_reset(dest);
1509 }
1510 spin_unlock_bh(&dest->dst_lock);
1511
1512}
1513/*
1514 * Netdev event receiver
 1515 * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to
 1516 * a device that is being unregistered, that reference must be released.
1517 */
1518static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1519 void *ptr)
1520{
1521 struct net_device *dev = ptr;
1522 struct net *net = dev_net(dev);
1523 struct ip_vs_service *svc;
1524 struct ip_vs_dest *dest;
1525 unsigned int idx;
1526
1527 if (event != NETDEV_UNREGISTER)
1528 return NOTIFY_DONE;
1529 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1530 EnterFunction(2);
1531 mutex_lock(&__ip_vs_mutex);
1532 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1533 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
1534 if (net_eq(svc->net, net)) {
1535 list_for_each_entry(dest, &svc->destinations,
1536 n_list) {
1537 __ip_vs_dev_reset(dest, dev);
1538 }
1539 }
1540 }
1541
1542 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
1543 if (net_eq(svc->net, net)) {
1544 list_for_each_entry(dest, &svc->destinations,
1545 n_list) {
1546 __ip_vs_dev_reset(dest, dev);
1547 }
1548 }
1549
1550 }
1551 }
1552
1553 list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
1554 __ip_vs_dev_reset(dest, dev);
1555 }
1556 mutex_unlock(&__ip_vs_mutex);
1557 LeaveFunction(2);
1558 return NOTIFY_DONE;
1559}
1475 1560
1476/* 1561/*
1477 * Zero counters in a service or all services 1562 * Zero counters in a service or all services
@@ -1981,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = {
1981 .open = ip_vs_info_open, 2066 .open = ip_vs_info_open,
1982 .read = seq_read, 2067 .read = seq_read,
1983 .llseek = seq_lseek, 2068 .llseek = seq_lseek,
1984 .release = seq_release_private, 2069 .release = seq_release_net,
1985}; 2070};
1986 2071
1987#endif 2072#endif
@@ -2024,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = {
2024 .open = ip_vs_stats_seq_open, 2109 .open = ip_vs_stats_seq_open,
2025 .read = seq_read, 2110 .read = seq_read,
2026 .llseek = seq_lseek, 2111 .llseek = seq_lseek,
2027 .release = single_release, 2112 .release = single_release_net,
2028}; 2113};
2029 2114
2030static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) 2115static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
@@ -2093,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = {
2093 .open = ip_vs_stats_percpu_seq_open, 2178 .open = ip_vs_stats_percpu_seq_open,
2094 .read = seq_read, 2179 .read = seq_read,
2095 .llseek = seq_lseek, 2180 .llseek = seq_lseek,
2096 .release = single_release, 2181 .release = single_release_net,
2097}; 2182};
2098#endif 2183#endif
2099 2184
@@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { }
3588 3673
3589#endif 3674#endif
3590 3675
3676static struct notifier_block ip_vs_dst_notifier = {
3677 .notifier_call = ip_vs_dst_event,
3678};
3679
3591int __net_init __ip_vs_control_init(struct net *net) 3680int __net_init __ip_vs_control_init(struct net *net)
3592{ 3681{
3593 int idx; 3682 int idx;
@@ -3626,7 +3715,7 @@ err:
3626 return -ENOMEM; 3715 return -ENOMEM;
3627} 3716}
3628 3717
3629static void __net_exit __ip_vs_control_cleanup(struct net *net) 3718void __net_exit __ip_vs_control_cleanup(struct net *net)
3630{ 3719{
3631 struct netns_ipvs *ipvs = net_ipvs(net); 3720 struct netns_ipvs *ipvs = net_ipvs(net);
3632 3721
@@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net)
3639 free_percpu(ipvs->tot_stats.cpustats); 3728 free_percpu(ipvs->tot_stats.cpustats);
3640} 3729}
3641 3730
3642static struct pernet_operations ipvs_control_ops = {
3643 .init = __ip_vs_control_init,
3644 .exit = __ip_vs_control_cleanup,
3645};
3646
3647int __init ip_vs_control_init(void) 3731int __init ip_vs_control_init(void)
3648{ 3732{
3649 int idx; 3733 int idx;
@@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void)
3657 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); 3741 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3658 } 3742 }
3659 3743
3660 ret = register_pernet_subsys(&ipvs_control_ops);
3661 if (ret) {
3662 pr_err("cannot register namespace.\n");
3663 goto err;
3664 }
3665
3666 smp_wmb(); /* Do we really need it now ? */ 3744 smp_wmb(); /* Do we really need it now ? */
3667 3745
3668 ret = nf_register_sockopt(&ip_vs_sockopts); 3746 ret = nf_register_sockopt(&ip_vs_sockopts);
3669 if (ret) { 3747 if (ret) {
3670 pr_err("cannot register sockopt.\n"); 3748 pr_err("cannot register sockopt.\n");
3671 goto err_net; 3749 goto err_sock;
3672 } 3750 }
3673 3751
3674 ret = ip_vs_genl_register(); 3752 ret = ip_vs_genl_register();
3675 if (ret) { 3753 if (ret) {
3676 pr_err("cannot register Generic Netlink interface.\n"); 3754 pr_err("cannot register Generic Netlink interface.\n");
3677 nf_unregister_sockopt(&ip_vs_sockopts); 3755 goto err_genl;
3678 goto err_net;
3679 } 3756 }
3680 3757
3758 ret = register_netdevice_notifier(&ip_vs_dst_notifier);
3759 if (ret < 0)
3760 goto err_notf;
3761
3681 LeaveFunction(2); 3762 LeaveFunction(2);
3682 return 0; 3763 return 0;
3683 3764
3684err_net: 3765err_notf:
3685 unregister_pernet_subsys(&ipvs_control_ops); 3766 ip_vs_genl_unregister();
3686err: 3767err_genl:
3768 nf_unregister_sockopt(&ip_vs_sockopts);
3769err_sock:
3687 return ret; 3770 return ret;
3688} 3771}
3689 3772
@@ -3691,7 +3774,6 @@ err:
3691void ip_vs_control_cleanup(void) 3774void ip_vs_control_cleanup(void)
3692{ 3775{
3693 EnterFunction(2); 3776 EnterFunction(2);
3694 unregister_pernet_subsys(&ipvs_control_ops);
3695 ip_vs_genl_unregister(); 3777 ip_vs_genl_unregister();
3696 nf_unregister_sockopt(&ip_vs_sockopts); 3778 nf_unregister_sockopt(&ip_vs_sockopts);
3697 LeaveFunction(2); 3779 LeaveFunction(2);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 8c8766ca56ad..508cce98777c 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
192 dst->outbps = (e->outbps + 0xF) >> 5; 192 dst->outbps = (e->outbps + 0xF) >> 5;
193} 193}
194 194
195static int __net_init __ip_vs_estimator_init(struct net *net) 195int __net_init __ip_vs_estimator_init(struct net *net)
196{ 196{
197 struct netns_ipvs *ipvs = net_ipvs(net); 197 struct netns_ipvs *ipvs = net_ipvs(net);
198 198
@@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net)
203 return 0; 203 return 0;
204} 204}
205 205
206static void __net_exit __ip_vs_estimator_exit(struct net *net) 206void __net_exit __ip_vs_estimator_cleanup(struct net *net)
207{ 207{
208 del_timer_sync(&net_ipvs(net)->est_timer); 208 del_timer_sync(&net_ipvs(net)->est_timer);
209} 209}
210static struct pernet_operations ip_vs_app_ops = {
211 .init = __ip_vs_estimator_init,
212 .exit = __ip_vs_estimator_exit,
213};
214 210
215int __init ip_vs_estimator_init(void) 211int __init ip_vs_estimator_init(void)
216{ 212{
217 int rv; 213 return 0;
218
219 rv = register_pernet_subsys(&ip_vs_app_ops);
220 return rv;
221} 214}
222 215
223void ip_vs_estimator_cleanup(void) 216void ip_vs_estimator_cleanup(void)
224{ 217{
225 unregister_pernet_subsys(&ip_vs_app_ops);
226} 218}
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 17484a4416ef..eb86028536fc 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
316/* 316/*
317 * per network name-space init 317 * per network name-space init
318 */ 318 */
319static int __net_init __ip_vs_protocol_init(struct net *net) 319int __net_init __ip_vs_protocol_init(struct net *net)
320{ 320{
321#ifdef CONFIG_IP_VS_PROTO_TCP 321#ifdef CONFIG_IP_VS_PROTO_TCP
322 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); 322 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
@@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net)
336 return 0; 336 return 0;
337} 337}
338 338
339static void __net_exit __ip_vs_protocol_cleanup(struct net *net) 339void __net_exit __ip_vs_protocol_cleanup(struct net *net)
340{ 340{
341 struct netns_ipvs *ipvs = net_ipvs(net); 341 struct netns_ipvs *ipvs = net_ipvs(net);
342 struct ip_vs_proto_data *pd; 342 struct ip_vs_proto_data *pd;
@@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
349 } 349 }
350} 350}
351 351
352static struct pernet_operations ipvs_proto_ops = {
353 .init = __ip_vs_protocol_init,
354 .exit = __ip_vs_protocol_cleanup,
355};
356
357int __init ip_vs_protocol_init(void) 352int __init ip_vs_protocol_init(void)
358{ 353{
359 char protocols[64]; 354 char protocols[64];
@@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void)
382 REGISTER_PROTOCOL(&ip_vs_protocol_esp); 377 REGISTER_PROTOCOL(&ip_vs_protocol_esp);
383#endif 378#endif
384 pr_info("Registered protocols (%s)\n", &protocols[2]); 379 pr_info("Registered protocols (%s)\n", &protocols[2]);
385 return register_pernet_subsys(&ipvs_proto_ops);
386 380
387 return 0; 381 return 0;
388} 382}
@@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void)
393 struct ip_vs_protocol *pp; 387 struct ip_vs_protocol *pp;
394 int i; 388 int i;
395 389
396 unregister_pernet_subsys(&ipvs_proto_ops);
397 /* unregister all the ipvs protocols */ 390 /* unregister all the ipvs protocols */
398 for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { 391 for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
399 while ((pp = ip_vs_proto_table[i]) != NULL) 392 while ((pp = ip_vs_proto_table[i]) != NULL)
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 3e7961e85e9c..e292e5bddc70 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net)
1303 struct socket *sock; 1303 struct socket *sock;
1304 int result; 1304 int result;
1305 1305
 1306 /* First create a socket */ 1306 /* First create a socket, move it to the right name space later */
1307 result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1307 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1308 if (result < 0) { 1308 if (result < 0) {
1309 pr_err("Error during creation of socket; terminating\n"); 1309 pr_err("Error during creation of socket; terminating\n");
1310 return ERR_PTR(result); 1310 return ERR_PTR(result);
1311 } 1311 }
1312 1312 /*
 1313 * Kernel sockets that are part of a namespace should not hold a
 1314 * reference to the namespace, so that the namespace can be stopped.
 1315 * After sk_change_net() the socket should be released with sk_release_kernel().
1316 */
1317 sk_change_net(sock->sk, net);
1313 result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); 1318 result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
1314 if (result < 0) { 1319 if (result < 0) {
1315 pr_err("Error setting outbound mcast interface\n"); 1320 pr_err("Error setting outbound mcast interface\n");
@@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net)
1334 1339
1335 return sock; 1340 return sock;
1336 1341
1337 error: 1342error:
1338 sock_release(sock); 1343 sk_release_kernel(sock->sk);
1339 return ERR_PTR(result); 1344 return ERR_PTR(result);
1340} 1345}
1341 1346
@@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net)
1350 int result; 1355 int result;
1351 1356
1352 /* First create a socket */ 1357 /* First create a socket */
1353 result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); 1358 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1354 if (result < 0) { 1359 if (result < 0) {
1355 pr_err("Error during creation of socket; terminating\n"); 1360 pr_err("Error during creation of socket; terminating\n");
1356 return ERR_PTR(result); 1361 return ERR_PTR(result);
1357 } 1362 }
1358 1363 /*
 1364 * Kernel sockets that are part of a namespace should not hold a
 1365 * reference to the namespace, so that the namespace can be stopped.
 1366 * After sk_change_net() the socket should be released with sk_release_kernel().
1367 */
1368 sk_change_net(sock->sk, net);
1359 /* it is equivalent to the REUSEADDR option in user-space */ 1369 /* it is equivalent to the REUSEADDR option in user-space */
1360 sock->sk->sk_reuse = 1; 1370 sock->sk->sk_reuse = 1;
1361 1371
@@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net)
1377 1387
1378 return sock; 1388 return sock;
1379 1389
1380 error: 1390error:
1381 sock_release(sock); 1391 sk_release_kernel(sock->sk);
1382 return ERR_PTR(result); 1392 return ERR_PTR(result);
1383} 1393}
1384 1394
@@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data)
1473 ip_vs_sync_buff_release(sb); 1483 ip_vs_sync_buff_release(sb);
1474 1484
1475 /* release the sending multicast socket */ 1485 /* release the sending multicast socket */
1476 sock_release(tinfo->sock); 1486 sk_release_kernel(tinfo->sock->sk);
1477 kfree(tinfo); 1487 kfree(tinfo);
1478 1488
1479 return 0; 1489 return 0;
@@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data)
1513 } 1523 }
1514 1524
1515 /* release the sending multicast socket */ 1525 /* release the sending multicast socket */
1516 sock_release(tinfo->sock); 1526 sk_release_kernel(tinfo->sock->sk);
1517 kfree(tinfo->buf); 1527 kfree(tinfo->buf);
1518 kfree(tinfo); 1528 kfree(tinfo);
1519 1529
@@ -1601,7 +1611,7 @@ outtinfo:
1601outbuf: 1611outbuf:
1602 kfree(buf); 1612 kfree(buf);
1603outsocket: 1613outsocket:
1604 sock_release(sock); 1614 sk_release_kernel(sock->sk);
1605out: 1615out:
1606 return result; 1616 return result;
1607} 1617}
@@ -1610,6 +1620,7 @@ out:
1610int stop_sync_thread(struct net *net, int state) 1620int stop_sync_thread(struct net *net, int state)
1611{ 1621{
1612 struct netns_ipvs *ipvs = net_ipvs(net); 1622 struct netns_ipvs *ipvs = net_ipvs(net);
1623 int retc = -EINVAL;
1613 1624
1614 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); 1625 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1615 1626
@@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state)
1629 spin_lock_bh(&ipvs->sync_lock); 1640 spin_lock_bh(&ipvs->sync_lock);
1630 ipvs->sync_state &= ~IP_VS_STATE_MASTER; 1641 ipvs->sync_state &= ~IP_VS_STATE_MASTER;
1631 spin_unlock_bh(&ipvs->sync_lock); 1642 spin_unlock_bh(&ipvs->sync_lock);
1632 kthread_stop(ipvs->master_thread); 1643 retc = kthread_stop(ipvs->master_thread);
1633 ipvs->master_thread = NULL; 1644 ipvs->master_thread = NULL;
1634 } else if (state == IP_VS_STATE_BACKUP) { 1645 } else if (state == IP_VS_STATE_BACKUP) {
1635 if (!ipvs->backup_thread) 1646 if (!ipvs->backup_thread)
@@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state)
1639 task_pid_nr(ipvs->backup_thread)); 1650 task_pid_nr(ipvs->backup_thread));
1640 1651
1641 ipvs->sync_state &= ~IP_VS_STATE_BACKUP; 1652 ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
1642 kthread_stop(ipvs->backup_thread); 1653 retc = kthread_stop(ipvs->backup_thread);
1643 ipvs->backup_thread = NULL; 1654 ipvs->backup_thread = NULL;
1644 } else {
1645 return -EINVAL;
1646 } 1655 }
1647 1656
1648 /* decrease the module use count */ 1657 /* decrease the module use count */
1649 ip_vs_use_count_dec(); 1658 ip_vs_use_count_dec();
1650 1659
1651 return 0; 1660 return retc;
1652} 1661}
1653 1662
1654/* 1663/*
1655 * Initialize data struct for each netns 1664 * Initialize data struct for each netns
1656 */ 1665 */
1657static int __net_init __ip_vs_sync_init(struct net *net) 1666int __net_init __ip_vs_sync_init(struct net *net)
1658{ 1667{
1659 struct netns_ipvs *ipvs = net_ipvs(net); 1668 struct netns_ipvs *ipvs = net_ipvs(net);
1660 1669
@@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net)
1668 return 0; 1677 return 0;
1669} 1678}
1670 1679
1671static void __ip_vs_sync_cleanup(struct net *net) 1680void __ip_vs_sync_cleanup(struct net *net)
1672{ 1681{
1673 stop_sync_thread(net, IP_VS_STATE_MASTER); 1682 int retc;
1674 stop_sync_thread(net, IP_VS_STATE_BACKUP);
1675}
1676 1683
1677static struct pernet_operations ipvs_sync_ops = { 1684 retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
1678 .init = __ip_vs_sync_init, 1685 if (retc && retc != -ESRCH)
1679 .exit = __ip_vs_sync_cleanup, 1686 pr_err("Failed to stop Master Daemon\n");
1680};
1681 1687
1688 retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
1689 if (retc && retc != -ESRCH)
1690 pr_err("Failed to stop Backup Daemon\n");
1691}
1682 1692
1683int __init ip_vs_sync_init(void) 1693int __init ip_vs_sync_init(void)
1684{ 1694{
1685 return register_pernet_subsys(&ipvs_sync_ops); 1695 return 0;
1686} 1696}
1687 1697
1688void ip_vs_sync_cleanup(void) 1698void ip_vs_sync_cleanup(void)
1689{ 1699{
1690 unregister_pernet_subsys(&ipvs_sync_ops);
1691} 1700}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 30bf8a167fc8..482e90c61850 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1334 struct nf_conn *ct; 1334 struct nf_conn *ct;
1335 int err = -EINVAL; 1335 int err = -EINVAL;
1336 struct nf_conntrack_helper *helper; 1336 struct nf_conntrack_helper *helper;
1337 struct nf_conn_tstamp *tstamp;
1337 1338
1338 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 1339 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1339 if (IS_ERR(ct)) 1340 if (IS_ERR(ct))
@@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1451 __set_bit(IPS_EXPECTED_BIT, &ct->status); 1452 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1452 ct->master = master_ct; 1453 ct->master = master_ct;
1453 } 1454 }
1455 tstamp = nf_conn_tstamp_find(ct);
1456 if (tstamp)
1457 tstamp->start = ktime_to_ns(ktime_get_real());
1454 1458
1455 add_timer(&ct->timeout); 1459 add_timer(&ct->timeout);
1456 nf_conntrack_hash_insert(ct); 1460 nf_conntrack_hash_insert(ct);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a9adf4c6b299..8a025a585d2f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af)
455 vfree(xt[af].compat_tab); 455 vfree(xt[af].compat_tab);
456 xt[af].compat_tab = NULL; 456 xt[af].compat_tab = NULL;
457 xt[af].number = 0; 457 xt[af].number = 0;
458 xt[af].cur = 0;
458 } 459 }
459} 460}
460EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 461EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
@@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
473 else 474 else
474 return mid ? tmp[mid - 1].delta : 0; 475 return mid ? tmp[mid - 1].delta : 0;
475 } 476 }
476 WARN_ON_ONCE(1); 477 return left ? tmp[left - 1].delta : 0;
477 return 0;
478} 478}
479EXPORT_SYMBOL_GPL(xt_compat_calc_jump); 479EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
480 480
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 0a229191e55b..ae8271652efa 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
99 u_int8_t orig, nv; 99 u_int8_t orig, nv;
100 100
101 orig = ipv6_get_dsfield(iph); 101 orig = ipv6_get_dsfield(iph);
102 nv = (orig & info->tos_mask) ^ info->tos_value; 102 nv = (orig & ~info->tos_mask) ^ info->tos_value;
103 103
104 if (orig != nv) { 104 if (orig != nv) {
105 if (!skb_make_writable(skb, sizeof(struct iphdr))) 105 if (!skb_make_writable(skb, sizeof(struct iphdr)))
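The IPv6 target previously masked with tos_mask directly, which kept only the bits it was supposed to replace and clobbered the rest of the DS field; the fix clears the masked bits first and then XORs in tos_value, matching the IPv4 tos_tg() logic. A standalone sketch of that masked-field update (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Replace only the bits covered by mask: clear them, then XOR in the new
 * value.  Masking with mask instead of ~mask (the old bug) would wipe the
 * bits that were meant to be preserved. */
static uint8_t tos_apply(uint8_t orig, uint8_t mask, uint8_t value)
{
        return (orig & ~mask) ^ value;
}

int main(void)
{
        uint8_t orig = 0xb8;    /* EF DSCP codepoint, ECN bits clear */

        /* set only the two ECN bits, leave the DSCP codepoint alone */
        printf("0x%02x\n", (unsigned)tos_apply(orig, 0x03, 0x01));  /* -> 0xb9 */
        return 0;
}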
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 481a86fdc409..61805d7b38aa 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
272{ 272{
273 int ret; 273 int ret;
274 274
275 if (strcmp(par->table, "raw") == 0) {
276 pr_info("state is undetermined at the time of raw table\n");
277 return -EINVAL;
278 }
279
280 ret = nf_ct_l3proto_try_module_get(par->family); 275 ret = nf_ct_l3proto_try_module_get(par->family);
281 if (ret < 0) 276 if (ret < 0)
282 pr_info("cannot load conntrack support for proto=%u\n", 277 pr_info("cannot load conntrack support for proto=%u\n",
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 15792d8b6272..b4d745ea8ee1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1406 struct net *net = xp_net(policy); 1406 struct net *net = xp_net(policy);
1407 unsigned long now = jiffies; 1407 unsigned long now = jiffies;
1408 struct net_device *dev; 1408 struct net_device *dev;
1409 struct xfrm_mode *inner_mode;
1409 struct dst_entry *dst_prev = NULL; 1410 struct dst_entry *dst_prev = NULL;
1410 struct dst_entry *dst0 = NULL; 1411 struct dst_entry *dst0 = NULL;
1411 int i = 0; 1412 int i = 0;
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1436 goto put_states; 1437 goto put_states;
1437 } 1438 }
1438 1439
1440 if (xfrm[i]->sel.family == AF_UNSPEC) {
1441 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1442 xfrm_af2proto(family));
1443 if (!inner_mode) {
1444 err = -EAFNOSUPPORT;
1445 dst_release(dst);
1446 goto put_states;
1447 }
1448 } else
1449 inner_mode = xfrm[i]->inner_mode;
1450
1439 if (!dst_prev) 1451 if (!dst_prev)
1440 dst0 = dst1; 1452 dst0 = dst1;
1441 else { 1453 else {
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		dst1->lastuse = now;
 
 		dst1->input = dst_discard;
-		dst1->output = xfrm[i]->outer_mode->afinfo->output;
+		dst1->output = inner_mode->afinfo->output;
 
 		dst1->next = dst_prev;
 		dst_prev = dst1;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index e8a781422feb..47f1b8638df9 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -535,6 +535,9 @@ int xfrm_init_replay(struct xfrm_state *x)
 	    replay_esn->bmp_len * sizeof(__u32) * 8)
 		return -EINVAL;
 
+	if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
+		return -EINVAL;
+
 	if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
 		x->repl = &xfrm_replay_esn;
 	else
diff --git a/scripts/selinux/README b/scripts/selinux/README
index a936315ba2c8..4d020ecb7524 100644
--- a/scripts/selinux/README
+++ b/scripts/selinux/README
@@ -1,2 +1,2 @@
-Please see Documentation/SELinux.txt for information on
+Please see Documentation/security/SELinux.txt for information on
 installing a dummy SELinux policy.
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 06d764ccbbe5..94de6b4907c8 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -194,7 +194,7 @@ void aa_dfa_free_kref(struct kref *kref)
  * @flags: flags controlling what type of accept tables are acceptable
  *
  * Unpack a dfa that has been serialized. To find information on the dfa
- * format look in Documentation/apparmor.txt
+ * format look in Documentation/security/apparmor.txt
  * Assumes the dfa @blob stream has been aligned on a 8 byte boundary
  *
  * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index e33aaf7e5744..d6d9a57b5652 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -12,8 +12,8 @@
  * published by the Free Software Foundation, version 2 of the
  * License.
  *
- * AppArmor uses a serialized binary format for loading policy.
- * To find policy format documentation look in Documentation/apparmor.txt
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation look in Documentation/security/apparmor.txt
  * All policy is validated before it is used.
  */
 
diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c
index 69907a58a683..b1cba5bf0a5e 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted.c
@@ -8,7 +8,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
  */
 
 #include <linux/uaccess.h>
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index df3c0417ee40..d41cc153a313 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
  */
 
 #include <linux/module.h>
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 68164031a74e..3c0cfdec6e37 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
  */
 
 #include <linux/module.h>
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index c99b9368368c..0c33e2ea1f3c 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -8,7 +8,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
  */
 
 #include <linux/uaccess.h>
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index e6e7ce0d3d55..7102457661d6 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
 		goto out;
 	nel = le32_to_cpu(buf[0]);
 
-	printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
-
 	last = p->filename_trans;
 	while (last && last->next)
 		last = last->next;
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
 			goto out;
 		name[len] = 0;
 
-		printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
-
 		rc = next_entry(buf, fp, sizeof(u32) * 4);
 		if (rc)
 			goto out;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 2727befd158e..b04d28039c16 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
 SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
 
 SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
-SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
 SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
 
 SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
 	.read = ssm2602_read_reg_cache,
 	.write = ssm2602_write,
 	.set_bias_level = ssm2602_set_bias_level,
-	.reg_cache_size = sizeof(ssm2602_reg),
+	.reg_cache_size = ARRAY_SIZE(ssm2602_reg),
 	.reg_word_size = sizeof(u16),
 	.reg_cache_default = ssm2602_reg,
 };
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
  * low = 0x1a
  * high = 0x1b
  */
-static int ssm2602_i2c_probe(struct i2c_client *i2c,
+static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
 	struct ssm2602_priv *ssm2602;
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c,
 	return ret;
 }
 
-static int ssm2602_i2c_remove(struct i2c_client *client)
+static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
 {
 	snd_soc_unregister_codec(&client->dev);
 	kfree(i2c_get_clientdata(client));
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = {
 		.owner = THIS_MODULE,
 	},
 	.probe = ssm2602_i2c_probe,
-	.remove = ssm2602_i2c_remove,
+	.remove = __devexit_p(ssm2602_i2c_remove),
 	.id_table = ssm2602_i2c_id,
 };
 #endif
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 48ffd406a71d..a7b8f301bad3 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.reg_cache_step = 1,
 	.read = uda134x_read_reg_cache,
 	.write = uda134x_write,
-#ifdef POWER_OFF_ON_STANDBY
 	.set_bias_level = uda134x_set_bias_level,
-#endif
 };
 
 static int __devinit uda134x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index f52b623bb692..824d1c8c8a35 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing),
 SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup),
 
 SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
-		 WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv),
+		 WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("ADC Companding Mode", adc_companding),
 SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
 
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index a5af834c8ef5..4ddc6d3b6678 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -434,17 +434,21 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
 		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26));
+		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+				ACLKX | AHCLKX | AFSX);
 		break;
 	case SND_SOC_DAIFMT_CBM_CFS:
 		/* codec is clock master and frame slave */
-		mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
 		mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
 
-		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
 		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26));
+		mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+				ACLKX | ACLKR);
+		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+				AFSX | AFSR);
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		/* codec is clock and frame master */
@@ -454,7 +458,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
 		mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-		mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26));
+		mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+				ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR);
 		break;
 
 	default:
@@ -644,7 +649,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
 	mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
 	mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
 
-	if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32))
+	if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
 		mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
 				FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
 	else
@@ -660,7 +665,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
 				AHCLKRE);
 	mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
 
-	if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32))
+	if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
 		mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
 				FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
 	else
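The two hunks above also fix the slot-count guard: `(tdm_slots >= 2) || (tdm_slots <= 32)` is true for every integer, so out-of-range values were never rejected; `&&` expresses the intended 2..32 window. A small stand-alone sketch of the corrected check (illustrative only; the helper name is invented):

	/* Illustrative only: a closed-interval check must combine the two
	 * bounds with &&; with || the predicate is always true. */
	static int tdm_slots_in_range(int slots)
	{
		return slots >= 2 && slots <= 32;
	}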
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 419bf4f5534a..cd22a54b2f14 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
 	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
 	uint32_t conf;
 
-	if (!dai->active)
+	if (dai->active)
 		return;
 
 	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
index d567c322a2fb..6b1f9d3bf34e 100644
--- a/sound/soc/mid-x86/sst_platform.c
+++ b/sound/soc/mid-x86/sst_platform.c
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
+static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
 static struct snd_pcm_ops sst_platform_ops = {
 	.open = sst_platform_open,
 	.close = sst_platform_close,
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = {
 	.trigger = sst_platform_pcm_trigger,
 	.pointer = sst_platform_pcm_pointer,
 	.hw_params = sst_platform_pcm_hw_params,
+	.hw_free = sst_platform_pcm_hw_free,
 };
 
 static void sst_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
index f6b3a3ce5919..0e80daee8b6f 100644
--- a/sound/soc/samsung/goni_wm8994.c
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -236,18 +236,18 @@ static struct snd_soc_dai_link goni_dai[] = {
 	.name = "WM8994",
 	.stream_name = "WM8994 HiFi",
 	.cpu_dai_name = "samsung-i2s.0",
-	.codec_dai_name = "wm8994-hifi",
+	.codec_dai_name = "wm8994-aif1",
 	.platform_name = "samsung-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
+	.codec_name = "wm8994-codec.0-001a",
 	.init = goni_wm8994_init,
 	.ops = &goni_hifi_ops,
 }, {
 	.name = "WM8994 Voice",
 	.stream_name = "Voice",
 	.cpu_dai_name = "goni-voice-dai",
-	.codec_dai_name = "wm8994-voice",
+	.codec_dai_name = "wm8994-aif2",
 	.platform_name = "samsung-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
+	.codec_name = "wm8994-codec.0-001a",
 	.ops = &goni_voice_ops,
 },
 };
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d8562ce4de7a..dd55d1069468 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
 	if (!card->name || !card->dev)
 		return -EINVAL;
 
+	dev_set_drvdata(card->dev, card);
+
 	snd_soc_initialize_card_lists(card);
 
 	soc_init_card_debugfs(card);
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 207dee5c5b16..0c542563ea6c 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -35,15 +35,21 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
 				  -e s/sh[234].*/sh/ )
 
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+
 # Additional ARCH settings for x86
 ifeq ($(ARCH),i386)
 	ARCH := x86
 endif
 ifeq ($(ARCH),x86_64)
-	RAW_ARCH := x86_64
-	ARCH := x86
-	ARCH_CFLAGS := -DARCH_X86_64
-	ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+	ARCH := x86
+	IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+	ifeq (${IS_X86_64}, 1)
+		RAW_ARCH := x86_64
+		ARCH_CFLAGS := -DARCH_X86_64
+		ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+	endif
 endif
 
 #
@@ -119,8 +125,6 @@ lib = lib
 
 export prefix bindir sharedir sysconfdir
 
-CC = $(CROSS_COMPILE)gcc
-AR = $(CROSS_COMPILE)ar
 RM = rm -f
 MKDIR = mkdir
 FIND = find
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 416538248a4b..0974f957b8fa 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -427,7 +427,7 @@ static void mmap_read_all(void)
 {
 	int i;
 
-	for (i = 0; i < evsel_list->cpus->nr; i++) {
+	for (i = 0; i < evsel_list->nr_mmaps; i++) {
 		if (evsel_list->mmap[i].base)
 			mmap_read(&evsel_list->mmap[i]);
 	}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 11e3c8458362..2f9a337b182f 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -549,7 +549,7 @@ static int test__basic_mmap(void)
 			++foo;
 	}
 
-	while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
 		struct perf_sample sample;
 
 		if (event->header.type != PERF_RECORD_SAMPLE) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e3d6e310bf8..ebfc7cf5f63b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event,
 	}
 }
 
-static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
+static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 {
 	struct perf_sample sample;
 	union perf_event *event;
 
-	while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+	while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
 		perf_session__parse_sample(self, event, &sample);
 
 		if (event->header.type == PERF_RECORD_SAMPLE)
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self)
 {
 	int i;
 
-	for (i = 0; i < top.evlist->cpus->nr; i++)
-		perf_session__mmap_read_cpu(self, i);
+	for (i = 0; i < top.evlist->nr_mmaps; i++)
+		perf_session__mmap_read_idx(self, i);
 }
 
 static void start_counters(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 45da8d186b49..23eb22b05d27 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 	return NULL;
 }
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
 	/* XXX Move this to perf.c, making it generally available */
 	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-	struct perf_mmap *md = &evlist->mmap[cpu];
+	struct perf_mmap *md = &evlist->mmap[idx];
 	unsigned int head = perf_mmap__read_head(md);
 	unsigned int old = md->prev;
 	unsigned char *data = md->base + page_size;
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
-	int cpu;
+	int i;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		if (evlist->mmap[cpu].base != NULL) {
-			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-			evlist->mmap[cpu].base = NULL;
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		if (evlist->mmap[i].base != NULL) {
+			munmap(evlist->mmap[i].base, evlist->mmap_len);
+			evlist->mmap[i].base = NULL;
 		}
 	}
+
+	free(evlist->mmap);
+	evlist->mmap = NULL;
 }
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
-	evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+	evlist->nr_mmaps = evlist->cpus->nr;
+	if (evlist->cpus->map[0] == -1)
+		evlist->nr_mmaps = evlist->threads->nr;
+	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
 static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
-			       int cpu, int prot, int mask, int fd)
+			       int idx, int prot, int mask, int fd)
 {
-	evlist->mmap[cpu].prev = 0;
-	evlist->mmap[cpu].mask = mask;
-	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+	evlist->mmap[idx].prev = 0;
+	evlist->mmap[idx].mask = mask;
+	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[cpu].base == MAP_FAILED) {
-		if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+	if (evlist->mmap[idx].base == MAP_FAILED) {
+		if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
 			ui__warning("Inherit is not allowed on per-task "
 				    "events using mmap.\n");
 		return -1;
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
 	return 0;
 }
 
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+{
+	struct perf_evsel *evsel;
+	int cpu, thread;
+
+	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+		int output = -1;
+
+		for (thread = 0; thread < evlist->threads->nr; thread++) {
+			list_for_each_entry(evsel, &evlist->entries, node) {
+				int fd = FD(evsel, cpu, thread);
+
+				if (output == -1) {
+					output = fd;
+					if (__perf_evlist__mmap(evlist, evsel, cpu,
+								prot, mask, output) < 0)
+						goto out_unmap;
+				} else {
+					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+						goto out_unmap;
+				}
+
+				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+					goto out_unmap;
+			}
+		}
+	}
+
+	return 0;
+
+out_unmap:
+	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+		if (evlist->mmap[cpu].base != NULL) {
+			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+			evlist->mmap[cpu].base = NULL;
+		}
+	}
+	return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+{
+	struct perf_evsel *evsel;
+	int thread;
+
+	for (thread = 0; thread < evlist->threads->nr; thread++) {
+		int output = -1;
+
+		list_for_each_entry(evsel, &evlist->entries, node) {
+			int fd = FD(evsel, 0, thread);
+
+			if (output == -1) {
+				output = fd;
+				if (__perf_evlist__mmap(evlist, evsel, thread,
+							prot, mask, output) < 0)
+					goto out_unmap;
+			} else {
+				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+					goto out_unmap;
+			}
+
+			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
+				goto out_unmap;
+		}
+	}
+
+	return 0;
+
+out_unmap:
+	for (thread = 0; thread < evlist->threads->nr; thread++) {
+		if (evlist->mmap[thread].base != NULL) {
+			munmap(evlist->mmap[thread].base, evlist->mmap_len);
+			evlist->mmap[thread].base = NULL;
+		}
+	}
+	return -1;
+}
+
 /** perf_evlist__mmap - Create per cpu maps to receive events
  *
  * @evlist - list of events
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 {
 	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-	int mask = pages * page_size - 1, cpu;
-	struct perf_evsel *first_evsel, *evsel;
+	int mask = pages * page_size - 1;
+	struct perf_evsel *evsel;
 	const struct cpu_map *cpus = evlist->cpus;
 	const struct thread_map *threads = evlist->threads;
-	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
 
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 
 	evlist->overwrite = overwrite;
 	evlist->mmap_len = (pages + 1) * page_size;
-	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
 		    evsel->sample_id == NULL &&
 		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
 			return -ENOMEM;
-
-		for (cpu = 0; cpu < cpus->nr; cpu++) {
-			for (thread = 0; thread < threads->nr; thread++) {
-				int fd = FD(evsel, cpu, thread);
-
-				if (evsel->idx || thread) {
-					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
-						  FD(first_evsel, cpu, 0)) != 0)
-						goto out_unmap;
-				} else if (__perf_evlist__mmap(evlist, evsel, cpu,
-							       prot, mask, fd) < 0)
-					goto out_unmap;
-
-				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
-					goto out_unmap;
-			}
-		}
 	}
 
-	return 0;
+	if (evlist->cpus->map[0] == -1)
+		return perf_evlist__mmap_per_thread(evlist, prot, mask);
 
-out_unmap:
-	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		if (evlist->mmap[cpu].base != NULL) {
-			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-			evlist->mmap[cpu].base = NULL;
-		}
-	}
-	return -1;
+	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
@@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
 	if (evlist->threads == NULL)
 		return -1;
 
-	if (target_tid != -1)
+	if (cpu_list == NULL && target_tid != -1)
 		evlist->cpus = cpu_map__dummy_new();
 	else
 		evlist->cpus = cpu_map__new(cpu_list);
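The evlist changes above size the ring-buffer array from nr_mmaps rather than from the cpu map directly: when the cpu map is the dummy map (map[0] == -1, i.e. per-thread monitoring) one mmap per monitored thread is created, otherwise one per cpu. A stand-alone sketch of that selection (illustrative only, not the perf API; the helper name is invented):

	/* Illustrative only: how many mmaps the fixed code allocates. */
	static int nr_mmaps_needed(int first_cpu, int nr_cpus, int nr_threads)
	{
		return (first_cpu == -1) ? nr_threads : nr_cpus;
	}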
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 8b1cb7a4c5f1..7109d7add14e 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,7 @@ struct perf_evlist {
 	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
 	int nr_entries;
 	int nr_fds;
+	int nr_mmaps;
 	int mmap_len;
 	bool overwrite;
 	union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index f5e38451fdc5..99c722672f84 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 				 &cpu, &sample_id_all))
 		return NULL;
 
-	event = perf_evlist__read_on_cpu(evlist, cpu);
+	event = perf_evlist__mmap_read(evlist, cpu);
 	if (event != NULL) {
 		struct perf_evsel *first;
 		PyObject *pyevent = pyrf_event__new(event);