-rw-r--r--Documentation/DocBook/kernel-locking.tmpl14
-rw-r--r--Documentation/RCU/checklist.txt46
-rw-r--r--Documentation/RCU/stallwarn.txt18
-rw-r--r--Documentation/RCU/trace.txt13
-rw-r--r--Documentation/networking/e1000.txt373
-rw-r--r--Documentation/networking/e1000e.txt302
-rw-r--r--[-rwxr-xr-x]Documentation/networking/ixgbevf.txt40
-rw-r--r--Documentation/vm/page-types.c2
-rw-r--r--MAINTAINERS51
-rw-r--r--Makefile4
-rw-r--r--arch/arm/Kconfig14
-rw-r--r--arch/arm/kernel/kprobes-decode.c7
-rw-r--r--arch/arm/mach-at91/include/mach/system.h7
-rw-r--r--arch/arm/mach-bcmring/dma.c4
-rw-r--r--arch/arm/mach-ep93xx/dma-m2p.c2
-rw-r--r--arch/arm/mach-imx/Kconfig1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c2
-rw-r--r--arch/arm/mach-s5p6440/cpu.c1
-rw-r--r--arch/arm/mach-s5p6442/cpu.c1
-rw-r--r--arch/arm/mach-s5pc100/cpu.c1
-rw-r--r--arch/arm/mach-s5pv210/clock.c5
-rw-r--r--arch/arm/mach-s5pv210/cpu.c1
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c4
-rw-r--r--arch/arm/mach-vexpress/v2m.c2
-rw-r--r--arch/arm/mm/ioremap.c8
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm/mm/proc-v7.S10
-rw-r--r--arch/arm/plat-omap/iommu.c1
-rw-r--r--arch/arm/plat-samsung/adc.c1
-rw-r--r--arch/arm/plat-samsung/clock.c27
-rw-r--r--arch/m32r/include/asm/elf.h4
-rw-r--r--arch/m32r/kernel/.gitignore1
-rw-r--r--arch/m32r/kernel/signal.c4
-rw-r--r--arch/mips/Kbuild4
-rw-r--r--arch/mips/Kconfig4
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/dec/Platform2
-rw-r--r--arch/mips/include/asm/fcntl.h1
-rw-r--r--arch/mips/include/asm/siginfo.h1
-rw-r--r--arch/mips/jz4740/Platform2
-rw-r--r--arch/mips/kernel/branch.c1
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c2
-rw-r--r--arch/mips/kernel/ptrace.c4
-rw-r--r--arch/mips/kernel/scall32-o32.S11
-rw-r--r--arch/mips/kernel/scall64-64.S7
-rw-r--r--arch/mips/kernel/scall64-n32.S12
-rw-r--r--arch/mips/kernel/scall64-o32.S15
-rw-r--r--arch/mips/kernel/signal.c45
-rw-r--r--arch/mips/kernel/signal_n32.c5
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/um/drivers/hostaudio_kern.c14
-rw-r--r--arch/um/drivers/ubd_kern.c9
-rw-r--r--arch/x86/ia32/ia32_aout.c22
-rw-r--r--arch/x86/include/asm/amd_iommu.h2
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h2
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h23
-rw-r--r--arch/x86/include/asm/gart.h15
-rw-r--r--arch/x86/include/asm/kvm_host.h24
-rw-r--r--arch/x86/kernel/amd_iommu.c2
-rw-r--r--arch/x86/kernel/amd_iommu_init.c124
-rw-r--r--arch/x86/kernel/aperture_64.c18
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c9
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c3
-rw-r--r--arch/x86/kernel/pci-gart_64.c2
-rw-r--r--arch/x86/kvm/svm.c17
-rw-r--r--arch/x86/kvm/vmx.c24
-rw-r--r--arch/x86/mm/srat_64.c8
-rw-r--r--block/bsg.c2
-rw-r--r--block/elevator.c12
-rw-r--r--drivers/acpi/blacklist.c17
-rw-r--r--drivers/acpi/processor_core.c1
-rw-r--r--drivers/atm/iphase.c6
-rw-r--r--drivers/atm/iphase.h2
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/rbd.c1841
-rw-r--r--drivers/block/rbd_types.h73
-rw-r--r--drivers/block/virtio_blk.c23
-rw-r--r--drivers/char/agp/amd64-agp.c4
-rw-r--r--drivers/char/agp/generic.c4
-rw-r--r--drivers/char/tpm/tpm.c22
-rw-r--r--drivers/char/virtio_console.c257
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/firewire/ohci.c19
-rw-r--r--drivers/firewire/ohci.h8
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c3
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h5
-rw-r--r--drivers/gpu/drm/radeon/rs600.c1
-rw-r--r--drivers/gpu/drm/radeon/rs690.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c83
-rw-r--r--drivers/hid/hid-cando.c2
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hidraw.c11
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c5
-rw-r--r--drivers/i2c/busses/i2c-davinci.c24
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-mpc.c1
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c12
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c11
-rw-r--r--drivers/i2c/i2c-core.c54
-rw-r--r--drivers/idle/intel_idle.c10
-rw-r--r--drivers/input/evdev.c10
-rw-r--r--drivers/input/joydev.c3
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c4
-rw-r--r--drivers/input/misc/uinput.c7
-rw-r--r--drivers/input/serio/hil_mlc.c6
-rw-r--r--drivers/input/serio/hp_sdc.c4
-rw-r--r--drivers/input/tablet/wacom_sys.c23
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/isdn/sc/interrupt.c18
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/md/bitmap.c9
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/media/IR/ir-keytable.c9
-rw-r--r--drivers/media/IR/ir-lirc-codec.c2
-rw-r--r--drivers/media/IR/ir-raw-event.c4
-rw-r--r--drivers/media/IR/ir-sysfs.c17
-rw-r--r--drivers/media/IR/keymaps/rc-rc6-mce.c3
-rw-r--r--drivers/media/IR/mceusb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c3
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c56
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c8
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h5
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c31
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/video/cx231xx/Makefile1
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/video/cx88/Kconfig2
-rw-r--r--drivers/media/video/gspca/gspca.c1
-rw-r--r--drivers/media/video/gspca/sn9c20x.c3
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c3
-rw-r--r--drivers/media/video/mt9m111.c8
-rw-r--r--drivers/media/video/mt9v022.c3
-rw-r--r--drivers/media/video/mx2_camera.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.c6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c94
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c10
-rw-r--r--drivers/media/video/saa7164/saa7164-buffer.c5
-rw-r--r--drivers/media/video/uvc/uvc_driver.c24
-rw-r--r--drivers/media/video/uvc/uvcvideo.h1
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c32
-rw-r--r--drivers/media/video/videobuf-dma-contig.c6
-rw-r--r--drivers/media/video/videobuf-dma-sg.c11
-rw-r--r--drivers/misc/bh1780gli.c1
-rw-r--r--drivers/mmc/core/core.c13
-rw-r--r--drivers/mtd/nand/mxc_nand.c92
-rw-r--r--drivers/net/3c527.c2
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/b44.c4
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/ehea/ehea_qmr.h1
-rw-r--r--drivers/net/fec.c44
-rw-r--r--drivers/net/hamradio/6pack.c2
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/irda/sir_dev.c2
-rw-r--r--drivers/net/ppp_async.c2
-rw-r--r--drivers/net/r8169.c65
-rw-r--r--drivers/net/skge.c18
-rw-r--r--drivers/net/tg3.c6
-rw-r--r--drivers/net/tg3.h2
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wimax/i2400m/rx.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/platform/x86/intel_ips.c122
-rw-r--r--drivers/regulator/ad5398.c1
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/serial/ioc3_serial.c1
-rw-r--r--drivers/staging/tm6000/Kconfig2
-rw-r--r--drivers/staging/tm6000/tm6000-input.c61
-rw-r--r--drivers/vhost/net.c16
-rw-r--r--drivers/vhost/vhost.c22
-rw-r--r--drivers/vhost/vhost.h10
-rw-r--r--fs/affs/super.c4
-rw-r--r--fs/binfmt_aout.c4
-rw-r--r--fs/ceph/Kconfig14
-rw-r--r--fs/ceph/Makefile11
-rw-r--r--fs/ceph/README20
-rw-r--r--fs/ceph/addr.c65
-rw-r--r--fs/ceph/caps.c81
-rw-r--r--fs/ceph/ceph_frag.c3
-rw-r--r--fs/ceph/debugfs.c406
-rw-r--r--fs/ceph/dir.c97
-rw-r--r--fs/ceph/export.c26
-rw-r--r--fs/ceph/file.c209
-rw-r--r--fs/ceph/inode.c19
-rw-r--r--fs/ceph/ioctl.c77
-rw-r--r--fs/ceph/ioctl.h4
-rw-r--r--fs/ceph/locks.c23
-rw-r--r--fs/ceph/mds_client.c129
-rw-r--r--fs/ceph/mds_client.h20
-rw-r--r--fs/ceph/mdsmap.c11
-rw-r--r--fs/ceph/pagelist.c63
-rw-r--r--fs/ceph/snap.c10
-rw-r--r--fs/ceph/strings.c (renamed from fs/ceph/ceph_strings.c)82
-rw-r--r--fs/ceph/super.c1154
-rw-r--r--fs/ceph/super.h400
-rw-r--r--fs/ceph/xattr.c18
-rw-r--r--fs/exec.c40
-rw-r--r--fs/exofs/inode.c8
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/aops.c24
-rw-r--r--fs/gfs2/bmap.c255
-rw-r--r--fs/gfs2/bmap.h20
-rw-r--r--fs/gfs2/dentry.c2
-rw-r--r--fs/gfs2/dir.c31
-rw-r--r--fs/gfs2/dir.h34
-rw-r--r--fs/gfs2/export.c9
-rw-r--r--fs/gfs2/file.c6
-rw-r--r--fs/gfs2/glock.c23
-rw-r--r--fs/gfs2/glock.h2
-rw-r--r--fs/gfs2/glops.c6
-rw-r--r--fs/gfs2/incore.h8
-rw-r--r--fs/gfs2/inode.c9
-rw-r--r--fs/gfs2/inode.h15
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/main.c6
-rw-r--r--fs/gfs2/ops_fstype.c79
-rw-r--r--fs/gfs2/ops_inode.c326
-rw-r--r--fs/gfs2/quota.c16
-rw-r--r--fs/gfs2/recovery.c15
-rw-r--r--fs/gfs2/rgrp.c50
-rw-r--r--fs/gfs2/rgrp.h8
-rw-r--r--fs/gfs2/super.c26
-rw-r--r--fs/gfs2/sys.c22
-rw-r--r--fs/gfs2/trace_gfs2.h3
-rw-r--r--fs/gfs2/trans.h9
-rw-r--r--fs/gfs2/xattr.c2
-rw-r--r--fs/hfs/bfind.c4
-rw-r--r--fs/hfs/btree.c2
-rw-r--r--fs/hfs/btree.h2
-rw-r--r--fs/hfsplus/bfind.c17
-rw-r--r--fs/hfsplus/bitmap.c20
-rw-r--r--fs/hfsplus/brec.c29
-rw-r--r--fs/hfsplus/btree.c67
-rw-r--r--fs/hfsplus/catalog.c50
-rw-r--r--fs/hfsplus/dir.c201
-rw-r--r--fs/hfsplus/extents.c223
-rw-r--r--fs/hfsplus/hfsplus_fs.h85
-rw-r--r--fs/hfsplus/hfsplus_raw.h3
-rw-r--r--fs/hfsplus/inode.c185
-rw-r--r--fs/hfsplus/ioctl.c153
-rw-r--r--fs/hfsplus/options.c10
-rw-r--r--fs/hfsplus/part_tbl.c5
-rw-r--r--fs/hfsplus/super.c310
-rw-r--r--fs/hfsplus/unicode.c16
-rw-r--r--fs/hfsplus/wrapper.c40
-rw-r--r--fs/nfsd/nfsfh.h2
-rw-r--r--fs/notify/Kconfig2
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c19
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/ceph/auth.h (renamed from fs/ceph/auth.h)4
-rw-r--r--include/linux/ceph/buffer.h (renamed from fs/ceph/buffer.h)0
-rw-r--r--include/linux/ceph/ceph_debug.h (renamed from fs/ceph/ceph_debug.h)5
-rw-r--r--include/linux/ceph/ceph_frag.h (renamed from fs/ceph/ceph_frag.h)0
-rw-r--r--include/linux/ceph/ceph_fs.h (renamed from fs/ceph/ceph_fs.h)1
-rw-r--r--include/linux/ceph/ceph_hash.h (renamed from fs/ceph/ceph_hash.h)0
-rw-r--r--include/linux/ceph/debugfs.h33
-rw-r--r--include/linux/ceph/decode.h (renamed from fs/ceph/decode.h)5
-rw-r--r--include/linux/ceph/libceph.h249
-rw-r--r--include/linux/ceph/mdsmap.h (renamed from fs/ceph/mdsmap.h)0
-rw-r--r--include/linux/ceph/messenger.h (renamed from fs/ceph/messenger.h)12
-rw-r--r--include/linux/ceph/mon_client.h (renamed from fs/ceph/mon_client.h)1
-rw-r--r--include/linux/ceph/msgpool.h (renamed from fs/ceph/msgpool.h)0
-rw-r--r--include/linux/ceph/msgr.h (renamed from fs/ceph/msgr.h)0
-rw-r--r--include/linux/ceph/osd_client.h (renamed from fs/ceph/osd_client.h)67
-rw-r--r--include/linux/ceph/osdmap.h (renamed from fs/ceph/osdmap.h)4
-rw-r--r--include/linux/ceph/pagelist.h (renamed from fs/ceph/pagelist.h)23
-rw-r--r--include/linux/ceph/rados.h (renamed from fs/ceph/rados.h)0
-rw-r--r--include/linux/ceph/types.h (renamed from fs/ceph/types.h)0
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/coredump.h34
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/crush/crush.h (renamed from fs/ceph/crush/crush.h)0
-rw-r--r--include/linux/crush/hash.h (renamed from fs/ceph/crush/hash.h)0
-rw-r--r--include/linux/crush/mapper.h (renamed from fs/ceph/crush/mapper.h)0
-rw-r--r--include/linux/debug_locks.h5
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/fdtable.h6
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genhd.h6
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/idr.h4
-rw-r--r--include/linux/init_task.h14
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/iocontext.h2
-rw-r--r--include/linux/kernel.h13
-rw-r--r--include/linux/key.h3
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/lockdep.h13
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h10
-rw-r--r--include/linux/netfilter/xt_SECMARK.h12
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/notifier.h10
-rw-r--r--include/linux/radix-tree.h4
-rw-r--r--include/linux/rculist.h62
-rw-r--r--include/linux/rculist_nulls.h16
-rw-r--r--include/linux/rcupdate.h490
-rw-r--r--include/linux/rcutiny.h104
-rw-r--r--include/linux/rcutree.h57
-rw-r--r--include/linux/sched.h16
-rw-r--r--include/linux/security.h45
-rw-r--r--include/linux/selinux.h63
-rw-r--r--include/linux/srcu.h34
-rw-r--r--include/linux/sunrpc/auth_gss.h4
-rw-r--r--include/linux/types.h15
-rw-r--r--include/media/videobuf-dma-sg.h1
-rw-r--r--include/net/bluetooth/bluetooth.h18
-rw-r--r--include/net/cls_cgroup.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--init/Kconfig26
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/hrtimer.c13
-rw-r--r--kernel/hung_task.c4
-rw-r--r--kernel/lockdep.c51
-rw-r--r--kernel/perf_event.c4
-rw-r--r--kernel/pid.c3
-rw-r--r--kernel/printk.c4
-rw-r--r--kernel/rcupdate.c8
-rw-r--r--kernel/rcutiny.c33
-rw-r--r--kernel/rcutiny_plugin.h582
-rw-r--r--kernel/rcutorture.c17
-rw-r--r--kernel/rcutree.c92
-rw-r--r--kernel/rcutree.h20
-rw-r--r--kernel/rcutree_plugin.h47
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/sched.c16
-rw-r--r--kernel/sched_fair.c5
-rw-r--r--kernel/signal.c8
-rw-r--r--kernel/srcu.c2
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/sysctl_check.c9
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/watchdog.c2
-rw-r--r--lib/Kconfig.debug41
-rw-r--r--lib/radix-tree.c2
-rw-r--r--mm/memcontrol.c10
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/page_alloc.c4
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile1
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/bluetooth/l2cap.c62
-rw-r--r--net/bluetooth/rfcomm/sock.c4
-rw-r--r--net/caif/caif_socket.c21
-rw-r--r--net/ceph/Kconfig28
-rw-r--r--net/ceph/Makefile37
-rw-r--r--net/ceph/armor.c (renamed from fs/ceph/armor.c)0
-rw-r--r--net/ceph/auth.c (renamed from fs/ceph/auth.c)10
-rw-r--r--net/ceph/auth_none.c (renamed from fs/ceph/auth_none.c)7
-rw-r--r--net/ceph/auth_none.h (renamed from fs/ceph/auth_none.h)3
-rw-r--r--net/ceph/auth_x.c (renamed from fs/ceph/auth_x.c)9
-rw-r--r--net/ceph/auth_x.h (renamed from fs/ceph/auth_x.h)3
-rw-r--r--net/ceph/auth_x_protocol.h (renamed from fs/ceph/auth_x_protocol.h)0
-rw-r--r--net/ceph/buffer.c (renamed from fs/ceph/buffer.c)9
-rw-r--r--net/ceph/ceph_common.c529
-rw-r--r--net/ceph/ceph_fs.c (renamed from fs/ceph/ceph_fs.c)5
-rw-r--r--net/ceph/ceph_hash.c (renamed from fs/ceph/ceph_hash.c)2
-rw-r--r--net/ceph/ceph_strings.c84
-rw-r--r--net/ceph/crush/crush.c (renamed from fs/ceph/crush/crush.c)2
-rw-r--r--net/ceph/crush/hash.c (renamed from fs/ceph/crush/hash.c)2
-rw-r--r--net/ceph/crush/mapper.c (renamed from fs/ceph/crush/mapper.c)4
-rw-r--r--net/ceph/crypto.c (renamed from fs/ceph/crypto.c)4
-rw-r--r--net/ceph/crypto.h (renamed from fs/ceph/crypto.h)4
-rw-r--r--net/ceph/debugfs.c267
-rw-r--r--net/ceph/messenger.c (renamed from fs/ceph/messenger.c)296
-rw-r--r--net/ceph/mon_client.c (renamed from fs/ceph/mon_client.c)73
-rw-r--r--net/ceph/msgpool.c (renamed from fs/ceph/msgpool.c)4
-rw-r--r--net/ceph/osd_client.c (renamed from fs/ceph/osd_client.c)402
-rw-r--r--net/ceph/osdmap.c (renamed from fs/ceph/osdmap.c)30
-rw-r--r--net/ceph/pagelist.c154
-rw-r--r--net/ceph/pagevec.c223
-rw-r--r--net/core/ethtool.c8
-rw-r--r--net/core/sock.c5
-rw-r--r--net/core/stream.c8
-rw-r--r--net/ipv4/Kconfig2
-rw-r--r--net/ipv4/igmp.c14
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c28
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv6/route.c28
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/status.c4
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netfilter/nf_conntrack_ecache.c4
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c44
-rw-r--r--net/netfilter/nf_conntrack_proto.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c28
-rw-r--r--net/netfilter/nf_log.c2
-rw-r--r--net/netfilter/nf_queue.c2
-rw-r--r--net/netfilter/xt_CT.c1
-rw-r--r--net/netfilter/xt_SECMARK.c35
-rw-r--r--net/rds/page.c27
-rw-r--r--net/sched/cls_u32.c2
-rw-r--r--net/sctp/auth.c8
-rw-r--r--net/sctp/socket.c13
-rw-r--r--scripts/kconfig/conf.c2
-rw-r--r--scripts/kconfig/expr.h1
-rw-r--r--scripts/kconfig/menu.c7
-rw-r--r--scripts/kconfig/symbol.c2
-rw-r--r--security/apparmor/.gitignore1
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/capability.c17
-rw-r--r--security/commoncap.c5
-rw-r--r--security/security.c35
-rw-r--r--security/selinux/Makefile21
-rw-r--r--security/selinux/exports.c49
-rw-r--r--security/selinux/hooks.c28
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/include/security.h23
-rw-r--r--security/selinux/selinuxfs.c195
-rw-r--r--security/selinux/ss/Makefile9
-rw-r--r--security/selinux/ss/avtab.c46
-rw-r--r--security/selinux/ss/avtab.h3
-rw-r--r--security/selinux/ss/conditional.c123
-rw-r--r--security/selinux/ss/conditional.h2
-rw-r--r--security/selinux/ss/ebitmap.c81
-rw-r--r--security/selinux/ss/ebitmap.h1
-rw-r--r--security/selinux/ss/policydb.c861
-rw-r--r--security/selinux/ss/policydb.h20
-rw-r--r--security/selinux/ss/services.c63
-rw-r--r--security/selinux/ss/status.c126
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/tomoyo/common.c17
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/oss/soundcard.c4
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--tools/perf/perf.h12
458 files changed, 12335 insertions, 5089 deletions
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index a0d479d1e1dd..f66f4df18690 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
1645 all the readers who were traversing the list when we deleted the 1645 all the readers who were traversing the list when we deleted the
1646 element are finished. We use <function>call_rcu()</function> to 1646 element are finished. We use <function>call_rcu()</function> to
1647 register a callback which will actually destroy the object once 1647 register a callback which will actually destroy the object once
1648 the readers are finished. 1648 all pre-existing readers are finished. Alternatively,
1649 <function>synchronize_rcu()</function> may be used to block until
1650 all pre-existing readers are finished.
1649 </para> 1651 </para>
1650 <para> 1652 <para>
1651 But how does Read Copy Update know when the readers are 1653 But how does Read Copy Update know when the readers are
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
1714- object_put(obj); 1716- object_put(obj);
1715+ list_del_rcu(&amp;obj-&gt;list); 1717+ list_del_rcu(&amp;obj-&gt;list);
1716 cache_num--; 1718 cache_num--;
1717+ call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu, obj); 1719+ call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
1718 } 1720 }
1719 1721
1720 /* Must be holding cache_lock */ 1722 /* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
1725 if (++cache_num > MAX_CACHE_SIZE) { 1727 if (++cache_num > MAX_CACHE_SIZE) {
1726 struct object *i, *outcast = NULL; 1728 struct object *i, *outcast = NULL;
1727 list_for_each_entry(i, &amp;cache, list) { 1729 list_for_each_entry(i, &amp;cache, list) {
1728@@ -85,6 +94,7 @@
1729 obj-&gt;popularity = 0;
1730 atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
1731 spin_lock_init(&amp;obj-&gt;lock);
1732+ INIT_RCU_HEAD(&amp;obj-&gt;rcu);
1733
1734 spin_lock_irqsave(&amp;cache_lock, flags);
1735 __cache_add(obj);
1736@@ -104,12 +114,11 @@ 1730@@ -104,12 +114,11 @@
1737 struct object *cache_find(int id) 1731 struct object *cache_find(int id)
1738 { 1732 {
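The hunks above update the example to the two-argument call_rcu() and note that synchronize_rcu() may be used to block instead. A minimal C sketch of the pattern being described, with assumed names (struct object, cache_delete_rcu) standing in for the document's full example:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct object {
        struct list_head list;
        struct rcu_head rcu;
        int id;
};

/* Runs only after all pre-existing readers have finished. */
static void cache_delete_rcu(struct rcu_head *head)
{
        struct object *obj = container_of(head, struct object, rcu);

        kfree(obj);
}

/* Caller must hold the update-side lock (cache_lock in the text). */
static void cache_delete(struct object *obj)
{
        list_del_rcu(&obj->list);
        call_rcu(&obj->rcu, cache_delete_rcu);
        /*
         * Alternative when sleeping is allowed:
         *      synchronize_rcu();
         *      kfree(obj);
         */
}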
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 790d1a812376..0c134f8afc6f 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -218,13 +218,22 @@ over a rather long period of time, but improvements are always welcome!
218 include: 218 include:
219 219
220 a. Keeping a count of the number of data-structure elements 220 a. Keeping a count of the number of data-structure elements
221 used by the RCU-protected data structure, including those 221 used by the RCU-protected data structure, including
222 waiting for a grace period to elapse. Enforce a limit 222 those waiting for a grace period to elapse. Enforce a
223 on this number, stalling updates as needed to allow 223 limit on this number, stalling updates as needed to allow
224 previously deferred frees to complete. 224 previously deferred frees to complete. Alternatively,
225 225 limit only the number awaiting deferred free rather than
226 Alternatively, limit only the number awaiting deferred 226 the total number of elements.
227 free rather than the total number of elements. 227
228 One way to stall the updates is to acquire the update-side
229 mutex. (Don't try this with a spinlock -- other CPUs
230 spinning on the lock could prevent the grace period
231 from ever ending.) Another way to stall the updates
232 is for the updates to use a wrapper function around
233 the memory allocator, so that this wrapper function
234 simulates OOM when there is too much memory awaiting an
235 RCU grace period. There are of course many other
236 variations on this theme.
228 237
229 b. Limiting update rate. For example, if updates occur only 238 b. Limiting update rate. For example, if updates occur only
230 once per hour, then no explicit rate limiting is required, 239 once per hour, then no explicit rate limiting is required,
@@ -365,3 +374,26 @@ over a rather long period of time, but improvements are always welcome!
365 and the compiler to freely reorder code into and out of RCU 374 and the compiler to freely reorder code into and out of RCU
366 read-side critical sections. It is the responsibility of the 375 read-side critical sections. It is the responsibility of the
367 RCU update-side primitives to deal with this. 376 RCU update-side primitives to deal with this.
377
37817. Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
379 the __rcu sparse checks to validate your RCU code. These
380 can help find problems as follows:
381
382 CONFIG_PROVE_RCU: check that accesses to RCU-protected data
383 structures are carried out under the proper RCU
384 read-side critical section, while holding the right
385 combination of locks, or whatever other conditions
386 are appropriate.
387
388 CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
389 same object to call_rcu() (or friends) before an RCU
390 grace period has elapsed since the last time that you
391 passed that same object to call_rcu() (or friends).
392
393 __rcu sparse checks: tag the pointer to the RCU-protected data
394 structure with __rcu, and sparse will warn you if you
395 access that pointer without the services of one of the
396 variants of rcu_dereference().
397
398 These debugging aids can help you find problems that are
399 otherwise extremely difficult to spot.
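For the __rcu sparse checks added in item 17, a short sketch of the annotation in use (struct foo and gbl_foo are illustrative names only):

struct foo {
        int val;
};

static struct foo __rcu *gbl_foo;

int foo_get_val(void)
{
        struct foo *p;
        int ret;

        rcu_read_lock();
        p = rcu_dereference(gbl_foo);   /* sparse warns if gbl_foo is
                                           read without this */
        ret = p ? p->val : -1;
        rcu_read_unlock();
        return ret;
}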
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 44c6dcc93d6d..862c08ef1fde 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -80,6 +80,24 @@ o A CPU looping with bottom halves disabled. This condition can
80o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel 80o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
81 without invoking schedule(). 81 without invoking schedule().
82 82
83o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
84 happen to preempt a low-priority task in the middle of an RCU
85 read-side critical section. This is especially damaging if
86 that low-priority task is not permitted to run on any other CPU,
87 in which case the next RCU grace period can never complete, which
88 will eventually cause the system to run out of memory and hang.
89 While the system is in the process of running itself out of
90 memory, you might see stall-warning messages.
91
92o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
93 is running at a higher priority than the RCU softirq threads.
94 This will prevent RCU callbacks from ever being invoked,
95 and in a CONFIG_TREE_PREEMPT_RCU kernel will further prevent
96 RCU grace periods from ever completing. Either way, the
97 system will eventually run out of memory and hang. In the
98 CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
99 messages.
100
83o A bug in the RCU implementation. 101o A bug in the RCU implementation.
84 102
85o A hardware failure. This is quite unlikely, but has occurred 103o A hardware failure. This is quite unlikely, but has occurred
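To make the CONFIG_PREEMPT scenario above concrete, a deliberately pathological sketch (testing only; the SCHED_FIFO priority and single-CPU binding are assumed to be set up elsewhere):

#include <linux/kthread.h>

/*
 * A CPU-bound real-time loop that never yields.  If it preempts a
 * low-priority task inside rcu_read_lock()/rcu_read_unlock() and that
 * task cannot run on any other CPU, the grace period cannot complete.
 */
static int rt_spin_thread(void *unused)
{
        while (!kthread_should_stop())
                cpu_relax();            /* no schedule(), no sleeping */
        return 0;
}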
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index efd8cc95c06b..a851118775d8 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -125,6 +125,17 @@ o "b" is the batch limit for this CPU. If more than this number
125 of RCU callbacks is ready to invoke, then the remainder will 125 of RCU callbacks is ready to invoke, then the remainder will
126 be deferred. 126 be deferred.
127 127
128o "ci" is the number of RCU callbacks that have been invoked for
129 this CPU. Note that ci+ql is the number of callbacks that have
130 been registered in absence of CPU-hotplug activity.
131
132o "co" is the number of RCU callbacks that have been orphaned due to
133 this CPU going offline.
134
135o "ca" is the number of RCU callbacks that have been adopted due to
136 other CPUs going offline. Note that ci+co-ca+ql is the number of
137 RCU callbacks registered on this CPU.
138
128There is also an rcu/rcudata.csv file with the same information in 139There is also an rcu/rcudata.csv file with the same information in
129comma-separated-variable spreadsheet format. 140comma-separated-variable spreadsheet format.
130 141
@@ -180,7 +191,7 @@ o "s" is the "signaled" state that drives force_quiescent_state()'s
180 191
181o "jfq" is the number of jiffies remaining for this grace period 192o "jfq" is the number of jiffies remaining for this grace period
182 before force_quiescent_state() is invoked to help push things 193 before force_quiescent_state() is invoked to help push things
183 along. Note that CPUs in dyntick-idle mode thoughout the grace 194 along. Note that CPUs in dyntick-idle mode throughout the grace
184 period will not report on their own, but rather must be checked by 195 period will not report on their own, but rather must be checked by
185 some other CPU via force_quiescent_state(). 196 some other CPU via force_quiescent_state().
186 197
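To make the counter identities above concrete with made-up values: if a CPU shows ci=353, co=1, ca=1, and ql=45, then ci+co-ca+ql = 353+1-1+45 = 398 callbacks have been registered on that CPU; with no CPU-hotplug activity, co and ca are zero and the count reduces to ci+ql = 398.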
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 2df71861e578..d9271e74e488 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,82 +1,35 @@
1Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters 1Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
2=============================================================== 2===============================================================
3 3
4September 26, 2006 4Intel Gigabit Linux driver.
5 5Copyright(c) 1999 - 2010 Intel Corporation.
6 6
7Contents 7Contents
8======== 8========
9 9
10- In This Release
11- Identifying Your Adapter 10- Identifying Your Adapter
12- Building and Installation
13- Command Line Parameters 11- Command Line Parameters
14- Speed and Duplex Configuration 12- Speed and Duplex Configuration
15- Additional Configurations 13- Additional Configurations
16- Known Issues
17- Support 14- Support
18 15
19
20In This Release
21===============
22
23This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family
24of Adapters. This driver includes support for Itanium(R)2-based systems.
25
26For questions related to hardware requirements, refer to the documentation
27supplied with your Intel PRO/1000 adapter. All hardware requirements listed
28apply to use with Linux.
29
30The following features are now available in supported kernels:
31 - Native VLANs
32 - Channel Bonding (teaming)
33 - SNMP
34
35Channel Bonding documentation can be found in the Linux kernel source:
36/Documentation/networking/bonding.txt
37
38The driver information previously displayed in the /proc filesystem is not
39supported in this release. Alternatively, you can use ethtool (version 1.6
40or later), lspci, and ifconfig to obtain the same information.
41
42Instructions on updating ethtool can be found in the section "Additional
43Configurations" later in this document.
44
45NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
46support.
47
48
49Identifying Your Adapter 16Identifying Your Adapter
50======================== 17========================
51 18
52For more information on how to identify your adapter, go to the Adapter & 19For more information on how to identify your adapter, go to the Adapter &
53Driver ID Guide at: 20Driver ID Guide at:
54 21
55 http://support.intel.com/support/network/adapter/pro100/21397.htm 22 http://support.intel.com/support/go/network/adapter/idguide.htm
56 23
57For the latest Intel network drivers for Linux, refer to the following 24For the latest Intel network drivers for Linux, refer to the following
58website. In the search field, enter your adapter name or type, or use the 25website. In the search field, enter your adapter name or type, or use the
59networking link on the left to search for your adapter: 26networking link on the left to search for your adapter:
60 27
61 http://downloadfinder.intel.com/scripts-df/support_intel.asp 28 http://support.intel.com/support/go/network/adapter/home.htm
62
63 29
64Command Line Parameters 30Command Line Parameters
65======================= 31=======================
66 32
67If the driver is built as a module, the following optional parameters
68are used by entering them on the command line with the modprobe command
69using this syntax:
70
71 modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
72
73For example, with two PRO/1000 PCI adapters, entering:
74
75 modprobe e1000 TxDescriptors=80,128
76
77loads the e1000 driver with 80 TX descriptors for the first adapter and
78128 TX descriptors for the second adapter.
79
80The default value for each parameter is generally the recommended setting, 33The default value for each parameter is generally the recommended setting,
81unless otherwise noted. 34unless otherwise noted.
82 35
@@ -89,10 +42,6 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
89 parameters, see the application note at: 42 parameters, see the application note at:
90 http://www.intel.com/design/network/applnots/ap450.htm 43 http://www.intel.com/design/network/applnots/ap450.htm
91 44
92 A descriptor describes a data buffer and attributes related to
93 the data buffer. This information is accessed by the hardware.
94
95
96AutoNeg 45AutoNeg
97------- 46-------
98(Supported only on adapters with copper connections) 47(Supported only on adapters with copper connections)
@@ -106,7 +55,6 @@ Duplex parameters must not be specified.
106NOTE: Refer to the Speed and Duplex section of this readme for more 55NOTE: Refer to the Speed and Duplex section of this readme for more
107 information on the AutoNeg parameter. 56 information on the AutoNeg parameter.
108 57
109
110Duplex 58Duplex
111------ 59------
112(Supported only on adapters with copper connections) 60(Supported only on adapters with copper connections)
@@ -119,7 +67,6 @@ set to auto-negotiate, the board auto-detects the correct duplex. If the
119link partner is forced (either full or half), Duplex defaults to half- 67link partner is forced (either full or half), Duplex defaults to half-
120duplex. 68duplex.
121 69
122
123FlowControl 70FlowControl
124----------- 71-----------
125Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) 72Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
@@ -128,16 +75,16 @@ Default Value: Reads flow control settings from the EEPROM
128This parameter controls the automatic generation (Tx) and response (Rx) 75This parameter controls the automatic generation (Tx) and response (Rx)
129to Ethernet PAUSE frames. 76to Ethernet PAUSE frames.
130 77
131
132InterruptThrottleRate 78InterruptThrottleRate
133--------------------- 79---------------------
134(not supported on Intel(R) 82542, 82543 or 82544-based adapters) 80(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
135Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 81Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
82 4=simplified balancing)
136Default Value: 3 83Default Value: 3
137 84
138The driver can limit the amount of interrupts per second that the adapter 85The driver can limit the amount of interrupts per second that the adapter
139will generate for incoming packets. It does this by writing a value to the 86will generate for incoming packets. It does this by writing a value to the
140adapter that is based on the maximum amount of interrupts that the adapter 87adapter that is based on the maximum amount of interrupts that the adapter
141will generate per second. 88will generate per second.
142 89
143Setting InterruptThrottleRate to a value greater or equal to 100 90Setting InterruptThrottleRate to a value greater or equal to 100
@@ -146,37 +93,43 @@ per second, even if more packets have come in. This reduces interrupt
146load on the system and can lower CPU utilization under heavy load, 93load on the system and can lower CPU utilization under heavy load,
147but will increase latency as packets are not processed as quickly. 94but will increase latency as packets are not processed as quickly.
148 95
149The default behaviour of the driver previously assumed a static 96The default behaviour of the driver previously assumed a static
150InterruptThrottleRate value of 8000, providing a good fallback value for 97InterruptThrottleRate value of 8000, providing a good fallback value for
151 all traffic types, but lacking in small packet performance and latency. 98 all traffic types, but lacking in small packet performance and latency.
152The hardware can handle many more small packets per second however, and 99The hardware can handle many more small packets per second however, and
153for this reason an adaptive interrupt moderation algorithm was implemented. 100for this reason an adaptive interrupt moderation algorithm was implemented.
154 101
155Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which 102Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
156it dynamically adjusts the InterruptThrottleRate value based on the traffic 103it dynamically adjusts the InterruptThrottleRate value based on the traffic
157that it receives. After determining the type of incoming traffic in the last 104that it receives. After determining the type of incoming traffic in the last
158timeframe, it will adjust the InterruptThrottleRate to an appropriate value 105timeframe, it will adjust the InterruptThrottleRate to an appropriate value
159for that traffic. 106for that traffic.
160 107
161The algorithm classifies the incoming traffic every interval into 108The algorithm classifies the incoming traffic every interval into
162classes. Once the class is determined, the InterruptThrottleRate value is 109classes. Once the class is determined, the InterruptThrottleRate value is
163adjusted to suit that traffic type the best. There are three classes defined: 110adjusted to suit that traffic type the best. There are three classes defined:
164"Bulk traffic", for large amounts of packets of normal size; "Low latency", 111"Bulk traffic", for large amounts of packets of normal size; "Low latency",
165for small amounts of traffic and/or a significant percentage of small 112for small amounts of traffic and/or a significant percentage of small
166packets; and "Lowest latency", for almost completely small packets or 113packets; and "Lowest latency", for almost completely small packets or
167minimal traffic. 114minimal traffic.
168 115
169In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 116In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
170for traffic that falls in class "Bulk traffic". If traffic falls in the "Low 117for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
171latency" or "Lowest latency" class, the InterruptThrottleRate is increased 118latency" or "Lowest latency" class, the InterruptThrottleRate is increased
172stepwise to 20000. This default mode is suitable for most applications. 119stepwise to 20000. This default mode is suitable for most applications.
173 120
174For situations where low latency is vital such as cluster or 121For situations where low latency is vital such as cluster or
175grid computing, the algorithm can reduce latency even more when 122grid computing, the algorithm can reduce latency even more when
176InterruptThrottleRate is set to mode 1. In this mode, which operates 123InterruptThrottleRate is set to mode 1. In this mode, which operates
177the same as mode 3, the InterruptThrottleRate will be increased stepwise to 124the same as mode 3, the InterruptThrottleRate will be increased stepwise to
17870000 for traffic in class "Lowest latency". 12570000 for traffic in class "Lowest latency".
179 126
127In simplified mode the interrupt rate is based on the ratio of Tx and
128Rx traffic. If the bytes per second rate is approximately equal, the
129interrupt rate will drop as low as 2000 interrupts per second. If the
130traffic is mostly transmit or mostly receive, the interrupt rate could
131be as high as 8000.
132
180Setting InterruptThrottleRate to 0 turns off any interrupt moderation 133Setting InterruptThrottleRate to 0 turns off any interrupt moderation
181and may improve small packet latency, but is generally not suitable 134and may improve small packet latency, but is generally not suitable
182for bulk throughput traffic. 135for bulk throughput traffic.
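For a rough sense of scale (arithmetic added for illustration): an InterruptThrottleRate of 4000 caps the adapter at one interrupt per 1/4000 s = 250 microseconds, 20000 at one per 50 microseconds, and 70000 at roughly one per 14 microseconds.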
@@ -212,8 +165,6 @@ NOTE: When e1000 is loaded with default settings and multiple adapters
212 be platform-specific. If CPU utilization is not a concern, use 165 be platform-specific. If CPU utilization is not a concern, use
213 RX_POLLING (NAPI) and default driver settings. 166 RX_POLLING (NAPI) and default driver settings.
214 167
215
216
217RxDescriptors 168RxDescriptors
218------------- 169-------------
219Valid Range: 80-256 for 82542 and 82543-based adapters 170Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -225,15 +176,14 @@ by the driver. Increasing this value allows the driver to buffer more
225incoming packets, at the expense of increased system memory utilization. 176incoming packets, at the expense of increased system memory utilization.
226 177
227Each descriptor is 16 bytes. A receive buffer is also allocated for each 178Each descriptor is 16 bytes. A receive buffer is also allocated for each
228descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending 179descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
229on the MTU setting. The maximum MTU size is 16110. 180on the MTU setting. The maximum MTU size is 16110.
230 181
231NOTE: MTU designates the frame size. It only needs to be set for Jumbo 182NOTE: MTU designates the frame size. It only needs to be set for Jumbo
232 Frames. Depending on the available system resources, the request 183 Frames. Depending on the available system resources, the request
233 for a higher number of receive descriptors may be denied. In this 184 for a higher number of receive descriptors may be denied. In this
234 case, use a lower number. 185 case, use a lower number.
235 186
236
237RxIntDelay 187RxIntDelay
238---------- 188----------
239Valid Range: 0-65535 (0=off) 189Valid Range: 0-65535 (0=off)
@@ -254,7 +204,6 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may
254 restoring the network connection. To eliminate the potential 204 restoring the network connection. To eliminate the potential
255 for the hang ensure that RxIntDelay is set to 0. 205 for the hang ensure that RxIntDelay is set to 0.
256 206
257
258RxAbsIntDelay 207RxAbsIntDelay
259------------- 208-------------
260(This parameter is supported only on 82540, 82545 and later adapters.) 209(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -268,7 +217,6 @@ packet is received within the set amount of time. Proper tuning,
268along with RxIntDelay, may improve traffic throughput in specific network 217along with RxIntDelay, may improve traffic throughput in specific network
269conditions. 218conditions.
270 219
271
272Speed 220Speed
273----- 221-----
274(This parameter is supported only on adapters with copper connections.) 222(This parameter is supported only on adapters with copper connections.)
@@ -280,7 +228,6 @@ Speed forces the line speed to the specified value in megabits per second
280partner is set to auto-negotiate, the board will auto-detect the correct 228partner is set to auto-negotiate, the board will auto-detect the correct
281speed. Duplex should also be set when Speed is set to either 10 or 100. 229speed. Duplex should also be set when Speed is set to either 10 or 100.
282 230
283
284TxDescriptors 231TxDescriptors
285------------- 232-------------
286Valid Range: 80-256 for 82542 and 82543-based adapters 233Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -295,6 +242,36 @@ NOTE: Depending on the available system resources, the request for a
295 higher number of transmit descriptors may be denied. In this case, 242 higher number of transmit descriptors may be denied. In this case,
296 use a lower number. 243 use a lower number.
297 244
245TxDescriptorStep
246----------------
247Valid Range: 1 (use every Tx Descriptor)
248 4 (use every 4th Tx Descriptor)
249
250Default Value: 1 (use every Tx Descriptor)
251
252On certain non-Intel architectures, it has been observed that intense TX
253traffic bursts of short packets may result in an improper descriptor
254writeback. If this occurs, the driver will report a "TX Timeout" and reset
255the adapter, after which the transmit flow will restart, though data may
256have stalled for as much as 10 seconds before it resumes.
257
258The improper writeback does not occur on the first descriptor in a system
259memory cache-line, which is typically 32 bytes, or 4 descriptors long.
260
261Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors
262are aligned to the start of a system memory cache line, and so this problem
263will not occur.
264
265NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of
266 TxDescriptors available for transmits to 1/4 of the normal allocation.
267 This has a possible negative performance impact, which may be
268 compensated for by allocating more descriptors using the TxDescriptors
269 module parameter.
270
271 There are other conditions which may result in "TX Timeout", which will
272 not be resolved by the use of the TxDescriptorStep parameter. As the
273 issue addressed by this parameter has never been observed on Intel
274 Architecture platforms, it should not be used on Intel platforms.
298 275
299TxIntDelay 276TxIntDelay
300---------- 277----------
@@ -307,7 +284,6 @@ efficiency if properly tuned for specific network traffic. If the
307system is reporting dropped transmits, this value may be set too high 284system is reporting dropped transmits, this value may be set too high
308causing the driver to run out of available transmit descriptors. 285causing the driver to run out of available transmit descriptors.
309 286
310
311TxAbsIntDelay 287TxAbsIntDelay
312------------- 288-------------
313(This parameter is supported only on 82540, 82545 and later adapters.) 289(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -330,6 +306,35 @@ Default Value: 1
330A value of '1' indicates that the driver should enable IP checksum 306A value of '1' indicates that the driver should enable IP checksum
331offload for received packets (both UDP and TCP) to the adapter hardware. 307offload for received packets (both UDP and TCP) to the adapter hardware.
332 308
309Copybreak
310---------
311Valid Range: 0-xxxxxxx (0=off)
312Default Value: 256
313Usage: insmod e1000.ko copybreak=128
314
315Driver copies all packets below or equaling this size to a fresh Rx
316buffer before handing it up the stack.
317
318This parameter is different than other parameters, in that it is a
319single (not 1,1,1 etc.) parameter applied to all driver instances and
320it is also available during runtime at
321/sys/module/e1000/parameters/copybreak
322
323SmartPowerDownEnable
324--------------------
325Valid Range: 0-1
326Default Value: 0 (disabled)
327
328Allows PHY to turn off in lower power states. The user can turn off
329this parameter in supported chipsets.
330
331KumeranLockLoss
332---------------
333Valid Range: 0-1
334Default Value: 1 (enabled)
335
336This workaround skips resetting the PHY at shutdown for the initial
337silicon releases of ICH8 systems.
333 338
334Speed and Duplex Configuration 339Speed and Duplex Configuration
335============================== 340==============================
@@ -385,40 +390,9 @@ If the link partner is forced to a specific speed and duplex, then this
385parameter should not be used. Instead, use the Speed and Duplex parameters 390parameter should not be used. Instead, use the Speed and Duplex parameters
386previously mentioned to force the adapter to the same speed and duplex. 391previously mentioned to force the adapter to the same speed and duplex.
387 392
388
389Additional Configurations 393Additional Configurations
390========================= 394=========================
391 395
392 Configuring the Driver on Different Distributions
393 -------------------------------------------------
394 Configuring a network driver to load properly when the system is started
395 is distribution dependent. Typically, the configuration process involves
396 adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
397 as editing other system startup scripts and/or configuration files. Many
398 popular Linux distributions ship with tools to make these changes for you.
399 To learn the proper way to configure a network device for your system,
400 refer to your distribution documentation. If during this process you are
401 asked for the driver or module name, the name for the Linux Base Driver
402 for the Intel(R) PRO/1000 Family of Adapters is e1000.
403
404 As an example, if you install the e1000 driver for two PRO/1000 adapters
405 (eth0 and eth1) and set the speed and duplex to 10full and 100half, add
406 the following to modules.conf or or modprobe.conf:
407
408 alias eth0 e1000
409 alias eth1 e1000
410 options e1000 Speed=10,100 Duplex=2,1
411
412 Viewing Link Messages
413 ---------------------
414 Link messages will not be displayed to the console if the distribution is
415 restricting system messages. In order to see network driver link messages
416 on your console, set dmesg to eight by entering the following:
417
418 dmesg -n 8
419
420 NOTE: This setting is not saved across reboots.
421
422 Jumbo Frames 396 Jumbo Frames
423 ------------ 397 ------------
424 Jumbo Frames support is enabled by changing the MTU to a value larger than 398 Jumbo Frames support is enabled by changing the MTU to a value larger than
@@ -437,9 +411,11 @@ Additional Configurations
437 setting in a different location. 411 setting in a different location.
438 412
439 Notes: 413 Notes:
440 414 Degradation in throughput performance may be observed in some Jumbo frames
441 - To enable Jumbo Frames, increase the MTU size on the interface beyond 415 environments. If this is observed, increasing the application's socket buffer
442 1500. 416 size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
417 See the specific application manual and /usr/src/linux*/Documentation/
418 networking/ip-sysctl.txt for more details.
443 419
444 - The maximum MTU setting for Jumbo Frames is 16110. This value coincides 420 - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
445 with the maximum Jumbo Frames size of 16128. 421 with the maximum Jumbo Frames size of 16128.
@@ -447,40 +423,11 @@ Additional Configurations
447 - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or 423 - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
448 loss of link. 424 loss of link.
449 425
450 - Some Intel gigabit adapters that support Jumbo Frames have a frame size
451 limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
452 The adapters with this limitation are based on the Intel(R) 82571EB,
453 82572EI, 82573L and 80003ES2LAN controller. These correspond to the
454 following product names:
455 Intel(R) PRO/1000 PT Server Adapter
456 Intel(R) PRO/1000 PT Desktop Adapter
457 Intel(R) PRO/1000 PT Network Connection
458 Intel(R) PRO/1000 PT Dual Port Server Adapter
459 Intel(R) PRO/1000 PT Dual Port Network Connection
460 Intel(R) PRO/1000 PF Server Adapter
461 Intel(R) PRO/1000 PF Network Connection
462 Intel(R) PRO/1000 PF Dual Port Server Adapter
463 Intel(R) PRO/1000 PB Server Connection
464 Intel(R) PRO/1000 PL Network Connection
465 Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
466 Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
467 Intel(R) PRO/1000 PT Quad Port Server Adapter
468
469 - Adapters based on the Intel(R) 82542 and 82573V/E controller do not 426 - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
470 support Jumbo Frames. These correspond to the following product names: 427 support Jumbo Frames. These correspond to the following product names:
471 Intel(R) PRO/1000 Gigabit Server Adapter 428 Intel(R) PRO/1000 Gigabit Server Adapter
472 Intel(R) PRO/1000 PM Network Connection 429 Intel(R) PRO/1000 PM Network Connection
473 430
474 - The following adapters do not support Jumbo Frames:
475 Intel(R) 82562V 10/100 Network Connection
476 Intel(R) 82566DM Gigabit Network Connection
477 Intel(R) 82566DC Gigabit Network Connection
478 Intel(R) 82566MM Gigabit Network Connection
479 Intel(R) 82566MC Gigabit Network Connection
480 Intel(R) 82562GT 10/100 Network Connection
481 Intel(R) 82562G 10/100 Network Connection
482
483
484 Ethtool 431 Ethtool
485 ------- 432 -------
486 The driver utilizes the ethtool interface for driver configuration and 433 The driver utilizes the ethtool interface for driver configuration and
@@ -490,142 +437,14 @@ Additional Configurations
490 The latest release of ethtool can be found from 437 The latest release of ethtool can be found from
491 http://sourceforge.net/projects/gkernel. 438 http://sourceforge.net/projects/gkernel.
492 439
493 NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
494 for a more complete ethtool feature set can be enabled by upgrading
495 ethtool to ethtool-1.8.1.
496
497 Enabling Wake on LAN* (WoL) 440 Enabling Wake on LAN* (WoL)
498 --------------------------- 441 ---------------------------
499 WoL is configured through the Ethtool* utility. Ethtool is included with 442 WoL is configured through the Ethtool* utility.
500 all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
501 download and install Ethtool from the following website:
502 http://sourceforge.net/projects/gkernel.
503
504 For instructions on enabling WoL with Ethtool, refer to the website listed
505 above.
506 443
507 WoL will be enabled on the system during the next shut down or reboot. 444 WoL will be enabled on the system during the next shut down or reboot.
508 For this driver version, in order to enable WoL, the e1000 driver must be 445 For this driver version, in order to enable WoL, the e1000 driver must be
509 loaded when shutting down or rebooting the system. 446 loaded when shutting down or rebooting the system.
510 447
511 Wake On LAN is only supported on port A for the following devices:
512 Intel(R) PRO/1000 PT Dual Port Network Connection
513 Intel(R) PRO/1000 PT Dual Port Server Connection
514 Intel(R) PRO/1000 PT Dual Port Server Adapter
515 Intel(R) PRO/1000 PF Dual Port Server Adapter
516 Intel(R) PRO/1000 PT Quad Port Server Adapter
517
518 NAPI
519 ----
520 NAPI (Rx polling mode) is enabled in the e1000 driver.
521
522 See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
523
524
525Known Issues
526============
527
528Dropped Receive Packets on Half-duplex 10/100 Networks
529------------------------------------------------------
530If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half-
531duplex, you may observe occasional dropped receive packets. There are no
532workarounds for this problem in this network configuration. The network must
533be updated to operate in full-duplex, and/or 1000mbps only.
534
535Jumbo Frames System Requirement
536-------------------------------
537Memory allocation failures have been observed on Linux systems with 64 MB
538of RAM or less that are running Jumbo Frames. If you are using Jumbo
539Frames, your system may require more than the advertised minimum
540requirement of 64 MB of system memory.
541
542Performance Degradation with Jumbo Frames
543-----------------------------------------
544Degradation in throughput performance may be observed in some Jumbo frames
545environments. If this is observed, increasing the application's socket
546buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
547may help. See the specific application manual and
548/usr/src/linux*/Documentation/
549networking/ip-sysctl.txt for more details.
550
551Jumbo Frames on Foundry BigIron 8000 switch
552-------------------------------------------
553There is a known issue using Jumbo frames when connected to a Foundry
554BigIron 8000 switch. This is a 3rd party limitation. If you experience
555loss of packets, lower the MTU size.
556
557Allocating Rx Buffers when Using Jumbo Frames
558---------------------------------------------
559Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
560the available memory is heavily fragmented. This issue may be seen with PCI-X
561adapters or with packet split disabled. This can be reduced or eliminated
562by changing the amount of available memory for receive buffer allocation, by
563increasing /proc/sys/vm/min_free_kbytes.
564
565Multiple Interfaces on Same Ethernet Broadcast Network
566------------------------------------------------------
567Due to the default ARP behavior on Linux, it is not possible to have
568one system on two IP networks in the same Ethernet broadcast domain
569(non-partitioned switch) behave as expected. All Ethernet interfaces
570will respond to IP traffic for any IP address assigned to the system.
571This results in unbalanced receive traffic.
572
573If you have multiple interfaces in a server, either turn on ARP
574filtering by entering:
575
576 echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
577(this only works if your kernel's version is higher than 2.4.5),
578
579NOTE: This setting is not saved across reboots. The configuration
580change can be made permanent by adding the line:
581 net.ipv4.conf.all.arp_filter = 1
582to the file /etc/sysctl.conf
583
584 or,
585
586install the interfaces in separate broadcast domains (either in
587different switches or in a switch partitioned to VLANs).
588
58982541/82547 can't link or are slow to link with some link partners
590-----------------------------------------------------------------
591There is a known compatibility issue with 82541/82547 and some
592low-end switches where the link will not be established, or will
593be slow to establish. In particular, these switches are known to
594be incompatible with 82541/82547:
595
596 Planex FXG-08TE
597 I-O Data ETG-SH8
598
599To work around this issue, the driver can be compiled with an override
600of the PHY's master/slave setting. Forcing master or forcing slave
601mode will improve time-to-link.
602
603 # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
604
605Where <n> is:
606
607 0 = Hardware default
608 1 = Master mode
609 2 = Slave mode
610 3 = Auto master/slave
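
For example, forcing master mode (an illustrative choice; which setting
helps depends on the link partner) would look like:

    # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=1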
611
612Disable rx flow control with ethtool
613------------------------------------
614In order to disable receive flow control using ethtool, you must turn
615off auto-negotiation on the same command line.
616
617For example:
618
619 ethtool -A eth? autoneg off rx off
620
621Unplugging network cable while ethtool -p is running
622----------------------------------------------------
623In kernel versions 2.5.50 and later (including 2.6 kernels), unplugging
624the network cable while ethtool -p is running will cause the system to
625become unresponsive to keyboard commands, except for control-alt-delete.
626Restarting the system appears to be the only remedy.
627
628
629Support
630=======
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
new file mode 100644
index 000000000000..6aa048badf32
--- /dev/null
+++ b/Documentation/networking/e1000e.txt
@@ -0,0 +1,302 @@
1Linux* Driver for Intel(R) Network Connection
2===============================================================
3
4Intel Gigabit Linux driver.
5Copyright(c) 1999 - 2010 Intel Corporation.
6
7Contents
8========
9
10- Identifying Your Adapter
11- Command Line Parameters
12- Additional Configurations
13- Support
14
15Identifying Your Adapter
16========================
17
18The e1000e driver supports all PCI Express Intel(R) Gigabit Network
19Connections, except those that are 82575, 82576 and 82580-based*.
20
21* NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by
22 the e1000 driver, not the e1000e driver due to the 82546 part being used
23 behind a PCI Express bridge.
24
25For more information on how to identify your adapter, go to the Adapter &
26Driver ID Guide at:
27
28 http://support.intel.com/support/go/network/adapter/idguide.htm
29
30For the latest Intel network drivers for Linux, refer to the following
31website. In the search field, enter your adapter name or type, or use the
32networking link on the left to search for your adapter:
33
34 http://support.intel.com/support/go/network/adapter/home.htm
35
36Command Line Parameters
37=======================
38
39The default value for each parameter is generally the recommended setting,
40unless otherwise noted.
41
42NOTES: For more information about the InterruptThrottleRate,
43 RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay
44 parameters, see the application note at:
45 http://www.intel.com/design/network/applnots/ap450.htm
46
47InterruptThrottleRate
48---------------------
49Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
50 4=simplified balancing)
51Default Value: 3
52
53The driver can limit the number of interrupts per second that the
54adapter will generate for incoming packets. It does this by writing a
55value to the adapter that is based on the maximum number of interrupts
56that the adapter will generate per second.
57
58Setting InterruptThrottleRate to a value greater than or equal to 100
59will program the adapter to send out a maximum of that many interrupts
60per second, even if more packets have come in. This reduces interrupt
61load on the system and can lower CPU utilization under heavy load,
62but will increase latency as packets are not processed as quickly.
63
64The driver has two adaptive modes (setting 1 or 3) in which
65it dynamically adjusts the InterruptThrottleRate value based on the traffic
66that it receives. After determining the type of incoming traffic in the last
67timeframe, it will adjust the InterruptThrottleRate to an appropriate value
68for that traffic.
69
70The algorithm classifies the incoming traffic every interval into
71classes. Once the class is determined, the InterruptThrottleRate value is
72adjusted to suit that traffic type the best. There are three classes defined:
73"Bulk traffic", for large amounts of packets of normal size; "Low latency",
74for small amounts of traffic and/or a significant percentage of small
75packets; and "Lowest latency", for almost completely small packets or
76minimal traffic.
77
78In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
79for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
80latency" or "Lowest latency" class, the InterruptThrottleRate is increased
81stepwise to 20000. This default mode is suitable for most applications.
82
83For situations where low latency is vital, such as cluster or
84grid computing, the algorithm can reduce latency even more when
85InterruptThrottleRate is set to mode 1. In this mode, which operates
86the same as mode 3, the InterruptThrottleRate will be increased stepwise to
8770000 for traffic in class "Lowest latency".
88
89In simplified mode the interrupt rate is based on the ratio of Tx and
90Rx traffic. If the Tx and Rx bytes-per-second rates are approximately
91equal, the interrupt rate will drop as low as 2000 interrupts per
92second. If the traffic is mostly transmit or mostly receive, the
93interrupt rate could be as high as 8000.
94
95Setting InterruptThrottleRate to 0 turns off any interrupt moderation
96and may improve small packet latency, but is generally not suitable
97for bulk throughput traffic.
98
99NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
100 RxAbsIntDelay parameters. In other words, minimizing the receive
101 and/or transmit absolute delays does not force the controller to
102 generate more interrupts than what the Interrupt Throttle Rate
103 allows.
104
105NOTE: When e1000e is loaded with default settings and multiple adapters
106 are in use simultaneously, the CPU utilization may increase non-
107 linearly. In order to limit the CPU utilization without impacting
108 the overall throughput, we recommend that you load the driver as
109 follows:
110
111 modprobe e1000e InterruptThrottleRate=3000,3000,3000
112
113 This sets the InterruptThrottleRate to 3000 interrupts/sec for
114 the first, second, and third instances of the driver. The range
115 of 2000 to 3000 interrupts per second works on a majority of
116 systems and is a good starting point, but the optimal value will
117 be platform-specific. If CPU utilization is not a concern, use
118 RX_POLLING (NAPI) and default driver settings.
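
To make such a setting persistent, one common approach (the exact file
name and mechanism are distro-dependent assumptions) is a modprobe
options file:

    echo "options e1000e InterruptThrottleRate=3000,3000,3000" \
        > /etc/modprobe.d/e1000e.conf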
119
120RxIntDelay
121----------
122Valid Range: 0-65535 (0=off)
123Default Value: 0
124
125This value delays the generation of receive interrupts in units of 1.024
126microseconds. Receive interrupt reduction can improve CPU efficiency if
127properly tuned for specific network traffic. Increasing this value adds
128extra latency to frame reception and can end up decreasing the throughput
129of TCP traffic. If the system is reporting dropped receives, this value
130may be set too high, causing the driver to run out of available receive
131descriptors.
132
133CAUTION: When setting RxIntDelay to a value other than 0, adapters may
134 hang (stop transmitting) under certain network conditions. If
135 this occurs a NETDEV WATCHDOG message is logged in the system
136 event log. In addition, the controller is automatically reset,
137 restoring the network connection. To eliminate the potential
138 for the hang ensure that RxIntDelay is set to 0.
139
140RxAbsIntDelay
141-------------
142Valid Range: 0-65535 (0=off)
143Default Value: 8
144
145This value, in units of 1.024 microseconds, limits the delay in which a
146receive interrupt is generated. Useful only if RxIntDelay is non-zero,
147this value ensures that an interrupt is generated after the initial
148packet is received within the set amount of time. Proper tuning,
149along with RxIntDelay, may improve traffic throughput in specific network
150conditions.
151
152TxIntDelay
153----------
154Valid Range: 0-65535 (0=off)
155Default Value: 8
156
157This value delays the generation of transmit interrupts in units of
1581.024 microseconds. Transmit interrupt reduction can improve CPU
159efficiency if properly tuned for specific network traffic. If the
160system is reporting dropped transmits, this value may be set too high,
161causing the driver to run out of available transmit descriptors.
162
163TxAbsIntDelay
164-------------
165Valid Range: 0-65535 (0=off)
166Default Value: 32
167
168This value, in units of 1.024 microseconds, limits the delay in which a
169transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
170this value ensures that an interrupt is generated after the initial
171packet is sent on the wire within the set amount of time. Proper tuning,
172along with TxIntDelay, may improve traffic throughput in specific
173network conditions.
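
As a sketch of how the transmit delays might be tuned together when
loading the module (the values are illustrative only; per the CAUTION
above, RxIntDelay is left at its default of 0):

    modprobe e1000e TxIntDelay=16 TxAbsIntDelay=64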
174
175Copybreak
176---------
177Valid Range: 0-xxxxxxx (0=off)
178Default Value: 256
179
180The driver copies all packets at or below this size to a fresh Rx
181buffer before handing them up the stack.
182
183Unlike the other parameters, Copybreak is a single value (not a
184comma-separated per-instance list) that applies to all driver
185instances. It is also available at runtime via
186/sys/module/e1000e/parameters/copybreak
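
For example, the runtime value can be inspected and changed through
sysfs (128 is an illustrative value):

    cat /sys/module/e1000e/parameters/copybreak
    echo 128 > /sys/module/e1000e/parameters/copybreak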
187
188SmartPowerDownEnable
189--------------------
190Valid Range: 0-1
191Default Value: 0 (disabled)
192
193Allows the PHY to turn off in lower power states. This parameter can be
194set on supported chipsets.
195
196KumeranLockLoss
197---------------
198Valid Range: 0-1
199Default Value: 1 (enabled)
200
201This workaround skips resetting the PHY at shutdown for the initial
202silicon releases of ICH8 systems.
203
204IntMode
205-------
206Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X)
207Default Value: 2
208
209Allows changing the interrupt mode at module load time, without requiring a
210recompile. If the driver load fails to enable a specific interrupt mode, the
211driver will try other interrupt modes, from least to most compatible. The
212interrupt order is MSI-X, MSI, Legacy. If specifying MSI (IntMode=1)
213interrupts, only MSI and Legacy will be attempted.
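
For example, to request MSI interrupts when loading the module:

    modprobe e1000e IntMode=1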
214
215CrcStripping
216------------
217Valid Range: 0-1
218Default Value: 1 (enabled)
219
220Strip the CRC from received packets before sending up the network stack. If
221you have a machine with a BMC enabled but cannot receive IPMI traffic after
222loading or enabling the driver, try disabling this feature.
223
224WriteProtectNVM
225---------------
226Valid Range: 0-1
227Default Value: 1 (enabled)
228
229Set the hardware to ignore all write/erase cycles to the GbE region in the
230ICHx NVM (non-volatile memory). The protection can be disabled via the
231WriteProtectNVM module parameter (enabled by default) only after a hardware
232reset, but the machine must be power cycled before trying to enable writes.
233
234Note: if the kernel is built with CONFIG_STRICT_DEVMEM=y, the boot option
235iomem=relaxed may need to be set for the root user to write the NVM from
236user space via ethtool.
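
As a sketch (only meaningful after a hardware reset, as described
above), the write protection could be disabled at load time with:

    modprobe e1000e WriteProtectNVM=0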
237
238Additional Configurations
239=========================
240
241 Jumbo Frames
242 ------------
243 Jumbo Frames support is enabled by changing the MTU to a value larger than
244 the default of 1500. Use the ifconfig command to increase the MTU size.
245 For example:
246
247 ifconfig eth<x> mtu 9000 up
248
249 This setting is not saved across reboots.
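
  To make the MTU setting persistent, add it to the distribution's
  interface configuration; the file below is a Red Hat-style example and
  an assumption, as other distributions differ:

      # in /etc/sysconfig/network-scripts/ifcfg-eth<x>
      MTU=9000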
250
251 Notes:
252
253 - The maximum MTU setting for Jumbo Frames is 9216, which corresponds
254   to the maximum Jumbo Frames size of 9234 bytes.
255
256 - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
257 poor performance or loss of link.
258
259 - Some adapters limit Jumbo Frames sized packets to a maximum of
260 4096 bytes and some adapters do not support Jumbo Frames.
261
262
263 Ethtool
264 -------
265 The driver utilizes the ethtool interface for driver configuration and
266 diagnostics, as well as displaying statistical information. We
267 strongly recommend downloading the latest version of Ethtool at:
268
269 http://sourceforge.net/projects/gkernel.
270
271 Speed and Duplex
272 ----------------
273 Speed and Duplex are configured through the Ethtool* utility. For
274 instructions, refer to the Ethtool man page.
275
276 Enabling Wake on LAN* (WoL)
277 ---------------------------
278 WoL is configured through the Ethtool* utility. For instructions on
279 enabling WoL with Ethtool, refer to the Ethtool man page.
280
281 WoL will be enabled on the system during the next shut down or reboot.
282 For this driver version, in order to enable WoL, the e1000e driver must be
283 loaded when shutting down or rebooting the system.
284
285 In most cases Wake on LAN is only supported on port A of multiple-port
286 adapters. To verify whether a port supports Wake on LAN, run ethtool eth<X>.
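
  For example (eth<X> is a placeholder), check the "Supports Wake-on"
  line in the ethtool output, then enable wake on magic packet:

      ethtool eth<X>
      ethtool -s eth<X> wol g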
287
288
289Support
290=======
291
292For general information, go to the Intel support website at:
293
294 www.intel.com/support/
295
296or the Intel Wired Networking project hosted by Sourceforge at:
297
298 http://sourceforge.net/projects/e1000
299
300If an issue is identified with the released source code on the supported
301kernel with a supported adapter, email the specific information related
302to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 19015de6725f..21dd5d15b6b4 100755..100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -1,19 +1,16 @@
1Linux* Base Driver for Intel(R) Network Connection 1Linux* Base Driver for Intel(R) Network Connection
2================================================== 2==================================================
3 3
4November 24, 2009 4Intel Gigabit Linux driver.
5Copyright(c) 1999 - 2010 Intel Corporation.
5 6
6Contents 7Contents
7======== 8========
8 9
9- In This Release
10- Identifying Your Adapter 10- Identifying Your Adapter
11- Known Issues/Troubleshooting 11- Known Issues/Troubleshooting
12- Support 12- Support
13 13
14In This Release
15===============
16
17This file describes the ixgbevf Linux* Base Driver for Intel Network 14This file describes the ixgbevf Linux* Base Driver for Intel Network
18Connection. 15Connection.
19 16
@@ -33,7 +30,7 @@ Identifying Your Adapter
33For more information on how to identify your adapter, go to the Adapter & 30For more information on how to identify your adapter, go to the Adapter &
34Driver ID Guide at: 31Driver ID Guide at:
35 32
36 http://support.intel.com/support/network/sb/CS-008441.htm 33 http://support.intel.com/support/go/network/adapter/idguide.htm
37 34
38Known Issues/Troubleshooting 35Known Issues/Troubleshooting
39============================ 36============================
@@ -57,34 +54,3 @@ or the Intel Wired Networking project hosted by Sourceforge at:
57If an issue is identified with the released source code on the supported 54If an issue is identified with the released source code on the supported
58kernel with a supported adapter, email the specific information related 55kernel with a supported adapter, email the specific information related
59to the issue to e1000-devel@lists.sf.net 56to the issue to e1000-devel@lists.sf.net
60
61License
62=======
63
64Intel 10 Gigabit Linux driver.
65Copyright(c) 1999 - 2009 Intel Corporation.
66
67This program is free software; you can redistribute it and/or modify it
68under the terms and conditions of the GNU General Public License,
69version 2, as published by the Free Software Foundation.
70
71This program is distributed in the hope it will be useful, but WITHOUT
72ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
73FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
74more details.
75
76You should have received a copy of the GNU General Public License along with
77this program; if not, write to the Free Software Foundation, Inc.,
7851 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
79
80The full GNU General Public License is included in this distribution in
81the file called "COPYING".
82
83Trademarks
84==========
85
86Intel, Itanium, and Pentium are trademarks or registered trademarks of
87Intel Corporation or its subsidiaries in the United States and other
88countries.
89
90* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ccd951fa94ee..cc96ee2666f2 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -478,7 +478,7 @@ static void prepare_hwpoison_fd(void)
478 } 478 }
479 479
480 if (opt_unpoison && !hwpoison_forget_fd) { 480 if (opt_unpoison && !hwpoison_forget_fd) {
481 sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); 481 sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
482 hwpoison_forget_fd = checked_open(buf, O_WRONLY); 482 hwpoison_forget_fd = checked_open(buf, O_WRONLY);
483 } 483 }
484} 484}
diff --git a/MAINTAINERS b/MAINTAINERS
index f46d8e66333f..3d4179fbc526 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -969,6 +969,16 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
969S: Maintained 969S: Maintained
970F: arch/arm/mach-s5p*/ 970F: arch/arm/mach-s5p*/
971 971
972ARM/SAMSUNG S5P SERIES FIMC SUPPORT
973M: Kyungmin Park <kyungmin.park@samsung.com>
974M: Sylwester Nawrocki <s.nawrocki@samsung.com>
975L: linux-arm-kernel@lists.infradead.org
976L: linux-media@vger.kernel.org
977S: Maintained
978F: arch/arm/plat-s5p/dev-fimc*
979F: arch/arm/plat-samsung/include/plat/*fimc*
980F: drivers/media/video/s5p-fimc/
981
972ARM/SHMOBILE ARM ARCHITECTURE 982ARM/SHMOBILE ARM ARCHITECTURE
973M: Paul Mundt <lethal@linux-sh.org> 983M: Paul Mundt <lethal@linux-sh.org>
974M: Magnus Damm <magnus.damm@gmail.com> 984M: Magnus Damm <magnus.damm@gmail.com>
@@ -1517,6 +1527,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
1517S: Supported 1527S: Supported
1518F: Documentation/filesystems/ceph.txt 1528F: Documentation/filesystems/ceph.txt
1519F: fs/ceph 1529F: fs/ceph
1530F: net/ceph
1531F: include/linux/ceph
1520 1532
1521CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: 1533CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
1522M: David Vrabel <david.vrabel@csr.com> 1534M: David Vrabel <david.vrabel@csr.com>
@@ -2535,7 +2547,7 @@ S: Supported
2535F: drivers/scsi/gdt* 2547F: drivers/scsi/gdt*
2536 2548
2537GENERIC GPIO I2C DRIVER 2549GENERIC GPIO I2C DRIVER
2538M: Haavard Skinnemoen <hskinnemoen@atmel.com> 2550M: Haavard Skinnemoen <hskinnemoen@gmail.com>
2539S: Supported 2551S: Supported
2540F: drivers/i2c/busses/i2c-gpio.c 2552F: drivers/i2c/busses/i2c-gpio.c
2541F: include/linux/i2c-gpio.h 2553F: include/linux/i2c-gpio.h
@@ -3063,16 +3075,27 @@ L: netdev@vger.kernel.org
3063S: Maintained 3075S: Maintained
3064F: drivers/net/ixp2000/ 3076F: drivers/net/ixp2000/
3065 3077
3066INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe) 3078INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
3067M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> 3079M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
3068M: Jesse Brandeburg <jesse.brandeburg@intel.com> 3080M: Jesse Brandeburg <jesse.brandeburg@intel.com>
3069M: Bruce Allan <bruce.w.allan@intel.com> 3081M: Bruce Allan <bruce.w.allan@intel.com>
3070M: Alex Duyck <alexander.h.duyck@intel.com> 3082M: Carolyn Wyborny <carolyn.wyborny@intel.com>
3083M: Don Skidmore <donald.c.skidmore@intel.com>
3084M: Greg Rose <gregory.v.rose@intel.com>
3071M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> 3085M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
3086M: Alex Duyck <alexander.h.duyck@intel.com>
3072M: John Ronciak <john.ronciak@intel.com> 3087M: John Ronciak <john.ronciak@intel.com>
3073L: e1000-devel@lists.sourceforge.net 3088L: e1000-devel@lists.sourceforge.net
3074W: http://e1000.sourceforge.net/ 3089W: http://e1000.sourceforge.net/
3075S: Supported 3090S: Supported
3091F: Documentation/networking/e100.txt
3092F: Documentation/networking/e1000.txt
3093F: Documentation/networking/e1000e.txt
3094F: Documentation/networking/igb.txt
3095F: Documentation/networking/igbvf.txt
3096F: Documentation/networking/ixgb.txt
3097F: Documentation/networking/ixgbe.txt
3098F: Documentation/networking/ixgbevf.txt
3076F: drivers/net/e100.c 3099F: drivers/net/e100.c
3077F: drivers/net/e1000/ 3100F: drivers/net/e1000/
3078F: drivers/net/e1000e/ 3101F: drivers/net/e1000e/
@@ -3080,6 +3103,7 @@ F: drivers/net/igb/
3080F: drivers/net/igbvf/ 3103F: drivers/net/igbvf/
3081F: drivers/net/ixgb/ 3104F: drivers/net/ixgb/
3082F: drivers/net/ixgbe/ 3105F: drivers/net/ixgbe/
3106F: drivers/net/ixgbevf/
3083 3107
3084INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT 3108INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
3085L: linux-wireless@vger.kernel.org 3109L: linux-wireless@vger.kernel.org
@@ -3140,7 +3164,7 @@ F: drivers/net/ioc3-eth.c
3140 3164
3141IOC3 SERIAL DRIVER 3165IOC3 SERIAL DRIVER
3142M: Pat Gefre <pfg@sgi.com> 3166M: Pat Gefre <pfg@sgi.com>
3143L: linux-mips@linux-mips.org 3167L: linux-serial@vger.kernel.org
3144S: Maintained 3168S: Maintained
3145F: drivers/serial/ioc3_serial.c 3169F: drivers/serial/ioc3_serial.c
3146 3170
@@ -4783,6 +4807,15 @@ F: fs/qnx4/
4783F: include/linux/qnx4_fs.h 4807F: include/linux/qnx4_fs.h
4784F: include/linux/qnxtypes.h 4808F: include/linux/qnxtypes.h
4785 4809
4810RADOS BLOCK DEVICE (RBD)
4811F: include/linux/qnxtypes.h
4812M: Yehuda Sadeh <yehuda@hq.newdream.net>
4813M: Sage Weil <sage@newdream.net>
4814M: ceph-devel@vger.kernel.org
4815S: Supported
4816F: drivers/block/rbd.c
4817F: drivers/block/rbd_types.h
4818
4786RADEON FRAMEBUFFER DISPLAY DRIVER 4819RADEON FRAMEBUFFER DISPLAY DRIVER
4787M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 4820M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
4788L: linux-fbdev@vger.kernel.org 4821L: linux-fbdev@vger.kernel.org
@@ -5008,6 +5041,12 @@ F: drivers/media/common/saa7146*
5008F: drivers/media/video/*7146* 5041F: drivers/media/video/*7146*
5009F: include/media/*7146* 5042F: include/media/*7146*
5010 5043
5044SAMSUNG AUDIO (ASoC) DRIVERS
5045M: Jassi Brar <jassi.brar@samsung.com>
5046L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5047S: Supported
5048F: sound/soc/s3c24xx
5049
5011TLG2300 VIDEO4LINUX-2 DRIVER 5050TLG2300 VIDEO4LINUX-2 DRIVER
5012M: Huang Shijie <shijie8@gmail.com> 5051M: Huang Shijie <shijie8@gmail.com>
5013M: Kang Yong <kangyong@telegent.com> 5052M: Kang Yong <kangyong@telegent.com>
@@ -6450,8 +6489,10 @@ F: include/linux/wm97xx.h
6450WOLFSON MICROELECTRONICS DRIVERS 6489WOLFSON MICROELECTRONICS DRIVERS
6451M: Mark Brown <broonie@opensource.wolfsonmicro.com> 6490M: Mark Brown <broonie@opensource.wolfsonmicro.com>
6452M: Ian Lartey <ian@opensource.wolfsonmicro.com> 6491M: Ian Lartey <ian@opensource.wolfsonmicro.com>
6492M: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
6493T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
6453T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus 6494T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
6454W: http://opensource.wolfsonmicro.com/node/8 6495W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
6455S: Supported 6496S: Supported
6456F: Documentation/hwmon/wm83?? 6497F: Documentation/hwmon/wm83??
6457F: drivers/leds/leds-wm83*.c 6498F: drivers/leds/leds-wm83*.c
diff --git a/Makefile b/Makefile
index c2362c273006..d3c10719bbbd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 36 3SUBLEVEL = 36
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Sheep on Meth 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7c0dfccd05bd..9103904b3dab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1102,6 +1102,20 @@ config ARM_ERRATA_720789
1102 invalidated are not, resulting in an incoherency in the system page 1102 invalidated are not, resulting in an incoherency in the system page
1103 tables. The workaround changes the TLB flushing routines to invalidate 1103 tables. The workaround changes the TLB flushing routines to invalidate
1104 entries regardless of the ASID. 1104 entries regardless of the ASID.
1105
1106config ARM_ERRATA_743622
1107 bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
1108 depends on CPU_V7
1109 help
1110 This option enables the workaround for the 743622 Cortex-A9
1111 (r2p0..r2p2) erratum. Under very rare conditions, a faulty
1112 optimisation in the Cortex-A9 Store Buffer may lead to data
1113 corruption. This workaround sets a specific bit in the diagnostic
1114 register of the Cortex-A9 which disables the Store Buffer
1115 optimisation, preventing the defect from occurring. This has no
1116 visible impact on the overall performance or power consumption of the
1117 processor.
1118
1105endmenu 1119endmenu
1106 1120
1107source "arch/arm/common/Kconfig" 1121source "arch/arm/common/Kconfig"
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8bccbfa693ff..2c1f0050c9c4 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1162{ 1162{
1163 /* 1163 /*
1164 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx 1164 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
1165 * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx 1165 * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
1166 * ALU op with S bit and Rd == 15 : 1166 * ALU op with S bit and Rd == 15 :
1167 * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx 1167 * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
1168 */ 1168 */
1169 if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */ 1169 if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
1170 (insn & 0x0ff00000) == 0x03400000 || /* Undef */
1170 (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ 1171 (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
1171 return INSN_REJECTED; 1172 return INSN_REJECTED;
1172 1173
@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1177 * *S (bit 20) updates condition codes 1178 * *S (bit 20) updates condition codes
1178 * ADC/SBC/RSC reads the C flag 1179 * ADC/SBC/RSC reads the C flag
1179 */ 1180 */
1180 insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */ 1181 insn &= 0xffff0fff; /* Rd = r0 */
1181 asi->insn[0] = insn; 1182 asi->insn[0] = insn;
1182 asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ 1183 asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
1183 emulate_alu_imm_rwflags : emulate_alu_imm_rflags; 1184 emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index c80e090b3670..ee8db152592e 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -28,17 +28,16 @@
28 28
29static inline void arch_idle(void) 29static inline void arch_idle(void)
30{ 30{
31#ifndef CONFIG_DEBUG_KERNEL
32 /* 31 /*
33 * Disable the processor clock. The processor will be automatically 32 * Disable the processor clock. The processor will be automatically
34 * re-enabled by an interrupt or by a reset. 33 * re-enabled by an interrupt or by a reset.
35 */ 34 */
36 at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK); 35 at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
37#else 36#ifndef CONFIG_CPU_ARM920T
38 /* 37 /*
39 * Set the processor (CP15) into 'Wait for Interrupt' mode. 38 * Set the processor (CP15) into 'Wait for Interrupt' mode.
40 * Unlike disabling the processor clock via the PMC (above) 39 * Post-RM9200 processors need this in conjunction with the above
41 * this allows the processor to be woken via JTAG. 40 * to save power when idle.
42 */ 41 */
43 cpu_do_idle(); 42 cpu_do_idle();
44#endif 43#endif
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 29c0a911df26..77eb35c89cd0 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -691,7 +691,7 @@ int dma_init(void)
691 691
692 memset(&gDMA, 0, sizeof(gDMA)); 692 memset(&gDMA, 0, sizeof(gDMA));
693 693
694 init_MUTEX_LOCKED(&gDMA.lock); 694 sema_init(&gDMA.lock, 0);
695 init_waitqueue_head(&gDMA.freeChannelQ); 695 init_waitqueue_head(&gDMA.freeChannelQ);
696 696
697 /* Initialize the Hardware */ 697 /* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
1574{ 1574{
1575 memset(memMap, 0, sizeof(*memMap)); 1575 memset(memMap, 0, sizeof(*memMap));
1576 1576
1577 init_MUTEX(&memMap->lock); 1577 sema_init(&memMap->lock, 1);
1578 1578
1579 return 0; 1579 return 0;
1580} 1580}
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
index 8904ca4e2e24..a696d354b1f8 100644
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ b/arch/arm/mach-ep93xx/dma-m2p.c
@@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch)
276 v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); 276 v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
277 m2p_set_control(ch, v); 277 m2p_set_control(ch, v);
278 278
279 while (m2p_channel_state(ch) == STATE_ON) 279 while (m2p_channel_state(ch) >= STATE_ON)
280 cpu_relax(); 280 cpu_relax();
281 281
282 m2p_set_control(ch, 0x0); 282 m2p_set_control(ch, 0x0);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c5c0369bb481..2f7e2728970d 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -122,6 +122,7 @@ config MACH_CPUIMX27
122 select IMX_HAVE_PLATFORM_IMX_I2C 122 select IMX_HAVE_PLATFORM_IMX_I2C
123 select IMX_HAVE_PLATFORM_IMX_UART 123 select IMX_HAVE_PLATFORM_IMX_UART
124 select IMX_HAVE_PLATFORM_MXC_NAND 124 select IMX_HAVE_PLATFORM_MXC_NAND
125 select MXC_ULPI if USB_ULPI
125 help 126 help
126 Include support for Eukrea CPUIMX27 platform. This includes 127 Include support for Eukrea CPUIMX27 platform. This includes
127 specific configurations for the module and its peripherals. 128 specific configurations for the module and its peripherals.
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 339150ab0ea5..6830afd1d2ba 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void)
259 i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices, 259 i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
260 ARRAY_SIZE(eukrea_cpuimx27_i2c_devices)); 260 ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
261 261
262 imx27_add_i2c_imx1(&cpuimx27_i2c1_data); 262 imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
263 263
264 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 264 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
265 265
diff --git a/arch/arm/mach-s5p6440/cpu.c b/arch/arm/mach-s5p6440/cpu.c
index 526f33adb31d..ec592e866054 100644
--- a/arch/arm/mach-s5p6440/cpu.c
+++ b/arch/arm/mach-s5p6440/cpu.c
@@ -19,6 +19,7 @@
19#include <linux/sysdev.h> 19#include <linux/sysdev.h>
20#include <linux/serial_core.h> 20#include <linux/serial_core.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/sched.h>
22 23
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24#include <asm/mach/map.h> 25#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
index a48fb553fd01..70ac681af72b 100644
--- a/arch/arm/mach-s5p6442/cpu.c
+++ b/arch/arm/mach-s5p6442/cpu.c
@@ -19,6 +19,7 @@
19#include <linux/sysdev.h> 19#include <linux/sysdev.h>
20#include <linux/serial_core.h> 20#include <linux/serial_core.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/sched.h>
22 23
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24#include <asm/mach/map.h> 25#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/cpu.c
index 251c92ac5b22..cd1afbce83e2 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/cpu.c
@@ -21,6 +21,7 @@
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/serial_core.h> 22#include <linux/serial_core.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/sched.h>
24 25
25#include <asm/mach/arch.h> 26#include <asm/mach/arch.h>
26#include <asm/mach/map.h> 27#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index cfecd70657cb..d562670e1b0b 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -173,11 +173,6 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
173 return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable); 173 return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
174} 174}
175 175
176static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
177{
178 return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
179}
180
181static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable) 176static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
182{ 177{
183 return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable); 178 return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 77f456c91ad3..245b82b53df4 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -19,6 +19,7 @@
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/sysdev.h> 20#include <linux/sysdev.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/sched.h>
22 23
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24#include <asm/mach/map.h> 25#include <asm/mach/map.h>
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index efb127022d42..71fb17349520 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void)
68} 68}
69 69
70#if 0 70#if 0
71static void ct_ca9x4_timer_init(void) 71static void __init ct_ca9x4_timer_init(void)
72{ 72{
73 writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); 73 writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
74 writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); 74 writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
@@ -222,7 +222,7 @@ static struct platform_device pmu_device = {
222 .resource = pmu_resources, 222 .resource = pmu_resources,
223}; 223};
224 224
225static void ct_ca9x4_init(void) 225static void __init ct_ca9x4_init(void)
226{ 226{
227 int i; 227 int i;
228 228
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 817f0ad38a0b..7eaa232180a5 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
48} 48}
49 49
50 50
51static void v2m_timer_init(void) 51static void __init v2m_timer_init(void)
52{ 52{
53 writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL); 53 writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
54 writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL); 54 writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d3..17e7b0b57e49 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
204 /* 204 /*
205 * Don't allow RAM to be mapped - this causes problems with ARMv6+ 205 * Don't allow RAM to be mapped - this causes problems with ARMv6+
206 */ 206 */
207 if (WARN_ON(pfn_valid(pfn))) 207 if (pfn_valid(pfn)) {
208 return NULL; 208 printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
209 KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
210 KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
211 WARN_ON(1);
212 }
209 213
210 type = get_mem_type(mtype); 214 type = get_mem_type(mtype);
211 if (!type) 215 if (!type)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0cd6db..e8ed9dc461fe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -248,7 +248,7 @@ static struct mem_type mem_types[] = {
248 }, 248 },
249 [MT_MEMORY] = { 249 [MT_MEMORY] = {
250 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 250 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
251 L_PTE_USER | L_PTE_EXEC, 251 L_PTE_WRITE | L_PTE_EXEC,
252 .prot_l1 = PMD_TYPE_TABLE, 252 .prot_l1 = PMD_TYPE_TABLE,
253 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
254 .domain = DOMAIN_KERNEL, 254 .domain = DOMAIN_KERNEL,
@@ -259,7 +259,7 @@ static struct mem_type mem_types[] = {
259 }, 259 },
260 [MT_MEMORY_NONCACHED] = { 260 [MT_MEMORY_NONCACHED] = {
261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
262 L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, 262 L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
263 .prot_l1 = PMD_TYPE_TABLE, 263 .prot_l1 = PMD_TYPE_TABLE,
264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
265 .domain = DOMAIN_KERNEL, 265 .domain = DOMAIN_KERNEL,
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7563ff0141bd..197f21bed5e9 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -253,6 +253,14 @@ __v7_setup:
253 orreq r10, r10, #1 << 22 @ set bit #22 253 orreq r10, r10, #1 << 22 @ set bit #22
254 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 254 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
255#endif 255#endif
256#ifdef CONFIG_ARM_ERRATA_743622
257 teq r6, #0x20 @ present in r2p0
258 teqne r6, #0x21 @ present in r2p1
259 teqne r6, #0x22 @ present in r2p2
260 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
261 orreq r10, r10, #1 << 6 @ set bit #6
262 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
263#endif
256 264
2573: mov r10, #0 2653: mov r10, #0
258#ifdef HARVARD_CACHE 266#ifdef HARVARD_CACHE
@@ -365,7 +373,7 @@ __v7_ca9mp_proc_info:
365 b __v7_ca9mp_setup 373 b __v7_ca9mp_setup
366 .long cpu_arch_name 374 .long cpu_arch_name
367 .long cpu_elf_name 375 .long cpu_elf_name
368 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 376 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
369 .long cpu_v7_name 377 .long cpu_v7_name
370 .long v7_processor_functions 378 .long v7_processor_functions
371 .long v7wbi_tlb_fns 379 .long v7wbi_tlb_fns
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index a202a2ce6e3d..6cd151b31bc5 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -320,6 +320,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
320 if ((start <= da) && (da < start + bytes)) { 320 if ((start <= da) && (da < start + bytes)) {
321 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", 321 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
322 __func__, start, da, bytes); 322 __func__, start, da, bytes);
323 iotlb_load_cr(obj, &cr);
323 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); 324 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
324 } 325 }
325 } 326 }
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 04d9521ddc9f..e8f2be2d67f2 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -435,7 +435,6 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
435static int s3c_adc_resume(struct platform_device *pdev) 435static int s3c_adc_resume(struct platform_device *pdev)
436{ 436{
437 struct adc_device *adc = platform_get_drvdata(pdev); 437 struct adc_device *adc = platform_get_drvdata(pdev);
438 unsigned long flags;
439 438
440 clk_enable(adc->clk); 439 clk_enable(adc->clk);
441 enable_irq(adc->irq); 440 enable_irq(adc->irq);
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 90a20512d68d..e8d20b0bc50e 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -48,6 +48,9 @@
48#include <plat/clock.h> 48#include <plat/clock.h>
49#include <plat/cpu.h> 49#include <plat/cpu.h>
50 50
51#include <linux/serial_core.h>
52#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
53
51/* clock information */ 54/* clock information */
52 55
53static LIST_HEAD(clocks); 56static LIST_HEAD(clocks);
@@ -65,6 +68,28 @@ static int clk_null_enable(struct clk *clk, int enable)
65 return 0; 68 return 0;
66} 69}
67 70
71static int dev_is_s3c_uart(struct device *dev)
72{
73 struct platform_device **pdev = s3c24xx_uart_devs;
74 int i;
75 for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
76 if (*pdev && dev == &(*pdev)->dev)
77 return 1;
78 return 0;
79}
80
81/*
82 * Serial drivers call get_clock() very early, before platform bus
83 * has been set up, this requires a special check to let them get
84 * a proper clock
85 */
86
87static int dev_is_platform_device(struct device *dev)
88{
89 return dev->bus == &platform_bus_type ||
90 (dev->bus == NULL && dev_is_s3c_uart(dev));
91}
92
68/* Clock API calls */ 93/* Clock API calls */
69 94
70struct clk *clk_get(struct device *dev, const char *id) 95struct clk *clk_get(struct device *dev, const char *id)
@@ -73,7 +98,7 @@ struct clk *clk_get(struct device *dev, const char *id)
73 struct clk *clk = ERR_PTR(-ENOENT); 98 struct clk *clk = ERR_PTR(-ENOENT);
74 int idno; 99 int idno;
75 100
76 if (dev == NULL || dev->bus != &platform_bus_type) 101 if (dev == NULL || !dev_is_platform_device(dev))
77 idno = -1; 102 idno = -1;
78 else 103 else
79 idno = to_platform_device(dev)->id; 104 idno = to_platform_device(dev)->id;
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 2f85412ef730..b8da7d0574d2 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t;
82 * These are used to set parameters in the core dumps. 82 * These are used to set parameters in the core dumps.
83 */ 83 */
84#define ELF_CLASS ELFCLASS32 84#define ELF_CLASS ELFCLASS32
85#if defined(__LITTLE_ENDIAN) 85#if defined(__LITTLE_ENDIAN__)
86#define ELF_DATA ELFDATA2LSB 86#define ELF_DATA ELFDATA2LSB
87#elif defined(__BIG_ENDIAN) 87#elif defined(__BIG_ENDIAN__)
88#define ELF_DATA ELFDATA2MSB 88#define ELF_DATA ELFDATA2MSB
89#else 89#else
90#error no endian defined 90#error no endian defined
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
1vmlinux.lds
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 7bbe38645ed5..a08697f0886d 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,6 +28,8 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
31asmlinkage int 33asmlinkage int
32sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 34sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
33 unsigned long r2, unsigned long r3, unsigned long r4, 35 unsigned long r2, unsigned long r3, unsigned long r4,
@@ -254,7 +256,7 @@ give_sigsegv:
254static int prev_insn(struct pt_regs *regs) 256static int prev_insn(struct pt_regs *regs)
255{ 257{
256 u16 inst; 258 u16 inst;
257 if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) 259 if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
258 return -EFAULT; 260 return -EFAULT;
259 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 261 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
260 regs->bpc -= 2; 262 regs->bpc -= 2;
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index e322d65f33a4..7dd65cfae837 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -7,6 +7,10 @@ subdir-ccflags-y := -Werror
7include arch/mips/Kbuild.platforms 7include arch/mips/Kbuild.platforms
8obj-y := $(platform-y) 8obj-y := $(platform-y)
9 9
10# make clean traverses $(obj-) without having included .config, so
11# everything ends up here
12obj- := $(platform-)
13
10# mips object files 14# mips object files
11# The object files are linked as core-y files would be linked 15# The object files are linked as core-y files would be linked
12 16
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5526faabfc21..4c9f402295dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -881,11 +881,15 @@ config NO_IOPORT
881config GENERIC_ISA_DMA 881config GENERIC_ISA_DMA
882 bool 882 bool
883 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n 883 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
884 select ISA_DMA_API
884 885
885config GENERIC_ISA_DMA_SUPPORT_BROKEN 886config GENERIC_ISA_DMA_SUPPORT_BROKEN
886 bool 887 bool
887 select GENERIC_ISA_DMA 888 select GENERIC_ISA_DMA
888 889
890config ISA_DMA_API
891 bool
892
889config GENERIC_GPIO 893config GENERIC_GPIO
890 bool 894 bool
891 895
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 5fd7f7a58b7e..5042d51b0512 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
105vmlinuz.srec: vmlinuz 105vmlinuz.srec: vmlinuz
106 $(call cmd,objcopy) 106 $(call cmd,objcopy)
107 107
108clean-files := $(objtree)/vmlinuz.* 108clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
diff --git a/arch/mips/dec/Platform b/arch/mips/dec/Platform
index 3adbcbd95db1..cf55a6f4e720 100644
--- a/arch/mips/dec/Platform
+++ b/arch/mips/dec/Platform
@@ -1,7 +1,7 @@
1# 1#
2# DECstation family 2# DECstation family
3# 3#
4platform-$(CONFIG_MACH_DECSTATION) = dec/ 4platform-$(CONFIG_MACH_DECSTATION) += dec/
5cflags-$(CONFIG_MACH_DECSTATION) += \ 5cflags-$(CONFIG_MACH_DECSTATION) += \
6 -I$(srctree)/arch/mips/include/asm/mach-dec 6 -I$(srctree)/arch/mips/include/asm/mach-dec
7libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/ 7libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h
index e482fe90fe88..75eddedcfc3e 100644
--- a/arch/mips/include/asm/fcntl.h
+++ b/arch/mips/include/asm/fcntl.h
@@ -56,6 +56,7 @@
56 */ 56 */
57 57
58#ifdef CONFIG_32BIT 58#ifdef CONFIG_32BIT
59#include <linux/types.h>
59 60
60struct flock { 61struct flock {
61 short l_type; 62 short l_type;
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
index 96e28f18dad1..1ca64b4d33d9 100644
--- a/arch/mips/include/asm/siginfo.h
+++ b/arch/mips/include/asm/siginfo.h
@@ -88,6 +88,7 @@ typedef struct siginfo {
88#ifdef __ARCH_SI_TRAPNO 88#ifdef __ARCH_SI_TRAPNO
89 int _trapno; /* TRAP # which caused the signal */ 89 int _trapno; /* TRAP # which caused the signal */
90#endif 90#endif
91 short _addr_lsb;
91 } _sigfault; 92 } _sigfault;
92 93
93 /* SIGPOLL, SIGXFSZ (To do ...) */ 94 /* SIGPOLL, SIGXFSZ (To do ...) */
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 6a97230e3d05..ba91be9c21ef 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,3 +1,3 @@
1core-$(CONFIG_MACH_JZ4740) += arch/mips/jz4740/ 1platform-$(CONFIG_MACH_JZ4740) += jz4740/
2cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740 2cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
3load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000 3load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 0176ed015c89..32103cc2a257 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs)
40 return -EFAULT; 40 return -EFAULT;
41 } 41 }
42 42
43 regs->regs[0] = 0;
44 switch (insn.i_format.opcode) { 43 switch (insn.i_format.opcode) {
45 /* 44 /*
46 * jr and jalr are in r_format format. 45 * jr and jalr are in r_format format.
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 2340f11dc29c..9a526ba6f257 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
103 if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) 103 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
104 goto out_unlock; 104 goto out_unlock;
105 105
106 retval = security_task_setscheduler(p, 0, NULL); 106 retval = security_task_setscheduler(p);
107 if (retval) 107 if (retval)
108 goto out_unlock; 108 goto out_unlock;
109 109
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c51b95ff8644..c8777333e198 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
536{ 536{
537 /* do the secure computing check first */ 537 /* do the secure computing check first */
538 if (!entryexit) 538 if (!entryexit)
539 secure_computing(regs->regs[0]); 539 secure_computing(regs->regs[2]);
540 540
541 if (unlikely(current->audit_context) && entryexit) 541 if (unlikely(current->audit_context) && entryexit)
542 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), 542 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
565 565
566out: 566out:
567 if (unlikely(current->audit_context) && !entryexit) 567 if (unlikely(current->audit_context) && !entryexit)
568 audit_syscall_entry(audit_arch(), regs->regs[0], 568 audit_syscall_entry(audit_arch(), regs->regs[2],
569 regs->regs[4], regs->regs[5], 569 regs->regs[4], regs->regs[5],
570 regs->regs[6], regs->regs[7]); 570 regs->regs[6], regs->regs[7]);
571} 571}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 584415eef8c9..fbaabad0e6e2 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -63,9 +63,9 @@ stack_done:
63 sw t0, PT_R7(sp) # set error flag 63 sw t0, PT_R7(sp) # set error flag
64 beqz t0, 1f 64 beqz t0, 1f
65 65
66 lw t1, PT_R2(sp) # syscall number
66 negu v0 # error 67 negu v0 # error
67 sw v0, PT_R0(sp) # set flag for syscall 68 sw t1, PT_R0(sp) # save it for syscall restarting
68 # restarting
691: sw v0, PT_R2(sp) # result 691: sw v0, PT_R2(sp) # result
70 70
71o32_syscall_exit: 71o32_syscall_exit:
@@ -104,9 +104,9 @@ syscall_trace_entry:
104 sw t0, PT_R7(sp) # set error flag 104 sw t0, PT_R7(sp) # set error flag
105 beqz t0, 1f 105 beqz t0, 1f
106 106
107 lw t1, PT_R2(sp) # syscall number
107 negu v0 # error 108 negu v0 # error
108 sw v0, PT_R0(sp) # set flag for syscall 109 sw t1, PT_R0(sp) # save it for syscall restarting
109 # restarting
1101: sw v0, PT_R2(sp) # result 1101: sw v0, PT_R2(sp) # result
111 111
112 j syscall_exit 112 j syscall_exit
@@ -169,8 +169,7 @@ stackargs:
169 * We probably should handle this case a bit more drastic. 169 * We probably should handle this case a bit more drastic.
170 */ 170 */
171bad_stack: 171bad_stack:
172 negu v0 # error 172 li v0, EFAULT
173 sw v0, PT_R0(sp)
174 sw v0, PT_R2(sp) 173 sw v0, PT_R2(sp)
175 li t0, 1 # set error flag 174 li t0, 1 # set error flag
176 sw t0, PT_R7(sp) 175 sw t0, PT_R7(sp)
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 5573f8e4e326..3f4179283207 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
66 sd t0, PT_R7(sp) # set error flag 66 sd t0, PT_R7(sp) # set error flag
67 beqz t0, 1f 67 beqz t0, 1f
68 68
69 ld t1, PT_R2(sp) # syscall number
69 dnegu v0 # error 70 dnegu v0 # error
70 sd v0, PT_R0(sp) # set flag for syscall 71 sd t1, PT_R0(sp) # save it for syscall restarting
71 # restarting
721: sd v0, PT_R2(sp) # result 721: sd v0, PT_R2(sp) # result
73 73
74n64_syscall_exit: 74n64_syscall_exit:
@@ -109,8 +109,9 @@ syscall_trace_entry:
109 sd t0, PT_R7(sp) # set error flag 109 sd t0, PT_R7(sp) # set error flag
110 beqz t0, 1f 110 beqz t0, 1f
111 111
112 ld t1, PT_R2(sp) # syscall number
112 dnegu v0 # error 113 dnegu v0 # error
113 sd v0, PT_R0(sp) # set flag for syscall restarting 114 sd t1, PT_R0(sp) # save it for syscall restarting
1141: sd v0, PT_R2(sp) # result 1151: sd v0, PT_R2(sp) # result
115 116
116 j syscall_exit 117 j syscall_exit
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 1e38ec97672e..f08ece6d8acc 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
65 sd t0, PT_R7(sp) # set error flag 65 sd t0, PT_R7(sp) # set error flag
66 beqz t0, 1f 66 beqz t0, 1f
67 67
68 ld t1, PT_R2(sp) # syscall number
68 dnegu v0 # error 69 dnegu v0 # error
69 sd v0, PT_R0(sp) # set flag for syscall restarting 70 sd t1, PT_R0(sp) # save it for syscall restarting
701: sd v0, PT_R2(sp) # result 711: sd v0, PT_R2(sp) # result
71 72
72 local_irq_disable # make sure need_resched and 73 local_irq_disable # make sure need_resched and
@@ -106,8 +107,9 @@ n32_syscall_trace_entry:
106 sd t0, PT_R7(sp) # set error flag 107 sd t0, PT_R7(sp) # set error flag
107 beqz t0, 1f 108 beqz t0, 1f
108 109
110 ld t1, PT_R2(sp) # syscall number
109 dnegu v0 # error 111 dnegu v0 # error
110 sd v0, PT_R0(sp) # set flag for syscall restarting 112 sd t1, PT_R0(sp) # save it for syscall restarting
1111: sd v0, PT_R2(sp) # result 1131: sd v0, PT_R2(sp) # result
112 114
113 j syscall_exit 115 j syscall_exit
@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table)
320 PTR sys_cacheflush 322 PTR sys_cacheflush
321 PTR sys_cachectl 323 PTR sys_cachectl
322 PTR sys_sysmips 324 PTR sys_sysmips
323 PTR sys_io_setup /* 6200 */ 325 PTR compat_sys_io_setup /* 6200 */
324 PTR sys_io_destroy 326 PTR sys_io_destroy
325 PTR sys_io_getevents 327 PTR compat_sys_io_getevents
326 PTR sys_io_submit 328 PTR compat_sys_io_submit
327 PTR sys_io_cancel 329 PTR sys_io_cancel
328 PTR sys_exit_group /* 6205 */ 330 PTR sys_exit_group /* 6205 */
329 PTR sys_lookup_dcookie 331 PTR sys_lookup_dcookie
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 171979fc98e5..78d768a3e19d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp)
93 sd t0, PT_R7(sp) # set error flag 93 sd t0, PT_R7(sp) # set error flag
94 beqz t0, 1f 94 beqz t0, 1f
95 95
96 ld t1, PT_R2(sp) # syscall number
96 dnegu v0 # error 97 dnegu v0 # error
97 sd v0, PT_R0(sp) # flag for syscall restarting 98 sd t1, PT_R0(sp) # save it for syscall restarting
981: sd v0, PT_R2(sp) # result 991: sd v0, PT_R2(sp) # result
99 100
100o32_syscall_exit: 101o32_syscall_exit:
@@ -142,8 +143,9 @@ trace_a_syscall:
142 sd t0, PT_R7(sp) # set error flag 143 sd t0, PT_R7(sp) # set error flag
143 beqz t0, 1f 144 beqz t0, 1f
144 145
146 ld t1, PT_R2(sp) # syscall number
145 dnegu v0 # error 147 dnegu v0 # error
146 sd v0, PT_R0(sp) # set flag for syscall restarting 148 sd t1, PT_R0(sp) # save it for syscall restarting
1471: sd v0, PT_R2(sp) # result 1491: sd v0, PT_R2(sp) # result
148 150
149 j syscall_exit 151 j syscall_exit
@@ -154,8 +156,7 @@ trace_a_syscall:
154 * The stackpointer for a call with more than 4 arguments is bad. 156 * The stackpointer for a call with more than 4 arguments is bad.
155 */ 157 */
156bad_stack: 158bad_stack:
157 dnegu v0 # error 159 li v0, EFAULT
158 sd v0, PT_R0(sp)
159 sd v0, PT_R2(sp) 160 sd v0, PT_R2(sp)
160 li t0, 1 # set error flag 161 li t0, 1 # set error flag
161 sd t0, PT_R7(sp) 162 sd t0, PT_R7(sp)
@@ -444,10 +445,10 @@ sys_call_table:
444 PTR compat_sys_futex 445 PTR compat_sys_futex
445 PTR compat_sys_sched_setaffinity 446 PTR compat_sys_sched_setaffinity
446 PTR compat_sys_sched_getaffinity /* 4240 */ 447 PTR compat_sys_sched_getaffinity /* 4240 */
447 PTR sys_io_setup 448 PTR compat_sys_io_setup
448 PTR sys_io_destroy 449 PTR sys_io_destroy
449 PTR sys_io_getevents 450 PTR compat_sys_io_getevents
450 PTR sys_io_submit 451 PTR compat_sys_io_submit
451 PTR sys_io_cancel /* 4245 */ 452 PTR sys_io_cancel /* 4245 */
452 PTR sys_exit_group 453 PTR sys_exit_group
453 PTR sys32_lookup_dcookie 454 PTR sys32_lookup_dcookie
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2099d5a4c4b7..5922342bca39 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
390{ 390{
391 struct rt_sigframe __user *frame; 391 struct rt_sigframe __user *frame;
392 sigset_t set; 392 sigset_t set;
393 stack_t st;
394 int sig; 393 int sig;
395 394
396 frame = (struct rt_sigframe __user *) regs.regs[29]; 395 frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
411 else if (sig) 410 else if (sig)
412 force_sig(sig, current); 411 force_sig(sig, current);
413 412
414 if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
415 goto badframe;
416 /* It is more difficult to avoid calling this function than to 413 /* It is more difficult to avoid calling this function than to
417 call it and ignore errors. */ 414 call it and ignore errors. */
418 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); 415 do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
419 416
420 /* 417 /*
421 * Don't let your children do this ... 418 * Don't let your children do this ...
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
550 struct mips_abi *abi = current->thread.abi; 547 struct mips_abi *abi = current->thread.abi;
551 void *vdso = current->mm->context.vdso; 548 void *vdso = current->mm->context.vdso;
552 549
553 switch(regs->regs[0]) { 550 if (regs->regs[0]) {
554 case ERESTART_RESTARTBLOCK: 551 switch(regs->regs[2]) {
555 case ERESTARTNOHAND: 552 case ERESTART_RESTARTBLOCK:
556 regs->regs[2] = EINTR; 553 case ERESTARTNOHAND:
557 break;
558 case ERESTARTSYS:
559 if (!(ka->sa.sa_flags & SA_RESTART)) {
560 regs->regs[2] = EINTR; 554 regs->regs[2] = EINTR;
561 break; 555 break;
556 case ERESTARTSYS:
557 if (!(ka->sa.sa_flags & SA_RESTART)) {
558 regs->regs[2] = EINTR;
559 break;
560 }
561 /* fallthrough */
562 case ERESTARTNOINTR:
563 regs->regs[7] = regs->regs[26];
564 regs->regs[2] = regs->regs[0];
565 regs->cp0_epc -= 4;
562 } 566 }
563 /* fallthrough */
564 case ERESTARTNOINTR: /* Userland will reload $v0. */
565 regs->regs[7] = regs->regs[26];
566 regs->cp0_epc -= 8;
567 }
568 567
569 regs->regs[0] = 0; /* Don't deal with this again. */ 568 regs->regs[0] = 0; /* Don't deal with this again. */
569 }
570 570
571 if (sig_uses_siginfo(ka)) 571 if (sig_uses_siginfo(ka))
572 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, 572 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
575 ret = abi->setup_frame(vdso + abi->signal_return_offset, 575 ret = abi->setup_frame(vdso + abi->signal_return_offset,
576 ka, regs, sig, oldset); 576 ka, regs, sig, oldset);
577 577
578 if (ret)
579 return ret;
580
578 spin_lock_irq(&current->sighand->siglock); 581 spin_lock_irq(&current->sighand->siglock);
579 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 582 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
580 if (!(ka->sa.sa_flags & SA_NODEFER)) 583 if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs)
622 return; 625 return;
623 } 626 }
624 627
625 /*
626 * Who's code doesn't conform to the restartable syscall convention
627 * dies here!!! The li instruction, a single machine instruction,
628 * must directly be followed by the syscall instruction.
629 */
630 if (regs->regs[0]) { 628 if (regs->regs[0]) {
631 if (regs->regs[2] == ERESTARTNOHAND || 629 if (regs->regs[2] == ERESTARTNOHAND ||
632 regs->regs[2] == ERESTARTSYS || 630 regs->regs[2] == ERESTARTSYS ||
633 regs->regs[2] == ERESTARTNOINTR) { 631 regs->regs[2] == ERESTARTNOINTR) {
632 regs->regs[2] = regs->regs[0];
634 regs->regs[7] = regs->regs[26]; 633 regs->regs[7] = regs->regs[26];
635 regs->cp0_epc -= 8; 634 regs->cp0_epc -= 4;
636 } 635 }
637 if (regs->regs[2] == ERESTART_RESTARTBLOCK) { 636 if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
638 regs->regs[2] = current->thread.abi->restart; 637 regs->regs[2] = current->thread.abi->restart;
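
Both hunks above implement the new restart scheme: the assembly entry paths stash the syscall number itself in PT_R0 (via t1) rather than a bare flag, so the signal code can reload it into $v0 and back the EPC up by a single instruction (4 bytes) instead of assuming a two-instruction "li v0, nr; syscall" pair as the old epc -= 8 did. Condensed, the restart sequence is (restating the patch, not new logic):

        if (regs->regs[0]) {                    /* inside an interrupted syscall */
                regs->regs[2] = regs->regs[0];  /* reload the saved syscall number */
                regs->regs[7] = regs->regs[26]; /* restore the original $a3 */
                regs->cp0_epc -= 4;             /* re-execute just the syscall insn */
        }
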
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 2c5df818c65a..ee24d814d5b9 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
109asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) 109asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
110{ 110{
111 struct rt_sigframe_n32 __user *frame; 111 struct rt_sigframe_n32 __user *frame;
112 mm_segment_t old_fs;
112 sigset_t set; 113 sigset_t set;
113 stack_t st; 114 stack_t st;
114 s32 sp; 115 s32 sp;
@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
143 144
144 /* It is more difficult to avoid calling this function than to 145 /* It is more difficult to avoid calling this function than to
145 call it and ignore errors. */ 146 call it and ignore errors. */
147 old_fs = get_fs();
148 set_fs(KERNEL_DS);
146 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); 149 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
150 set_fs(old_fs);
151
147 152
148 /* 153 /*
149 * Don't let your children do this ... 154 * Don't let your children do this ...
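
Unlike the 64-bit sigreturn above, here the stack_t has already been copied into the kernel buffer st, so the (stack_t __user *) cast lies to do_sigaltstack(); the added set_fs(KERNEL_DS) bracket makes the address-limit check accept the kernel pointer. The idiom in isolation, as used by the patch:

        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* let __user-checked accessors take a kernel pointer */
        do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
        set_fs(old_fs);         /* always restore the previous limit */
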
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 69b039ca8d83..33d5a5ce4a29 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -109,8 +109,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
109 unsigned long value; 109 unsigned long value;
110 unsigned int res; 110 unsigned int res;
111 111
112 regs->regs[0] = 0;
113
114 /* 112 /*
115 * This load never faults. 113 * This load never faults.
116 */ 114 */
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 0c46e398cd8f..63c740a85b4c 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -40,6 +40,11 @@ static char *mixer = HOSTAUDIO_DEV_MIXER;
40" This is used to specify the host mixer device to the hostaudio driver.\n"\ 40" This is used to specify the host mixer device to the hostaudio driver.\n"\
41" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n" 41" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
42 42
43module_param(dsp, charp, 0644);
44MODULE_PARM_DESC(dsp, DSP_HELP);
45module_param(mixer, charp, 0644);
46MODULE_PARM_DESC(mixer, MIXER_HELP);
47
43#ifndef MODULE 48#ifndef MODULE
44static int set_dsp(char *name, int *add) 49static int set_dsp(char *name, int *add)
45{ 50{
@@ -56,15 +61,6 @@ static int set_mixer(char *name, int *add)
56} 61}
57 62
58__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP); 63__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
59
60#else /*MODULE*/
61
62module_param(dsp, charp, 0644);
63MODULE_PARM_DESC(dsp, DSP_HELP);
64
65module_param(mixer, charp, 0644);
66MODULE_PARM_DESC(mixer, MIXER_HELP);
67
68#endif 64#endif
69 65
70/* /dev/dsp file operations */ 66/* /dev/dsp file operations */
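
Moving module_param() out of the #else /*MODULE*/ branch means the parameters exist whether the driver is modular or built in; built in, they should be reachable as hostaudio.mixer= style options on the kernel command line, while the __uml_setup() hooks keep serving UML's own option parser. The declaration pattern, as in the patch:

        static char *mixer = HOSTAUDIO_DEV_MIXER;

        module_param(mixer, charp, 0644);       /* world-readable sysfs node */
        MODULE_PARM_DESC(mixer, MIXER_HELP);
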
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 1bcd208c459f..9734994cba1e 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -163,6 +163,7 @@ struct ubd {
163 struct scatterlist sg[MAX_SG]; 163 struct scatterlist sg[MAX_SG];
164 struct request *request; 164 struct request *request;
165 int start_sg, end_sg; 165 int start_sg, end_sg;
166 sector_t rq_pos;
166}; 167};
167 168
168#define DEFAULT_COW { \ 169#define DEFAULT_COW { \
@@ -187,6 +188,7 @@ struct ubd {
187 .request = NULL, \ 188 .request = NULL, \
188 .start_sg = 0, \ 189 .start_sg = 0, \
189 .end_sg = 0, \ 190 .end_sg = 0, \
191 .rq_pos = 0, \
190} 192}
191 193
192/* Protected by ubd_lock */ 194/* Protected by ubd_lock */
@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
1228{ 1230{
1229 struct io_thread_req *io_req; 1231 struct io_thread_req *io_req;
1230 struct request *req; 1232 struct request *req;
1231 sector_t sector;
1232 int n; 1233 int n;
1233 1234
1234 while(1){ 1235 while(1){
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
1239 return; 1240 return;
1240 1241
1241 dev->request = req; 1242 dev->request = req;
1243 dev->rq_pos = blk_rq_pos(req);
1242 dev->start_sg = 0; 1244 dev->start_sg = 0;
1243 dev->end_sg = blk_rq_map_sg(q, req, dev->sg); 1245 dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
1244 } 1246 }
1245 1247
1246 req = dev->request; 1248 req = dev->request;
1247 sector = blk_rq_pos(req);
1248 while(dev->start_sg < dev->end_sg){ 1249 while(dev->start_sg < dev->end_sg){
1249 struct scatterlist *sg = &dev->sg[dev->start_sg]; 1250 struct scatterlist *sg = &dev->sg[dev->start_sg];
1250 1251
@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
1256 return; 1257 return;
1257 } 1258 }
1258 prepare_request(req, io_req, 1259 prepare_request(req, io_req,
1259 (unsigned long long)sector << 9, 1260 (unsigned long long)dev->rq_pos << 9,
1260 sg->offset, sg->length, sg_page(sg)); 1261 sg->offset, sg->length, sg_page(sg));
1261 1262
1262 sector += sg->length >> 9;
1263 n = os_write_file(thread_fd, &io_req, 1263 n = os_write_file(thread_fd, &io_req,
1264 sizeof(struct io_thread_req *)); 1264 sizeof(struct io_thread_req *));
1265 if(n != sizeof(struct io_thread_req *)){ 1265 if(n != sizeof(struct io_thread_req *)){
@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
1272 return; 1272 return;
1273 } 1273 }
1274 1274
1275 dev->rq_pos += sg->length >> 9;
1275 dev->start_sg++; 1276 dev->start_sg++;
1276 } 1277 }
1277 dev->end_sg = 0; 1278 dev->end_sg = 0;
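
do_ubd_request() may hand only some of a request's sg entries to the I/O thread before bailing out (for instance when the pipe to the thread fills) and is then re-entered later for the same request. Recomputing the sector from blk_rq_pos(req) on every entry restarted from the beginning; persisting it in dev->rq_pos keeps the progress. The flow, condensed from the patch (io_offset is a placeholder name for the byte offset handed to prepare_request):

        dev->rq_pos = blk_rq_pos(req);          /* once, when a request is adopted */
        /* ... then, per sg segment ... */
        io_offset = (unsigned long long)dev->rq_pos << 9;  /* sectors -> bytes */
        /* ... segment successfully queued to the io thread ... */
        dev->rq_pos += sg->length >> 9;         /* advance only after success */
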
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 0350311906ae..2d93bdbc9ac0 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -34,7 +34,7 @@
34#include <asm/ia32.h> 34#include <asm/ia32.h>
35 35
36#undef WARN_OLD 36#undef WARN_OLD
37#undef CORE_DUMP /* probably broken */ 37#undef CORE_DUMP /* definitely broken */
38 38
39static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs); 39static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
40static int load_aout_library(struct file *); 40static int load_aout_library(struct file *);
@@ -131,21 +131,15 @@ static void set_brk(unsigned long start, unsigned long end)
131 * macros to write out all the necessary info. 131 * macros to write out all the necessary info.
132 */ 132 */
133 133
134static int dump_write(struct file *file, const void *addr, int nr) 134#include <linux/coredump.h>
135{
136 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
137}
138 135
139#define DUMP_WRITE(addr, nr) \ 136#define DUMP_WRITE(addr, nr) \
140 if (!dump_write(file, (void *)(addr), (nr))) \ 137 if (!dump_write(file, (void *)(addr), (nr))) \
141 goto end_coredump; 138 goto end_coredump;
142 139
143#define DUMP_SEEK(offset) \ 140#define DUMP_SEEK(offset) \
144 if (file->f_op->llseek) { \ 141 if (!dump_seek(file, offset)) \
145 if (file->f_op->llseek(file, (offset), 0) != (offset)) \ 142 goto end_coredump;
146 goto end_coredump; \
147 } else \
148 file->f_pos = (offset)
149 143
150#define START_DATA() (u.u_tsize << PAGE_SHIFT) 144#define START_DATA() (u.u_tsize << PAGE_SHIFT)
151#define START_STACK(u) (u.start_stack) 145#define START_STACK(u) (u.start_stack)
@@ -217,12 +211,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
217 dump_size = dump.u_ssize << PAGE_SHIFT; 211 dump_size = dump.u_ssize << PAGE_SHIFT;
218 DUMP_WRITE(dump_start, dump_size); 212 DUMP_WRITE(dump_start, dump_size);
219 } 213 }
220 /*
221 * Finally dump the task struct. Not be used by gdb, but
222 * could be useful
223 */
224 set_fs(KERNEL_DS);
225 DUMP_WRITE(current, sizeof(*current));
226end_coredump: 214end_coredump:
227 set_fs(fs); 215 set_fs(fs);
228 return has_dumped; 216 return has_dumped;
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 5af2982133b5..f16a2caca1e0 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index cb030374b90a..916bc8111a01 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009 Advanced Micro Devices, Inc. 2 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 08616180deaf..e3509fc303bf 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -416,13 +416,22 @@ struct amd_iommu {
416 struct dma_ops_domain *default_dom; 416 struct dma_ops_domain *default_dom;
417 417
418 /* 418 /*
419 * This array is required to work around a potential BIOS bug. 419 * We can't rely on the BIOS to restore all values on reinit, so we
420 * The BIOS may miss to restore parts of the PCI configuration 420 * need to stash them
421 * space when the system resumes from S3. The result is that the
422 * IOMMU does not execute commands anymore which leads to system
423 * failure.
424 */ 421 */
425 u32 cache_cfg[4]; 422
423 /* The iommu BAR */
424 u32 stored_addr_lo;
425 u32 stored_addr_hi;
426
427 /*
428 * Each iommu has 6 l1s, each of which is documented as having 0x12
429 * registers
430 */
431 u32 stored_l1[6][0x12];
432
433 /* The l2 indirect registers */
434 u32 stored_l2[0x83];
426}; 435};
427 436
428/* 437/*
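
For scale, the state stashed by the new fields is modest; restating the array bounds as arithmetic:

        /* per-IOMMU footprint of the stashed registers:
         *   stored_l1: 6 * 0x12 * sizeof(u32) = 6 * 18 * 4 = 432 bytes
         *   stored_l2:      0x83 * sizeof(u32) =   131 * 4 = 524 bytes
         * i.e. just under 1 KB per IOMMU for a reliable resume.
         */
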
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f33fc1..bf357f9b25f0 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -17,6 +17,7 @@ extern int fix_aperture;
17#define GARTEN (1<<0) 17#define GARTEN (1<<0)
18#define DISGARTCPU (1<<4) 18#define DISGARTCPU (1<<4)
19#define DISGARTIO (1<<5) 19#define DISGARTIO (1<<5)
20#define DISTLBWALKPRB (1<<6)
20 21
21/* GART cache control register bits. */ 22/* GART cache control register bits. */
22#define INVGART (1<<0) 23#define INVGART (1<<0)
@@ -27,7 +28,6 @@ extern int fix_aperture;
27#define AMD64_GARTAPERTUREBASE 0x94 28#define AMD64_GARTAPERTUREBASE 0x94
28#define AMD64_GARTTABLEBASE 0x98 29#define AMD64_GARTTABLEBASE 0x98
29#define AMD64_GARTCACHECTL 0x9c 30#define AMD64_GARTCACHECTL 0x9c
30#define AMD64_GARTEN (1<<0)
31 31
32#ifdef CONFIG_GART_IOMMU 32#ifdef CONFIG_GART_IOMMU
33extern int gart_iommu_aperture; 33extern int gart_iommu_aperture;
@@ -57,6 +57,19 @@ static inline void gart_iommu_hole_init(void)
57 57
58extern int agp_amd64_init(void); 58extern int agp_amd64_init(void);
59 59
60static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
61{
62 u32 ctl;
63
64 /*
65 * Don't enable translation but enable GART IO and CPU accesses.
66 * Also, set DISTLBWALKPRB since GART tables memory is UC.
67 */
68 ctl = DISTLBWALKPRB | order << 1;
69
70 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
71}
72
60static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) 73static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
61{ 74{
62 u32 tmp, ctl; 75 u32 tmp, ctl;
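
gart_set_size_and_enable() leaves GARTEN (bit 0) clear, puts the aperture order in the size field (bits 1-3, matching the aper_order = (ctl >> 1) & 7 decode used in aperture_64.c below), and sets DISTLBWALKPRB (bit 6) because the GART tables live in uncacheable memory. A worked example, assuming a 256 MB aperture:

        /* aperture size = 32 MB << order, so 256 MB is order 3 */
        u32 order = 3;
        u32 ctl   = DISTLBWALKPRB | (order << 1);
        /* ctl = (1 << 6) | (3 << 1) = 0x40 | 0x06 = 0x46;
         * GARTEN (bit 0) stays clear, so translation is not yet enabled */
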
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 502e53f999cf..c52e2eb40a1e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
652 return (struct kvm_mmu_page *)page_private(page); 652 return (struct kvm_mmu_page *)page_private(page);
653} 653}
654 654
655static inline u16 kvm_read_fs(void)
656{
657 u16 seg;
658 asm("mov %%fs, %0" : "=g"(seg));
659 return seg;
660}
661
662static inline u16 kvm_read_gs(void)
663{
664 u16 seg;
665 asm("mov %%gs, %0" : "=g"(seg));
666 return seg;
667}
668
669static inline u16 kvm_read_ldt(void) 655static inline u16 kvm_read_ldt(void)
670{ 656{
671 u16 ldt; 657 u16 ldt;
@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void)
673 return ldt; 659 return ldt;
674} 660}
675 661
676static inline void kvm_load_fs(u16 sel)
677{
678 asm("mov %0, %%fs" : : "rm"(sel));
679}
680
681static inline void kvm_load_gs(u16 sel)
682{
683 asm("mov %0, %%gs" : : "rm"(sel));
684}
685
686static inline void kvm_load_ldt(u16 sel) 662static inline void kvm_load_ldt(u16 sel)
687{ 663{
688 asm("lldt %0" : : "rm"(sel)); 664 asm("lldt %0" : : "rm"(sel));
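
The deleted kvm_read_fs/kvm_load_fs/kvm_read_gs/kvm_load_gs wrappers open-coded segment moves; the rest of this series switches callers to the kernel's generic x86 helpers instead. Usage sketch (savesegment/loadsegment are the real macros from the x86 headers; the exact details here are from memory, treat them as illustrative):

        u16 sel;

        savesegment(gs, sel);   /* read the live %gs selector into sel */
        /* ... */
        loadsegment(fs, sel);   /* reload %fs, with exception fixup for a
                                 * selector that faults on load */
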
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 679b6450382b..d2fdb0826df2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 5a170cbbbed8..3cb482e123de 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -194,6 +194,39 @@ static inline unsigned long tbl_size(int entry_size)
194 return 1UL << shift; 194 return 1UL << shift;
195} 195}
196 196
197/* Access to l1 and l2 indexed register spaces */
198
199static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
200{
201 u32 val;
202
203 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
204 pci_read_config_dword(iommu->dev, 0xfc, &val);
205 return val;
206}
207
208static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
209{
210 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
211 pci_write_config_dword(iommu->dev, 0xfc, val);
212 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
213}
214
215static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
216{
217 u32 val;
218
219 pci_write_config_dword(iommu->dev, 0xf0, address);
220 pci_read_config_dword(iommu->dev, 0xf4, &val);
221 return val;
222}
223
224static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
225{
226 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
227 pci_write_config_dword(iommu->dev, 0xf4, val);
228}
229
197/**************************************************************************** 230/****************************************************************************
198 * 231 *
199 * AMD IOMMU MMIO register space handling functions 232 * AMD IOMMU MMIO register space handling functions
@@ -619,6 +652,7 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
619{ 652{
620 int cap_ptr = iommu->cap_ptr; 653 int cap_ptr = iommu->cap_ptr;
621 u32 range, misc; 654 u32 range, misc;
655 int i, j;
622 656
623 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 657 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
624 &iommu->cap); 658 &iommu->cap);
@@ -633,12 +667,29 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
633 MMIO_GET_LD(range)); 667 MMIO_GET_LD(range));
634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 668 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
635 669
636 if (is_rd890_iommu(iommu->dev)) { 670 if (!is_rd890_iommu(iommu->dev))
637 pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); 671 return;
638 pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); 672
639 pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); 673 /*
640 pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); 674 * Some rd890 systems may not be fully reconfigured by the BIOS, so
641 } 675 * it's necessary for us to store this information so it can be
676 * reprogrammed on resume
677 */
678
679 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
680 &iommu->stored_addr_lo);
681 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
682 &iommu->stored_addr_hi);
683
684 /* Low bit locks writes to configuration space */
685 iommu->stored_addr_lo &= ~1;
686
687 for (i = 0; i < 6; i++)
688 for (j = 0; j < 0x12; j++)
689 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
690
691 for (i = 0; i < 0x83; i++)
692 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
642} 693}
643 694
644/* 695/*
@@ -1127,14 +1178,53 @@ static void iommu_init_flags(struct amd_iommu *iommu)
1127 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 1178 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1128} 1179}
1129 1180
1130static void iommu_apply_quirks(struct amd_iommu *iommu) 1181static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1131{ 1182{
1132 if (is_rd890_iommu(iommu->dev)) { 1183 int i, j;
1133 pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); 1184 u32 ioc_feature_control;
1134 pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); 1185 struct pci_dev *pdev = NULL;
1135 pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); 1186
1136 pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); 1187 /* RD890 BIOSes may not have completely reconfigured the iommu */
1137 } 1188 if (!is_rd890_iommu(iommu->dev))
1189 return;
1190
1191 /*
1192 * First, we need to ensure that the iommu is enabled. This is
1193 * controlled by a register in the northbridge
1194 */
1195 pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
1196
1197 if (!pdev)
1198 return;
1199
1200 /* Select Northbridge indirect register 0x75 and enable writing */
1201 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
1202 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
1203
1204 /* Enable the iommu */
1205 if (!(ioc_feature_control & 0x1))
1206 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1207
1208 pci_dev_put(pdev);
1209
1210 /* Restore the iommu BAR */
1211 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1212 iommu->stored_addr_lo);
1213 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
1214 iommu->stored_addr_hi);
1215
1216 /* Restore the l1 indirect regs for each of the 6 l1s */
1217 for (i = 0; i < 6; i++)
1218 for (j = 0; j < 0x12; j++)
1219 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
1220
1221 /* Restore the l2 indirect regs */
1222 for (i = 0; i < 0x83; i++)
1223 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
1224
1225 /* Lock PCI setup registers */
1226 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1227 iommu->stored_addr_lo | 1);
1138} 1228}
1139 1229
1140/* 1230/*
@@ -1147,7 +1237,6 @@ static void enable_iommus(void)
1147 1237
1148 for_each_iommu(iommu) { 1238 for_each_iommu(iommu) {
1149 iommu_disable(iommu); 1239 iommu_disable(iommu);
1150 iommu_apply_quirks(iommu);
1151 iommu_init_flags(iommu); 1240 iommu_init_flags(iommu);
1152 iommu_set_device_table(iommu); 1241 iommu_set_device_table(iommu);
1153 iommu_enable_command_buffer(iommu); 1242 iommu_enable_command_buffer(iommu);
@@ -1173,6 +1262,11 @@ static void disable_iommus(void)
1173 1262
1174static int amd_iommu_resume(struct sys_device *dev) 1263static int amd_iommu_resume(struct sys_device *dev)
1175{ 1264{
1265 struct amd_iommu *iommu;
1266
1267 for_each_iommu(iommu)
1268 iommu_apply_resume_quirks(iommu);
1269
1176 /* re-load the hardware */ 1270 /* re-load the hardware */
1177 enable_iommus(); 1271 enable_iommus();
1178 1272
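
The l1/l2 helpers added earlier in this file use a classic index/data pair in PCI config space: write the target address into one dword register, then move the value through its neighbour. The encoding, restated from the code above:

        /* l1 index register at config offset 0xf8 (data at 0xfc):
         *   bits  0-7  : register address inside the selected l1
         *   bits 16-18 : which of the six l1s
         *   bit  31    : write-enable
         */
        u32 idx = address | (l1 << 16) | (1 << 31);     /* arm a write */
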
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index a2e0caf26e17..c9cb17368448 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -307,7 +307,7 @@ void __init early_gart_iommu_check(void)
307 continue; 307 continue;
308 308
309 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); 309 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
310 aper_enabled = ctl & AMD64_GARTEN; 310 aper_enabled = ctl & GARTEN;
311 aper_order = (ctl >> 1) & 7; 311 aper_order = (ctl >> 1) & 7;
312 aper_size = (32 * 1024 * 1024) << aper_order; 312 aper_size = (32 * 1024 * 1024) << aper_order;
313 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; 313 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
@@ -362,7 +362,7 @@ void __init early_gart_iommu_check(void)
362 continue; 362 continue;
363 363
364 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); 364 ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
365 ctl &= ~AMD64_GARTEN; 365 ctl &= ~GARTEN;
366 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); 366 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
367 } 367 }
368 } 368 }
@@ -505,8 +505,13 @@ out:
505 505
506 /* Fix up the north bridges */ 506 /* Fix up the north bridges */
507 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { 507 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
508 int bus; 508 int bus, dev_base, dev_limit;
509 int dev_base, dev_limit; 509
510 /*
511 * Don't enable translation yet but enable GART IO and CPU
512 * accesses and set DISTLBWALKPRB since GART table memory is UC.
513 */
514 u32 ctl = DISTLBWALKPRB | aper_order << 1;
510 515
511 bus = bus_dev_ranges[i].bus; 516 bus = bus_dev_ranges[i].bus;
512 dev_base = bus_dev_ranges[i].dev_base; 517 dev_base = bus_dev_ranges[i].dev_base;
@@ -515,10 +520,7 @@ out:
515 if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) 520 if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
516 continue; 521 continue;
517 522
518 /* Don't enable translation yet. That is done later. 523 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
519 Assume this BIOS didn't initialise the GART so
520 just overwrite all previous bits */
521 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1);
522 write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); 524 write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25);
523 } 525 }
524 } 526 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5e975298fa81..39aaee5c1ab2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
141 address = (low & MASK_BLKPTR_LO) >> 21; 141 address = (low & MASK_BLKPTR_LO) >> 21;
142 if (!address) 142 if (!address)
143 break; 143 break;
144
144 address += MCG_XBLK_ADDR; 145 address += MCG_XBLK_ADDR;
145 } else 146 } else
146 ++address; 147 ++address;
@@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
148 if (rdmsr_safe(address, &low, &high)) 149 if (rdmsr_safe(address, &low, &high))
149 break; 150 break;
150 151
151 if (!(high & MASK_VALID_HI)) { 152 if (!(high & MASK_VALID_HI))
152 if (block) 153 continue;
153 continue;
154 else
155 break;
156 }
157 154
158 if (!(high & MASK_CNTP_HI) || 155 if (!(high & MASK_CNTP_HI) ||
159 (high & MASK_LOCKED_HI)) 156 (high & MASK_LOCKED_HI))
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d9368eeda309..169d8804a9f8 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -216,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
216 err = sysfs_add_file_to_group(&sys_dev->kobj, 216 err = sysfs_add_file_to_group(&sys_dev->kobj,
217 &attr_core_power_limit_count.attr, 217 &attr_core_power_limit_count.attr,
218 thermal_attr_group.name); 218 thermal_attr_group.name);
219 if (cpu_has(c, X86_FEATURE_PTS)) 219 if (cpu_has(c, X86_FEATURE_PTS)) {
220 err = sysfs_add_file_to_group(&sys_dev->kobj, 220 err = sysfs_add_file_to_group(&sys_dev->kobj,
221 &attr_package_throttle_count.attr, 221 &attr_package_throttle_count.attr,
222 thermal_attr_group.name); 222 thermal_attr_group.name);
@@ -224,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
224 err = sysfs_add_file_to_group(&sys_dev->kobj, 224 err = sysfs_add_file_to_group(&sys_dev->kobj,
225 &attr_package_power_limit_count.attr, 225 &attr_package_power_limit_count.attr,
226 thermal_attr_group.name); 226 thermal_attr_group.name);
227 }
227 228
228 return err; 229 return err;
229} 230}
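
The hunk above is the classic missing-braces bug: without them only the first statement is guarded, and the second runs unconditionally. In miniature (add_first_file/add_second_file are placeholder names):

        if (cpu_has_feature)
                err = add_first_file();
                err = add_second_file();        /* oops: runs for every CPU */

        if (cpu_has_feature) {                  /* fixed */
                err = add_first_file();
                err = add_second_file();
        }
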
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 0f7f130caa67..6015ee13e22b 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -601,7 +601,7 @@ static void gart_fixup_northbridges(struct sys_device *dev)
601 * Don't enable translations just yet. That is the next 601 * Don't enable translations just yet. That is the next
602 * step. Restore the pre-suspend aperture settings. 602 * step. Restore the pre-suspend aperture settings.
603 */ 603 */
604 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1); 604 gart_set_size_and_enable(dev, aperture_order);
605 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); 605 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
606 } 606 }
607} 607}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bc5b9b8d4a33..8a3f9f64f86f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm)
766 766
767 control->iopm_base_pa = iopm_base; 767 control->iopm_base_pa = iopm_base;
768 control->msrpm_base_pa = __pa(svm->msrpm); 768 control->msrpm_base_pa = __pa(svm->msrpm);
769 control->tsc_offset = 0;
770 control->int_ctl = V_INTR_MASKING_MASK; 769 control->int_ctl = V_INTR_MASKING_MASK;
771 770
772 init_seg(&save->es); 771 init_seg(&save->es);
@@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
902 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 901 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
903 svm->asid_generation = 0; 902 svm->asid_generation = 0;
904 init_vmcb(svm); 903 init_vmcb(svm);
904 svm->vmcb->control.tsc_offset = 0-native_read_tsc();
905 905
906 err = fx_init(&svm->vcpu); 906 err = fx_init(&svm->vcpu);
907 if (err) 907 if (err)
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3163 sync_lapic_to_cr8(vcpu); 3163 sync_lapic_to_cr8(vcpu);
3164 3164
3165 save_host_msrs(vcpu); 3165 save_host_msrs(vcpu);
3166 fs_selector = kvm_read_fs(); 3166 savesegment(fs, fs_selector);
3167 gs_selector = kvm_read_gs(); 3167 savesegment(gs, gs_selector);
3168 ldt_selector = kvm_read_ldt(); 3168 ldt_selector = kvm_read_ldt();
3169 svm->vmcb->save.cr2 = vcpu->arch.cr2; 3169 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3170 /* required for live migration with NPT */ 3170 /* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3251 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3251 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3252 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3252 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3253 3253
3254 kvm_load_fs(fs_selector);
3255 kvm_load_gs(gs_selector);
3256 kvm_load_ldt(ldt_selector);
3257 load_host_msrs(vcpu); 3254 load_host_msrs(vcpu);
3255 loadsegment(fs, fs_selector);
3256#ifdef CONFIG_X86_64
3257 load_gs_index(gs_selector);
3258 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
3259#else
3260 loadsegment(gs, gs_selector);
3261#endif
3262 kvm_load_ldt(ldt_selector);
3258 3263
3259 reload_tss(vcpu); 3264 reload_tss(vcpu);
3260 3265
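
Moving the tsc_offset initialization out of init_vmcb() and seeding it at vcpu creation makes the guest's TSC start near zero, because of how SVM derives the guest-visible counter:

        /* SVM computes:
         *     guest_tsc = host_tsc + vmcb->control.tsc_offset
         * so with
         *     svm->vmcb->control.tsc_offset = 0 - native_read_tsc();
         * taken at creation time,
         *     guest_tsc ~= host_tsc_now - host_tsc_at_creation ~= 0
         * and the guest TSC ticks forward with the host from there. */
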
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 49b25eee25ac..7bddfab12013 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
803 */ 803 */
804 vmx->host_state.ldt_sel = kvm_read_ldt(); 804 vmx->host_state.ldt_sel = kvm_read_ldt();
805 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; 805 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
806 vmx->host_state.fs_sel = kvm_read_fs(); 806 savesegment(fs, vmx->host_state.fs_sel);
807 if (!(vmx->host_state.fs_sel & 7)) { 807 if (!(vmx->host_state.fs_sel & 7)) {
808 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); 808 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
809 vmx->host_state.fs_reload_needed = 0; 809 vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
811 vmcs_write16(HOST_FS_SELECTOR, 0); 811 vmcs_write16(HOST_FS_SELECTOR, 0);
812 vmx->host_state.fs_reload_needed = 1; 812 vmx->host_state.fs_reload_needed = 1;
813 } 813 }
814 vmx->host_state.gs_sel = kvm_read_gs(); 814 savesegment(gs, vmx->host_state.gs_sel);
815 if (!(vmx->host_state.gs_sel & 7)) 815 if (!(vmx->host_state.gs_sel & 7))
816 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); 816 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
817 else { 817 else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
841 841
842static void __vmx_load_host_state(struct vcpu_vmx *vmx) 842static void __vmx_load_host_state(struct vcpu_vmx *vmx)
843{ 843{
844 unsigned long flags;
845
846 if (!vmx->host_state.loaded) 844 if (!vmx->host_state.loaded)
847 return; 845 return;
848 846
849 ++vmx->vcpu.stat.host_state_reload; 847 ++vmx->vcpu.stat.host_state_reload;
850 vmx->host_state.loaded = 0; 848 vmx->host_state.loaded = 0;
851 if (vmx->host_state.fs_reload_needed) 849 if (vmx->host_state.fs_reload_needed)
852 kvm_load_fs(vmx->host_state.fs_sel); 850 loadsegment(fs, vmx->host_state.fs_sel);
853 if (vmx->host_state.gs_ldt_reload_needed) { 851 if (vmx->host_state.gs_ldt_reload_needed) {
854 kvm_load_ldt(vmx->host_state.ldt_sel); 852 kvm_load_ldt(vmx->host_state.ldt_sel);
855 /*
856 * If we have to reload gs, we must take care to
857 * preserve our gs base.
858 */
859 local_irq_save(flags);
860 kvm_load_gs(vmx->host_state.gs_sel);
861#ifdef CONFIG_X86_64 853#ifdef CONFIG_X86_64
862 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); 854 load_gs_index(vmx->host_state.gs_sel);
855 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
856#else
857 loadsegment(gs, vmx->host_state.gs_sel);
863#endif 858#endif
864 local_irq_restore(flags);
865 } 859 }
866 reload_tss(); 860 reload_tss();
867#ifdef CONFIG_X86_64 861#ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2589 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 2583 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
2590 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 2584 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2591 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 2585 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2592 vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ 2586 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
2593 vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ 2587 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
2594 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 2588 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2595#ifdef CONFIG_X86_64 2589#ifdef CONFIG_X86_64
2596 rdmsrl(MSR_FS_BASE, a); 2590 rdmsrl(MSR_FS_BASE, a);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index f9897f7a9ef1..9c0d0d399c30 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
420 return -1; 420 return -1;
421 } 421 }
422 422
423 for_each_node_mask(i, nodes_parsed) 423 for (i = 0; i < num_node_memblks; i++)
424 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, 424 e820_register_active_regions(memblk_nodeid[i],
425 nodes[i].end >> PAGE_SHIFT); 425 node_memblk_range[i].start >> PAGE_SHIFT,
426 node_memblk_range[i].end >> PAGE_SHIFT);
427
426 /* for out of order entries in SRAT */ 428 /* for out of order entries in SRAT */
427 sort_node_map(); 429 sort_node_map();
428 if (!nodes_cover_memory(nodes)) { 430 if (!nodes_cover_memory(nodes)) {
diff --git a/block/bsg.c b/block/bsg.c
index 82d58829ba59..0c00870553a3 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
426 /* 426 /*
427 * fill in all the output members 427 * fill in all the output members
428 */ 428 */
429 hdr->device_status = status_byte(rq->errors); 429 hdr->device_status = rq->errors & 0xff;
430 hdr->transport_status = host_byte(rq->errors); 430 hdr->transport_status = host_byte(rq->errors);
431 hdr->driver_status = driver_byte(rq->errors); 431 hdr->driver_status = driver_byte(rq->errors);
432 hdr->info = 0; 432 hdr->info = 0;
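
status_byte() masks and shifts the SCSI status, which mangles the SAM status codes that the sg_io_v4 ABI expects raw. For example, assuming the scsi.h definition of this era, ((result) >> 1) & 0x7f:

        /* SAM CHECK CONDITION (0x02) in the low byte of rq->errors:
         *   status_byte(0x02) = (0x02 >> 1) & 0x7f = 0x01   (mangled)
         *   0x02 & 0xff       = 0x02                        (what userspace wants)
         */
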
diff --git a/block/elevator.c b/block/elevator.c
index 205b09a5bd9e..4e11559aa2b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -938,6 +938,7 @@ int elv_register_queue(struct request_queue *q)
938 } 938 }
939 } 939 }
940 kobject_uevent(&e->kobj, KOBJ_ADD); 940 kobject_uevent(&e->kobj, KOBJ_ADD);
941 e->registered = 1;
941 } 942 }
942 return error; 943 return error;
943} 944}
@@ -947,6 +948,7 @@ static void __elv_unregister_queue(struct elevator_queue *e)
947{ 948{
948 kobject_uevent(&e->kobj, KOBJ_REMOVE); 949 kobject_uevent(&e->kobj, KOBJ_REMOVE);
949 kobject_del(&e->kobj); 950 kobject_del(&e->kobj);
951 e->registered = 0;
950} 952}
951 953
952void elv_unregister_queue(struct request_queue *q) 954void elv_unregister_queue(struct request_queue *q)
@@ -1042,11 +1044,13 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1042 1044
1043 spin_unlock_irq(q->queue_lock); 1045 spin_unlock_irq(q->queue_lock);
1044 1046
1045 __elv_unregister_queue(old_elevator); 1047 if (old_elevator->registered) {
1048 __elv_unregister_queue(old_elevator);
1046 1049
1047 err = elv_register_queue(q); 1050 err = elv_register_queue(q);
1048 if (err) 1051 if (err)
1049 goto fail_register; 1052 goto fail_register;
1053 }
1050 1054
1051 /* 1055 /*
1052 * finally exit old elevator and turn off BYPASS. 1056 * finally exit old elevator and turn off BYPASS.
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f7619600270a..af308d03f492 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -204,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
204 }, 204 },
205 }, 205 },
206 { 206 {
207 /*
208	 * The MSI GX723's DSDT contains an NVIF method that the Nvidia
209	 * driver (e.g. nouveau) must service when the user presses the
210	 * brightness hotkey. Nouveau does not do this yet, so pressing
211	 * the hotkey spins an infinite while loop inside the DSDT. Add
212	 * the MSI GX723's DMI information to this table to work around
213	 * the issue.
214	 * Remove MSI GX723 from the table once nouveau grows support.
215 */
216 .callback = dmi_disable_osi_vista,
217 .ident = "MSI GX723",
218 .matches = {
219 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
220 DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
221 },
222 },
223 {
207 .callback = dmi_disable_osi_vista, 224 .callback = dmi_disable_osi_vista,
208 .ident = "Sony VGN-NS10J_S", 225 .ident = "Sony VGN-NS10J_S",
209 .matches = { 226 .matches = {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b618f888d66b..bec561c14beb 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -346,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void)
346 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 346 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
347 ACPI_UINT32_MAX, 347 ACPI_UINT32_MAX,
348 early_init_pdc, NULL, NULL, NULL); 348 early_init_pdc, NULL, NULL, NULL);
349 acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
349} 350}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb53417..8cb0347dec28 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3156{ 3156{
3157 struct atm_dev *dev; 3157 struct atm_dev *dev;
3158 IADEV *iadev; 3158 IADEV *iadev;
3159 unsigned long flags;
3160 int ret; 3159 int ret;
3161 3160
3162 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 3161 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3188 ia_dev[iadev_count] = iadev; 3187 ia_dev[iadev_count] = iadev;
3189 _ia_dev[iadev_count] = dev; 3188 _ia_dev[iadev_count] = dev;
3190 iadev_count++; 3189 iadev_count++;
3191 spin_lock_init(&iadev->misc_lock);
3192 /* First fixes first. I don't want to think about this now. */
3193 spin_lock_irqsave(&iadev->misc_lock, flags);
3194 if (ia_init(dev) || ia_start(dev)) { 3190 if (ia_init(dev) || ia_start(dev)) {
3195 IF_INIT(printk("IA register failed!\n");) 3191 IF_INIT(printk("IA register failed!\n");)
3196 iadev_count--; 3192 iadev_count--;
3197 ia_dev[iadev_count] = NULL; 3193 ia_dev[iadev_count] = NULL;
3198 _ia_dev[iadev_count] = NULL; 3194 _ia_dev[iadev_count] = NULL;
3199 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3200 ret = -EINVAL; 3195 ret = -EINVAL;
3201 goto err_out_deregister_dev; 3196 goto err_out_deregister_dev;
3202 } 3197 }
3203 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3204 IF_EVENT(printk("iadev_count = %d\n", iadev_count);) 3198 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3205 3199
3206 iadev->next_board = ia_boards; 3200 iadev->next_board = ia_boards;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2cd20f549cb..077735e0e04b 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
1022 struct dle_q rx_dle_q; 1022 struct dle_q rx_dle_q;
1023 struct free_desc_q *rx_free_desc_qhead; 1023 struct free_desc_q *rx_free_desc_qhead;
1024 struct sk_buff_head rx_dma_q; 1024 struct sk_buff_head rx_dma_q;
1025 spinlock_t rx_lock, misc_lock; 1025 spinlock_t rx_lock;
1026 struct atm_vcc **rx_open; /* list of all open VCs */ 1026 struct atm_vcc **rx_open; /* list of all open VCs */
1027 u16 num_rx_desc, rx_buf_sz, rxing; 1027 u16 num_rx_desc, rx_buf_sz, rxing;
1028 u32 rx_pkt_ram, rx_tmp_cnt; 1028 u32 rx_pkt_ram, rx_tmp_cnt;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f916ddf63938..f46138ab38b6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); 444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
445 struct solos_card *card = atmdev->dev_data; 445 struct solos_card *card = atmdev->dev_data;
446 struct sk_buff *skb; 446 struct sk_buff *skb;
447 unsigned int len;
447 448
448 spin_lock(&card->cli_queue_lock); 449 spin_lock(&card->cli_queue_lock);
449 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); 450 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
451 if(skb == NULL) 452 if(skb == NULL)
452 return sprintf(buf, "No data.\n"); 453 return sprintf(buf, "No data.\n");
453 454
454 memcpy(buf, skb->data, skb->len); 455 len = skb->len;
455 dev_dbg(&card->dev->dev, "len: %d\n", skb->len); 456 memcpy(buf, skb->data, len);
457 dev_dbg(&card->dev->dev, "len: %d\n", len);
456 458
457 kfree_skb(skb); 459 kfree_skb(skb);
458 return skb->len; 460 return len;
459} 461}
460 462
461static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) 463static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
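
The console_show() change fixes a use-after-free: the old code freed the skb and then returned skb->len from the freed memory. The pattern, stripped down:

        /* before: */
        kfree_skb(skb);
        return skb->len;        /* reads the skb after it was freed */

        /* after: snapshot the length while the skb is still alive */
        len = skb->len;
        memcpy(buf, skb->data, len);
        kfree_skb(skb);
        return len;
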
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index de277689da61..4b9359a6f6ca 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -488,4 +488,21 @@ config BLK_DEV_HD
488 488
489 If unsure, say N. 489 If unsure, say N.
490 490
491config BLK_DEV_RBD
492 tristate "Rados block device (RBD)"
493 depends on INET && EXPERIMENTAL && BLOCK
494 select CEPH_LIB
495 select LIBCRC32C
496 select CRYPTO_AES
497 select CRYPTO
498 default n
499 help
500	  Say Y here if you want to include the Rados block device, which stripes
501 a block device over objects stored in the Ceph distributed object
502 store.
503
504 More information at http://ceph.newdream.net/.
505
506 If unsure, say N.
507
491endif # BLK_DEV 508endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index aff5ac925c34..d7f463d6312d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -37,5 +37,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o
37 37
38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o 38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ 39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
40 41
41swim_mod-objs := swim.o swim_asm.o 42swim_mod-objs := swim.o swim_asm.o
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index e9da874d0419..03688c2da319 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
113 memcpy(buf, dev->bounce_buf+offset, size); 113 memcpy(buf, dev->bounce_buf+offset, size);
114 offset += size; 114 offset += size;
115 flush_kernel_dcache_page(bvec->bv_page); 115 flush_kernel_dcache_page(bvec->bv_page);
116 bvec_kunmap_irq(bvec, &flags); 116 bvec_kunmap_irq(buf, &flags);
117 i++; 117 i++;
118 } 118 }
119} 119}
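
bvec_kunmap_irq() takes the mapping returned by bvec_kmap_irq(), not the bvec itself, so passing the bvec unmapped the wrong address. The correct pairing (rbd.c below uses it verbatim in zero_bio_chain()):

        unsigned long flags;
        char *buf = bvec_kmap_irq(bvec, &flags);   /* returns the mapped address */
        /* ... access buf ... */
        bvec_kunmap_irq(buf, &flags);              /* must receive that same address */
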
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
new file mode 100644
index 000000000000..6ec9d53806c5
--- /dev/null
+++ b/drivers/block/rbd.c
@@ -0,0 +1,1841 @@
1/*
2 rbd.c -- Export ceph rados objects as a Linux block device
3
4
5 based on drivers/block/osdblk.c:
6
7 Copyright 2009 Red Hat, Inc.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21
22
23
24 Instructions for use
25 --------------------
26
27 1) Map a Linux block device to an existing rbd image.
28
29 Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
30
31 $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
32
33 The snapshot name can be "-" or omitted to map the image read/write.
34
35 2) List all active blkdev<->object mappings.
36
37 In this example, we have performed step #1 twice, creating two blkdevs,
38	   mapped to two separate rados objects in the rados rbd pool.
39
40 $ cat /sys/class/rbd/list
41 #id major client_name pool name snap KB
42 0 254 client4143 rbd foo - 1024000
43
44 The columns, in order, are:
45 - blkdev unique id
46 - blkdev assigned major
47 - rados client id
48 - rados pool name
49 - rados block device name
50 - mapped snapshot ("-" if none)
51 - device size in KB
52
53
54 3) Create a snapshot.
55
56 Usage: <blkdev id> <snapname>
57
58 $ echo "0 mysnap" > /sys/class/rbd/snap_create
59
60
61	4) List snapshots.
62
63 $ cat /sys/class/rbd/snaps_list
64 #id snap KB
65 0 - 1024000 (*)
66 0 foo 1024000
67
68 The columns, in order, are:
69 - blkdev unique id
70 - snapshot name, '-' means none (active read/write version)
71 - size of device at time of snapshot
72 - the (*) indicates this is the active version
73
74 5) Rollback to snapshot.
75
76 Usage: <blkdev id> <snapname>
77
78 $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
79
80
81 6) Mapping an image using snapshot.
82
83	   A snapshot mapping is read-only. This is done by passing
84 snap=<snapname> to the options when adding a device.
85
86 $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
87
88
89 7) Remove an active blkdev<->rbd image mapping.
90
91 In this example, we remove the mapping with blkdev unique id 1.
92
93 $ echo 1 > /sys/class/rbd/remove
94
95
96 NOTE: The actual creation and deletion of rados objects is outside the scope
97 of this driver.
98
99 */
100
101#include <linux/ceph/libceph.h>
102#include <linux/ceph/osd_client.h>
103#include <linux/ceph/mon_client.h>
104#include <linux/ceph/decode.h>
105
106#include <linux/kernel.h>
107#include <linux/device.h>
108#include <linux/module.h>
109#include <linux/fs.h>
110#include <linux/blkdev.h>
111
112#include "rbd_types.h"
113
114#define DRV_NAME "rbd"
115#define DRV_NAME_LONG "rbd (rados block device)"
116
117#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
118
119#define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX))
120#define RBD_MAX_POOL_NAME_LEN 64
121#define RBD_MAX_SNAP_NAME_LEN 32
122#define RBD_MAX_OPT_LEN 1024
123
124#define RBD_SNAP_HEAD_NAME "-"
125
126#define DEV_NAME_LEN 32
127
128/*
129 * block device image metadata (in-memory version)
130 */
131struct rbd_image_header {
132 u64 image_size;
133 char block_name[32];
134 __u8 obj_order;
135 __u8 crypt_type;
136 __u8 comp_type;
137 struct rw_semaphore snap_rwsem;
138 struct ceph_snap_context *snapc;
139 size_t snap_names_len;
140 u64 snap_seq;
141 u32 total_snaps;
142
143 char *snap_names;
144 u64 *snap_sizes;
145};
146
147/*
148 * an instance of the client. multiple devices may share a client.
149 */
150struct rbd_client {
151 struct ceph_client *client;
152 struct kref kref;
153 struct list_head node;
154};
155
156/*
157 * a single io request
158 */
159struct rbd_request {
160 struct request *rq; /* blk layer request */
161 struct bio *bio; /* cloned bio */
162 struct page **pages; /* list of used pages */
163 u64 len;
164};
165
166/*
167 * a single device
168 */
169struct rbd_device {
170 int id; /* blkdev unique id */
171
172 int major; /* blkdev assigned major */
173 struct gendisk *disk; /* blkdev's gendisk and rq */
174 struct request_queue *q;
175
176 struct ceph_client *client;
177 struct rbd_client *rbd_client;
178
179 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
180
181 spinlock_t lock; /* queue lock */
182
183 struct rbd_image_header header;
184 char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */
185 int obj_len;
186 char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */
187 char pool_name[RBD_MAX_POOL_NAME_LEN];
188 int poolid;
189
190 char snap_name[RBD_MAX_SNAP_NAME_LEN];
191 u32 cur_snap; /* index+1 of current snapshot within snap context
192 0 - for the head */
193 int read_only;
194
195 struct list_head node;
196};
197
198static spinlock_t node_lock; /* protects client get/put */
199
200static struct class *class_rbd; /* /sys/class/rbd */
201static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
202static LIST_HEAD(rbd_dev_list); /* devices */
203static LIST_HEAD(rbd_client_list); /* clients */
204
205
206static int rbd_open(struct block_device *bdev, fmode_t mode)
207{
208 struct gendisk *disk = bdev->bd_disk;
209 struct rbd_device *rbd_dev = disk->private_data;
210
211 set_device_ro(bdev, rbd_dev->read_only);
212
213 if ((mode & FMODE_WRITE) && rbd_dev->read_only)
214 return -EROFS;
215
216 return 0;
217}
218
219static const struct block_device_operations rbd_bd_ops = {
220 .owner = THIS_MODULE,
221 .open = rbd_open,
222};
223
224/*
225 * Initialize an rbd client instance.
226 * We own *opt.
227 */
228static struct rbd_client *rbd_client_create(struct ceph_options *opt)
229{
230 struct rbd_client *rbdc;
231 int ret = -ENOMEM;
232
233 dout("rbd_client_create\n");
234 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
235 if (!rbdc)
236 goto out_opt;
237
238 kref_init(&rbdc->kref);
239 INIT_LIST_HEAD(&rbdc->node);
240
241 rbdc->client = ceph_create_client(opt, rbdc);
242 if (IS_ERR(rbdc->client))
243 goto out_rbdc;
244 opt = NULL; /* Now rbdc->client is responsible for opt */
245
246 ret = ceph_open_session(rbdc->client);
247 if (ret < 0)
248 goto out_err;
249
250 spin_lock(&node_lock);
251 list_add_tail(&rbdc->node, &rbd_client_list);
252 spin_unlock(&node_lock);
253
254 dout("rbd_client_create created %p\n", rbdc);
255 return rbdc;
256
257out_err:
258 ceph_destroy_client(rbdc->client);
259out_rbdc:
260 kfree(rbdc);
261out_opt:
262 if (opt)
263 ceph_destroy_options(opt);
264 return ERR_PTR(ret);
265}
266
267/*
268 * Find a ceph client with specific addr and configuration.
269 */
270static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
271{
272 struct rbd_client *client_node;
273
274 if (opt->flags & CEPH_OPT_NOSHARE)
275 return NULL;
276
277 list_for_each_entry(client_node, &rbd_client_list, node)
278 if (ceph_compare_options(opt, client_node->client) == 0)
279 return client_node;
280 return NULL;
281}
282
283/*
284 * Get a ceph client with specific addr and configuration, if one does
285 * not exist create it.
286 */
287static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
288 char *options)
289{
290 struct rbd_client *rbdc;
291 struct ceph_options *opt;
292 int ret;
293
294 ret = ceph_parse_options(&opt, options, mon_addr,
295 mon_addr + strlen(mon_addr), NULL, NULL);
296 if (ret < 0)
297 return ret;
298
299 spin_lock(&node_lock);
300 rbdc = __rbd_client_find(opt);
301 if (rbdc) {
302 ceph_destroy_options(opt);
303
304 /* using an existing client */
305 kref_get(&rbdc->kref);
306 rbd_dev->rbd_client = rbdc;
307 rbd_dev->client = rbdc->client;
308 spin_unlock(&node_lock);
309 return 0;
310 }
311 spin_unlock(&node_lock);
312
313 rbdc = rbd_client_create(opt);
314 if (IS_ERR(rbdc))
315 return PTR_ERR(rbdc);
316
317 rbd_dev->rbd_client = rbdc;
318 rbd_dev->client = rbdc->client;
319 return 0;
320}
321
322/*
323 * Destroy ceph client
324 */
325static void rbd_client_release(struct kref *kref)
326{
327 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
328
329 dout("rbd_release_client %p\n", rbdc);
330 spin_lock(&node_lock);
331 list_del(&rbdc->node);
332 spin_unlock(&node_lock);
333
334 ceph_destroy_client(rbdc->client);
335 kfree(rbdc);
336}
337
338/*
339 * Drop reference to ceph client node. If it's not referenced anymore, release
340 * it.
341 */
342static void rbd_put_client(struct rbd_device *rbd_dev)
343{
344 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
345 rbd_dev->rbd_client = NULL;
346 rbd_dev->client = NULL;
347}
348
349
350/*
351 * Create a new header structure, translate header format from the on-disk
352 * header.
353 */
354static int rbd_header_from_disk(struct rbd_image_header *header,
355 struct rbd_image_header_ondisk *ondisk,
356 int allocated_snaps,
357 gfp_t gfp_flags)
358{
359 int i;
360 u32 snap_count = le32_to_cpu(ondisk->snap_count);
361 int ret = -ENOMEM;
362
363 init_rwsem(&header->snap_rwsem);
364
365 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
366 header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
367 snap_count *
368 sizeof(struct rbd_image_snap_ondisk),
369 gfp_flags);
370 if (!header->snapc)
371 return -ENOMEM;
372 if (snap_count) {
373 header->snap_names = kmalloc(header->snap_names_len,
374 GFP_KERNEL);
375 if (!header->snap_names)
376 goto err_snapc;
377 header->snap_sizes = kmalloc(snap_count * sizeof(u64),
378 GFP_KERNEL);
379 if (!header->snap_sizes)
380 goto err_names;
381 } else {
382 header->snap_names = NULL;
383 header->snap_sizes = NULL;
384 }
385 memcpy(header->block_name, ondisk->block_name,
386 sizeof(ondisk->block_name));
387
388 header->image_size = le64_to_cpu(ondisk->image_size);
389 header->obj_order = ondisk->options.order;
390 header->crypt_type = ondisk->options.crypt_type;
391 header->comp_type = ondisk->options.comp_type;
392
393 atomic_set(&header->snapc->nref, 1);
394 header->snap_seq = le64_to_cpu(ondisk->snap_seq);
395 header->snapc->num_snaps = snap_count;
396 header->total_snaps = snap_count;
397
398 if (snap_count &&
399 allocated_snaps == snap_count) {
400 for (i = 0; i < snap_count; i++) {
401 header->snapc->snaps[i] =
402 le64_to_cpu(ondisk->snaps[i].id);
403 header->snap_sizes[i] =
404 le64_to_cpu(ondisk->snaps[i].image_size);
405 }
406
407 /* copy snapshot names; they are packed right after the snaps[] array */
408 memcpy(header->snap_names, &ondisk->snaps[i],
409 header->snap_names_len);
410 }
411
412 return 0;
413
414err_names:
415 kfree(header->snap_names);
416err_snapc:
417 kfree(header->snapc);
418 return ret;
419}
420
421static int snap_index(struct rbd_image_header *header, int snap_num)
422{
423 return header->total_snaps - snap_num;
424}
425
426static u64 cur_snap_id(struct rbd_device *rbd_dev)
427{
428 struct rbd_image_header *header = &rbd_dev->header;
429
430 if (!rbd_dev->cur_snap)
431 return 0;
432
433 return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
434}
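
/*
 * For example: with total_snaps == 3, dev->cur_snap == 0 means the head
 * (no snapshot mapped); a snapshot whose name was found at index 2 by
 * snap_by_name() is stored as cur_snap = 3 - 2 = 1, and snap_index()
 * maps it back to snapc->snaps[2].
 */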
435
436static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
437 u64 *seq, u64 *size)
438{
439 int i;
440 char *p = header->snap_names;
441
442 for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
443 if (strcmp(snap_name, p) == 0)
444 break;
445 }
446 if (i == header->total_snaps)
447 return -ENOENT;
448 if (seq)
449 *seq = header->snapc->snaps[i];
450
451 if (size)
452 *size = header->snap_sizes[i];
453
454 return i;
455}
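
/*
 * snap_names is one buffer of NUL-terminated strings packed back to
 * back, in the same order as snapc->snaps[] and snap_sizes[], e.g.
 * (illustrative names):
 *
 *   "mon\0tue\0wed\0"   ->   i == 0: "mon", i == 1: "tue", i == 2: "wed"
 *
 * which is why the walk above advances p by strlen(p) + 1.
 */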
456
457static int rbd_header_set_snap(struct rbd_device *dev,
458 const char *snap_name,
459 u64 *size)
460{
461 struct rbd_image_header *header = &dev->header;
462 struct ceph_snap_context *snapc = header->snapc;
463 int ret = -ENOENT;
464
465 down_write(&header->snap_rwsem);
466
467 if (!snap_name ||
468 !*snap_name ||
469 strcmp(snap_name, "-") == 0 ||
470 strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) {
471 if (header->total_snaps)
472 snapc->seq = header->snap_seq;
473 else
474 snapc->seq = 0;
475 dev->cur_snap = 0;
476 dev->read_only = 0;
477 if (size)
478 *size = header->image_size;
479 } else {
480 ret = snap_by_name(header, snap_name, &snapc->seq, size);
481 if (ret < 0)
482 goto done;
483
484 dev->cur_snap = header->total_snaps - ret;
485 dev->read_only = 1;
486 }
487
488 ret = 0;
489done:
490 up_write(&header->snap_rwsem);
491 return ret;
492}
493
494static void rbd_header_free(struct rbd_image_header *header)
495{
496 kfree(header->snapc);
497 kfree(header->snap_names);
498 kfree(header->snap_sizes);
499}
500
501/*
502 * get the actual striped segment name, offset and length
503 */
504static u64 rbd_get_segment(struct rbd_image_header *header,
505 const char *block_name,
506 u64 ofs, u64 len,
507 char *seg_name, u64 *segofs)
508{
509 u64 seg = ofs >> header->obj_order;
510
511 if (seg_name)
512 snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
513 "%s.%012llx", block_name, seg);
514
515 ofs = ofs & ((1 << header->obj_order) - 1);
516 len = min_t(u64, len, (1 << header->obj_order) - ofs);
517
518 if (segofs)
519 *segofs = ofs;
520
521 return len;
522}
523
524/*
525 * bio helpers
526 */
527
528static void bio_chain_put(struct bio *chain)
529{
530 struct bio *tmp;
531
532 while (chain) {
533 tmp = chain;
534 chain = chain->bi_next;
535 bio_put(tmp);
536 }
537}
538
539/*
540 * zero a bio chain, starting at a specific offset
541 */
542static void zero_bio_chain(struct bio *chain, int start_ofs)
543{
544 struct bio_vec *bv;
545 unsigned long flags;
546 void *buf;
547 int i;
548 int pos = 0;
549
550 while (chain) {
551 bio_for_each_segment(bv, chain, i) {
552 if (pos + bv->bv_len > start_ofs) {
553 int remainder = max(start_ofs - pos, 0);
554 buf = bvec_kmap_irq(bv, &flags);
555 memset(buf + remainder, 0,
556 bv->bv_len - remainder);
557 bvec_kunmap_irq(buf, &flags);
558 }
559 pos += bv->bv_len;
560 }
561
562 chain = chain->bi_next;
563 }
564}
565
566/*
567 * bio_chain_clone - clone a chain of bios up to a certain length.
568 * May return a bio_pair that will need to be released.
569 */
570static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
571 struct bio_pair **bp,
572 int len, gfp_t gfpmask)
573{
574 struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL;
575 int total = 0;
576
577 if (*bp) {
578 bio_pair_release(*bp);
579 *bp = NULL;
580 }
581
582 while (old_chain && (total < len)) {
583 tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
584 if (!tmp)
585 goto err_out;
586
587 if (total + old_chain->bi_size > len) {
588 struct bio_pair *bp;
589
590 /*
591 * this split can only happen with a single paged bio,
592 * split_bio will BUG_ON if this is not the case
593 */
594 dout("bio_chain_clone split! total=%d remaining=%d "
595 "bi_size=%d\n",
596 (int)total, (int)len-total,
597 (int)old_chain->bi_size);
598
599 /* split the bio. We'll release it either in the next
600 call, or it will have to be released outside */
601 bp = bio_split(old_chain, (len - total) / 512ULL);
602 if (!bp)
603 goto err_out;
604
605 __bio_clone(tmp, &bp->bio1);
606
607 *next = &bp->bio2;
608 } else {
609 __bio_clone(tmp, old_chain);
610 *next = old_chain->bi_next;
611 }
612
613 tmp->bi_bdev = NULL;
614 gfpmask &= ~__GFP_WAIT;
615 tmp->bi_next = NULL;
616
617 if (!new_chain) {
618 new_chain = tail = tmp;
619 } else {
620 tail->bi_next = tmp;
621 tail = tmp;
622 }
623 old_chain = old_chain->bi_next;
624
625 total += tmp->bi_size;
626 }
627
628 BUG_ON(total < len);
629
630 if (tail)
631 tail->bi_next = NULL;
632
633 *old = old_chain;
634
635 return new_chain;
636
637err_out:
638 dout("bio_chain_clone with err\n");
639 bio_chain_put(new_chain);
640 return NULL;
641}
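
/*
 * Typical caller pattern (a sketch of what rbd_rq_fn() below does):
 * clone one segment's worth at a time, feeding the leftover chain back
 * in as the new head, and release any final bio_pair:
 *
 *	do {
 *		bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
 *				      op_size, GFP_ATOMIC);
 *		...submit bio...
 *		rq_bio = next_bio;
 *	} while (size > 0);
 *	if (bp)
 *		bio_pair_release(bp);
 */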
642
643/*
644 * helpers for osd request op vectors.
645 */
646static int rbd_create_rw_ops(struct ceph_osd_req_op **ops,
647 int num_ops,
648 int opcode,
649 u32 payload_len)
650{
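 /*
  * Allocate one extra op beyond num_ops and leave it zeroed
  * (kzalloc): the osd client treats a zero op code as the end of
  * the op vector, so the extra slot acts as a terminator -- hence
  * the num_ops + 1 below.
  */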
651 *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1),
652 GFP_NOIO);
653 if (!*ops)
654 return -ENOMEM;
655 (*ops)[0].op = opcode;
656 /*
657 * op extent offset and length will be set later on
658 * in calc_raw_layout()
659 */
660 (*ops)[0].payload_len = payload_len;
661 return 0;
662}
663
664static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
665{
666 kfree(ops);
667}
668
669/*
670 * Send ceph osd request
671 */
672static int rbd_do_request(struct request *rq,
673 struct rbd_device *dev,
674 struct ceph_snap_context *snapc,
675 u64 snapid,
676 const char *obj, u64 ofs, u64 len,
677 struct bio *bio,
678 struct page **pages,
679 int num_pages,
680 int flags,
681 struct ceph_osd_req_op *ops,
682 int num_reply,
683 void (*rbd_cb)(struct ceph_osd_request *req,
684 struct ceph_msg *msg))
685{
686 struct ceph_osd_request *req;
687 struct ceph_file_layout *layout;
688 int ret;
689 u64 bno;
690 struct timespec mtime = CURRENT_TIME;
691 struct rbd_request *req_data;
692 struct ceph_osd_request_head *reqhead;
693 struct rbd_image_header *header = &dev->header;
694
695 ret = -ENOMEM;
696 req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
697 if (!req_data)
698 goto done;
699
700 dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs);
701
702 down_read(&header->snap_rwsem);
703
704 req = ceph_osdc_alloc_request(&dev->client->osdc, flags,
705 snapc,
706 ops,
707 false,
708 GFP_NOIO, pages, bio);
709 if (IS_ERR(req)) {
710 up_read(&header->snap_rwsem);
711 ret = PTR_ERR(req);
712 goto done_pages;
713 }
714
715 req->r_callback = rbd_cb;
716
717 req_data->rq = rq;
718 req_data->bio = bio;
719 req_data->pages = pages;
720 req_data->len = len;
721
722 req->r_priv = req_data;
723
724 reqhead = req->r_request->front.iov_base;
725 reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
726
727 strncpy(req->r_oid, obj, sizeof(req->r_oid));
728 req->r_oid_len = strlen(req->r_oid);
729
730 layout = &req->r_file_layout;
731 memset(layout, 0, sizeof(*layout));
732 layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
733 layout->fl_stripe_count = cpu_to_le32(1);
734 layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
735 layout->fl_pg_preferred = cpu_to_le32(-1);
736 layout->fl_pg_pool = cpu_to_le32(dev->poolid);
737 ceph_calc_raw_layout(&dev->client->osdc, layout, snapid,
738 ofs, &len, &bno, req, ops);
739
740 ceph_osdc_build_request(req, ofs, &len,
741 ops,
742 snapc,
743 &mtime,
744 req->r_oid, req->r_oid_len);
745 up_read(&header->snap_rwsem);
746
747 ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
748 if (ret < 0)
749 goto done_err;
750
751 if (!rbd_cb) {
752 ret = ceph_osdc_wait_request(&dev->client->osdc, req);
753 ceph_osdc_put_request(req);
754 }
755 return ret;
756
757done_err:
758 bio_chain_put(req_data->bio);
759 ceph_osdc_put_request(req);
760done_pages:
761 kfree(req_data);
762done:
763 if (rq)
764 blk_end_request(rq, ret, len);
765 return ret;
766}
767
768/*
769 * Ceph osd op callback
770 */
771static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
772{
773 struct rbd_request *req_data = req->r_priv;
774 struct ceph_osd_reply_head *replyhead;
775 struct ceph_osd_op *op;
776 __s32 rc;
777 u64 bytes;
778 int read_op;
779
780 /* parse reply */
781 replyhead = msg->front.iov_base;
782 WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
783 op = (void *)(replyhead + 1);
784 rc = le32_to_cpu(replyhead->result);
785 bytes = le64_to_cpu(op->extent.length);
786 read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
787
788 dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
789
790 if (rc == -ENOENT && read_op) {
791 zero_bio_chain(req_data->bio, 0);
792 rc = 0;
793 } else if (rc == 0 && read_op && bytes < req_data->len) {
794 zero_bio_chain(req_data->bio, bytes);
795 bytes = req_data->len;
796 }
797
798 blk_end_request(req_data->rq, rc, bytes);
799
800 if (req_data->bio)
801 bio_chain_put(req_data->bio);
802
803 ceph_osdc_put_request(req);
804 kfree(req_data);
805}
806
807/*
808 * Do a synchronous ceph osd operation
809 */
810static int rbd_req_sync_op(struct rbd_device *dev,
811 struct ceph_snap_context *snapc,
812 u64 snapid,
813 int opcode,
814 int flags,
815 struct ceph_osd_req_op *orig_ops,
816 int num_reply,
817 const char *obj,
818 u64 ofs, u64 len,
819 char *buf)
820{
821 int ret;
822 struct page **pages;
823 int num_pages;
824 struct ceph_osd_req_op *ops = orig_ops;
825 u32 payload_len;
826
827 num_pages = calc_pages_for(ofs, len);
828 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
829 if (IS_ERR(pages))
830 return PTR_ERR(pages);
831
832 if (!orig_ops) {
833 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0);
834 ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
835 if (ret < 0)
836 goto done;
837
838 if ((flags & CEPH_OSD_FLAG_WRITE) && buf) {
839 ret = ceph_copy_to_page_vector(pages, buf, ofs, len);
840 if (ret < 0)
841 goto done_ops;
842 }
843 }
844
845 ret = rbd_do_request(NULL, dev, snapc, snapid,
846 obj, ofs, len, NULL,
847 pages, num_pages,
848 flags,
849 ops,
850 2,
851 NULL);
852 if (ret < 0)
853 goto done_ops;
854
855 if ((flags & CEPH_OSD_FLAG_READ) && buf)
856 ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
857
858done_ops:
859 if (!orig_ops)
860 rbd_destroy_ops(ops);
861done:
862 ceph_release_page_vector(pages, num_pages);
863 return ret;
864}
865
866/*
867 * Do an asynchronous ceph osd operation
868 */
869static int rbd_do_op(struct request *rq,
870 struct rbd_device *rbd_dev,
871 struct ceph_snap_context *snapc,
872 u64 snapid,
873 int opcode, int flags, int num_reply,
874 u64 ofs, u64 len,
875 struct bio *bio)
876{
877 char *seg_name;
878 u64 seg_ofs;
879 u64 seg_len;
880 int ret;
881 struct ceph_osd_req_op *ops;
882 u32 payload_len;
883
884 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
885 if (!seg_name)
886 return -ENOMEM;
887
888 seg_len = rbd_get_segment(&rbd_dev->header,
889 rbd_dev->header.block_name,
890 ofs, len,
891 seg_name, &seg_ofs);
892
893 payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
894
895 ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
896 if (ret < 0)
897 goto done;
898
899 /* we've taken care of segment sizes earlier when we
900 cloned the bios. We should never have a segment
901 truncated at this point */
902 BUG_ON(seg_len < len);
903
904 ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
905 seg_name, seg_ofs, seg_len,
906 bio,
907 NULL, 0,
908 flags,
909 ops,
910 num_reply,
911 rbd_req_cb);
912done:
913 kfree(seg_name);
914 return ret;
915}
916
917/*
918 * Request async osd write
919 */
920static int rbd_req_write(struct request *rq,
921 struct rbd_device *rbd_dev,
922 struct ceph_snap_context *snapc,
923 u64 ofs, u64 len,
924 struct bio *bio)
925{
926 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
927 CEPH_OSD_OP_WRITE,
928 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
929 2,
930 ofs, len, bio);
931}
932
933/*
934 * Request async osd read
935 */
936static int rbd_req_read(struct request *rq,
937 struct rbd_device *rbd_dev,
938 u64 snapid,
939 u64 ofs, u64 len,
940 struct bio *bio)
941{
942 return rbd_do_op(rq, rbd_dev, NULL,
943 (snapid ? snapid : CEPH_NOSNAP),
944 CEPH_OSD_OP_READ,
945 CEPH_OSD_FLAG_READ,
946 2,
947 ofs, len, bio);
948}
949
950/*
951 * Request sync osd read
952 */
953static int rbd_req_sync_read(struct rbd_device *dev,
954 struct ceph_snap_context *snapc,
955 u64 snapid,
956 const char *obj,
957 u64 ofs, u64 len,
958 char *buf)
959{
960 return rbd_req_sync_op(dev, NULL,
961 (snapid ? snapid : CEPH_NOSNAP),
962 CEPH_OSD_OP_READ,
963 CEPH_OSD_FLAG_READ,
964 NULL,
965 1, obj, ofs, len, buf);
966}
967
968/*
969 * Request sync osd rollback of an object to a snapshot
970 */
971static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
972 u64 snapid,
973 const char *obj)
974{
975 struct ceph_osd_req_op *ops;
976 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
977 if (ret < 0)
978 return ret;
979
980 ops[0].snap.snapid = snapid;
981
982 ret = rbd_req_sync_op(dev, NULL,
983 CEPH_NOSNAP,
984 0,
985 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
986 ops,
987 1, obj, 0, 0, NULL);
988
989 rbd_destroy_ops(ops);
990
991 return ret;
995}
996
997/*
998 * Request sync osd class method call
999 */
1000static int rbd_req_sync_exec(struct rbd_device *dev,
1001 const char *obj,
1002 const char *cls,
1003 const char *method,
1004 const char *data,
1005 int len)
1006{
1007 struct ceph_osd_req_op *ops;
1008 int cls_len = strlen(cls);
1009 int method_len = strlen(method);
1010 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL,
1011 cls_len + method_len + len);
1012 if (ret < 0)
1013 return ret;
1014
1015 ops[0].cls.class_name = cls;
1016 ops[0].cls.class_len = (__u8)cls_len;
1017 ops[0].cls.method_name = method;
1018 ops[0].cls.method_len = (__u8)method_len;
1019 ops[0].cls.argc = 0;
1020 ops[0].cls.indata = data;
1021 ops[0].cls.indata_len = len;
1022
1023 ret = rbd_req_sync_op(dev, NULL,
1024 CEPH_NOSNAP,
1025 0,
1026 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1027 ops,
1028 1, obj, 0, 0, NULL);
1029
1030 rbd_destroy_ops(ops);
1031
1032 dout("cls_exec returned %d\n", ret);
1033 return ret;
1034}
1035
1036/*
1037 * block device queue callback
1038 */
1039static void rbd_rq_fn(struct request_queue *q)
1040{
1041 struct rbd_device *rbd_dev = q->queuedata;
1042 struct request *rq;
1043 struct bio_pair *bp = NULL;
1044
1045 rq = blk_fetch_request(q);
1046
1047 while (1) {
1048 struct bio *bio;
1049 struct bio *rq_bio, *next_bio = NULL;
1050 bool do_write;
1051 int size, op_size = 0;
1052 u64 ofs;
1053
1054 /* peek at request from block layer */
1055 if (!rq)
1056 break;
1057
1058 dout("fetched request\n");
1059
1060 /* filter out block requests we don't understand */
1061 if (rq->cmd_type != REQ_TYPE_FS) {
1062 __blk_end_request_all(rq, 0);
1063 goto next;
1064 }
1065
1066 /* deduce our operation (read, write) */
1067 do_write = (rq_data_dir(rq) == WRITE);
1068
1069 size = blk_rq_bytes(rq);
1070 ofs = blk_rq_pos(rq) * 512ULL;
1071 rq_bio = rq->bio;
1072 if (do_write && rbd_dev->read_only) {
1073 __blk_end_request_all(rq, -EROFS);
1074 goto next;
1075 }
1076
1077 spin_unlock_irq(q->queue_lock);
1078
1079 dout("%s 0x%x bytes at 0x%llx\n",
1080 do_write ? "write" : "read",
1081 size, blk_rq_pos(rq) * 512ULL);
1082
1083 do {
1084 /* a bio clone to be passed down to OSD req */
1085 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
1086 op_size = rbd_get_segment(&rbd_dev->header,
1087 rbd_dev->header.block_name,
1088 ofs, size,
1089 NULL, NULL);
1090 bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
1091 op_size, GFP_ATOMIC);
1092 if (!bio) {
1093 spin_lock_irq(q->queue_lock);
1094 __blk_end_request_all(rq, -ENOMEM);
1095 goto next;
1096 }
1097
1098 /* init OSD command: write or read */
1099 if (do_write)
1100 rbd_req_write(rq, rbd_dev,
1101 rbd_dev->header.snapc,
1102 ofs,
1103 op_size, bio);
1104 else
1105 rbd_req_read(rq, rbd_dev,
1106 cur_snap_id(rbd_dev),
1107 ofs,
1108 op_size, bio);
1109
1110 size -= op_size;
1111 ofs += op_size;
1112
1113 rq_bio = next_bio;
1114 } while (size > 0);
1115
1116 if (bp)
1117 bio_pair_release(bp);
1118
1119 spin_lock_irq(q->queue_lock);
1120next:
1121 rq = blk_fetch_request(q);
1122 }
1123}
1124
1125/*
1126 * a queue callback. Makes sure that we don't create a bio that spans across
1127 * multiple osd objects. One exception would be single-page bios,
1128 * which we handle later in bio_chain_clone()
1129 */
1130static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
1131 struct bio_vec *bvec)
1132{
1133 struct rbd_device *rbd_dev = q->queuedata;
1134 unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9);
1135 sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
1136 unsigned int bio_sectors = bmd->bi_size >> 9;
1137 int max;
1138
1139 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
1140 + bio_sectors)) << 9;
1141 if (max < 0)
1142 max = 0; /* bio_add cannot handle a negative return */
1143 if (max <= bvec->bv_len && bio_sectors == 0)
1144 return bvec->bv_len;
1145 return max;
1146}
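
/*
 * For example, with obj_order == 22: chunk_sectors == 8192 (one 4 MB
 * object). A bio already holding bio_sectors == 100 that starts at
 * sector 8000 within a chunk may only grow by
 * (8192 - (8000 + 100)) << 9 == 92 sectors == 47104 bytes, so
 * bio_add_page() cannot push it across the object boundary.
 */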
1147
1148static void rbd_free_disk(struct rbd_device *rbd_dev)
1149{
1150 struct gendisk *disk = rbd_dev->disk;
1151
1152 if (!disk)
1153 return;
1154
1155 rbd_header_free(&rbd_dev->header);
1156
1157 if (disk->flags & GENHD_FL_UP)
1158 del_gendisk(disk);
1159 if (disk->queue)
1160 blk_cleanup_queue(disk->queue);
1161 put_disk(disk);
1162}
1163
1164/*
1165 * re-read the on-disk header
1166 */
1167static int rbd_read_header(struct rbd_device *rbd_dev,
1168 struct rbd_image_header *header)
1169{
1170 ssize_t rc;
1171 struct rbd_image_header_ondisk *dh;
1172 int snap_count = 0;
1173 u64 snap_names_len = 0;
1174
1175 while (1) {
1176 int len = sizeof(*dh) +
1177 snap_count * sizeof(struct rbd_image_snap_ondisk) +
1178 snap_names_len;
1179
1180 rc = -ENOMEM;
1181 dh = kmalloc(len, GFP_KERNEL);
1182 if (!dh)
1183 return -ENOMEM;
1184
1185 rc = rbd_req_sync_read(rbd_dev,
1186 NULL, CEPH_NOSNAP,
1187 rbd_dev->obj_md_name,
1188 0, len,
1189 (char *)dh);
1190 if (rc < 0)
1191 goto out_dh;
1192
1193 rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
1194 if (rc < 0)
1195 goto out_dh;
1196
1197 if (snap_count != header->total_snaps) {
1198 snap_count = header->total_snaps;
1199 snap_names_len = header->snap_names_len;
1200 rbd_header_free(header);
1201 kfree(dh);
1202 continue;
1203 }
1204 break;
1205 }
1206
1207out_dh:
1208 kfree(dh);
1209 return rc;
1210}
1211
1212/*
1213 * create a snapshot
1214 */
1215static int rbd_header_add_snap(struct rbd_device *dev,
1216 const char *snap_name,
1217 gfp_t gfp_flags)
1218{
1219 int name_len = strlen(snap_name);
1220 u64 new_snapid;
1221 int ret;
1222 void *data, *data_start, *data_end;
1223
1224 /* we should create a snapshot only if we're pointing at the head */
1225 if (dev->cur_snap)
1226 return -EINVAL;
1227
1228 ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid,
1229 &new_snapid);
1230 dout("created snapid=%lld\n", new_snapid);
1231 if (ret < 0)
1232 return ret;
1233
1234 data = kmalloc(name_len + 16, gfp_flags);
1235 if (!data)
1236 return -ENOMEM;
1237
1238 data_start = data;
1239 data_end = data + name_len + 16;
1240
1241 ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad);
1242 ceph_encode_64_safe(&data, data_end, new_snapid, bad);
1243
1244 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
1245 data_start, data - data_start);
1246
1247 kfree(data_start);
1248
1249 if (ret < 0)
1250 return ret;
1251
1252 dev->header.snapc->seq = new_snapid;
1253
1254 return 0;
1255bad:
1256 return -ERANGE;
1257}
1258
1259/*
1260 * only read the first part of the ondisk header, without the snaps info
1261 */
1262static int rbd_update_snaps(struct rbd_device *rbd_dev)
1263{
1264 int ret;
1265 struct rbd_image_header h;
1266 u64 snap_seq;
1267
1268 ret = rbd_read_header(rbd_dev, &h);
1269 if (ret < 0)
1270 return ret;
1271
1272 down_write(&rbd_dev->header.snap_rwsem);
1273
1274 snap_seq = rbd_dev->header.snapc->seq;
1275
1276 kfree(rbd_dev->header.snapc);
1277 kfree(rbd_dev->header.snap_names);
1278 kfree(rbd_dev->header.snap_sizes);
1279
1280 rbd_dev->header.total_snaps = h.total_snaps;
1281 rbd_dev->header.snapc = h.snapc;
1282 rbd_dev->header.snap_names = h.snap_names;
1283 rbd_dev->header.snap_sizes = h.snap_sizes;
1284 rbd_dev->header.snapc->seq = snap_seq;
1285
1286 up_write(&rbd_dev->header.snap_rwsem);
1287
1288 return 0;
1289}
1290
1291static int rbd_init_disk(struct rbd_device *rbd_dev)
1292{
1293 struct gendisk *disk;
1294 struct request_queue *q;
1295 int rc;
1296 u64 total_size = 0;
1297
1298 /* contact OSD, request size info about the object being mapped */
1299 rc = rbd_read_header(rbd_dev, &rbd_dev->header);
1300 if (rc)
1301 return rc;
1302
1303 rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
1304 if (rc)
1305 return rc;
1306
1307 /* create gendisk info */
1308 rc = -ENOMEM;
1309 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
1310 if (!disk)
1311 goto out;
1312
1313 sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id);
1314 disk->major = rbd_dev->major;
1315 disk->first_minor = 0;
1316 disk->fops = &rbd_bd_ops;
1317 disk->private_data = rbd_dev;
1318
1319 /* init rq */
1320 rc = -ENOMEM;
1321 q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
1322 if (!q)
1323 goto out_disk;
1324 blk_queue_merge_bvec(q, rbd_merge_bvec);
1325 disk->queue = q;
1326
1327 q->queuedata = rbd_dev;
1328
1329 rbd_dev->disk = disk;
1330 rbd_dev->q = q;
1331
1332 /* finally, announce the disk to the world */
1333 set_capacity(disk, total_size / 512ULL);
1334 add_disk(disk);
1335
1336 pr_info("%s: added with size 0x%llx\n",
1337 disk->disk_name, (unsigned long long)total_size);
1338 return 0;
1339
1340out_disk:
1341 put_disk(disk);
1342out:
1343 return rc;
1344}
1345
1346/********************************************************************
1347 * /sys/class/rbd/
1348 * add map rados objects to blkdev
1349 * remove unmap rados objects
1350 * list show mappings
1351 *******************************************************************/
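
/*
 * Example usage from userspace (illustrative values, not part of this
 * file; <key> stands in for a real secret):
 *
 *   # map image 'foo' from pool 'rbd' at the head (no snapshot)
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" > /sys/class/rbd/add
 *   cat /sys/class/rbd/list
 *   # unmap device id 0 again
 *   echo 0 > /sys/class/rbd/remove
 */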
1352
1353static void class_rbd_release(struct class *cls)
1354{
1355 kfree(cls);
1356}
1357
1358static ssize_t class_rbd_list(struct class *c,
1359 struct class_attribute *attr,
1360 char *data)
1361{
1362 int n = 0;
1363 struct list_head *tmp;
1364 int max = PAGE_SIZE;
1365
1366 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1367
1368 n += snprintf(data, max,
1369 "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
1370
1371 list_for_each(tmp, &rbd_dev_list) {
1372 struct rbd_device *rbd_dev;
1373
1374 rbd_dev = list_entry(tmp, struct rbd_device, node);
1375 n += snprintf(data+n, max-n,
1376 "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
1377 rbd_dev->id,
1378 rbd_dev->major,
1379 ceph_client_id(rbd_dev->client),
1380 rbd_dev->pool_name,
1381 rbd_dev->obj, rbd_dev->snap_name,
1382 rbd_dev->header.image_size >> 10);
1383 if (n == max)
1384 break;
1385 }
1386
1387 mutex_unlock(&ctl_mutex);
1388 return n;
1389}
1390
1391static ssize_t class_rbd_add(struct class *c,
1392 struct class_attribute *attr,
1393 const char *buf, size_t count)
1394{
1395 struct ceph_osd_client *osdc;
1396 struct rbd_device *rbd_dev;
1397 ssize_t rc = -ENOMEM;
1398 int irc, new_id = 0;
1399 struct list_head *tmp;
1400 char *mon_dev_name;
1401 char *options;
1402
1403 if (!try_module_get(THIS_MODULE))
1404 return -ENODEV;
1405
1406 mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
1407 if (!mon_dev_name)
1408 goto err_out_mod;
1409
1410 options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
1411 if (!options)
1412 goto err_mon_dev;
1413
1414 /* new rbd_device object */
1415 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
1416 if (!rbd_dev)
1417 goto err_out_opt;
1418
1419 /* static rbd_device initialization */
1420 spin_lock_init(&rbd_dev->lock);
1421 INIT_LIST_HEAD(&rbd_dev->node);
1422
1423 /* generate unique id: find highest unique id, add one */
1424 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1425
1426 list_for_each(tmp, &rbd_dev_list) {
1427 struct rbd_device *rbd_dev;
1428
1429 rbd_dev = list_entry(tmp, struct rbd_device, node);
1430 if (rbd_dev->id >= new_id)
1431 new_id = rbd_dev->id + 1;
1432 }
1433
1434 rbd_dev->id = new_id;
1435
1436 /* add to global list */
1437 list_add_tail(&rbd_dev->node, &rbd_dev_list);
1438
1439 /* parse add command */
1440 if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s "
1441 "%" __stringify(RBD_MAX_OPT_LEN) "s "
1442 "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s "
1443 "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s"
1444 "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
1445 mon_dev_name, options, rbd_dev->pool_name,
1446 rbd_dev->obj, rbd_dev->snap_name) < 4) {
1447 rc = -EINVAL;
1448 goto err_out_slot;
1449 }
1450
1451 if (rbd_dev->snap_name[0] == 0)
1452 rbd_dev->snap_name[0] = '-';
1453
1454 rbd_dev->obj_len = strlen(rbd_dev->obj);
1455 snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s",
1456 rbd_dev->obj, RBD_SUFFIX);
1457
1458 /* initialize rest of new object */
1459 snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id);
1460 rc = rbd_get_client(rbd_dev, mon_dev_name, options);
1461 if (rc < 0)
1462 goto err_out_slot;
1463
1464 mutex_unlock(&ctl_mutex);
1465
1466 /* pick the pool */
1467 osdc = &rbd_dev->client->osdc;
1468 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
1469 if (rc < 0)
1470 goto err_out_client;
1471 rbd_dev->poolid = rc;
1472
1473 /* register our block device */
1474 irc = register_blkdev(0, rbd_dev->name);
1475 if (irc < 0) {
1476 rc = irc;
1477 goto err_out_client;
1478 }
1479 rbd_dev->major = irc;
1480
1481 /* set up and announce blkdev mapping */
1482 rc = rbd_init_disk(rbd_dev);
1483 if (rc)
1484 goto err_out_blkdev;
1485
1486 return count;
1487
1488err_out_blkdev:
1489 unregister_blkdev(rbd_dev->major, rbd_dev->name);
1490err_out_client:
1491 rbd_put_client(rbd_dev);
1492 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1493err_out_slot:
1494 list_del_init(&rbd_dev->node);
1495 mutex_unlock(&ctl_mutex);
1496
1497 kfree(rbd_dev);
1498err_out_opt:
1499 kfree(options);
1500err_mon_dev:
1501 kfree(mon_dev_name);
1502err_out_mod:
1503 dout("Error adding device %s\n", buf);
1504 module_put(THIS_MODULE);
1505 return rc;
1506}
1507
1508static struct rbd_device *__rbd_get_dev(unsigned long id)
1509{
1510 struct list_head *tmp;
1511 struct rbd_device *rbd_dev;
1512
1513 list_for_each(tmp, &rbd_dev_list) {
1514 rbd_dev = list_entry(tmp, struct rbd_device, node);
1515 if (rbd_dev->id == id)
1516 return rbd_dev;
1517 }
1518 return NULL;
1519}
1520
1521static ssize_t class_rbd_remove(struct class *c,
1522 struct class_attribute *attr,
1523 const char *buf,
1524 size_t count)
1525{
1526 struct rbd_device *rbd_dev = NULL;
1527 int target_id, rc;
1528 unsigned long ul;
1529
1530 rc = strict_strtoul(buf, 10, &ul);
1531 if (rc)
1532 return rc;
1533
1534 /* convert to int; abort if we lost anything in the conversion */
1535 target_id = (int) ul;
1536 if (target_id != ul)
1537 return -EINVAL;
1538
1539 /* remove object from list immediately */
1540 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1541
1542 rbd_dev = __rbd_get_dev(target_id);
1543 if (rbd_dev)
1544 list_del_init(&rbd_dev->node);
1545
1546 mutex_unlock(&ctl_mutex);
1547
1548 if (!rbd_dev)
1549 return -ENOENT;
1550
1551 rbd_put_client(rbd_dev);
1552
1553 /* clean up and free blkdev */
1554 rbd_free_disk(rbd_dev);
1555 unregister_blkdev(rbd_dev->major, rbd_dev->name);
1556 kfree(rbd_dev);
1557
1558 /* release module ref */
1559 module_put(THIS_MODULE);
1560
1561 return count;
1562}
1563
1564static ssize_t class_rbd_snaps_list(struct class *c,
1565 struct class_attribute *attr,
1566 char *data)
1567{
1568 struct rbd_device *rbd_dev = NULL;
1569 struct list_head *tmp;
1570 struct rbd_image_header *header;
1571 int i, n = 0, max = PAGE_SIZE;
1572 int ret;
1573
1574 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1575
1576 n += snprintf(data, max, "#id\tsnap\tKB\n");
1577
1578 list_for_each(tmp, &rbd_dev_list) {
1579 char *names, *p;
1580 struct ceph_snap_context *snapc;
1581
1582 rbd_dev = list_entry(tmp, struct rbd_device, node);
1583 header = &rbd_dev->header;
1584
1585 down_read(&header->snap_rwsem);
1586
1587 names = header->snap_names;
1588 snapc = header->snapc;
1589
1590 n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
1591 rbd_dev->id, RBD_SNAP_HEAD_NAME,
1592 header->image_size >> 10,
1593 (!rbd_dev->cur_snap ? " (*)" : ""));
1594 if (n == max)
1595 break;
1596
1597 p = names;
1598 for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
1599 n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
1600 rbd_dev->id, p, header->snap_sizes[i] >> 10,
1601 (rbd_dev->cur_snap &&
1602 (snap_index(header, i) == rbd_dev->cur_snap) ?
1603 " (*)" : ""));
1604 if (n == max)
1605 break;
1606 }
1607
1608 up_read(&header->snap_rwsem);
1609 }
1610
1611
1612 ret = n;
1613 mutex_unlock(&ctl_mutex);
1614 return ret;
1615}
1616
1617static ssize_t class_rbd_snaps_refresh(struct class *c,
1618 struct class_attribute *attr,
1619 const char *buf,
1620 size_t count)
1621{
1622 struct rbd_device *rbd_dev = NULL;
1623 int target_id, rc;
1624 unsigned long ul;
1625 int ret = count;
1626
1627 rc = strict_strtoul(buf, 10, &ul);
1628 if (rc)
1629 return rc;
1630
1631 /* convert to int; abort if we lost anything in the conversion */
1632 target_id = (int) ul;
1633 if (target_id != ul)
1634 return -EINVAL;
1635
1636 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1637
1638 rbd_dev = __rbd_get_dev(target_id);
1639 if (!rbd_dev) {
1640 ret = -ENOENT;
1641 goto done;
1642 }
1643
1644 rc = rbd_update_snaps(rbd_dev);
1645 if (rc < 0)
1646 ret = rc;
1647
1648done:
1649 mutex_unlock(&ctl_mutex);
1650 return ret;
1651}
1652
1653static ssize_t class_rbd_snap_create(struct class *c,
1654 struct class_attribute *attr,
1655 const char *buf,
1656 size_t count)
1657{
1658 struct rbd_device *rbd_dev = NULL;
1659 int target_id, ret;
1660 char *name;
1661
1662 name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
1663 if (!name)
1664 return -ENOMEM;
1665
1666 /* parse snap create command */
1667 if (sscanf(buf, "%d "
1668 "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
1669 &target_id,
1670 name) != 2) {
1671 ret = -EINVAL;
1672 goto done;
1673 }
1674
1675 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1676
1677 rbd_dev = __rbd_get_dev(target_id);
1678 if (!rbd_dev) {
1679 ret = -ENOENT;
1680 goto done_unlock;
1681 }
1682
1683 ret = rbd_header_add_snap(rbd_dev,
1684 name, GFP_KERNEL);
1685 if (ret < 0)
1686 goto done_unlock;
1687
1688 ret = rbd_update_snaps(rbd_dev);
1689 if (ret < 0)
1690 goto done_unlock;
1691
1692 ret = count;
1693done_unlock:
1694 mutex_unlock(&ctl_mutex);
1695done:
1696 kfree(name);
1697 return ret;
1698}
1699
1700static ssize_t class_rbd_rollback(struct class *c,
1701 struct class_attribute *attr,
1702 const char *buf,
1703 size_t count)
1704{
1705 struct rbd_device *rbd_dev = NULL;
1706 int target_id, ret;
1707 u64 snapid;
1708 char snap_name[RBD_MAX_SNAP_NAME_LEN + 1]; /* +1 for the NUL sscanf writes */
1709 u64 cur_ofs;
1710 char *seg_name;
1711
1712 /* parse snap rollback command */
1713 if (sscanf(buf, "%d "
1714 "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
1715 &target_id,
1716 snap_name) != 2) {
1717 return -EINVAL;
1718 }
1719
1720 ret = -ENOMEM;
1721 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
1722 if (!seg_name)
1723 return ret;
1724
1725 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1726
1727 rbd_dev = __rbd_get_dev(target_id);
1728 if (!rbd_dev) {
1729 ret = -ENOENT;
1730 goto done_unlock;
1731 }
1732
1733 ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
1734 if (ret < 0)
1735 goto done_unlock;
1736
1737 dout("snapid=%lld\n", snapid);
1738
1739 cur_ofs = 0;
1740 while (cur_ofs < rbd_dev->header.image_size) {
1741 cur_ofs += rbd_get_segment(&rbd_dev->header,
1742 rbd_dev->obj,
1743 cur_ofs, (u64)-1,
1744 seg_name, NULL);
1745 dout("seg_name=%s\n", seg_name);
1746
1747 ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
1748 if (ret < 0)
1749 pr_warning("could not roll back obj %s err=%d\n",
1750 seg_name, ret);
1751 }
1752
1753 ret = rbd_update_snaps(rbd_dev);
1754 if (ret < 0)
1755 goto done_unlock;
1756
1757 ret = count;
1758
1759done_unlock:
1760 mutex_unlock(&ctl_mutex);
1761 kfree(seg_name);
1762
1763 return ret;
1764}
1765
1766static struct class_attribute class_rbd_attrs[] = {
1767 __ATTR(add, 0200, NULL, class_rbd_add),
1768 __ATTR(remove, 0200, NULL, class_rbd_remove),
1769 __ATTR(list, 0444, class_rbd_list, NULL),
1770 __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh),
1771 __ATTR(snap_create, 0200, NULL, class_rbd_snap_create),
1772 __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL),
1773 __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback),
1774 __ATTR_NULL
1775};
1776
1777/*
1778 * create control files in sysfs
1779 * /sys/class/rbd/...
1780 */
1781static int rbd_sysfs_init(void)
1782{
1783 int ret = -ENOMEM;
1784
1785 class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
1786 if (!class_rbd)
1787 goto out;
1788
1789 class_rbd->name = DRV_NAME;
1790 class_rbd->owner = THIS_MODULE;
1791 class_rbd->class_release = class_rbd_release;
1792 class_rbd->class_attrs = class_rbd_attrs;
1793
1794 ret = class_register(class_rbd);
1795 if (ret)
1796 goto out_class;
1797 return 0;
1798
1799out_class:
1800 kfree(class_rbd);
1801 class_rbd = NULL;
1802 pr_err(DRV_NAME ": failed to create class rbd\n");
1803out:
1804 return ret;
1805}
1806
1807static void rbd_sysfs_cleanup(void)
1808{
1809 if (class_rbd)
1810 class_destroy(class_rbd);
1811 class_rbd = NULL;
1812}
1813
1814int __init rbd_init(void)
1815{
1816 int rc;
1817
1818 rc = rbd_sysfs_init();
1819 if (rc)
1820 return rc;
1821 spin_lock_init(&node_lock);
1822 pr_info("loaded " DRV_NAME_LONG "\n");
1823 return 0;
1824}
1825
1826void __exit rbd_exit(void)
1827{
1828 rbd_sysfs_cleanup();
1829}
1830
1831module_init(rbd_init);
1832module_exit(rbd_exit);
1833
1834MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1835MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1836MODULE_DESCRIPTION("rados block device");
1837
1838/* following authorship retained from original osdblk.c */
1839MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
1840
1841MODULE_LICENSE("GPL");
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
new file mode 100644
index 000000000000..fc6c678aa2cb
--- /dev/null
+++ b/drivers/block/rbd_types.h
@@ -0,0 +1,73 @@
1/*
2 * Ceph - scalable distributed file system
3 *
4 * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net>
5 *
6 * This is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License version 2.1, as published by the Free Software
9 * Foundation. See file COPYING.
10 *
11 */
12
13#ifndef CEPH_RBD_TYPES_H
14#define CEPH_RBD_TYPES_H
15
16#include <linux/types.h>
17
18/*
19 * rbd image 'foo' consists of objects
20 * foo.rbd - image metadata
21 * foo.00000000
22 * foo.00000001
23 * ... - data
24 */
25
26#define RBD_SUFFIX ".rbd"
27#define RBD_DIRECTORY "rbd_directory"
28#define RBD_INFO "rbd_info"
29
30#define RBD_DEFAULT_OBJ_ORDER 22 /* 4MB */
31#define RBD_MIN_OBJ_ORDER 16
32#define RBD_MAX_OBJ_ORDER 30
33
34#define RBD_MAX_OBJ_NAME_LEN 96
35#define RBD_MAX_SEG_NAME_LEN 128
36
37#define RBD_COMP_NONE 0
38#define RBD_CRYPT_NONE 0
39
40#define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n"
41#define RBD_HEADER_SIGNATURE "RBD"
42#define RBD_HEADER_VERSION "001.005"
43
44struct rbd_info {
45 __le64 max_id;
46} __attribute__ ((packed));
47
48struct rbd_image_snap_ondisk {
49 __le64 id;
50 __le64 image_size;
51} __attribute__((packed));
52
53struct rbd_image_header_ondisk {
54 char text[40];
55 char block_name[24];
56 char signature[4];
57 char version[8];
58 struct {
59 __u8 order;
60 __u8 crypt_type;
61 __u8 comp_type;
62 __u8 unused;
63 } __attribute__((packed)) options;
64 __le64 image_size;
65 __le64 snap_seq;
66 __le32 snap_count;
67 __le32 reserved;
68 __le64 snap_names_len;
69 struct rbd_image_snap_ondisk snaps[0];
70} __attribute__((packed));
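
/*
 * Sketch of the resulting on-disk layout: the fixed part above is
 * 112 bytes when packed; a header with N snapshots occupies
 *
 *   112 + N * 16 (struct rbd_image_snap_ondisk)
 *       + snap_names_len (the NUL-separated snapshot names)
 *
 * which is exactly what rbd_read_header() sizes its buffer to.
 */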
71
72
73#endif
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2aafafca2b13..8320490226b7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -2,7 +2,6 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
-#include <linux/smp_lock.h>
 #include <linux/hdreg.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
@@ -202,6 +201,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
  struct virtio_blk *vblk = disk->private_data;
  struct request *req;
  struct bio *bio;
+ int err;
 
  bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
  GFP_KERNEL);
@@ -215,11 +215,14 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
  }
 
  req->cmd_type = REQ_TYPE_SPECIAL;
- return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ blk_put_request(req);
+
+ return err;
 }
 
-static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long data)
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long data)
 {
  struct gendisk *disk = bdev->bd_disk;
  struct virtio_blk *vblk = disk->private_data;
@@ -234,18 +237,6 @@ static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
  (void __user *)data);
 }
 
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- lock_kernel();
- ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
- unlock_kernel();
-
- return ret;
-}
-
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 70312da4c968..564808a5c3c0 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -199,7 +199,7 @@ static void amd64_cleanup(void)
  struct pci_dev *dev = k8_northbridges[i];
  /* disable gart translation */
  pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
- tmp &= ~AMD64_GARTEN;
+ tmp &= ~GARTEN;
  pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
  }
 }
@@ -313,7 +313,7 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
  if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
  return -1;
 
- pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
+ gart_set_size_and_enable(nb, order);
  pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
 
  return 0;
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index d2abf5143983..64255cef8a7d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -984,7 +984,9 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
  bridge->driver->cache_flush();
 #ifdef CONFIG_X86
- set_memory_uc((unsigned long)table, 1 << page_order);
+ if (set_memory_uc((unsigned long)table, 1 << page_order))
+ printk(KERN_WARNING "Could not set GATT table memory to UC!");
+
  bridge->gatt_table = (void *)table;
 #else
  bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 05ad4a17a28f..7c4133582dba 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -47,6 +47,16 @@ enum tpm_duration {
 #define TPM_MAX_PROTECTED_ORDINAL 12
 #define TPM_PROTECTED_ORDINAL_MASK 0xFF
 
+/*
+ * Bug workaround - some TPM's don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+ "PCR to use for dummy writes to facilitate flush on suspend.");
+
 static LIST_HEAD(tpm_chip_list);
 static DEFINE_SPINLOCK(driver_lock);
 static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
@@ -1077,18 +1087,6 @@ static struct tpm_input_header savestate_header = {
  .ordinal = TPM_ORD_SAVESTATE
 };
 
-/* Bug workaround - some TPM's don't flush the most
- * recently changed pcr on suspend, so force the flush
- * with an extend to the selected _unused_ non-volatile pcr.
- */
-static int tpm_suspend_pcr;
-static int __init tpm_suspend_setup(char *str)
-{
- get_option(&str, &tpm_suspend_pcr);
- return 1;
-}
-__setup("tpm_suspend_pcr=", tpm_suspend_setup);
-
 /*
  * We are about to suspend. Save the TPM state
  * so that it can be restored.
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index c810481a5bc2..6c1b676643a9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -48,6 +48,9 @@ struct ports_driver_data {
48 /* Used for exporting per-port information to debugfs */ 48 /* Used for exporting per-port information to debugfs */
49 struct dentry *debugfs_dir; 49 struct dentry *debugfs_dir;
50 50
51 /* List of all the devices we're handling */
52 struct list_head portdevs;
53
51 /* Number of devices this driver is handling */ 54 /* Number of devices this driver is handling */
52 unsigned int index; 55 unsigned int index;
53 56
@@ -108,6 +111,9 @@ struct port_buffer {
108 * ports for that device (vdev->priv). 111 * ports for that device (vdev->priv).
109 */ 112 */
110struct ports_device { 113struct ports_device {
114 /* Next portdev in the list, head is in the pdrvdata struct */
115 struct list_head list;
116
111 /* 117 /*
112 * Workqueue handlers where we process deferred work after 118 * Workqueue handlers where we process deferred work after
113 * notification 119 * notification
@@ -178,15 +184,21 @@ struct port {
178 struct console cons; 184 struct console cons;
179 185
180 /* Each port associates with a separate char device */ 186 /* Each port associates with a separate char device */
181 struct cdev cdev; 187 struct cdev *cdev;
182 struct device *dev; 188 struct device *dev;
183 189
190 /* Reference-counting to handle port hot-unplugs and file operations */
191 struct kref kref;
192
184 /* A waitqueue for poll() or blocking read operations */ 193 /* A waitqueue for poll() or blocking read operations */
185 wait_queue_head_t waitqueue; 194 wait_queue_head_t waitqueue;
186 195
187 /* The 'name' of the port that we expose via sysfs properties */ 196 /* The 'name' of the port that we expose via sysfs properties */
188 char *name; 197 char *name;
189 198
199 /* We can notify apps of host connect / disconnect events via SIGIO */
200 struct fasync_struct *async_queue;
201
190 /* The 'id' to identify the port with the Host */ 202 /* The 'id' to identify the port with the Host */
191 u32 id; 203 u32 id;
192 204
@@ -221,6 +233,41 @@ out:
221 return port; 233 return port;
222} 234}
223 235
236static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
237 dev_t dev)
238{
239 struct port *port;
240 unsigned long flags;
241
242 spin_lock_irqsave(&portdev->ports_lock, flags);
243 list_for_each_entry(port, &portdev->ports, list)
244 if (port->cdev->dev == dev)
245 goto out;
246 port = NULL;
247out:
248 spin_unlock_irqrestore(&portdev->ports_lock, flags);
249
250 return port;
251}
252
253static struct port *find_port_by_devt(dev_t dev)
254{
255 struct ports_device *portdev;
256 struct port *port;
257 unsigned long flags;
258
259 spin_lock_irqsave(&pdrvdata_lock, flags);
260 list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
261 port = find_port_by_devt_in_portdev(portdev, dev);
262 if (port)
263 goto out;
264 }
265 port = NULL;
266out:
267 spin_unlock_irqrestore(&pdrvdata_lock, flags);
268 return port;
269}
270
224static struct port *find_port_by_id(struct ports_device *portdev, u32 id) 271static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
225{ 272{
226 struct port *port; 273 struct port *port;
@@ -410,7 +457,10 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
410static ssize_t send_control_msg(struct port *port, unsigned int event, 457static ssize_t send_control_msg(struct port *port, unsigned int event,
411 unsigned int value) 458 unsigned int value)
412{ 459{
413 return __send_control_msg(port->portdev, port->id, event, value); 460 /* Did the port get unplugged before userspace closed it? */
461 if (port->portdev)
462 return __send_control_msg(port->portdev, port->id, event, value);
463 return 0;
414} 464}
415 465
416/* Callers must take the port->outvq_lock */ 466/* Callers must take the port->outvq_lock */
@@ -459,9 +509,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
459 509
460 /* 510 /*
461 * Wait till the host acknowledges it pushed out the data we 511 * Wait till the host acknowledges it pushed out the data we
462 * sent. This is done for ports in blocking mode or for data 512 * sent. This is done for data from the hvc_console; the tty
463 * from the hvc_console; the tty operations are performed with 513 * operations are performed with spinlocks held so we can't
464 * spinlocks held so we can't sleep here. 514 * sleep here. An alternative would be to copy the data to a
515 * buffer and relax the spinning requirement. The downside is
516 * we need to kmalloc a GFP_ATOMIC buffer each time the
517 * console driver writes something out.
465 */ 518 */
466 while (!virtqueue_get_buf(out_vq, &len)) 519 while (!virtqueue_get_buf(out_vq, &len))
467 cpu_relax(); 520 cpu_relax();
@@ -522,6 +575,10 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
522/* The condition that must be true for polling to end */ 575/* The condition that must be true for polling to end */
523static bool will_read_block(struct port *port) 576static bool will_read_block(struct port *port)
524{ 577{
578 if (!port->guest_connected) {
579 /* Port got hot-unplugged. Let's exit. */
580 return false;
581 }
525 return !port_has_data(port) && port->host_connected; 582 return !port_has_data(port) && port->host_connected;
526} 583}
527 584
@@ -572,6 +629,9 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
572 if (ret < 0) 629 if (ret < 0)
573 return ret; 630 return ret;
574 } 631 }
632 /* Port got hot-unplugged. */
633 if (!port->guest_connected)
634 return -ENODEV;
575 /* 635 /*
576 * We could've received a disconnection message while we were 636 * We could've received a disconnection message while we were
577 * waiting for more data. 637 * waiting for more data.
@@ -613,6 +673,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
613 if (ret < 0) 673 if (ret < 0)
614 return ret; 674 return ret;
615 } 675 }
676 /* Port got hot-unplugged. */
677 if (!port->guest_connected)
678 return -ENODEV;
616 679
617 count = min((size_t)(32 * 1024), count); 680 count = min((size_t)(32 * 1024), count);
618 681
@@ -626,6 +689,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
626 goto free_buf; 689 goto free_buf;
627 } 690 }
628 691
692 /*
693 * We now ask send_buf() to not spin for generic ports -- we
694 * can re-use the same code path that non-blocking file
695 * descriptors take for blocking file descriptors since the
696 * wait is already done and we're certain the write will go
697 * through to the host.
698 */
699 nonblock = true;
629 ret = send_buf(port, buf, count, nonblock); 700 ret = send_buf(port, buf, count, nonblock);
630 701
631 if (nonblock && ret > 0) 702 if (nonblock && ret > 0)
@@ -645,6 +716,10 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
645 port = filp->private_data; 716 port = filp->private_data;
646 poll_wait(filp, &port->waitqueue, wait); 717 poll_wait(filp, &port->waitqueue, wait);
647 718
719 if (!port->guest_connected) {
720 /* Port got unplugged */
721 return POLLHUP;
722 }
648 ret = 0; 723 ret = 0;
649 if (!will_read_block(port)) 724 if (!will_read_block(port))
650 ret |= POLLIN | POLLRDNORM; 725 ret |= POLLIN | POLLRDNORM;
@@ -656,6 +731,8 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
656 return ret; 731 return ret;
657} 732}
658 733
734static void remove_port(struct kref *kref);
735
659static int port_fops_release(struct inode *inode, struct file *filp) 736static int port_fops_release(struct inode *inode, struct file *filp)
660{ 737{
661 struct port *port; 738 struct port *port;
@@ -676,6 +753,16 @@ static int port_fops_release(struct inode *inode, struct file *filp)
676 reclaim_consumed_buffers(port); 753 reclaim_consumed_buffers(port);
677 spin_unlock_irq(&port->outvq_lock); 754 spin_unlock_irq(&port->outvq_lock);
678 755
756 /*
757 * Locks aren't necessary here as a port can't be opened after
758 * unplug, and if a port isn't unplugged, a kref would already
759 * exist for the port. Plus, taking ports_lock here would
760 * create a dependency on other locks taken by functions
761 * inside remove_port if we're the last holder of the port,
762 * creating many problems.
763 */
764 kref_put(&port->kref, remove_port);
765
679 return 0; 766 return 0;
680} 767}
681 768
@@ -683,22 +770,31 @@ static int port_fops_open(struct inode *inode, struct file *filp)
683{ 770{
684 struct cdev *cdev = inode->i_cdev; 771 struct cdev *cdev = inode->i_cdev;
685 struct port *port; 772 struct port *port;
773 int ret;
686 774
687 port = container_of(cdev, struct port, cdev); 775 port = find_port_by_devt(cdev->dev);
688 filp->private_data = port; 776 filp->private_data = port;
689 777
778 /* Prevent against a port getting hot-unplugged at the same time */
779 spin_lock_irq(&port->portdev->ports_lock);
780 kref_get(&port->kref);
781 spin_unlock_irq(&port->portdev->ports_lock);
782
690 /* 783 /*
691 * Don't allow opening of console port devices -- that's done 784 * Don't allow opening of console port devices -- that's done
692 * via /dev/hvc 785 * via /dev/hvc
693 */ 786 */
694 if (is_console_port(port)) 787 if (is_console_port(port)) {
695 return -ENXIO; 788 ret = -ENXIO;
789 goto out;
790 }
696 791
697 /* Allow only one process to open a particular port at a time */ 792 /* Allow only one process to open a particular port at a time */
698 spin_lock_irq(&port->inbuf_lock); 793 spin_lock_irq(&port->inbuf_lock);
699 if (port->guest_connected) { 794 if (port->guest_connected) {
700 spin_unlock_irq(&port->inbuf_lock); 795 spin_unlock_irq(&port->inbuf_lock);
701 return -EMFILE; 796 ret = -EMFILE;
797 goto out;
702 } 798 }
703 799
704 port->guest_connected = true; 800 port->guest_connected = true;
@@ -713,10 +809,23 @@ static int port_fops_open(struct inode *inode, struct file *filp)
713 reclaim_consumed_buffers(port); 809 reclaim_consumed_buffers(port);
714 spin_unlock_irq(&port->outvq_lock); 810 spin_unlock_irq(&port->outvq_lock);
715 811
812 nonseekable_open(inode, filp);
813
716 /* Notify host of port being opened */ 814 /* Notify host of port being opened */
717 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); 815 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
718 816
719 return 0; 817 return 0;
818out:
819 kref_put(&port->kref, remove_port);
820 return ret;
821}
822
823static int port_fops_fasync(int fd, struct file *filp, int mode)
824{
825 struct port *port;
826
827 port = filp->private_data;
828 return fasync_helper(fd, filp, mode, &port->async_queue);
720} 829}
721 830
722/* 831/*
@@ -732,6 +841,8 @@ static const struct file_operations port_fops = {
732 .write = port_fops_write, 841 .write = port_fops_write,
733 .poll = port_fops_poll, 842 .poll = port_fops_poll,
734 .release = port_fops_release, 843 .release = port_fops_release,
844 .fasync = port_fops_fasync,
845 .llseek = no_llseek,
735}; 846};
736 847
737/* 848/*
@@ -990,6 +1101,12 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
990 return nr_added_bufs; 1101 return nr_added_bufs;
991} 1102}
992 1103
1104static void send_sigio_to_port(struct port *port)
1105{
1106 if (port->async_queue && port->guest_connected)
1107 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1108}
1109
993static int add_port(struct ports_device *portdev, u32 id) 1110static int add_port(struct ports_device *portdev, u32 id)
994{ 1111{
995 char debugfs_name[16]; 1112 char debugfs_name[16];
@@ -1004,6 +1121,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1004 err = -ENOMEM; 1121 err = -ENOMEM;
1005 goto fail; 1122 goto fail;
1006 } 1123 }
1124 kref_init(&port->kref);
1007 1125
1008 port->portdev = portdev; 1126 port->portdev = portdev;
1009 port->id = id; 1127 port->id = id;
@@ -1011,6 +1129,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1011 port->name = NULL; 1129 port->name = NULL;
1012 port->inbuf = NULL; 1130 port->inbuf = NULL;
1013 port->cons.hvc = NULL; 1131 port->cons.hvc = NULL;
1132 port->async_queue = NULL;
1014 1133
1015 port->cons.ws.ws_row = port->cons.ws.ws_col = 0; 1134 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1016 1135
@@ -1021,14 +1140,20 @@ static int add_port(struct ports_device *portdev, u32 id)
1021 port->in_vq = portdev->in_vqs[port->id]; 1140 port->in_vq = portdev->in_vqs[port->id];
1022 port->out_vq = portdev->out_vqs[port->id]; 1141 port->out_vq = portdev->out_vqs[port->id];
1023 1142
1024 cdev_init(&port->cdev, &port_fops); 1143 port->cdev = cdev_alloc();
1144 if (!port->cdev) {
1145 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1146 err = -ENOMEM;
1147 goto free_port;
1148 }
1149 port->cdev->ops = &port_fops;
1025 1150
1026 devt = MKDEV(portdev->chr_major, id); 1151 devt = MKDEV(portdev->chr_major, id);
1027 err = cdev_add(&port->cdev, devt, 1); 1152 err = cdev_add(port->cdev, devt, 1);
1028 if (err < 0) { 1153 if (err < 0) {
1029 dev_err(&port->portdev->vdev->dev, 1154 dev_err(&port->portdev->vdev->dev,
1030 "Error %d adding cdev for port %u\n", err, id); 1155 "Error %d adding cdev for port %u\n", err, id);
1031 goto free_port; 1156 goto free_cdev;
1032 } 1157 }
1033 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, 1158 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1034 devt, port, "vport%up%u", 1159 devt, port, "vport%up%u",
@@ -1093,7 +1218,7 @@ free_inbufs:
1093free_device: 1218free_device:
1094 device_destroy(pdrvdata.class, port->dev->devt); 1219 device_destroy(pdrvdata.class, port->dev->devt);
1095free_cdev: 1220free_cdev:
1096 cdev_del(&port->cdev); 1221 cdev_del(port->cdev);
1097free_port: 1222free_port:
1098 kfree(port); 1223 kfree(port);
1099fail: 1224fail:
@@ -1102,21 +1227,45 @@ fail:
1102 return err; 1227 return err;
1103} 1228}
1104 1229
1105/* Remove all port-specific data. */ 1230/* No users remain, remove all port-specific data. */
1106static int remove_port(struct port *port) 1231static void remove_port(struct kref *kref)
1232{
1233 struct port *port;
1234
1235 port = container_of(kref, struct port, kref);
1236
1237 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1238 device_destroy(pdrvdata.class, port->dev->devt);
1239 cdev_del(port->cdev);
1240
1241 kfree(port->name);
1242
1243 debugfs_remove(port->debugfs_file);
1244
1245 kfree(port);
1246}
1247
1248/*
1249 *
1250 * Port got unplugged. Remove port from portdev's list and drop the
1251 * kref reference. If no userspace has this port opened, it will
1252 * result in immediate removal of the port.
1253 */
1253static void unplug_port(struct port *port)
1107{ 1254{
1108 struct port_buffer *buf; 1255 struct port_buffer *buf;
1109 1256
1257 spin_lock_irq(&port->portdev->ports_lock);
1258 list_del(&port->list);
1259 spin_unlock_irq(&port->portdev->ports_lock);
1260
1110 if (port->guest_connected) { 1261 if (port->guest_connected) {
1111 port->guest_connected = false; 1262 port->guest_connected = false;
1112 port->host_connected = false; 1263 port->host_connected = false;
1113 wake_up_interruptible(&port->waitqueue); 1264 wake_up_interruptible(&port->waitqueue);
1114 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
1115 }
1116 1265
1117 spin_lock_irq(&port->portdev->ports_lock); 1266 /* Let the app know the port is going down. */
1118 list_del(&port->list); 1267 send_sigio_to_port(port);
1119 spin_unlock_irq(&port->portdev->ports_lock); 1268 }
1120 1269
1121 if (is_console_port(port)) { 1270 if (is_console_port(port)) {
1122 spin_lock_irq(&pdrvdata_lock); 1271 spin_lock_irq(&pdrvdata_lock);
@@ -1135,9 +1284,6 @@ static int remove_port(struct port *port)
1135 hvc_remove(port->cons.hvc); 1284 hvc_remove(port->cons.hvc);
1136#endif 1285#endif
1137 } 1286 }
1138 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1139 device_destroy(pdrvdata.class, port->dev->devt);
1140 cdev_del(&port->cdev);
1141 1287
1142 /* Remove unused data this port might have received. */ 1288 /* Remove unused data this port might have received. */
1143 discard_port_data(port); 1289 discard_port_data(port);
@@ -1148,12 +1294,19 @@ static int remove_port(struct port *port)
1148 while ((buf = virtqueue_detach_unused_buf(port->in_vq))) 1294 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1149 free_buf(buf); 1295 free_buf(buf);
1150 1296
1151 kfree(port->name); 1297 /*
1152 1298 * We should just assume the device itself has gone off --
1153 debugfs_remove(port->debugfs_file); 1299 * else a close on an open port later will try to send out a
1300 * control message.
1301 */
1302 port->portdev = NULL;
1154 1303
1155 kfree(port); 1304 /*
1156 return 0; 1305 * Locks around here are not necessary - a port can't be
1306 * opened after we removed the port struct from ports_list
1307 * above.
1308 */
1309 kref_put(&port->kref, remove_port);
1157} 1310}
1158 1311
1159/* Any private messages that the Host and Guest want to share */ 1312/* Any private messages that the Host and Guest want to share */
@@ -1192,7 +1345,7 @@ static void handle_control_message(struct ports_device *portdev,
1192 add_port(portdev, cpkt->id); 1345 add_port(portdev, cpkt->id);
1193 break; 1346 break;
1194 case VIRTIO_CONSOLE_PORT_REMOVE: 1347 case VIRTIO_CONSOLE_PORT_REMOVE:
1195 remove_port(port); 1348 unplug_port(port);
1196 break; 1349 break;
1197 case VIRTIO_CONSOLE_CONSOLE_PORT: 1350 case VIRTIO_CONSOLE_CONSOLE_PORT:
1198 if (!cpkt->value) 1351 if (!cpkt->value)
@@ -1234,6 +1387,12 @@ static void handle_control_message(struct ports_device *portdev,
1234 spin_lock_irq(&port->outvq_lock); 1387 spin_lock_irq(&port->outvq_lock);
1235 reclaim_consumed_buffers(port); 1388 reclaim_consumed_buffers(port);
1236 spin_unlock_irq(&port->outvq_lock); 1389 spin_unlock_irq(&port->outvq_lock);
1390
1391 /*
1392 * If the guest is connected, it'll be interested in
1393 * knowing the host connection state changed.
1394 */
1395 send_sigio_to_port(port);
1237 break; 1396 break;
1238 case VIRTIO_CONSOLE_PORT_NAME: 1397 case VIRTIO_CONSOLE_PORT_NAME:
1239 /* 1398 /*
@@ -1330,6 +1489,9 @@ static void in_intr(struct virtqueue *vq)
1330 1489
1331 wake_up_interruptible(&port->waitqueue); 1490 wake_up_interruptible(&port->waitqueue);
1332 1491
1492 /* Send a SIGIO indicating new data in case the process asked for it */
1493 send_sigio_to_port(port);
1494
1333 if (is_console_port(port) && hvc_poll(port->cons.hvc)) 1495 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1334 hvc_kick(); 1496 hvc_kick();
1335} 1497}
@@ -1566,6 +1728,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
1566 add_port(portdev, 0); 1728 add_port(portdev, 0);
1567 } 1729 }
1568 1730
1731 spin_lock_irq(&pdrvdata_lock);
1732 list_add_tail(&portdev->list, &pdrvdata.portdevs);
1733 spin_unlock_irq(&pdrvdata_lock);
1734
1569 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, 1735 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1570 VIRTIO_CONSOLE_DEVICE_READY, 1); 1736 VIRTIO_CONSOLE_DEVICE_READY, 1);
1571 return 0; 1737 return 0;
@@ -1589,23 +1755,41 @@ static void virtcons_remove(struct virtio_device *vdev)
1589{ 1755{
1590 struct ports_device *portdev; 1756 struct ports_device *portdev;
1591 struct port *port, *port2; 1757 struct port *port, *port2;
1592 struct port_buffer *buf;
1593 unsigned int len;
1594 1758
1595 portdev = vdev->priv; 1759 portdev = vdev->priv;
1596 1760
1761 spin_lock_irq(&pdrvdata_lock);
1762 list_del(&portdev->list);
1763 spin_unlock_irq(&pdrvdata_lock);
1764
1765 /* Disable interrupts for vqs */
1766 vdev->config->reset(vdev);
1767 /* Finish up work that's lined up */
1597 cancel_work_sync(&portdev->control_work); 1768 cancel_work_sync(&portdev->control_work);
1598 1769
1599 list_for_each_entry_safe(port, port2, &portdev->ports, list) 1770 list_for_each_entry_safe(port, port2, &portdev->ports, list)
1600 remove_port(port); 1771 unplug_port(port);
1601 1772
1602 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 1773 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1603 1774
1604 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1775 /*
1605 free_buf(buf); 1776 * When yanking out a device, we immediately lose the
1777 * (device-side) queues. So there's no point in keeping the
1778 * guest side around till we drop our final reference. This
1779 * also means that any ports which are in an open state will
1780 * have to just stop using the port, as the vqs are going
1781 * away.
1782 */
1783 if (use_multiport(portdev)) {
1784 struct port_buffer *buf;
1785 unsigned int len;
1606 1786
1607 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 1787 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1608 free_buf(buf); 1788 free_buf(buf);
1789
1790 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1791 free_buf(buf);
1792 }
1609 1793
1610 vdev->config->del_vqs(vdev); 1794 vdev->config->del_vqs(vdev);
1611 kfree(portdev->in_vqs); 1795 kfree(portdev->in_vqs);
@@ -1652,6 +1836,7 @@ static int __init init(void)
1652 PTR_ERR(pdrvdata.debugfs_dir)); 1836 PTR_ERR(pdrvdata.debugfs_dir));
1653 } 1837 }
1654 INIT_LIST_HEAD(&pdrvdata.consoles); 1838 INIT_LIST_HEAD(&pdrvdata.consoles);
1839 INIT_LIST_HEAD(&pdrvdata.portdevs);
1655 1840
1656 return register_virtio_driver(&virtio_console); 1841 return register_virtio_driver(&virtio_console);
1657} 1842}
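
The virtio_console changes above move port teardown onto a reference count: add_port() takes the initial kref, an open fd holds another, and remove_port() only runs from the final kref_put(), so a hot-unplug racing a late close() is safe. The following is a minimal userspace analogue of that get/put-with-release pattern (plain C11 atomics standing in for struct kref; illustrative, not the kernel API):

/* Last put runs the release callback exactly once, so "unplug" and a
 * late "close" can drop their references in either order. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
	atomic_int refs;
	int id;
};

static void port_release(struct port *p)
{
	printf("releasing port %d\n", p->id);
	free(p);
}

static void port_get(struct port *p)
{
	atomic_fetch_add(&p->refs, 1);
}

static void port_put(struct port *p)
{
	if (atomic_fetch_sub(&p->refs, 1) == 1)	/* last reference */
		port_release(p);
}

int main(void)
{
	struct port *p = malloc(sizeof(*p));

	atomic_init(&p->refs, 1);	/* initial ref, as kref_init() takes */
	p->id = 0;

	port_get(p);	/* open(): the fd pins the struct          */
	port_put(p);	/* unplug_port(): drop the initial ref     */
	port_put(p);	/* close(): last put triggers port_release */
	return 0;
}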
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 216f9d383b5b..effd140fc042 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
879 dma->device_issue_pending = ioat2_issue_pending; 879 dma->device_issue_pending = ioat2_issue_pending;
880 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 880 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
881 dma->device_free_chan_resources = ioat2_free_chan_resources; 881 dma->device_free_chan_resources = ioat2_free_chan_resources;
882 dma->device_tx_status = ioat_tx_status; 882 dma->device_tx_status = ioat_dma_tx_status;
883 883
884 err = ioat_probe(device); 884 err = ioat_probe(device);
885 if (err) 885 if (err)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 1b05896648bc..9dcb17d51aee 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2840,7 +2840,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2840 const struct pci_device_id *ent) 2840 const struct pci_device_id *ent)
2841{ 2841{
2842 struct fw_ohci *ohci; 2842 struct fw_ohci *ohci;
2843 u32 bus_options, max_receive, link_speed, version, link_enh; 2843 u32 bus_options, max_receive, link_speed, version;
2844 u64 guid; 2844 u64 guid;
2845 int i, err, n_ir, n_it; 2845 int i, err, n_ir, n_it;
2846 size_t size; 2846 size_t size;
@@ -2894,23 +2894,6 @@ static int __devinit pci_probe(struct pci_dev *dev,
2894 if (param_quirks) 2894 if (param_quirks)
2895 ohci->quirks = param_quirks; 2895 ohci->quirks = param_quirks;
2896 2896
2897 /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
2898 if (dev->vendor == PCI_VENDOR_ID_TI) {
2899 pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
2900
2901 /* adjust latency of ATx FIFO: use 1.7 KB threshold */
2902 link_enh &= ~TI_LinkEnh_atx_thresh_mask;
2903 link_enh |= TI_LinkEnh_atx_thresh_1_7K;
2904
2905 /* use priority arbitration for asynchronous responses */
2906 link_enh |= TI_LinkEnh_enab_unfair;
2907
2908 /* required for aPhyEnhanceEnable to work */
2909 link_enh |= TI_LinkEnh_enab_accel;
2910
2911 pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
2912 }
2913
2914 ar_context_init(&ohci->ar_request_ctx, ohci, 2897 ar_context_init(&ohci->ar_request_ctx, ohci,
2915 OHCI1394_AsReqRcvContextControlSet); 2898 OHCI1394_AsReqRcvContextControlSet);
2916 2899
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 0e6c5a466908..ef5e7336da68 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -155,12 +155,4 @@
155 155
156#define OHCI1394_phy_tcode 0xe 156#define OHCI1394_phy_tcode 0xe
157 157
158/* TI extensions */
159
160#define PCI_CFG_TI_LinkEnh 0xf4
161#define TI_LinkEnh_enab_accel 0x00000002
162#define TI_LinkEnh_enab_unfair 0x00000080
163#define TI_LinkEnh_atx_thresh_mask 0x00003000
164#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
165
166#endif /* _FIREWIRE_OHCI_H */ 158#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c74e4e8006d4..2dd2c93ebfa3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2231,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2231 dev_priv->mchdev_lock = &mchdev_lock; 2231 dev_priv->mchdev_lock = &mchdev_lock;
2232 spin_unlock(&mchdev_lock); 2232 spin_unlock(&mchdev_lock);
2233 2233
2234 /* XXX Prevent module unload due to memory corruption bugs. */
2235 __module_get(THIS_MODULE);
2236
2234 return 0; 2237 return 0;
2235 2238
2236out_workqueue_free: 2239out_workqueue_free:
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 56ad9df2ccb5..b61966c126d3 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,
238 238
239 drm_framebuffer_cleanup(&ifb->base); 239 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) { 240 if (ifb->obj) {
241 drm_gem_object_handle_unreference(ifb->obj);
242 drm_gem_object_unreference(ifb->obj); 241 drm_gem_object_unreference(ifb->obj);
242 ifb->obj = NULL;
243 } 243 }
244 244
245 return 0; 245 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d2047713dc59..dbd30b2e43fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
352 352
353 if (nouveau_fb->nvbo) { 353 if (nouveau_fb->nvbo) {
354 nouveau_bo_unmap(nouveau_fb->nvbo); 354 nouveau_bo_unmap(nouveau_fb->nvbo);
355 drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
356 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 355 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
357 nouveau_fb->nvbo = NULL; 356 nouveau_fb->nvbo = NULL;
358 } 357 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3c9964a8fbad..3ec181ff50ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
79 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
80 nouveau_bo_unpin(chan->notifier_bo); 80 nouveau_bo_unpin(chan->notifier_bo);
81 mutex_unlock(&dev->struct_mutex); 81 mutex_unlock(&dev->struct_mutex);
82 drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
83 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
84 drm_mm_takedown(&chan->notifier_heap); 83 drm_mm_takedown(&chan->notifier_heap);
85} 84}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 79082d4398ae..2f93d46ae69a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1137,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1137 1137
1138 WREG32(RCU_IND_INDEX, 0x203); 1138 WREG32(RCU_IND_INDEX, 0x203);
1139 efuse_straps_3 = RREG32(RCU_IND_DATA); 1139 efuse_straps_3 = RREG32(RCU_IND_DATA);
1140 efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; 1140 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1141 1141
1142 switch(efuse_box_bit_127_124) { 1142 switch(efuse_box_bit_127_124) {
1143 case 0x0: 1143 case 0x0:
@@ -1407,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
1407 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1407 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1408 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1408 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1409 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1409 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1410 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1410 r600_vram_gtt_location(rdev, &rdev->mc); 1411 r600_vram_gtt_location(rdev, &rdev->mc);
1411 radeon_update_bandwidth_info(rdev); 1412 radeon_update_bandwidth_info(rdev);
1412 1413
@@ -1520,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
1520{ 1521{
1521 u32 tmp; 1522 u32 tmp;
1522 1523
1523 WREG32(CP_INT_CNTL, 0); 1524 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
1524 WREG32(GRBM_INT_CNTL, 0); 1525 WREG32(GRBM_INT_CNTL, 0);
1525 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 1526 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1526 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 1527 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e151f16a8f86..e59422320bb6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1030 return r; 1030 return r;
1031 } 1031 }
1032 rdev->cp.ready = true; 1032 rdev->cp.ready = true;
1033 rdev->mc.active_vram_size = rdev->mc.real_vram_size;
1033 return 0; 1034 return 0;
1034} 1035}
1035 1036
@@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev)
1047void r100_cp_disable(struct radeon_device *rdev) 1048void r100_cp_disable(struct radeon_device *rdev)
1048{ 1049{
1049 /* Disable ring */ 1050 /* Disable ring */
1051 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1050 rdev->cp.ready = false; 1052 rdev->cp.ready = false;
1051 WREG32(RADEON_CP_CSQ_MODE, 0); 1053 WREG32(RADEON_CP_CSQ_MODE, 0);
1052 WREG32(RADEON_CP_CSQ_CNTL, 0); 1054 WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2295,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2295 /* FIXME we don't use the second aperture yet when we could use it */ 2297 /* FIXME we don't use the second aperture yet when we could use it */
2296 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2298 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2297 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2299 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2300 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2298 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2301 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2299 if (rdev->flags & RADEON_IS_IGP) { 2302 if (rdev->flags & RADEON_IS_IGP) {
2300 uint32_t tom; 2303 uint32_t tom;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7a04959ba0ee..7b65e4efe8af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev)
1248 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1248 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1249 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1249 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1250 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1250 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1251 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1251 r600_vram_gtt_location(rdev, &rdev->mc); 1252 r600_vram_gtt_location(rdev, &rdev->mc);
1252 1253
1253 if (rdev->flags & RADEON_IS_IGP) { 1254 if (rdev->flags & RADEON_IS_IGP) {
@@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1917 */ 1918 */
1918void r600_cp_stop(struct radeon_device *rdev) 1919void r600_cp_stop(struct radeon_device *rdev)
1919{ 1920{
1921 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1920 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1922 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1921} 1923}
1922 1924
@@ -2910,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2910{ 2912{
2911 u32 tmp; 2913 u32 tmp;
2912 2914
2913 WREG32(CP_INT_CNTL, 0); 2915 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2914 WREG32(GRBM_INT_CNTL, 0); 2916 WREG32(GRBM_INT_CNTL, 0);
2915 WREG32(DxMODE_INT_MASK, 0); 2917 WREG32(DxMODE_INT_MASK, 0);
2916 if (ASIC_IS_DCE3(rdev)) { 2918 if (ASIC_IS_DCE3(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9ceb2a1ce799..3473c00781ff 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -532,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev)
532 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 532 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
533 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 533 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
534 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 534 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
535 rdev->mc.active_vram_size = rdev->mc.real_vram_size;
535 return 0; 536 return 0;
536} 537}
537 538
@@ -539,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev)
539{ 540{
540 int r; 541 int r;
541 542
543 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
542 if (rdev->r600_blit.shader_obj == NULL) 544 if (rdev->r600_blit.shader_obj == NULL)
543 return; 545 return;
544 /* If we can't reserve the bo, unref should be enough to destroy 546 /* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a168d644bf9e..9ff38c99a6ea 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -344,6 +344,7 @@ struct radeon_mc {
344 * about vram size near mc fb location */ 344 * about vram size near mc fb location */
345 u64 mc_vram_size; 345 u64 mc_vram_size;
346 u64 visible_vram_size; 346 u64 visible_vram_size;
347 u64 active_vram_size;
347 u64 gtt_size; 348 u64 gtt_size;
348 u64 gtt_start; 349 u64 gtt_start;
349 u64 gtt_end; 350 u64 gtt_end;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 68932ba7b8a4..8e43ddae70cc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1558,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
1558 switch (tv_info->ucTV_BootUpDefaultStandard) { 1558 switch (tv_info->ucTV_BootUpDefaultStandard) {
1559 case ATOM_TV_NTSC: 1559 case ATOM_TV_NTSC:
1560 tv_std = TV_STD_NTSC; 1560 tv_std = TV_STD_NTSC;
1561 DRM_INFO("Default TV standard: NTSC\n"); 1561 DRM_DEBUG_KMS("Default TV standard: NTSC\n");
1562 break; 1562 break;
1563 case ATOM_TV_NTSCJ: 1563 case ATOM_TV_NTSCJ:
1564 tv_std = TV_STD_NTSC_J; 1564 tv_std = TV_STD_NTSC_J;
1565 DRM_INFO("Default TV standard: NTSC-J\n"); 1565 DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
1566 break; 1566 break;
1567 case ATOM_TV_PAL: 1567 case ATOM_TV_PAL:
1568 tv_std = TV_STD_PAL; 1568 tv_std = TV_STD_PAL;
1569 DRM_INFO("Default TV standard: PAL\n"); 1569 DRM_DEBUG_KMS("Default TV standard: PAL\n");
1570 break; 1570 break;
1571 case ATOM_TV_PALM: 1571 case ATOM_TV_PALM:
1572 tv_std = TV_STD_PAL_M; 1572 tv_std = TV_STD_PAL_M;
1573 DRM_INFO("Default TV standard: PAL-M\n"); 1573 DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
1574 break; 1574 break;
1575 case ATOM_TV_PALN: 1575 case ATOM_TV_PALN:
1576 tv_std = TV_STD_PAL_N; 1576 tv_std = TV_STD_PAL_N;
1577 DRM_INFO("Default TV standard: PAL-N\n"); 1577 DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
1578 break; 1578 break;
1579 case ATOM_TV_PALCN: 1579 case ATOM_TV_PALCN:
1580 tv_std = TV_STD_PAL_CN; 1580 tv_std = TV_STD_PAL_CN;
1581 DRM_INFO("Default TV standard: PAL-CN\n"); 1581 DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
1582 break; 1582 break;
1583 case ATOM_TV_PAL60: 1583 case ATOM_TV_PAL60:
1584 tv_std = TV_STD_PAL_60; 1584 tv_std = TV_STD_PAL_60;
1585 DRM_INFO("Default TV standard: PAL-60\n"); 1585 DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
1586 break; 1586 break;
1587 case ATOM_TV_SECAM: 1587 case ATOM_TV_SECAM:
1588 tv_std = TV_STD_SECAM; 1588 tv_std = TV_STD_SECAM;
1589 DRM_INFO("Default TV standard: SECAM\n"); 1589 DRM_DEBUG_KMS("Default TV standard: SECAM\n");
1590 break; 1590 break;
1591 default: 1591 default:
1592 tv_std = TV_STD_NTSC; 1592 tv_std = TV_STD_NTSC;
1593 DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); 1593 DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
1594 break; 1594 break;
1595 } 1595 }
1596 } 1596 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a04b7a6ad95f..7b7ea269549c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
913 switch (RBIOS8(tv_info + 7) & 0xf) { 913 switch (RBIOS8(tv_info + 7) & 0xf) {
914 case 1: 914 case 1:
915 tv_std = TV_STD_NTSC; 915 tv_std = TV_STD_NTSC;
916 DRM_INFO("Default TV standard: NTSC\n"); 916 DRM_DEBUG_KMS("Default TV standard: NTSC\n");
917 break; 917 break;
918 case 2: 918 case 2:
919 tv_std = TV_STD_PAL; 919 tv_std = TV_STD_PAL;
920 DRM_INFO("Default TV standard: PAL\n"); 920 DRM_DEBUG_KMS("Default TV standard: PAL\n");
921 break; 921 break;
922 case 3: 922 case 3:
923 tv_std = TV_STD_PAL_M; 923 tv_std = TV_STD_PAL_M;
924 DRM_INFO("Default TV standard: PAL-M\n"); 924 DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
925 break; 925 break;
926 case 4: 926 case 4:
927 tv_std = TV_STD_PAL_60; 927 tv_std = TV_STD_PAL_60;
928 DRM_INFO("Default TV standard: PAL-60\n"); 928 DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
929 break; 929 break;
930 case 5: 930 case 5:
931 tv_std = TV_STD_NTSC_J; 931 tv_std = TV_STD_NTSC_J;
932 DRM_INFO("Default TV standard: NTSC-J\n"); 932 DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
933 break; 933 break;
934 case 6: 934 case 6:
935 tv_std = TV_STD_SCART_PAL; 935 tv_std = TV_STD_SCART_PAL;
936 DRM_INFO("Default TV standard: SCART-PAL\n"); 936 DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
937 break; 937 break;
938 default: 938 default:
939 tv_std = TV_STD_NTSC; 939 tv_std = TV_STD_NTSC;
940 DRM_INFO 940 DRM_DEBUG_KMS
941 ("Unknown TV standard; defaulting to NTSC\n"); 941 ("Unknown TV standard; defaulting to NTSC\n");
942 break; 942 break;
943 } 943 }
944 944
945 switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) { 945 switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
946 case 0: 946 case 0:
947 DRM_INFO("29.498928713 MHz TV ref clk\n"); 947 DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
948 break; 948 break;
949 case 1: 949 case 1:
950 DRM_INFO("28.636360000 MHz TV ref clk\n"); 950 DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
951 break; 951 break;
952 case 2: 952 case 2:
953 DRM_INFO("14.318180000 MHz TV ref clk\n"); 953 DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
954 break; 954 break;
955 case 3: 955 case 3:
956 DRM_INFO("27.000000000 MHz TV ref clk\n"); 956 DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
957 break; 957 break;
958 default: 958 default:
959 break; 959 break;
@@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1324 1324
1325 if (tmds_info) { 1325 if (tmds_info) {
1326 ver = RBIOS8(tmds_info); 1326 ver = RBIOS8(tmds_info);
1327 DRM_INFO("DFP table revision: %d\n", ver); 1327 DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
1328 if (ver == 3) { 1328 if (ver == 3) {
1329 n = RBIOS8(tmds_info + 5) + 1; 1329 n = RBIOS8(tmds_info + 5) + 1;
1330 if (n > 4) 1330 if (n > 4)
@@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1408 offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); 1408 offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
1409 if (offset) { 1409 if (offset) {
1410 ver = RBIOS8(offset); 1410 ver = RBIOS8(offset);
1411 DRM_INFO("External TMDS Table revision: %d\n", ver); 1411 DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
1412 tmds->slave_addr = RBIOS8(offset + 4 + 2); 1412 tmds->slave_addr = RBIOS8(offset + 4 + 2);
1413 tmds->slave_addr >>= 1; /* 7 bit addressing */ 1413 tmds->slave_addr >>= 1; /* 7 bit addressing */
1414 gpio = RBIOS8(offset + 4 + 3); 1414 gpio = RBIOS8(offset + 4 + 3);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 5731fc9b1ae3..3eef567b0421 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
203 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 203 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
204 struct radeon_device *rdev = crtc->dev->dev_private; 204 struct radeon_device *rdev = crtc->dev->dev_private;
205 int xorigin = 0, yorigin = 0; 205 int xorigin = 0, yorigin = 0;
206 int w = radeon_crtc->cursor_width;
206 207
207 if (x < 0) 208 if (x < 0)
208 xorigin = -x + 1; 209 xorigin = -x + 1;
@@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
213 if (yorigin >= CURSOR_HEIGHT) 214 if (yorigin >= CURSOR_HEIGHT)
214 yorigin = CURSOR_HEIGHT - 1; 215 yorigin = CURSOR_HEIGHT - 1;
215 216
216 radeon_lock_cursor(crtc, true); 217 if (ASIC_IS_AVIVO(rdev)) {
217 if (ASIC_IS_DCE4(rdev)) {
218 /* cursors are offset into the total surface */
219 x += crtc->x;
220 y += crtc->y;
221 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
222
223 /* XXX: check if evergreen has the same issues as avivo chips */
224 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
225 ((xorigin ? 0 : x) << 16) |
226 (yorigin ? 0 : y));
227 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
228 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
229 ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
230 } else if (ASIC_IS_AVIVO(rdev)) {
231 int w = radeon_crtc->cursor_width;
232 int i = 0; 218 int i = 0;
233 struct drm_crtc *crtc_p; 219 struct drm_crtc *crtc_p;
234 220
@@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
260 if (w <= 0) 246 if (w <= 0)
261 w = 1; 247 w = 1;
262 } 248 }
249 }
263 250
251 radeon_lock_cursor(crtc, true);
252 if (ASIC_IS_DCE4(rdev)) {
253 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
254 ((xorigin ? 0 : x) << 16) |
255 (yorigin ? 0 : y));
256 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
257 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
258 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
259 } else if (ASIC_IS_AVIVO(rdev)) {
264 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, 260 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
265 ((xorigin ? 0 : x) << 16) | 261 ((xorigin ? 0 : x) << 16) |
266 (yorigin ? 0 : y)); 262 (yorigin ? 0 : y));
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9cdf6a35bc2c..40b0c087b592 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
97 radeon_bo_unpin(rbo); 97 radeon_bo_unpin(rbo);
98 radeon_bo_unreserve(rbo); 98 radeon_bo_unreserve(rbo);
99 } 99 }
100 drm_gem_object_handle_unreference(gobj);
101 drm_gem_object_unreference_unlocked(gobj); 100 drm_gem_object_unreference_unlocked(gobj);
102} 101}
103 102
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e62347d..b3b5306bb578 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
69 u32 c = 0; 69 u32 c = 0;
70 70
71 rbo->placement.fpfn = 0; 71 rbo->placement.fpfn = 0;
72 rbo->placement.lpfn = 0; 72 rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
73 rbo->placement.placement = rbo->placements; 73 rbo->placement.placement = rbo->placements;
74 rbo->placement.busy_placement = rbo->placements; 74 rbo->placement.busy_placement = rbo->placements;
75 if (domain & RADEON_GEM_DOMAIN_VRAM) 75 if (domain & RADEON_GEM_DOMAIN_VRAM)
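
The one-line radeon_object.c change is the consumer of the new active_vram_size field added throughout the radeon files above: clamping placement.lpfn keeps newly placed buffer objects below the CPU-visible boundary whenever the CP (and thus the blitter) is stopped. A small standalone C illustration of the page arithmetic, with made-up sizes:

/* lpfn is expressed in pages: active_vram_size >> PAGE_SHIFT.
 * The 256 MiB aperture below is an invented example value. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t visible_vram_size = 256ULL << 20;	/* CPU aperture   */
	uint64_t active_vram_size = visible_vram_size;	/* CP stopped     */

	printf("placement.lpfn = %llu pages\n",
	       (unsigned long long)(active_vram_size >> PAGE_SHIFT));
	return 0;
}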
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 353998dc2c03..3481bc7f6f58 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124 int r; 124 int r;
125 125
126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
127 if (unlikely(r != 0)) { 127 if (unlikely(r != 0))
128 if (r != -ERESTARTSYS)
129 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
130 return r; 128 return r;
131 }
132 spin_lock(&bo->tbo.lock); 129 spin_lock(&bo->tbo.lock);
133 if (mem_type) 130 if (mem_type)
134 *mem_type = bo->tbo.mem.mem_type; 131 *mem_type = bo->tbo.mem.mem_type;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index cc05b230d7ef..51d5f7b5ab21 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev)
693 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 693 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
694 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 694 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
695 rdev->mc.visible_vram_size = rdev->mc.aper_size; 695 rdev->mc.visible_vram_size = rdev->mc.aper_size;
696 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
696 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 697 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
697 base = RREG32_MC(R_000004_MC_FB_LOCATION); 698 base = RREG32_MC(R_000004_MC_FB_LOCATION);
698 base = G_000004_MC_FB_START(base) << 16; 699 base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3e3f75718be3..4dc2a87ea680 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev)
157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 159 rdev->mc.visible_vram_size = rdev->mc.aper_size;
160 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 161 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
161 base = G_000100_MC_FB_START(base) << 16; 162 base = G_000100_MC_FB_START(base) << 16;
162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index bfa59db374d2..9490da700749 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
267 */ 267 */
268void r700_cp_stop(struct radeon_device *rdev) 268void r700_cp_stop(struct radeon_device *rdev)
269{ 269{
270 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
270 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 271 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
271} 272}
272 273
@@ -992,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev)
992 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 993 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
993 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 994 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
994 rdev->mc.visible_vram_size = rdev->mc.aper_size; 995 rdev->mc.visible_vram_size = rdev->mc.aper_size;
996 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
995 r600_vram_gtt_location(rdev, &rdev->mc); 997 r600_vram_gtt_location(rdev, &rdev->mc);
996 radeon_update_bandwidth_info(rdev); 998 radeon_update_bandwidth_info(rdev);
997 999
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..db809e034cc4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
442} 442}
443 443
444/** 444/**
445 * Call with bo::reserved held and with the lru lock held.
446 * Will release GPU memory type usage on destruction.
447 * This is the place to put in driver specific hooks.
448 * Will release the bo::reserved lock and the
449 * lru lock on exit.
450 */
451
452static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
453{
454 struct ttm_bo_global *glob = bo->glob;
455
456 if (bo->ttm) {
457
458 /**
459 * Release the lru_lock, since we don't want to have
460 * an atomic requirement on ttm_tt[unbind|destroy].
461 */
462
463 spin_unlock(&glob->lru_lock);
464 ttm_tt_unbind(bo->ttm);
465 ttm_tt_destroy(bo->ttm);
466 bo->ttm = NULL;
467 spin_lock(&glob->lru_lock);
468 }
469
470 if (bo->mem.mm_node) {
471 drm_mm_put_block(bo->mem.mm_node);
472 bo->mem.mm_node = NULL;
473 }
474
475 atomic_set(&bo->reserved, 0);
476 wake_up_all(&bo->event_queue);
477 spin_unlock(&glob->lru_lock);
478}
479
480
481/**
445 * If bo idle, remove from delayed- and lru lists, and unref. 482 * If bo idle, remove from delayed- and lru lists, and unref.
446 * If not idle, and already on delayed list, do nothing. 483 * If not idle, and already on delayed list, do nothing.
447 * If not idle, and not on delayed list, put on delayed list, 484 * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
456 int ret; 493 int ret;
457 494
458 spin_lock(&bo->lock); 495 spin_lock(&bo->lock);
496retry:
459 (void) ttm_bo_wait(bo, false, false, !remove_all); 497 (void) ttm_bo_wait(bo, false, false, !remove_all);
460 498
461 if (!bo->sync_obj) { 499 if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
464 spin_unlock(&bo->lock); 502 spin_unlock(&bo->lock);
465 503
466 spin_lock(&glob->lru_lock); 504 spin_lock(&glob->lru_lock);
467 put_count = ttm_bo_del_from_lru(bo); 505 ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
506
507 /**
508 * Someone else has the object reserved. Bail and retry.
509 */
468 510
469 ret = ttm_bo_reserve_locked(bo, false, false, false, 0); 511 if (unlikely(ret == -EBUSY)) {
470 BUG_ON(ret); 512 spin_unlock(&glob->lru_lock);
471 if (bo->ttm) 513 spin_lock(&bo->lock);
472 ttm_tt_unbind(bo->ttm); 514 goto requeue;
515 }
516
517 /**
518 * We can re-check for sync object without taking
519 * the bo::lock since setting the sync object requires
520 * also bo::reserved. A busy object at this point may
521 * be caused by another thread starting an accelerated
522 * eviction.
523 */
524
525 if (unlikely(bo->sync_obj)) {
526 atomic_set(&bo->reserved, 0);
527 wake_up_all(&bo->event_queue);
528 spin_unlock(&glob->lru_lock);
529 spin_lock(&bo->lock);
530 if (remove_all)
531 goto retry;
532 else
533 goto requeue;
534 }
535
536 put_count = ttm_bo_del_from_lru(bo);
473 537
474 if (!list_empty(&bo->ddestroy)) { 538 if (!list_empty(&bo->ddestroy)) {
475 list_del_init(&bo->ddestroy); 539 list_del_init(&bo->ddestroy);
476 ++put_count; 540 ++put_count;
477 } 541 }
478 if (bo->mem.mm_node) {
479 drm_mm_put_block(bo->mem.mm_node);
480 bo->mem.mm_node = NULL;
481 }
482 spin_unlock(&glob->lru_lock);
483 542
484 atomic_set(&bo->reserved, 0); 543 ttm_bo_cleanup_memtype_use(bo);
485 544
486 while (put_count--) 545 while (put_count--)
487 kref_put(&bo->list_kref, ttm_bo_ref_bug); 546 kref_put(&bo->list_kref, ttm_bo_ref_bug);
488 547
489 return 0; 548 return 0;
490 } 549 }
491 550requeue:
492 spin_lock(&glob->lru_lock); 551 spin_lock(&glob->lru_lock);
493 if (list_empty(&bo->ddestroy)) { 552 if (list_empty(&bo->ddestroy)) {
494 void *sync_obj = bo->sync_obj; 553 void *sync_obj = bo->sync_obj;
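
The reworked ttm_bo_cleanup_refs() above stops assuming it can always win the reservation (the old BUG_ON path) and instead bails out with a retry or requeue when another thread holds bo::reserved or has installed a new sync object. A hedged userspace sketch of that try-lock-then-requeue shape, with pthread_mutex_trylock() standing in for ttm_bo_reserve_locked() (assumptions, not TTM's API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reserved = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 when this caller tore the object down, -EBUSY when someone
 * else held the reservation and the object must be requeued. */
static int cleanup_refs(void)
{
	if (pthread_mutex_trylock(&reserved) != 0) {
		/* Contended: bail and retry later instead of asserting,
		 * mirroring the -EBUSY path in the hunk above. */
		return -EBUSY;
	}
	/* ... idle re-check and actual teardown would go here ... */
	pthread_mutex_unlock(&reserved);
	return 0;
}

int main(void)
{
	if (cleanup_refs() == -EBUSY)
		printf("contended, requeued for delayed destruction\n");
	else
		printf("cleaned up\n");
	return 0;
}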
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 4267a6fdc277..5925bdcd417d 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -237,6 +237,8 @@ static const struct hid_device_id cando_devices[] = {
237 USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 237 USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
238 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, 238 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
239 USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 239 USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
240 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
241 USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
240 { } 242 { }
241}; 243};
242MODULE_DEVICE_TABLE(hid, cando_devices); 244MODULE_DEVICE_TABLE(hid, cando_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3f7292486024..a0dea3d1296e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1292,6 +1292,7 @@ static const struct hid_device_id hid_blacklist[] = {
1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, 1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
1295 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1296 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
1296 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, 1297 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1297 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1298 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 765a4f53eb5c..c5ae5f1545bd 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -134,6 +134,7 @@
134#define USB_VENDOR_ID_CANDO 0x2087 134#define USB_VENDOR_ID_CANDO 0x2087
135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
136#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 136#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
137#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
137 138
138#define USB_VENDOR_ID_CH 0x068e 139#define USB_VENDOR_ID_CH 0x068e
139#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 140#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
@@ -503,6 +504,7 @@
503 504
504#define USB_VENDOR_ID_TURBOX 0x062a 505#define USB_VENDOR_ID_TURBOX 0x062a
505#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 506#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
507#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
506 508
507#define USB_VENDOR_ID_TWINHAN 0x6253 509#define USB_VENDOR_ID_TWINHAN 0x6253
508#define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100 510#define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 47d70c523d93..a3866b5c0c43 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -109,6 +109,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
109 int ret = 0; 109 int ret = 0;
110 110
111 mutex_lock(&minors_lock); 111 mutex_lock(&minors_lock);
112
113 if (!hidraw_table[minor]) {
114 ret = -ENODEV;
115 goto out;
116 }
117
112 dev = hidraw_table[minor]->hid; 118 dev = hidraw_table[minor]->hid;
113 119
114 if (!dev->hid_output_raw_report) { 120 if (!dev->hid_output_raw_report) {
@@ -244,6 +250,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
244 250
245 mutex_lock(&minors_lock); 251 mutex_lock(&minors_lock);
246 dev = hidraw_table[minor]; 252 dev = hidraw_table[minor];
253 if (!dev) {
254 ret = -ENODEV;
255 goto out;
256 }
247 257
248 switch (cmd) { 258 switch (cmd) {
249 case HIDIOCGRDESCSIZE: 259 case HIDIOCGRDESCSIZE:
@@ -317,6 +327,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
317 327
318 ret = -ENOTTY; 328 ret = -ENOTTY;
319 } 329 }
330out:
320 mutex_unlock(&minors_lock); 331 mutex_unlock(&minors_lock);
321 return ret; 332 return ret;
322} 333}
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 70da3181c8a0..f0260c699adb 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -36,6 +36,7 @@ static const struct hid_blacklist {
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, 36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
39 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 40 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
40 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 41 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
41 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 42 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index f7bd2613cecc..f2de3be35df3 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
677 dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", 677 dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
678 cpm->adap.name); 678 cpm->adap.name);
679 679
680 /*
681 * register OF I2C devices
682 */
683 of_i2c_register_devices(&cpm->adap);
684
680 return 0; 685 return 0;
681out_shut: 686out_shut:
682 cpm_i2c_shutdown(cpm); 687 cpm_i2c_shutdown(cpm);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index b8feac5f2ef4..5795c8398c7c 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -331,21 +331,16 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
331 INIT_COMPLETION(dev->cmd_complete); 331 INIT_COMPLETION(dev->cmd_complete);
332 dev->cmd_err = 0; 332 dev->cmd_err = 0;
333 333
334 /* Take I2C out of reset, configure it as master and set the 334 /* Take I2C out of reset and configure it as master */
335 * start bit */ 335 flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST;
336 flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT;
337 336
338 /* if the slave address is ten bit address, enable XA bit */ 337 /* if the slave address is ten bit address, enable XA bit */
339 if (msg->flags & I2C_M_TEN) 338 if (msg->flags & I2C_M_TEN)
340 flag |= DAVINCI_I2C_MDR_XA; 339 flag |= DAVINCI_I2C_MDR_XA;
341 if (!(msg->flags & I2C_M_RD)) 340 if (!(msg->flags & I2C_M_RD))
342 flag |= DAVINCI_I2C_MDR_TRX; 341 flag |= DAVINCI_I2C_MDR_TRX;
343 if (stop) 342 if (msg->len == 0)
344 flag |= DAVINCI_I2C_MDR_STP;
345 if (msg->len == 0) {
346 flag |= DAVINCI_I2C_MDR_RM; 343 flag |= DAVINCI_I2C_MDR_RM;
347 flag &= ~DAVINCI_I2C_MDR_STP;
348 }
349 344
350 /* Enable receive or transmit interrupts */ 345 /* Enable receive or transmit interrupts */
351 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); 346 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
@@ -358,17 +353,28 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
358 dev->terminate = 0; 353 dev->terminate = 0;
359 354
360 /* 355 /*
356 * Write mode register first as needed for correct behaviour
357 * on OMAP-L138, but don't set STT yet to avoid a race with XRDY
358 * occurring before we have loaded DXR
359 */
360 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
361
362 /*
361 * First byte should be set here, not after interrupt, 363 * First byte should be set here, not after interrupt,
362 * because transmit-data-ready interrupt can come before 364 * because transmit-data-ready interrupt can come before
363 * NACK-interrupt during sending of previous message and 365 * NACK-interrupt during sending of previous message and
364 * ICDXR may have wrong data 366 * ICDXR may have wrong data
367 * It also saves us one interrupt, making transfers slightly faster.
365 */ 368 */
366 if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { 369 if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) {
367 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); 370 davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
368 dev->buf_len--; 371 dev->buf_len--;
369 } 372 }
370 373
371 /* write the data into mode register; start transmitting */ 374 /* Set STT to begin transmit now DXR is loaded */
375 flag |= DAVINCI_I2C_MDR_STT;
376 if (stop && msg->len != 0)
377 flag |= DAVINCI_I2C_MDR_STP;
372 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); 378 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
373 379
374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 380 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
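
The i2c-davinci fix above is purely an ordering change: configure the mode register without STT, preload the first byte into DXR, and only then raise STT (and STP when appropriate), so the start bit can no longer race the transmit-data-ready interrupt on OMAP-L138. A compact C sketch of that sequencing; the register helpers and bit names below are stand-ins for the driver's own, not real API:

#include <stdio.h>

enum { MDR_IRS = 1 << 0, MDR_MST = 1 << 1, MDR_TRX = 1 << 2,
       MDR_STT = 1 << 3, MDR_STP = 1 << 4 };

static unsigned int mdr, dxr;	/* fake mode / data-transmit registers */

static void write_mdr(unsigned int v) { mdr = v; }
static void write_dxr(unsigned int v) { dxr = v; }

static void start_transfer(const unsigned char *buf, int len, int stop)
{
	unsigned int flag = MDR_IRS | MDR_MST | MDR_TRX;

	write_mdr(flag);	/* 1. bring up master mode, STT clear  */
	if (len)
		write_dxr(buf[0]);	/* 2. preload first byte into DXR */

	flag |= MDR_STT;	/* 3. only now is it safe to start     */
	if (stop && len)
		flag |= MDR_STP;
	write_mdr(flag);
}

int main(void)
{
	start_transfer((const unsigned char *)"\x42", 1, 1);
	printf("MDR=%#x DXR=%#x\n", mdr, dxr);
	return 0;
}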
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 43ca32fddde2..89eedf45d30e 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev,
761 dev_info(&ofdev->dev, "using %s mode\n", 761 dev_info(&ofdev->dev, "using %s mode\n",
762 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); 762 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
763 763
764 /* Now register all the child nodes */
765 of_i2c_register_devices(adap);
766
764 return 0; 767 return 0;
765 768
766error_cleanup: 769error_cleanup:
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d1ff9408dc1f..4c2a62b75b5c 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -159,15 +159,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
159 159
160static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) 160static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
161{ 161{
162 int result; 162 wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
163
164 result = wait_event_interruptible_timeout(i2c_imx->queue,
165 i2c_imx->i2csr & I2SR_IIF, HZ / 10);
166 163
167 if (unlikely(result < 0)) { 164 if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
168 dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__);
169 return result;
170 } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
171 dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); 165 dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
172 return -ETIMEDOUT; 166 return -ETIMEDOUT;
173 } 167 }
@@ -295,7 +289,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
295 i2c_imx->i2csr = temp; 289 i2c_imx->i2csr = temp;
296 temp &= ~I2SR_IIF; 290 temp &= ~I2SR_IIF;
297 writeb(temp, i2c_imx->base + IMX_I2C_I2SR); 291 writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
298 wake_up_interruptible(&i2c_imx->queue); 292 wake_up(&i2c_imx->queue);
299 return IRQ_HANDLED; 293 return IRQ_HANDLED;
300 } 294 }
301 295
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a1c419a716af..b74e6dc6886c 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
632 dev_err(i2c->dev, "failed to add adapter\n"); 632 dev_err(i2c->dev, "failed to add adapter\n");
633 goto fail_add; 633 goto fail_add;
634 } 634 }
635 of_i2c_register_devices(&i2c->adap);
635 636
636 return result; 637 return result;
637 638
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index bbd77603a417..29933f87d8fa 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
71 71
72static int pca_isa_waitforcompletion(void *pd) 72static int pca_isa_waitforcompletion(void *pd)
73{ 73{
74 long ret = ~0;
75 unsigned long timeout; 74 unsigned long timeout;
75 long ret;
76 76
77 if (irq > -1) { 77 if (irq > -1) {
78 ret = wait_event_timeout(pca_wait, 78 ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
81 } else { 81 } else {
82 /* Do polling */ 82 /* Do polling */
83 timeout = jiffies + pca_isa_ops.timeout; 83 timeout = jiffies + pca_isa_ops.timeout;
84 while (((pca_isa_readbyte(pd, I2C_PCA_CON) 84 do {
85 & I2C_PCA_CON_SI) == 0) 85 ret = time_before(jiffies, timeout);
86 && (ret = time_before(jiffies, timeout))) 86 if (pca_isa_readbyte(pd, I2C_PCA_CON)
87 & I2C_PCA_CON_SI)
88 break;
87 udelay(100); 89 udelay(100);
90 } while (ret);
88 } 91 }
92
89 return ret > 0; 93 return ret > 0;
90} 94}
91 95
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index ef5c78487eb7..5f6d7f89e225 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
80static int i2c_pca_pf_waitforcompletion(void *pd) 80static int i2c_pca_pf_waitforcompletion(void *pd)
81{ 81{
82 struct i2c_pca_pf_data *i2c = pd; 82 struct i2c_pca_pf_data *i2c = pd;
83 long ret = ~0;
84 unsigned long timeout; 83 unsigned long timeout;
84 long ret;
85 85
86 if (i2c->irq) { 86 if (i2c->irq) {
87 ret = wait_event_timeout(i2c->wait, 87 ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
90 } else { 90 } else {
91 /* Do polling */ 91 /* Do polling */
92 timeout = jiffies + i2c->adap.timeout; 92 timeout = jiffies + i2c->adap.timeout;
93 while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) 93 do {
94 & I2C_PCA_CON_SI) == 0) 94 ret = time_before(jiffies, timeout);
95 && (ret = time_before(jiffies, timeout))) 95 if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
96 & I2C_PCA_CON_SI)
97 break;
96 udelay(100); 98 udelay(100);
99 } while (ret);
97 } 100 }
98 101
99 return ret > 0; 102 return ret > 0;
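
Both PCA polling loops (i2c-pca-isa and i2c-pca-platform above) share the same bug and fix: the old while-loop never sampled the SI status bit when it was already set on entry, leaving ret at its ~0 initializer and mis-reporting an instant completion as a timeout. The do/while form evaluates the deadline first and then samples the status at least once per pass. A standalone C model of the fixed idiom, with an invented status function in place of the hardware read:

#include <stdio.h>
#include <time.h>

static int status_bit_set(void) { return 1; }	/* pretend SI is set */

static int wait_for_completion(clock_t timeout)
{
	long ret;

	do {
		ret = (clock() < timeout);	/* time_before() stand-in */
		if (status_bit_set())	/* sampled at least once, even if
					 * the bit was set before entry  */
			break;
		/* udelay(100) would sit here in the driver */
	} while (ret);

	return ret > 0;		/* 1 = completed, 0 = timed out */
}

int main(void)
{
	printf("%s\n", wait_for_completion(clock() + CLOCKS_PER_SEC)
		       ? "completed" : "timed out");
	return 0;
}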
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6649176de940..bea4c5021d26 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -32,7 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/idr.h> 33#include <linux/idr.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/of_i2c.h>
36#include <linux/of_device.h> 35#include <linux/of_device.h>
37#include <linux/completion.h> 36#include <linux/completion.h>
38#include <linux/hardirq.h> 37#include <linux/hardirq.h>
@@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev)
197{ 196{
198 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 197 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
199 198
200 if (pm_runtime_suspended(dev)) 199 if (pm) {
201 return 0; 200 if (pm_runtime_suspended(dev))
202 201 return 0;
203 if (pm) 202 else
204 return pm->suspend ? pm->suspend(dev) : 0; 203 return pm->suspend ? pm->suspend(dev) : 0;
204 }
205 205
206 return i2c_legacy_suspend(dev, PMSG_SUSPEND); 206 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
207} 207}
@@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev)
216 else 216 else
217 ret = i2c_legacy_resume(dev); 217 ret = i2c_legacy_resume(dev);
218 218
219 if (!ret) {
220 pm_runtime_disable(dev);
221 pm_runtime_set_active(dev);
222 pm_runtime_enable(dev);
223 }
224
225 return ret; 219 return ret;
226} 220}
227 221
@@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev)
229{ 223{
230 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 224 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
231 225
232 if (pm_runtime_suspended(dev)) 226 if (pm) {
233 return 0; 227 if (pm_runtime_suspended(dev))
234 228 return 0;
235 if (pm) 229 else
236 return pm->freeze ? pm->freeze(dev) : 0; 230 return pm->freeze ? pm->freeze(dev) : 0;
231 }
237 232
238 return i2c_legacy_suspend(dev, PMSG_FREEZE); 233 return i2c_legacy_suspend(dev, PMSG_FREEZE);
239} 234}
@@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev)
242{ 237{
243 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 238 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
244 239
245 if (pm_runtime_suspended(dev)) 240 if (pm) {
246 return 0; 241 if (pm_runtime_suspended(dev))
247 242 return 0;
248 if (pm) 243 else
249 return pm->thaw ? pm->thaw(dev) : 0; 244 return pm->thaw ? pm->thaw(dev) : 0;
245 }
250 246
251 return i2c_legacy_resume(dev); 247 return i2c_legacy_resume(dev);
252} 248}
@@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev)
255{ 251{
256 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 252 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
257 253
258 if (pm_runtime_suspended(dev)) 254 if (pm) {
259 return 0; 255 if (pm_runtime_suspended(dev))
260 256 return 0;
261 if (pm) 257 else
262 return pm->poweroff ? pm->poweroff(dev) : 0; 258 return pm->poweroff ? pm->poweroff(dev) : 0;
259 }
263 260
264 return i2c_legacy_suspend(dev, PMSG_HIBERNATE); 261 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
265} 262}
@@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
 	if (adap->nr < __i2c_first_dynamic_bus_num)
 		i2c_scan_static_board_info(adap);
 
-	/* Register devices from the device tree */
-	of_i2c_register_devices(adap);
-
 	/* Notify drivers */
 	mutex_lock(&core_lock);
 	bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
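
The suspend/freeze/thaw/poweroff callbacks above all converge on one dispatch shape: the runtime-PM state is consulted only when the driver actually supplies dev_pm_ops, and everything else falls through to the legacy helper. A minimal sketch of that shape, with pm_dispatch and the legacy_cb parameter as illustrative stand-ins rather than anything in i2c-core.c:

    /* Prefer dev_pm_ops when the driver has them; skip devices that are
     * already runtime-suspended; otherwise use the legacy callback. */
    static int pm_dispatch(struct device *dev,
                           int (*legacy_cb)(struct device *))
    {
            const struct dev_pm_ops *pm =
                    dev->driver ? dev->driver->pm : NULL;

            if (pm) {
                    if (pm_runtime_suspended(dev))
                            return 0;       /* already down, nothing to do */
                    return pm->suspend ? pm->suspend(dev) : 0;
            }
            return legacy_cb(dev);          /* driver predates dev_pm_ops */
    }
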
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0906fc5b69b9..c37ef64d1465 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -157,13 +157,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C5 */ },
 	{ /* MWAIT C6 */
 		.name = "ATM-C6",
-		.desc = "MWAIT 0x40",
-		.driver_data = (void *) 0x40,
+		.desc = "MWAIT 0x52",
+		.driver_data = (void *) 0x52,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 200,
+		.exit_latency = 140,
 		.power_usage = 150,
-		.target_residency = 800,
-		.enter = NULL }, /* disabled */
+		.target_residency = 560,
+		.enter = &intel_idle },
 };
 
 /**
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c908c5f83645..af9ee313c10b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,7 +28,7 @@ struct evdev {
 	int minor;
 	struct input_handle handle;
 	wait_queue_head_t wait;
-	struct evdev_client *grab;
+	struct evdev_client __rcu *grab;
 	struct list_head client_list;
 	spinlock_t client_lock; /* protects client_list */
 	struct mutex mutex;
@@ -669,6 +669,9 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 
 	if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
 
+		if (!dev->absinfo)
+			return -EINVAL;
+
 		t = _IOC_NR(cmd) & ABS_MAX;
 		abs = dev->absinfo[t];
 
@@ -680,10 +683,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 		}
 	}
 
-	if (_IOC_DIR(cmd) == _IOC_READ) {
+	if (_IOC_DIR(cmd) == _IOC_WRITE) {
 
 		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
 
+			if (!dev->absinfo)
+				return -EINVAL;
+
 			t = _IOC_NR(cmd) & ABS_MAX;
 
 			if (copy_from_user(&abs, p, min_t(size_t,
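
The __rcu marking on grab is a sparse annotation: it records that the pointer is RCU-managed and must be touched through the RCU accessors. A rough sketch of the intended access pattern, assuming evdev's locking rules rather than quoting its code (pass_event_to is a hypothetical consumer):

    /* reader side: a grab can appear or vanish concurrently */
    rcu_read_lock();
    client = rcu_dereference(evdev->grab);
    if (client)
            pass_event_to(client);          /* only the grabber sees it */
    rcu_read_unlock();

    /* writer side, serialized by evdev->mutex: publish, then wait for
     * readers before the old grabber may be freed */
    rcu_assign_pointer(evdev->grab, new_client);
    synchronize_rcu();
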
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index d85bd8a7967d..22239e988498 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
 
 	memcpy(joydev->abspam, abspam, len);
 
+	for (i = 0; i < joydev->nabs; i++)
+		joydev->absmap[joydev->abspam[i]] = i;
+
  out:
 	kfree(abspam);
 	return retval;
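
joydev keeps a pair of tables: abspam maps a joystick axis index to an input event code, and absmap maps the code back to the index. JSIOCSAXMAP used to install a new abspam without touching absmap, so incoming events kept routing through the stale reverse map. The added loop restores the invariant absmap[abspam[i]] == i; in isolation:

    /* rebuild code -> index from the freshly installed index -> code */
    for (i = 0; i < joydev->nabs; i++)
            joydev->absmap[joydev->abspam[i]] = i;
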
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index c19066479057..7e2c12a5b839 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -104,7 +104,7 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm)
 	t.endidx = 91;
 	t.seq = tseq;
 	t.act.semaphore = &tsem;
-	init_MUTEX_LOCKED(&tsem);
+	sema_init(&tsem, 0);
 
 	if (hp_sdc_enqueue_transaction(&t)) return -1;
 
@@ -698,7 +698,7 @@ static int __init hp_sdc_rtc_init(void)
 		return -ENODEV;
 #endif
 
-	init_MUTEX(&i8042tregs);
+	sema_init(&i8042tregs, 1);
 
 	if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
 		return ret;
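
This conversion, repeated in the hil_mlc and hp_sdc diffs below, is the mechanical retirement of init_MUTEX()/init_MUTEX_LOCKED(): the helpers were removed from the semaphore API and the initial count is now written out. The equivalences, as a sketch:

    struct semaphore sem;

    sema_init(&sem, 1);     /* was init_MUTEX(&sem): first down() succeeds */
    sema_init(&sem, 0);     /* was init_MUTEX_LOCKED(&sem): down() blocks
                             * until another context calls up(), which is
                             * how these drivers wait for a transaction */
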
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 0d4266a533a5..360698553eb5 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
 		retval = uinput_validate_absbits(dev);
 		if (retval < 0)
 			goto exit;
+		if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+			int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+			input_mt_create_slots(dev, nslot);
+			input_set_events_per_packet(dev, 6 * nslot);
+		} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+			input_set_events_per_packet(dev, 60);
+		}
 	}
 
 	udev->state = UIST_SETUP_COMPLETE;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index c92f4edfee7b..e5624d8f1709 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -915,15 +915,15 @@ int hil_mlc_register(hil_mlc *mlc)
 	mlc->ostarted = 0;
 
 	rwlock_init(&mlc->lock);
-	init_MUTEX(&mlc->osem);
+	sema_init(&mlc->osem, 1);
 
-	init_MUTEX(&mlc->isem);
+	sema_init(&mlc->isem, 1);
 	mlc->icount = -1;
 	mlc->imatch = 0;
 
 	mlc->opercnt = 0;
 
-	init_MUTEX_LOCKED(&(mlc->csem));
+	sema_init(&(mlc->csem), 0);
 
 	hil_mlc_clear_di_scratch(mlc);
 	hil_mlc_clear_di_map(mlc, 0);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index bcc2d30ec245..8c0b51c31424 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -905,7 +905,7 @@ static int __init hp_sdc_init(void)
 	ts_sync[1] = 0x0f;
 	ts_sync[2] = ts_sync[3] = ts_sync[4] = ts_sync[5] = 0;
 	t_sync.act.semaphore = &s_sync;
-	init_MUTEX_LOCKED(&s_sync);
+	sema_init(&s_sync, 0);
 	hp_sdc_enqueue_transaction(&t_sync);
 	down(&s_sync); /* Wait for t_sync to complete */
 
@@ -1039,7 +1039,7 @@ static int __init hp_sdc_register(void)
 		return hp_sdc.dev_err;
 	}
 
-	init_MUTEX_LOCKED(&tq_init_sem);
+	sema_init(&tq_init_sem, 0);
 
 	tq_init.actidx = 0;
 	tq_init.idx = 1;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 42ba3691d908..b35876ee6908 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb)
 static int wacom_open(struct input_dev *dev)
 {
 	struct wacom *wacom = input_get_drvdata(dev);
+	int retval = 0;
 
-	mutex_lock(&wacom->lock);
-
-	wacom->irq->dev = wacom->usbdev;
-
-	if (usb_autopm_get_interface(wacom->intf) < 0) {
-		mutex_unlock(&wacom->lock);
+	if (usb_autopm_get_interface(wacom->intf) < 0)
 		return -EIO;
-	}
+
+	mutex_lock(&wacom->lock);
 
 	if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
-		usb_autopm_put_interface(wacom->intf);
-		mutex_unlock(&wacom->lock);
-		return -EIO;
+		retval = -EIO;
+		goto out;
 	}
 
 	wacom->open = true;
 	wacom->intf->needs_remote_wakeup = 1;
 
+out:
 	mutex_unlock(&wacom->lock);
-	return 0;
+	if (retval)
+		usb_autopm_put_interface(wacom->intf);
+	return retval;
 }
 
 static void wacom_close(struct input_dev *dev)
@@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev)
 	wacom->open = false;
 	wacom->intf->needs_remote_wakeup = 0;
 	mutex_unlock(&wacom->lock);
+
+	usb_autopm_put_interface(wacom->intf);
 }
 
 static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
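
The wacom_open() rework takes the autopm reference before wacom->lock and releases it, on failure, only after the lock is dropped, so resume work triggered by usb_autopm_get_interface() can never collide with the held mutex; wacom_close() now gives back the reference held since open. A condensed sketch of the resulting pairing (open_sketch is illustrative, not the driver function):

    static int open_sketch(struct wacom *wacom)
    {
            int retval = 0;

            if (usb_autopm_get_interface(wacom->intf) < 0)  /* before lock */
                    return -EIO;

            mutex_lock(&wacom->lock);
            if (usb_submit_urb(wacom->irq, GFP_KERNEL))
                    retval = -EIO;
            else
                    wacom->open = true;
            mutex_unlock(&wacom->lock);

            if (retval)
                    usb_autopm_put_interface(wacom->intf);  /* balance on error */
            return retval;  /* on success the reference is kept until close */
    }
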
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 6e29badb969e..47fd7a041c52 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
 	/* general pen packet */
 	if ((data[1] & 0xb8) == 0xa0) {
 		t = (data[6] << 2) | ((data[7] >> 6) & 3);
-		if (features->type >= INTUOS4S && features->type <= INTUOS4L)
+		if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+		    features->type == WACOM_21UX2) {
 			t = (t << 1) | (data[1] & 1);
+		}
 		input_report_abs(input, ABS_PRESSURE, t);
 		input_report_abs(input, ABS_TILT_X,
 				 ((data[7] << 1) & 0x7e) | (data[8] >> 7));
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
index 485be8b1e1b3..f0225bc0f267 100644
--- a/drivers/isdn/sc/interrupt.c
+++ b/drivers/isdn/sc/interrupt.c
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
 	}
 	else if(callid>=0x0000 && callid<=0x7FFF)
 	{
+		int len;
+
 		pr_debug("%s: Got Incoming Call\n",
 				sc_adapter[card]->devicename);
-		strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
-		strcpy(setup.eazmsn,
-			sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+		len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+				sizeof(setup.phone));
+		if (len >= sizeof(setup.phone))
+			continue;
+		len = strlcpy(setup.eazmsn,
+				sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+				sizeof(setup.eazmsn));
+		if (len >= sizeof(setup.eazmsn))
+			continue;
 		setup.si1 = 7;
 		setup.si2 = 0;
 		setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
 		 * Handle a GetMyNumber Rsp
 		 */
 		if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
-			strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+			strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+				rcvmsg.msg_data.byte_array,
+				sizeof(rcvmsg.msg_data.byte_array));
 			continue;
 		}
 
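
Both hunks replace strcpy() of wire-supplied ISDN strings into fixed-size fields. strlcpy() bounds the copy, always NUL-terminates, and returns the length of the source, so truncation is detectable and the message can be dropped instead of acted on. The pattern in isolation, with the buffer size assumed:

    /* bounded copy with truncation check; dstsz would be sizeof(field) */
    static int copy_number(char *dst, size_t dstsz, const char *src)
    {
            size_t len = strlcpy(dst, src, dstsz);

            if (len >= dstsz)       /* src did not fit: dst is truncated */
                    return -EINVAL; /* caller skips this message */
            return 0;
    }
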
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 1c4ee6e77937..bf64e49d996a 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -83,7 +83,7 @@ static struct adb_driver *adb_controller;
 BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
-static DECLARE_MUTEX(adb_probe_mutex);
+static DEFINE_SEMAPHORE(adb_probe_mutex);
 static int sleepy_trackpad;
 static int autopoll_devs;
 int __adb_probe_sync;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed4900ade93a..e4fb58db5454 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 			page = bitmap->sb_page;
 			offset = sizeof(bitmap_super_t);
 			if (!file)
-				read_sb_page(bitmap->mddev,
-					     bitmap->mddev->bitmap_info.offset,
-					     page,
-					     index, count);
+				page = read_sb_page(
+					bitmap->mddev,
+					bitmap->mddev->bitmap_info.offset,
+					page,
+					index, count);
 		} else if (file) {
 			page = read_page(file, index, bitmap, count);
 			offset = 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ad83a4dcadc3..0b830bbe1d8b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1839,7 +1839,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
 		/* take from bio_init */
 		bio->bi_next = NULL;
+		bio->bi_flags &= ~(BIO_POOL_MASK-1);
 		bio->bi_flags |= 1 << BIO_UPTODATE;
+		bio->bi_comp_cpu = -1;
 		bio->bi_rw = READ;
 		bio->bi_vcnt = 0;
 		bio->bi_idx = 0;
@@ -1912,7 +1914,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 				break;
 			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
-			if (len > (sync_blocks<<9))
+			if ((len >> 9) > sync_blocks)
 				len = sync_blocks<<9;
 		}
 
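
In this function len counts bytes while sync_blocks counts 512-byte sectors. Rewriting the bound as (len >> 9) > sync_blocks compares in sector units, where nothing is shifted up; the old form computed sync_blocks << 9, which can wrap for a large resync window (for instance with a 32-bit sector_t), letting len escape the clamp. Presumably that is the point of the change. In miniature:

    /* compare in sectors so no operand is shifted toward overflow */
    if ((len >> 9) > sync_blocks)
            len = sync_blocks << 9; /* safe: sync_blocks now bounds len */
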
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 7e82a9df726b..7961d59f5cac 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie)
 	 * a keyup event might follow immediately after the keydown.
 	 */
 	spin_lock_irqsave(&ir->keylock, flags);
-	if (time_is_after_eq_jiffies(ir->keyup_jiffies))
+	if (time_is_before_eq_jiffies(ir->keyup_jiffies))
 		ir_keyup(ir);
 	spin_unlock_irqrestore(&ir->keylock, flags);
 }
@@ -510,6 +510,13 @@ int __ir_input_register(struct input_dev *input_dev,
 	       (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
 			" in raw mode" : "");
 
+	/*
+	 * Default delay of 250ms is too short for some protocols, especially
+	 * since the timeout is currently set to 250ms. Increase it to 500ms,
+	 * to avoid wrong repetition of the keycodes.
+	 */
+	input_dev->rep[REP_DELAY] = 500;
+
 	return 0;
 
 out_event:
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 77b5946413c0..e63f757d5d72 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -267,7 +267,7 @@ static int ir_lirc_register(struct input_dev *input_dev)
 		features |= LIRC_CAN_SET_SEND_CARRIER;
 
 		if (ir_dev->props->s_tx_duty_cycle)
-			features |= LIRC_CAN_SET_REC_DUTY_CYCLE;
+			features |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
 	}
 
 	if (ir_dev->props->s_rx_carrier_range)
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 43094e7eccfa..8e0e1b1f8c87 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -279,9 +279,11 @@ int ir_raw_event_register(struct input_dev *input_dev)
279 "rc%u", (unsigned int)ir->devno); 279 "rc%u", (unsigned int)ir->devno);
280 280
281 if (IS_ERR(ir->raw->thread)) { 281 if (IS_ERR(ir->raw->thread)) {
282 int ret = PTR_ERR(ir->raw->thread);
283
282 kfree(ir->raw); 284 kfree(ir->raw);
283 ir->raw = NULL; 285 ir->raw = NULL;
284 return PTR_ERR(ir->raw->thread); 286 return ret;
285 } 287 }
286 288
287 mutex_lock(&ir_raw_handler_lock); 289 mutex_lock(&ir_raw_handler_lock);
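
The old error path freed ir->raw and then evaluated PTR_ERR(ir->raw->thread) through the freed pointer. Saving the error code while the structure is still alive is the general shape of the fix (obj is a stand-in, not the driver structure):

    if (IS_ERR(obj->thread)) {
            int ret = PTR_ERR(obj->thread); /* read before freeing obj */

            kfree(obj);
            obj = NULL;
            return ret;                     /* nothing touched after kfree */
    }
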
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 96dafc425c8e..46d42467f9b4 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -67,13 +67,14 @@ static ssize_t show_protocols(struct device *d,
 	char *tmp = buf;
 	int i;
 
-	if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+	if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
 		enabled = ir_dev->rc_tab.ir_type;
 		allowed = ir_dev->props->allowed_protos;
-	} else {
+	} else if (ir_dev->raw) {
 		enabled = ir_dev->raw->enabled_protocols;
 		allowed = ir_raw_get_allowed_protocols();
-	}
+	} else
+		return sprintf(tmp, "[builtin]\n");
 
 	IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
 		   (long long)allowed,
@@ -121,10 +122,14 @@ static ssize_t store_protocols(struct device *d,
 	int rc, i, count = 0;
 	unsigned long flags;
 
-	if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+	if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
 		type = ir_dev->rc_tab.ir_type;
-	else
+	else if (ir_dev->raw)
 		type = ir_dev->raw->enabled_protocols;
+	else {
+		IR_dprintk(1, "Protocol switching not supported\n");
+		return -EINVAL;
+	}
 
 	while ((tmp = strsep((char **) &data, " \n")) != NULL) {
 		if (!*tmp)
@@ -185,7 +190,7 @@ static ssize_t store_protocols(struct device *d,
 		}
 	}
 
-	if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+	if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
 		spin_lock_irqsave(&ir_dev->rc_tab.lock, flags);
 		ir_dev->rc_tab.ir_type = type;
 		spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index 64264f7f838f..39557ad401b6 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -19,6 +19,7 @@ static struct ir_scancode rc6_mce[] = {
 
 	{ 0x800f0416, KEY_PLAY },
 	{ 0x800f0418, KEY_PAUSE },
+	{ 0x800f046e, KEY_PLAYPAUSE },
 	{ 0x800f0419, KEY_STOP },
 	{ 0x800f0417, KEY_RECORD },
 
@@ -37,6 +38,8 @@ static struct ir_scancode rc6_mce[] = {
 	{ 0x800f0411, KEY_VOLUMEDOWN },
 	{ 0x800f0412, KEY_CHANNELUP },
 	{ 0x800f0413, KEY_CHANNELDOWN },
+	{ 0x800f043a, KEY_BRIGHTNESSUP },
+	{ 0x800f0480, KEY_BRIGHTNESSDOWN },
 
 	{ 0x800f0401, KEY_NUMERIC_1 },
 	{ 0x800f0402, KEY_NUMERIC_2 },
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index ac6bb2c01a48..bc620e10ef77 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -120,6 +120,10 @@ static struct usb_device_id mceusb_dev_table[] = {
 	{ USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
 	/* Philips eHome Infrared Transceiver */
 	{ USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
+	/* Philips/Spinel plus IR transceiver for ASUS */
+	{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
+	/* Philips/Spinel plus IR transceiver for ASUS */
+	{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
 	/* Realtek MCE IR Receiver */
 	{ USB_DEVICE(VENDOR_REALTEK, 0x0161) },
 	/* SMK/Toshiba G83C0004D410 */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index fe818348b8a3..48397f103d32 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -673,9 +673,6 @@ static int dib0700_probe(struct usb_interface *intf,
 	else
 		dev->props.rc.core.bulk_mode = false;
 
-	/* Need a higher delay, to avoid wrong repeat */
-	dev->rc_input_dev->rep[REP_DELAY] = 500;
-
 	dib0700_rc_setup(dev);
 
 	return 0;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f634d2e784b2..e06acd1fecb6 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -940,6 +940,58 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
 	return adap->fe == NULL ? -ENODEV : 0;
 }
 
+/* STK7770P */
+static struct dib7000p_config dib7770p_dib7000p_config = {
+	.output_mpeg2_in_188_bytes = 1,
+
+	.agc_config_count = 1,
+	.agc = &dib7070_agc_config,
+	.bw = &dib7070_bw_config_12_mhz,
+	.tuner_is_baseband = 1,
+	.spur_protect = 1,
+
+	.gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+	.gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+	.gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+	.hostbus_diversity = 1,
+	.enable_current_mirror = 1,
+	.disable_sample_and_hold = 0,
+};
+
+static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
+{
+	struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
+	if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) &&
+	    p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
+		dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+	else
+		dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+	msleep(10);
+	dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+	dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+	dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+	dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+	dib0700_ctrl_clock(adap->dev, 72, 1);
+
+	msleep(10);
+	dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+	msleep(10);
+	dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+	if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+				     &dib7770p_dib7000p_config) != 0) {
+		err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+		    __func__);
+		return -ENODEV;
+	}
+
+	adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+		&dib7770p_dib7000p_config);
+	return adap->fe == NULL ? -ENODEV : 0;
+}
+
 /* DIB807x generic */
 static struct dibx000_agc_config dib807x_agc_config[2] = {
 	{
@@ -1781,7 +1833,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
 /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
 	{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
 	{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
-	{ USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
+	{ USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) },
 	{ USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
 /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
 	{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
@@ -2406,7 +2458,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
 			.pid_filter_count = 32,
 			.pid_filter = stk70x0p_pid_filter,
 			.pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
-			.frontend_attach = stk7070p_frontend_attach,
+			.frontend_attach = stk7770p_frontend_attach,
 			.tuner_attach = dib7770p_tuner_attach,
 
 			DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 6b22ec64ab0c..f896337b4535 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -483,9 +483,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
 		}
 	}
 	kfree(p);
-	if (fw) {
-		release_firmware(fw);
-	}
+	release_firmware(fw);
 	return ret;
 }
 
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 2e28b973dfd3..3aed0d433921 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -260,6 +260,9 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
 
 //	dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
 
+	reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
+	reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
+
 	dib7000p_write_word(state, 908, reg_908);
 	dib7000p_write_word(state, 909, reg_909);
 }
@@ -778,7 +781,10 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
 	default:
 	case GUARD_INTERVAL_1_32: value *= 1; break;
 	}
-	state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO
+	if (state->cfg.diversity_delay == 0)
+		state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo
+	else
+		state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo
 
 	/* deactive the possibility of diversity reception if extended interleaver */
 	state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index 805dd13a97ee..da17345bf5bd 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -33,6 +33,11 @@ struct dib7000p_config {
 	int (*agc_control) (struct dvb_frontend *, u8 before);
 
 	u8 output_mode;
+	u8 disable_sample_and_hold : 1;
+
+	u8 enable_current_mirror : 1;
+	u8 diversity_delay;
+
 };
 
 #define DEFAULT_DIB7000P_I2C_ADDRESS 18
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index d93468cd3a85..ff3b0fa901b3 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1098,33 +1098,26 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
  *
  * @return pointer to descriptor on success, NULL on error.
  */
-struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+
+struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
 {
 	struct smscore_buffer_t *cb = NULL;
 	unsigned long flags;
 
-	DEFINE_WAIT(wait);
-
 	spin_lock_irqsave(&coredev->bufferslock, flags);
-
-	/* This function must return a valid buffer, since the buffer list is
-	 * finite, we check that there is an available buffer, if not, we wait
-	 * until such buffer become available.
-	 */
-
-	prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
-	if (list_empty(&coredev->buffers)) {
-		spin_unlock_irqrestore(&coredev->bufferslock, flags);
-		schedule();
-		spin_lock_irqsave(&coredev->bufferslock, flags);
+	if (!list_empty(&coredev->buffers)) {
+		cb = (struct smscore_buffer_t *) coredev->buffers.next;
+		list_del(&cb->entry);
 	}
+	spin_unlock_irqrestore(&coredev->bufferslock, flags);
+	return cb;
+}
 
-	finish_wait(&coredev->buffer_mng_waitq, &wait);
+struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+{
+	struct smscore_buffer_t *cb = NULL;
 
-	cb = (struct smscore_buffer_t *) coredev->buffers.next;
-	list_del(&cb->entry);
+	wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
 
-	spin_unlock_irqrestore(&coredev->bufferslock, flags);
-
 	return cb;
 }
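
wait_event() re-evaluates its condition on every wakeup, and here the condition itself dequeues under the spinlock, so a wakeup lost to a competing consumer simply waits again; the old prepare_to_wait()/schedule() sequence sampled the list once and could then dequeue from a list that was still empty. The producer half that completes the picture is not in this hunk, but would look roughly like:

    /* requeue a buffer, then kick the waiter so its
     * (cb = get_entry(coredev)) condition is re-tested */
    spin_lock_irqsave(&coredev->bufferslock, flags);
    list_add_tail(&cb->entry, &coredev->buffers);
    spin_unlock_irqrestore(&coredev->bufferslock, flags);
    wake_up(&coredev->buffer_mng_waitq);
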
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 67a4ec8768a6..4ce541a5eb47 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -395,7 +395,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
 	radio->registers[POWERCFG] = POWERCFG_ENABLE;
 	if (si470x_set_register(radio, POWERCFG) < 0) {
 		retval = -EIO;
-		goto err_all;
+		goto err_video;
 	}
 	msleep(110);
 
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 755dd0ce65ff..6f2b57384488 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video
 EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
 
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 6bdc0ef18119..f2a4900014bc 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -32,6 +32,7 @@
 #include <media/v4l2-chip-ident.h>
 
 #include <media/cx25840.h>
+#include "dvb-usb-ids.h"
 #include "xc5000.h"
 
 #include "cx231xx.h"
@@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = {
 	 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
 	{USB_DEVICE(0x0572, 0x58A1),
 	 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
+	{USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000, 0x4fff),
+	 .driver_info = CX231XX_BOARD_UNKNOWN},
 	{},
 };
 
@@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
 		dev->board.name, dev->model);
 
 	/* set the direction for GPIO pins */
-	cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
-	cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
-	cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
+	if (dev->board.tuner_gpio) {
+		cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
+		cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+		cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
 
 	/* request some modules if any required */
 
 	/* reset the Tuner */
 	cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+	}
 
 	/* set the mode to Analog mode initially */
 	cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 86ca8c2359dd..f5a3e74c3c7c 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1996,7 +1996,7 @@ static int cx25840_probe(struct i2c_client *client,
 
 	state->volume = v4l2_ctrl_new_std(&state->hdl,
 			&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
-			0, 65335, 65535 / 100, default_volume);
+			0, 65535, 65535 / 100, default_volume);
 	state->mute = v4l2_ctrl_new_std(&state->hdl,
 			&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
 			0, 1, 1, 0);
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 99dbae117591..0fa85cbefbb1 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -17,7 +17,7 @@ config VIDEO_CX88
 
 config VIDEO_CX88_ALSA
 	tristate "Conexant 2388x DMA audio support"
-	depends on VIDEO_CX88 && SND && EXPERIMENTAL
+	depends on VIDEO_CX88 && SND
 	select SND_PCM
 	---help---
 	  This is a video4linux driver for direct (DMA) audio on
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index b9846106913e..78abc1c1f9d5 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
 			 usb_rcvintpipe(dev, ep->bEndpointAddress),
 			buffer, buffer_len,
 			int_irq, (void *)gspca_dev, interval);
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	gspca_dev->int_urb = urb;
 	ret = usb_submit_urb(urb, GFP_KERNEL);
 	if (ret < 0) {
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 83a718f0f3f9..9052d5702556 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2357,8 +2357,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
 				(data[33] << 10);
 		avg_lum >>= 9;
 		atomic_set(&sd->avg_lum, avg_lum);
-		gspca_frame_add(gspca_dev, LAST_PACKET,
-				data, len);
+		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
 		return;
 	}
 	if (gspca_dev->last_packet_type == LAST_PACKET) {
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index be03a712731c..f0316d02f09f 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -466,6 +466,8 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
 	struct fb_vblank vblank;
 	u32 trace;
 
+	memset(&vblank, 0, sizeof(struct fb_vblank));
+
 	vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
 			FB_VBLANK_HAVE_VSYNC;
 	trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 4525335f9bd4..a7210d981388 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -239,7 +239,7 @@ static int device_process(struct m2mtest_ctx *ctx,
 		return -EFAULT;
 	}
 
-	if (in_buf->vb.size < out_buf->vb.size) {
+	if (in_buf->vb.size > out_buf->vb.size) {
 		v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
 		return -EINVAL;
 	}
@@ -1014,6 +1014,7 @@ static int m2mtest_remove(struct platform_device *pdev)
 	v4l2_m2m_release(dev->m2m_dev);
 	del_timer_sync(&dev->timer);
 	video_unregister_device(dev->vfd);
+	video_device_release(dev->vfd);
 	v4l2_device_unregister(&dev->v4l2_dev);
 	kfree(dev);
 
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 758a4db27d65..c71af4e0e517 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -447,6 +447,9 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
 	dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
 		__func__, rect.left, rect.top, rect.width, rect.height);
 
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
 	ret = mt9m111_make_rect(client, &rect);
 	if (!ret)
 		mt9m111->rect = rect;
@@ -466,12 +469,14 @@ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
 
 static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
 {
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
 	a->bounds.left = MT9M111_MIN_DARK_COLS;
 	a->bounds.top = MT9M111_MIN_DARK_ROWS;
 	a->bounds.width = MT9M111_MAX_WIDTH;
 	a->bounds.height = MT9M111_MAX_HEIGHT;
 	a->defrect = a->bounds;
-	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	a->pixelaspect.numerator = 1;
 	a->pixelaspect.denominator = 1;
 
@@ -487,6 +492,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
 	mf->width = mt9m111->rect.width;
 	mf->height = mt9m111->rect.height;
 	mf->code = mt9m111->fmt->code;
+	mf->colorspace = mt9m111->fmt->colorspace;
 	mf->field = V4L2_FIELD_NONE;
 
 	return 0;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index e7cd23cd6394..b48473c7896b 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -402,9 +402,6 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
 		if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
 			return -EINVAL;
 		break;
-	case 0:
-		/* No format change, only geometry */
-		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 66ff174151b5..b6ea67221d1d 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -378,6 +378,9 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
 
 	spin_lock_irqsave(&pcdev->lock, flags);
 
+	if (*fb_active == NULL)
+		goto out;
+
 	vb = &(*fb_active)->vb;
 	dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
 		vb, vb->baddr, vb->bsize);
@@ -402,6 +405,7 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
 
 	*fb_active = buf;
 
+out:
 	spin_unlock_irqrestore(&pcdev->lock, flags);
 }
 
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
index 1b992b847198..55ea914c7fcd 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -513,7 +513,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
 		if (ret >= 0) {
 			ret = pvr2_ctrl_range_check(cptr,*valptr);
 		}
-		if (maskptr) *maskptr = ~0;
+		*maskptr = ~0;
 	} else if (cptr->info->type == pvr2_ctl_bool) {
 		ret = parse_token(ptr,len,valptr,boolNames,
 				  ARRAY_SIZE(boolNames));
@@ -522,7 +522,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
 		} else if (ret == 0) {
 			*valptr = (*valptr & 1) ? !0 : 0;
 		}
-		if (maskptr) *maskptr = 1;
+		*maskptr = 1;
 	} else if (cptr->info->type == pvr2_ctl_enum) {
 		ret = parse_token(
 			ptr,len,valptr,
@@ -531,7 +531,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
 		if (ret >= 0) {
 			ret = pvr2_ctrl_range_check(cptr,*valptr);
 		}
-		if (maskptr) *maskptr = ~0;
+		*maskptr = ~0;
 	} else if (cptr->info->type == pvr2_ctl_bitmask) {
 		ret = parse_tlist(
 			ptr,len,maskptr,valptr,
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index b151c7be8a50..6961c55baf9b 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -393,6 +393,37 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
 	dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
 }
 
+static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+	struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+
+	f->dma_offset.y_h = f->offs_h;
+	if (!variant->pix_hoff)
+		f->dma_offset.y_h *= (f->fmt->depth >> 3);
+
+	f->dma_offset.y_v = f->offs_v;
+
+	f->dma_offset.cb_h = f->offs_h;
+	f->dma_offset.cb_v = f->offs_v;
+
+	f->dma_offset.cr_h = f->offs_h;
+	f->dma_offset.cr_v = f->offs_v;
+
+	if (!variant->pix_hoff) {
+		if (f->fmt->planes_cnt == 3) {
+			f->dma_offset.cb_h >>= 1;
+			f->dma_offset.cr_h >>= 1;
+		}
+		if (f->fmt->color == S5P_FIMC_YCBCR420) {
+			f->dma_offset.cb_v >>= 1;
+			f->dma_offset.cr_v >>= 1;
+		}
+	}
+
+	dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+	    f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
 /**
  * fimc_prepare_config - check dimensions, operation and color mode
  * and pre-calculate offset and the scaling coefficients.
@@ -406,7 +437,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
 {
 	struct fimc_frame *s_frame, *d_frame;
 	struct fimc_vid_buffer *buf = NULL;
-	struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
 	int ret = 0;
 
 	s_frame = &ctx->s_frame;
@@ -419,61 +449,16 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
 		swap(d_frame->width, d_frame->height);
 	}
 
-	/* Prepare the output offset ratios for scaler. */
-	d_frame->dma_offset.y_h = d_frame->offs_h;
-	if (!variant->pix_hoff)
-		d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3);
-
-	d_frame->dma_offset.y_v = d_frame->offs_v;
-
-	d_frame->dma_offset.cb_h = d_frame->offs_h;
-	d_frame->dma_offset.cb_v = d_frame->offs_v;
-
-	d_frame->dma_offset.cr_h = d_frame->offs_h;
-	d_frame->dma_offset.cr_v = d_frame->offs_v;
+	/* Prepare the DMA offset ratios for scaler. */
+	fimc_prepare_dma_offset(ctx, &ctx->s_frame);
+	fimc_prepare_dma_offset(ctx, &ctx->d_frame);
 
-	if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) {
-		d_frame->dma_offset.cb_h >>= 1;
-		d_frame->dma_offset.cb_v >>= 1;
-		d_frame->dma_offset.cr_h >>= 1;
-		d_frame->dma_offset.cr_v >>= 1;
-	}
-
-	dbg("out offset: color= %d, y_h= %d, y_v= %d",
-	    d_frame->fmt->color,
-	    d_frame->dma_offset.y_h, d_frame->dma_offset.y_v);
-
-	/* Prepare the input offset ratios for scaler. */
-	s_frame->dma_offset.y_h = s_frame->offs_h;
-	if (!variant->pix_hoff)
-		s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3);
-	s_frame->dma_offset.y_v = s_frame->offs_v;
-
-	s_frame->dma_offset.cb_h = s_frame->offs_h;
-	s_frame->dma_offset.cb_v = s_frame->offs_v;
-
-	s_frame->dma_offset.cr_h = s_frame->offs_h;
-	s_frame->dma_offset.cr_v = s_frame->offs_v;
-
-	if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) {
-		s_frame->dma_offset.cb_h >>= 1;
-		s_frame->dma_offset.cb_v >>= 1;
-		s_frame->dma_offset.cr_h >>= 1;
-		s_frame->dma_offset.cr_v >>= 1;
-	}
-
-	dbg("in offset: color= %d, y_h= %d, y_v= %d",
-	    s_frame->fmt->color, s_frame->dma_offset.y_h,
-	    s_frame->dma_offset.y_v);
-
-	fimc_set_yuv_order(ctx);
-
-	/* Check against the scaler ratio. */
 	if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
 	    s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
 		err("out of scaler range");
 		return -EINVAL;
 	}
+	fimc_set_yuv_order(ctx);
 	}
 
 	/* Input DMA mode is not allowed when the scaler is disabled. */
@@ -822,7 +807,8 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
 	} else {
 		v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
 			 "Wrong buffer/video queue type (%d)\n", f->type);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto s_fmt_out;
 	}
 
 	pix = &f->fmt.pix;
@@ -1414,8 +1400,10 @@ static int fimc_probe(struct platform_device *pdev)
 	}
 
 	fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
-	if (!fimc->work_queue)
+	if (!fimc->work_queue) {
+		ret = -ENOMEM;
 		goto err_irq;
+	}
 
 	ret = fimc_register_m2m_device(fimc);
 	if (ret)
@@ -1492,6 +1480,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
 };
 
 static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
+	.pix_hoff = 1,
 	.has_inp_rot = 1,
 	.has_out_rot = 1,
 	.min_inp_pixsize = 16,
@@ -1506,6 +1495,7 @@ static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
 };
 
 static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+	.pix_hoff = 1,
 	.min_inp_pixsize = 16,
 	.min_out_pixsize = 32,
 
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index ec697fcd406e..bb8d83d8ddaf 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4323,13 +4323,13 @@ struct saa7134_board saa7134_boards[] = {
 	},
 	[SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
 		/* Beholder Intl. Ltd. 2008 */
-		/*Dmitry Belimov <d.belimov@gmail.com> */
-		.name           = "Beholder BeholdTV Columbus TVFM",
+		/* Dmitry Belimov <d.belimov@gmail.com> */
+		.name           = "Beholder BeholdTV Columbus TV/FM",
 		.audio_clock    = 0x00187de7,
 		.tuner_type     = TUNER_ALPS_TSBE5_PAL,
-		.radio_type     = UNSET,
-		.tuner_addr     = ADDR_UNSET,
-		.radio_addr     = ADDR_UNSET,
+		.radio_type     = TUNER_TEA5767,
+		.tuner_addr     = 0xc2 >> 1,
+		.radio_addr     = 0xc0 >> 1,
 		.tda9887_conf   = TDA9887_PRESENT,
 		.gpiomask       = 0x000A8004,
 		.inputs         = {{
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c
index 5713f3a4b76c..ddd25d32723d 100644
--- a/drivers/media/video/saa7164/saa7164-buffer.c
+++ b/drivers/media/video/saa7164/saa7164-buffer.c
@@ -136,10 +136,11 @@ ret:
 int saa7164_buffer_dealloc(struct saa7164_tsport *port,
 	struct saa7164_buffer *buf)
 {
-	struct saa7164_dev *dev = port->dev;
+	struct saa7164_dev *dev;
 
-	if ((buf == 0) || (port == 0))
+	if (!buf || !port)
 		return SAA_ERR_BAD_PARAMETER;
+	dev = port->dev;
 
 	dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
 
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 8bdd940f32e6..2ac85d8984f0 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -486,6 +486,12 @@ static int uvc_parse_format(struct uvc_device *dev,
 					max(frame->dwFrameInterval[0],
 					    frame->dwDefaultFrameInterval));
 
+		if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
+			frame->bFrameIntervalType = 1;
+			frame->dwFrameInterval[0] =
+				frame->dwDefaultFrameInterval;
+		}
+
 		uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
 			frame->wWidth, frame->wHeight,
 			10000000/frame->dwDefaultFrameInterval,
@@ -2026,6 +2032,15 @@ static struct usb_device_id uvc_ids[] = {
 	  .bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
 	  .bInterfaceSubClass	= 1,
 	  .bInterfaceProtocol	= 0 },
+	/* Chicony CNF7129 (Asus EEE 100HE) */
+	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+				| USB_DEVICE_ID_MATCH_INT_INFO,
+	  .idVendor		= 0x04f2,
+	  .idProduct		= 0xb071,
+	  .bInterfaceClass	= USB_CLASS_VIDEO,
+	  .bInterfaceSubClass	= 1,
+	  .bInterfaceProtocol	= 0,
+	  .driver_info		= UVC_QUIRK_RESTRICT_FRAME_RATE },
 	/* Alcor Micro AU3820 (Future Boy PC USB Webcam) */
 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
 				| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2091,6 +2106,15 @@ static struct usb_device_id uvc_ids[] = {
 	  .bInterfaceProtocol	= 0,
 	  .driver_info		= UVC_QUIRK_PROBE_MINMAX
 				| UVC_QUIRK_PROBE_DEF },
+	/* IMC Networks (Medion Akoya) */
+	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+				| USB_DEVICE_ID_MATCH_INT_INFO,
+	  .idVendor		= 0x13d3,
+	  .idProduct		= 0x5103,
+	  .bInterfaceClass	= USB_CLASS_VIDEO,
+	  .bInterfaceSubClass	= 1,
+	  .bInterfaceProtocol	= 0,
+	  .driver_info		= UVC_QUIRK_STREAM_NO_FID },
 	/* Syntek (HP Spartan) */
 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
 				| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index bdacf3beabf5..892e0e51916c 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -182,6 +182,7 @@ struct uvc_xu_control {
 #define UVC_QUIRK_IGNORE_SELECTOR_UNIT	0x00000020
 #define UVC_QUIRK_FIX_BANDWIDTH	0x00000080
 #define UVC_QUIRK_PROBE_DEF	0x00000100
+#define UVC_QUIRK_RESTRICT_FRAME_RATE	0x00000200
 
 /* Format flags */
 #define UVC_FMT_FLAG_COMPRESSED	0x00000001
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 073f01390cdd..86294ed35c9b 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
 struct video_code32 {
 	char		loadwhat[16];	/* name or tag of file being passed */
 	compat_int_t	datasize;
-	unsigned char	*data;
+	compat_uptr_t	data;
 };
 
-static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
+static struct video_code __user *get_microcode32(struct video_code32 *kp)
 {
-	if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
-		copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
-		get_user(kp->datasize, &up->datasize) ||
-		copy_from_user(kp->data, up->data, up->datasize))
-		return -EFAULT;
-	return 0;
+	struct video_code __user *up;
+
+	up = compat_alloc_user_space(sizeof(*up));
+
+	/*
+	 * NOTE! We don't actually care if these fail. If the
+	 * user address is invalid, the native ioctl will do
+	 * the error handling for us
+	 */
+	(void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
+	(void) put_user(kp->datasize, &up->datasize);
+	(void) put_user(compat_ptr(kp->data), &up->data);
+	return up;
 }
 
 #define VIDIOCGTUNER32	_IOWR('v', 4, struct video_tuner32)
@@ -739,7 +746,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
739 struct video_tuner vt; 746 struct video_tuner vt;
740 struct video_buffer vb; 747 struct video_buffer vb;
741 struct video_window vw; 748 struct video_window vw;
742 struct video_code vc; 749 struct video_code32 vc;
743 struct video_audio va; 750 struct video_audio va;
744#endif 751#endif
745 struct v4l2_format v2f; 752 struct v4l2_format v2f;
@@ -818,8 +825,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
818 break; 825 break;
819 826
820 case VIDIOCSMICROCODE: 827 case VIDIOCSMICROCODE:
821 err = get_microcode32(&karg.vc, up); 828 /* Copy the 32-bit "video_code32" to kernel space */
822 compatible_arg = 0; 829 if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
830 return -EFAULT;
831 /* Convert the 32-bit version to a 64-bit version in user space */
832 up = get_microcode32(&karg.vc);
823 break; 833 break;
824 834
825 case VIDIOCSFREQ: 835 case VIDIOCSFREQ:
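
Note: the VIDIOCSMICROCODE fix above is the standard compat-ioctl shape. The old get_microcode32() dereferenced a raw user pointer and copied a user-controlled datasize into a fixed kernel struct; the new code copies the packed 32-bit struct into the kernel, then rebuilds a native struct on the user stack with compat_alloc_user_space() and lets the native handler do every check. A minimal sketch of that pattern follows; the foo/foo32 types and foo32_to_native() are hypothetical stand-ins for video_code:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct foo32 {
        compat_int_t  len;
        compat_uptr_t data;            /* 32-bit user pointer */
    };

    struct foo {
        int          len;
        void __user *data;
    };

    static struct foo __user *foo32_to_native(const struct foo32 *k32)
    {
        struct foo __user *up = compat_alloc_user_space(sizeof(*up));

        /* Failures are deliberately ignored here: a bad user address
         * is caught by the native ioctl handler that consumes 'up'. */
        (void) put_user(k32->len, &up->len);
        (void) put_user(compat_ptr(k32->data), &up->data);
        return up;
    }
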
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 372b87efcd05..6ff9e4bac3ea 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -393,8 +393,10 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
393 } 393 }
394 394
395 /* read() method */ 395 /* read() method */
396 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); 396 if (mem->vaddr) {
397 mem->vaddr = NULL; 397 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
398 mem->vaddr = NULL;
399 }
398} 400}
399EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); 401EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
400 402
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 06f9a9c2a39a..2ad0bc252b0e 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -94,7 +94,7 @@ err:
94 * must free the memory. 94 * must free the memory.
95 */ 95 */
96static struct scatterlist *videobuf_pages_to_sg(struct page **pages, 96static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
97 int nr_pages, int offset) 97 int nr_pages, int offset, size_t size)
98{ 98{
99 struct scatterlist *sglist; 99 struct scatterlist *sglist;
100 int i; 100 int i;
@@ -110,12 +110,14 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
110 /* DMA to highmem pages might not work */ 110 /* DMA to highmem pages might not work */
111 goto highmem; 111 goto highmem;
112 sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); 112 sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
113 size -= PAGE_SIZE - offset;
113 for (i = 1; i < nr_pages; i++) { 114 for (i = 1; i < nr_pages; i++) {
114 if (NULL == pages[i]) 115 if (NULL == pages[i])
115 goto nopage; 116 goto nopage;
116 if (PageHighMem(pages[i])) 117 if (PageHighMem(pages[i]))
117 goto highmem; 118 goto highmem;
118 sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); 119 sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
120 size -= min(PAGE_SIZE, size);
119 } 121 }
120 return sglist; 122 return sglist;
121 123
@@ -170,7 +172,8 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
170 172
171 first = (data & PAGE_MASK) >> PAGE_SHIFT; 173 first = (data & PAGE_MASK) >> PAGE_SHIFT;
172 last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; 174 last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
173 dma->offset = data & ~PAGE_MASK; 175 dma->offset = data & ~PAGE_MASK;
176 dma->size = size;
174 dma->nr_pages = last-first+1; 177 dma->nr_pages = last-first+1;
175 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); 178 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
176 if (NULL == dma->pages) 179 if (NULL == dma->pages)
@@ -252,7 +255,7 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
252 255
253 if (dma->pages) { 256 if (dma->pages) {
254 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, 257 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
255 dma->offset); 258 dma->offset, dma->size);
256 } 259 }
257 if (dma->vaddr) { 260 if (dma->vaddr) {
258 dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, 261 dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
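
Note: the new size argument to videobuf_pages_to_sg() exists because every entry after the first used to claim a full PAGE_SIZE, letting a DMA engine run past the end of a short buffer; with the clamp, a 5000-byte buffer at offset 0 maps as 4096 + 904 instead of 4096 + 4096. A small user-space model of the arithmetic (assumes 4 KiB pages, illustrative only):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long size = 5000, offset = 0;
        unsigned long chunk = PAGE_SIZE - offset;   /* first entry */
        int i = 0;

        while (size) {
            if (chunk > size)
                chunk = size;                       /* the new clamp */
            printf("sg entry %d: %lu bytes\n", i++, chunk);
            size -= chunk;
            chunk = PAGE_SIZE;                      /* later entries */
        }
        return 0;                                   /* prints 4096, 904 */
    }
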
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 714c6b487313..d5f3a3fd2319 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -190,7 +190,6 @@ static int __devexit bh1780_remove(struct i2c_client *client)
190 190
191 ddata = i2c_get_clientdata(client); 191 ddata = i2c_get_clientdata(client);
192 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); 192 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
193 i2c_set_clientdata(client, NULL);
194 kfree(ddata); 193 kfree(ddata);
195 194
196 return 0; 195 return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5db49b124ffa..09eee6df0653 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1631,6 +1631,19 @@ int mmc_suspend_host(struct mmc_host *host)
1631 if (host->bus_ops && !host->bus_dead) { 1631 if (host->bus_ops && !host->bus_dead) {
1632 if (host->bus_ops->suspend) 1632 if (host->bus_ops->suspend)
1633 err = host->bus_ops->suspend(host); 1633 err = host->bus_ops->suspend(host);
1634 if (err == -ENOSYS || !host->bus_ops->resume) {
1635 /*
1636 * We simply "remove" the card in this case.
1637 * It will be redetected on resume.
1638 */
1639 if (host->bus_ops->remove)
1640 host->bus_ops->remove(host);
1641 mmc_claim_host(host);
1642 mmc_detach_bus(host);
1643 mmc_release_host(host);
1644 host->pm_flags = 0;
1645 err = 0;
1646 }
1634 } 1647 }
1635 mmc_bus_put(host); 1648 mmc_bus_put(host);
1636 1649
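
Note: the mmc_suspend_host() hunk adds a fallback for bus drivers that cannot suspend (-ENOSYS) or have no resume handler: tear the card down now, report success, and rely on redetection at resume time. A sketch of that shape with a hypothetical ops structure; the host claim/detach bookkeeping (mmc_claim_host()/mmc_detach_bus()/mmc_release_host()) is elided:

    #include <linux/errno.h>

    struct demo_bus_ops {
        int  (*suspend)(void *host);
        int  (*resume)(void *host);
        void (*remove)(void *host);
    };

    static int demo_suspend(const struct demo_bus_ops *ops, void *host)
    {
        int err = ops->suspend ? ops->suspend(host) : 0;

        if (err == -ENOSYS || !ops->resume) {
            /* Cannot round-trip this card: drop it now and let
             * normal detection bring it back after resume. */
            if (ops->remove)
                ops->remove(host);
            err = 0;
        }
        return err;
    }
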
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2828e84d243..214b03afdd48 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -30,6 +30,8 @@
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/completion.h>
33 35
34#include <asm/mach/flash.h> 36#include <asm/mach/flash.h>
35#include <mach/mxc_nand.h> 37#include <mach/mxc_nand.h>
@@ -151,7 +153,7 @@ struct mxc_nand_host {
151 int irq; 153 int irq;
152 int eccsize; 154 int eccsize;
153 155
154 wait_queue_head_t irq_waitq; 156 struct completion op_completion;
155 157
156 uint8_t *data_buf; 158 uint8_t *data_buf;
157 unsigned int buf_start; 159 unsigned int buf_start;
@@ -164,6 +166,7 @@ struct mxc_nand_host {
164 void (*send_read_id)(struct mxc_nand_host *); 166 void (*send_read_id)(struct mxc_nand_host *);
165 uint16_t (*get_dev_status)(struct mxc_nand_host *); 167 uint16_t (*get_dev_status)(struct mxc_nand_host *);
166 int (*check_int)(struct mxc_nand_host *); 168 int (*check_int)(struct mxc_nand_host *);
169 void (*irq_control)(struct mxc_nand_host *, int);
167}; 170};
168 171
169/* OOB placement block for use with hardware ecc generation */ 172/* OOB placement block for use with hardware ecc generation */
@@ -216,9 +219,12 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
216{ 219{
217 struct mxc_nand_host *host = dev_id; 220 struct mxc_nand_host *host = dev_id;
218 221
219 disable_irq_nosync(irq); 222 if (!host->check_int(host))
223 return IRQ_NONE;
220 224
221 wake_up(&host->irq_waitq); 225 host->irq_control(host, 0);
226
227 complete(&host->op_completion);
222 228
223 return IRQ_HANDLED; 229 return IRQ_HANDLED;
224} 230}
@@ -245,11 +251,54 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
245 if (!(tmp & NFC_V1_V2_CONFIG2_INT)) 251 if (!(tmp & NFC_V1_V2_CONFIG2_INT))
246 return 0; 252 return 0;
247 253
248 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); 254 if (!cpu_is_mx21())
255 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
249 256
250 return 1; 257 return 1;
251} 258}
252 259
260/*
261 * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
262 * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
263 * driver can enable/disable the irq line rather than simply masking the
264 * interrupts.
265 */
266static void irq_control_mx21(struct mxc_nand_host *host, int activate)
267{
268 if (activate)
269 enable_irq(host->irq);
270 else
271 disable_irq_nosync(host->irq);
272}
273
274static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
275{
276 uint16_t tmp;
277
278 tmp = readw(NFC_V1_V2_CONFIG1);
279
280 if (activate)
281 tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
282 else
283 tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
284
285 writew(tmp, NFC_V1_V2_CONFIG1);
286}
287
288static void irq_control_v3(struct mxc_nand_host *host, int activate)
289{
290 uint32_t tmp;
291
292 tmp = readl(NFC_V3_CONFIG2);
293
294 if (activate)
295 tmp &= ~NFC_V3_CONFIG2_INT_MSK;
296 else
297 tmp |= NFC_V3_CONFIG2_INT_MSK;
298
299 writel(tmp, NFC_V3_CONFIG2);
300}
301
253/* This function polls the NANDFC to wait for the basic operation to 302/* This function polls the NANDFC to wait for the basic operation to
254 * complete by checking the INT bit of config2 register. 303 * complete by checking the INT bit of config2 register.
255 */ 304 */
@@ -259,10 +308,9 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
259 308
260 if (useirq) { 309 if (useirq) {
261 if (!host->check_int(host)) { 310 if (!host->check_int(host)) {
262 311 INIT_COMPLETION(host->op_completion);
263 enable_irq(host->irq); 312 host->irq_control(host, 1);
264 313 wait_for_completion(&host->op_completion);
265 wait_event(host->irq_waitq, host->check_int(host));
266 } 314 }
267 } else { 315 } else {
268 while (max_retries-- > 0) { 316 while (max_retries-- > 0) {
@@ -799,6 +847,7 @@ static void preset_v3(struct mtd_info *mtd)
799 NFC_V3_CONFIG2_2CMD_PHASES | 847 NFC_V3_CONFIG2_2CMD_PHASES |
800 NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) | 848 NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
801 NFC_V3_CONFIG2_ST_CMD(0x70) | 849 NFC_V3_CONFIG2_ST_CMD(0x70) |
850 NFC_V3_CONFIG2_INT_MSK |
802 NFC_V3_CONFIG2_NUM_ADDR_PHASE0; 851 NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
803 852
804 if (chip->ecc.mode == NAND_ECC_HW) 853 if (chip->ecc.mode == NAND_ECC_HW)
@@ -1024,6 +1073,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1024 host->send_read_id = send_read_id_v1_v2; 1073 host->send_read_id = send_read_id_v1_v2;
1025 host->get_dev_status = get_dev_status_v1_v2; 1074 host->get_dev_status = get_dev_status_v1_v2;
1026 host->check_int = check_int_v1_v2; 1075 host->check_int = check_int_v1_v2;
1076 if (cpu_is_mx21())
1077 host->irq_control = irq_control_mx21;
1078 else
1079 host->irq_control = irq_control_v1_v2;
1027 } 1080 }
1028 1081
1029 if (nfc_is_v21()) { 1082 if (nfc_is_v21()) {
@@ -1062,6 +1115,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1062 host->send_read_id = send_read_id_v3; 1115 host->send_read_id = send_read_id_v3;
1063 host->check_int = check_int_v3; 1116 host->check_int = check_int_v3;
1064 host->get_dev_status = get_dev_status_v3; 1117 host->get_dev_status = get_dev_status_v3;
1118 host->irq_control = irq_control_v3;
1065 oob_smallpage = &nandv2_hw_eccoob_smallpage; 1119 oob_smallpage = &nandv2_hw_eccoob_smallpage;
1066 oob_largepage = &nandv2_hw_eccoob_largepage; 1120 oob_largepage = &nandv2_hw_eccoob_largepage;
1067 } else 1121 } else
@@ -1093,14 +1147,34 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1093 this->options |= NAND_USE_FLASH_BBT; 1147 this->options |= NAND_USE_FLASH_BBT;
1094 } 1148 }
1095 1149
1096 init_waitqueue_head(&host->irq_waitq); 1150 init_completion(&host->op_completion);
1097 1151
1098 host->irq = platform_get_irq(pdev, 0); 1152 host->irq = platform_get_irq(pdev, 0);
1099 1153
1154 /*
 1155 * mask the interrupt. For i.MX21 explicitly call
1156 * irq_control_v1_v2 to use the mask bit. We can't call
1157 * disable_irq_nosync() for an interrupt we do not own yet.
1158 */
1159 if (cpu_is_mx21())
1160 irq_control_v1_v2(host, 0);
1161 else
1162 host->irq_control(host, 0);
1163
1100 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); 1164 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
1101 if (err) 1165 if (err)
1102 goto eirq; 1166 goto eirq;
1103 1167
1168 host->irq_control(host, 0);
1169
1170 /*
 1171 * Now that the interrupt is disabled, make sure the interrupt
1172 * mask bit is cleared on i.MX21. Otherwise we can't read
1173 * the interrupt status bit on this machine.
1174 */
1175 if (cpu_is_mx21())
1176 irq_control_v1_v2(host, 1);
1177
1104 /* first scan to find the device and get the page size */ 1178 /* first scan to find the device and get the page size */
1105 if (nand_scan_ident(mtd, 1, NULL)) { 1179 if (nand_scan_ident(mtd, 1, NULL)) {
1106 err = -ENXIO; 1180 err = -ENXIO;
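
Note: the mxc_nand rework swaps an open-coded wait queue for a struct completion and routes all masking through an irq_control() hook, because the i.MX21 cannot report CONFIG2:INT while CONFIG1:INT_MSK is set. The generic waitqueue-to-completion conversion looks like this; device_raised_irq() and enable_device_irq() are hypothetical stand-ins for the driver's check_int() and irq_control():

    #include <linux/completion.h>
    #include <linux/interrupt.h>

    static DECLARE_COMPLETION(op_done);

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
        if (!device_raised_irq(dev_id))     /* hypothetical check_int() */
            return IRQ_NONE;                /* shared line, not ours */
        complete(&op_done);
        return IRQ_HANDLED;
    }

    static void demo_wait_op_done(void)
    {
        INIT_COMPLETION(op_done);           /* re-arm before enabling */
        enable_device_irq();                /* hypothetical irq_control(1) */
        wait_for_completion(&op_done);
    }
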
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 70705d1306b9..eca55c52bdfd 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -522,7 +522,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
522 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ 522 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
523 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ 523 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
524 524
525 init_MUTEX_LOCKED(&lp->cmd_mutex); 525 sema_init(&lp->cmd_mutex, 0);
526 init_completion(&lp->execution_cmd); 526 init_completion(&lp->execution_cmd);
527 init_completion(&lp->xceiver_cmd); 527 init_completion(&lp->xceiver_cmd);
528 528
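
Note: this hunk and the matching ones further down (6pack, mkiss, sir_dev, ppp_async, cosa, parport) are one mechanical conversion. init_MUTEX() and init_MUTEX_LOCKED() were deprecated wrappers around sema_init(), so each call site is replaced by the wrapped form:

    #include <linux/semaphore.h>

    static struct semaphore demo_sem;

    static void demo_init(void)
    {
        sema_init(&demo_sem, 1);  /* was init_MUTEX(): starts free */
        sema_init(&demo_sem, 0);  /* was init_MUTEX_LOCKED(): the first
                                   * down() blocks until someone up()s */
    }
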
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2cc81a54cbf3..5db667c0b371 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2428,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
2428 2428
2429config MV643XX_ETH 2429config MV643XX_ETH
2430 tristate "Marvell Discovery (643XX) and Orion ethernet support" 2430 tristate "Marvell Discovery (643XX) and Orion ethernet support"
2431 depends on MV64X60 || PPC32 || PLAT_ORION 2431 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
2432 select INET_LRO 2432 select INET_LRO
2433 select PHYLIB 2433 select PHYLIB
2434 help 2434 help
@@ -2803,7 +2803,7 @@ config NIU
2803 2803
2804config PASEMI_MAC 2804config PASEMI_MAC
2805 tristate "PA Semi 1/10Gbit MAC" 2805 tristate "PA Semi 1/10Gbit MAC"
2806 depends on PPC_PASEMI && PCI 2806 depends on PPC_PASEMI && PCI && INET
2807 select PHYLIB 2807 select PHYLIB
2808 select INET_LRO 2808 select INET_LRO
2809 help 2809 help
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 1e620e287ae0..efeffdf9e5fa 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2170,8 +2170,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2170 dev->irq = sdev->irq; 2170 dev->irq = sdev->irq;
2171 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); 2171 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2172 2172
2173 netif_carrier_off(dev);
2174
2175 err = ssb_bus_powerup(sdev->bus, 0); 2173 err = ssb_bus_powerup(sdev->bus, 0);
2176 if (err) { 2174 if (err) {
2177 dev_err(sdev->dev, 2175 dev_err(sdev->dev,
@@ -2213,6 +2211,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2213 goto err_out_powerdown; 2211 goto err_out_powerdown;
2214 } 2212 }
2215 2213
2214 netif_carrier_off(dev);
2215
2216 ssb_set_drvdata(sdev, dev); 2216 ssb_set_drvdata(sdev, dev);
2217 2217
2218 /* Chip reset provides power to the b44 MAC & PCI cores, which 2218 /* Chip reset provides power to the b44 MAC & PCI cores, which
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3b16f62d5606..e953c6ad6e6d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5164,6 +5164,15 @@ int bond_create(struct net *net, const char *name)
5164 res = dev_alloc_name(bond_dev, "bond%d"); 5164 res = dev_alloc_name(bond_dev, "bond%d");
5165 if (res < 0) 5165 if (res < 0)
5166 goto out; 5166 goto out;
5167 } else {
5168 /*
5169 * If we're given a name to register
 5170 * we need to ensure that it's not already
5171 * registered
5172 */
5173 res = -EEXIST;
5174 if (__dev_get_by_name(net, name) != NULL)
5175 goto out;
5167 } 5176 }
5168 5177
5169 res = register_netdevice(bond_dev); 5178 res = register_netdevice(bond_dev);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index a333b42111b8..6372610ed240 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev,
533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ 533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
534 534
535 skb_put(skb, length); 535 skb_put(skb, length);
536 skb->ip_summed = CHECKSUM_UNNECESSARY;
537 skb->protocol = eth_type_trans(skb, dev); 536 skb->protocol = eth_type_trans(skb, dev);
537
 538 /* The packet was not an IPv4 packet, so a complemented checksum was
539 calculated. The value is found in the Internet Checksum field. */
540 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
541 skb->ip_summed = CHECKSUM_COMPLETE;
542 skb->csum = csum_unfold(~cqe->inet_checksum_value);
543 } else
544 skb->ip_summed = CHECKSUM_UNNECESSARY;
538} 545}
539 546
540static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, 547static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
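
Note: for non-IPv4 frames the ehea adapter only computes a ones-complement sum instead of validating the checksum, so the driver now hands that sum to the stack as CHECKSUM_COMPLETE and lets the stack verify L4 itself. A sketch of the hand-off; blind_csum and hw_csum are hypothetical RX-descriptor fields standing in for EHEA_CQE_BLIND_CKSUM and inet_checksum_value:

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    static void demo_rx_csum(struct sk_buff *skb, bool blind_csum,
                             __sum16 hw_csum)
    {
        if (blind_csum) {
            /* HW only summed the frame; the stack verifies L4 itself. */
            skb->ip_summed = CHECKSUM_COMPLETE;
            skb->csum = csum_unfold(~hw_csum);
        } else {
            /* HW already validated the checksums. */
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
    }
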
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index f608a6c54af5..38104734a3be 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -150,6 +150,7 @@ struct ehea_rwqe {
150#define EHEA_CQE_TYPE_RQ 0x60 150#define EHEA_CQE_TYPE_RQ 0x60
151#define EHEA_CQE_STAT_ERR_MASK 0x700F 151#define EHEA_CQE_STAT_ERR_MASK 0x700F
152#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF 152#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
153#define EHEA_CQE_BLIND_CKSUM 0x8000
153#define EHEA_CQE_STAT_ERR_TCP 0x4000 154#define EHEA_CQE_STAT_ERR_TCP 0x4000
154#define EHEA_CQE_STAT_ERR_IP 0x2000 155#define EHEA_CQE_STAT_ERR_IP 0x2000
155#define EHEA_CQE_STAT_ERR_CRC 0x1000 156#define EHEA_CQE_STAT_ERR_CRC 0x1000
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 768b840aeb6b..cce32d43175f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -678,24 +678,37 @@ static int fec_enet_mii_probe(struct net_device *dev)
678{ 678{
679 struct fec_enet_private *fep = netdev_priv(dev); 679 struct fec_enet_private *fep = netdev_priv(dev);
680 struct phy_device *phy_dev = NULL; 680 struct phy_device *phy_dev = NULL;
681 int ret; 681 char mdio_bus_id[MII_BUS_ID_SIZE];
682 char phy_name[MII_BUS_ID_SIZE + 3];
683 int phy_id;
682 684
683 fep->phy_dev = NULL; 685 fep->phy_dev = NULL;
684 686
685 /* find the first phy */ 687 /* check for attached phy */
686 phy_dev = phy_find_first(fep->mii_bus); 688 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
687 if (!phy_dev) { 689 if ((fep->mii_bus->phy_mask & (1 << phy_id)))
688 printk(KERN_ERR "%s: no PHY found\n", dev->name); 690 continue;
689 return -ENODEV; 691 if (fep->mii_bus->phy_map[phy_id] == NULL)
692 continue;
693 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
694 continue;
695 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
696 break;
690 } 697 }
691 698
692 /* attach the mac to the phy */ 699 if (phy_id >= PHY_MAX_ADDR) {
693 ret = phy_connect_direct(dev, phy_dev, 700 printk(KERN_INFO "%s: no PHY, assuming direct connection "
694 &fec_enet_adjust_link, 0, 701 "to switch\n", dev->name);
695 PHY_INTERFACE_MODE_MII); 702 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
696 if (ret) { 703 phy_id = 0;
697 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 704 }
698 return ret; 705
706 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
707 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
708 PHY_INTERFACE_MODE_MII);
709 if (IS_ERR(phy_dev)) {
710 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
711 return PTR_ERR(phy_dev);
699 } 712 }
700 713
701 /* mask with MAC supported features */ 714 /* mask with MAC supported features */
@@ -738,7 +751,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
738 fep->mii_bus->read = fec_enet_mdio_read; 751 fep->mii_bus->read = fec_enet_mdio_read;
739 fep->mii_bus->write = fec_enet_mdio_write; 752 fep->mii_bus->write = fec_enet_mdio_write;
740 fep->mii_bus->reset = fec_enet_mdio_reset; 753 fep->mii_bus->reset = fec_enet_mdio_reset;
741 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); 754 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
742 fep->mii_bus->priv = fep; 755 fep->mii_bus->priv = fep;
743 fep->mii_bus->parent = &pdev->dev; 756 fep->mii_bus->parent = &pdev->dev;
744 757
@@ -1311,6 +1324,9 @@ fec_probe(struct platform_device *pdev)
1311 if (ret) 1324 if (ret)
1312 goto failed_mii_init; 1325 goto failed_mii_init;
1313 1326
1327 /* Carrier starts down, phylib will bring it up */
1328 netif_carrier_off(ndev);
1329
1314 ret = register_netdev(ndev); 1330 ret = register_netdev(ndev);
1315 if (ret) 1331 if (ret)
1316 goto failed_register; 1332 goto failed_register;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 4b52c767ad05..3e5d0b6b6516 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -608,7 +608,7 @@ static int sixpack_open(struct tty_struct *tty)
608 608
609 spin_lock_init(&sp->lock); 609 spin_lock_init(&sp->lock);
610 atomic_set(&sp->refcnt, 1); 610 atomic_set(&sp->refcnt, 1);
611 init_MUTEX_LOCKED(&sp->dead_sem); 611 sema_init(&sp->dead_sem, 0);
612 612
613 /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ 613 /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
614 614
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 66e88bd59caa..4c628393c8b1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -747,7 +747,7 @@ static int mkiss_open(struct tty_struct *tty)
747 747
748 spin_lock_init(&ax->buflock); 748 spin_lock_init(&ax->buflock);
749 atomic_set(&ax->refcnt, 1); 749 atomic_set(&ax->refcnt, 1);
750 init_MUTEX_LOCKED(&ax->dead_sem); 750 sema_init(&ax->dead_sem, 0);
751 751
752 ax->tty = tty; 752 ax->tty = tty;
753 tty->disc_data = ax; 753 tty->disc_data = ax;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 1b051dab7b29..51d74447f8f8 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -909,7 +909,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
909 dev->tx_skb = NULL; 909 dev->tx_skb = NULL;
910 910
911 spin_lock_init(&dev->tx_lock); 911 spin_lock_init(&dev->tx_lock);
912 init_MUTEX(&dev->fsm.sem); 912 sema_init(&dev->fsm.sem, 1);
913 913
914 dev->drv = drv; 914 dev->drv = drv;
915 dev->netdev = ndev; 915 dev->netdev = ndev;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index af50a530daee..78d70a6481bf 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -184,7 +184,7 @@ ppp_asynctty_open(struct tty_struct *tty)
184 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); 184 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
185 185
186 atomic_set(&ap->refcnt, 1); 186 atomic_set(&ap->refcnt, 1);
187 init_MUTEX_LOCKED(&ap->dead_sem); 187 sema_init(&ap->dead_sem, 0);
188 188
189 ap->chan.private = ap; 189 ap->chan.private = ap;
190 ap->chan.ops = &async_ops; 190 ap->chan.ops = &async_ops;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index a0da4a17b025..992db2fa136e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1212,7 +1212,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1212 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) 1212 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1213 return; 1213 return;
1214 1214
1215 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); 1215 counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
1216 &paddr, GFP_KERNEL);
1216 if (!counters) 1217 if (!counters)
1217 return; 1218 return;
1218 1219
@@ -1233,7 +1234,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1233 RTL_W32(CounterAddrLow, 0); 1234 RTL_W32(CounterAddrLow, 0);
1234 RTL_W32(CounterAddrHigh, 0); 1235 RTL_W32(CounterAddrHigh, 0);
1235 1236
1236 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); 1237 dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
1238 paddr);
1237} 1239}
1238 1240
1239static void rtl8169_get_ethtool_stats(struct net_device *dev, 1241static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -3292,15 +3294,15 @@ static int rtl8169_open(struct net_device *dev)
3292 3294
3293 /* 3295 /*
 3294 * Rx and Tx descriptors need 256-byte alignment. 3296 * Rx and Tx descriptors need 256-byte alignment.
3295 * pci_alloc_consistent provides more. 3297 * dma_alloc_coherent provides more.
3296 */ 3298 */
3297 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, 3299 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
3298 &tp->TxPhyAddr); 3300 &tp->TxPhyAddr, GFP_KERNEL);
3299 if (!tp->TxDescArray) 3301 if (!tp->TxDescArray)
3300 goto err_pm_runtime_put; 3302 goto err_pm_runtime_put;
3301 3303
3302 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, 3304 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
3303 &tp->RxPhyAddr); 3305 &tp->RxPhyAddr, GFP_KERNEL);
3304 if (!tp->RxDescArray) 3306 if (!tp->RxDescArray)
3305 goto err_free_tx_0; 3307 goto err_free_tx_0;
3306 3308
@@ -3334,12 +3336,12 @@ out:
3334err_release_ring_2: 3336err_release_ring_2:
3335 rtl8169_rx_clear(tp); 3337 rtl8169_rx_clear(tp);
3336err_free_rx_1: 3338err_free_rx_1:
3337 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 3339 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
3338 tp->RxPhyAddr); 3340 tp->RxPhyAddr);
3339 tp->RxDescArray = NULL; 3341 tp->RxDescArray = NULL;
3340err_free_tx_0: 3342err_free_tx_0:
3341 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 3343 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
3342 tp->TxPhyAddr); 3344 tp->TxPhyAddr);
3343 tp->TxDescArray = NULL; 3345 tp->TxDescArray = NULL;
3344err_pm_runtime_put: 3346err_pm_runtime_put:
3345 pm_runtime_put_noidle(&pdev->dev); 3347 pm_runtime_put_noidle(&pdev->dev);
@@ -3975,7 +3977,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
3975{ 3977{
3976 struct pci_dev *pdev = tp->pci_dev; 3978 struct pci_dev *pdev = tp->pci_dev;
3977 3979
3978 pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, 3980 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
3979 PCI_DMA_FROMDEVICE); 3981 PCI_DMA_FROMDEVICE);
3980 dev_kfree_skb(*sk_buff); 3982 dev_kfree_skb(*sk_buff);
3981 *sk_buff = NULL; 3983 *sk_buff = NULL;
@@ -4000,7 +4002,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
4000static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, 4002static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
4001 struct net_device *dev, 4003 struct net_device *dev,
4002 struct RxDesc *desc, int rx_buf_sz, 4004 struct RxDesc *desc, int rx_buf_sz,
4003 unsigned int align) 4005 unsigned int align, gfp_t gfp)
4004{ 4006{
4005 struct sk_buff *skb; 4007 struct sk_buff *skb;
4006 dma_addr_t mapping; 4008 dma_addr_t mapping;
@@ -4008,13 +4010,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
4008 4010
4009 pad = align ? align : NET_IP_ALIGN; 4011 pad = align ? align : NET_IP_ALIGN;
4010 4012
4011 skb = netdev_alloc_skb(dev, rx_buf_sz + pad); 4013 skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
4012 if (!skb) 4014 if (!skb)
4013 goto err_out; 4015 goto err_out;
4014 4016
4015 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); 4017 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
4016 4018
4017 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 4019 mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
4018 PCI_DMA_FROMDEVICE); 4020 PCI_DMA_FROMDEVICE);
4019 4021
4020 rtl8169_map_to_asic(desc, mapping, rx_buf_sz); 4022 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4039,7 +4041,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
4039} 4041}
4040 4042
4041static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, 4043static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
4042 u32 start, u32 end) 4044 u32 start, u32 end, gfp_t gfp)
4043{ 4045{
4044 u32 cur; 4046 u32 cur;
4045 4047
@@ -4054,7 +4056,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
4054 4056
4055 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, 4057 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
4056 tp->RxDescArray + i, 4058 tp->RxDescArray + i,
4057 tp->rx_buf_sz, tp->align); 4059 tp->rx_buf_sz, tp->align, gfp);
4058 if (!skb) 4060 if (!skb)
4059 break; 4061 break;
4060 4062
@@ -4082,7 +4084,7 @@ static int rtl8169_init_ring(struct net_device *dev)
4082 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); 4084 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
4083 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); 4085 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
4084 4086
4085 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) 4087 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
4086 goto err_out; 4088 goto err_out;
4087 4089
4088 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); 4090 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4099,7 +4101,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
4099{ 4101{
4100 unsigned int len = tx_skb->len; 4102 unsigned int len = tx_skb->len;
4101 4103
4102 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); 4104 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
4105 PCI_DMA_TODEVICE);
4103 desc->opts1 = 0x00; 4106 desc->opts1 = 0x00;
4104 desc->opts2 = 0x00; 4107 desc->opts2 = 0x00;
4105 desc->addr = 0x00; 4108 desc->addr = 0x00;
@@ -4243,7 +4246,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4243 txd = tp->TxDescArray + entry; 4246 txd = tp->TxDescArray + entry;
4244 len = frag->size; 4247 len = frag->size;
4245 addr = ((void *) page_address(frag->page)) + frag->page_offset; 4248 addr = ((void *) page_address(frag->page)) + frag->page_offset;
4246 mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); 4249 mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
4250 PCI_DMA_TODEVICE);
4247 4251
4248 /* anti gcc 2.95.3 bugware (sic) */ 4252 /* anti gcc 2.95.3 bugware (sic) */
4249 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 4253 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4313,7 +4317,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4313 tp->tx_skb[entry].skb = skb; 4317 tp->tx_skb[entry].skb = skb;
4314 } 4318 }
4315 4319
4316 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); 4320 mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
4321 PCI_DMA_TODEVICE);
4317 4322
4318 tp->tx_skb[entry].len = len; 4323 tp->tx_skb[entry].len = len;
4319 txd->addr = cpu_to_le64(mapping); 4324 txd->addr = cpu_to_le64(mapping);
@@ -4477,8 +4482,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
4477 if (!skb) 4482 if (!skb)
4478 goto out; 4483 goto out;
4479 4484
4480 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, 4485 dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
4481 PCI_DMA_FROMDEVICE); 4486 PCI_DMA_FROMDEVICE);
4482 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); 4487 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
4483 *sk_buff = skb; 4488 *sk_buff = skb;
4484 done = true; 4489 done = true;
@@ -4549,11 +4554,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4549 rtl8169_rx_csum(skb, desc); 4554 rtl8169_rx_csum(skb, desc);
4550 4555
4551 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { 4556 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
4552 pci_dma_sync_single_for_device(pdev, addr, 4557 dma_sync_single_for_device(&pdev->dev, addr,
4553 pkt_size, PCI_DMA_FROMDEVICE); 4558 pkt_size, PCI_DMA_FROMDEVICE);
4554 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 4559 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4555 } else { 4560 } else {
4556 pci_unmap_single(pdev, addr, tp->rx_buf_sz, 4561 dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
4557 PCI_DMA_FROMDEVICE); 4562 PCI_DMA_FROMDEVICE);
4558 tp->Rx_skbuff[entry] = NULL; 4563 tp->Rx_skbuff[entry] = NULL;
4559 } 4564 }
@@ -4583,7 +4588,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4583 count = cur_rx - tp->cur_rx; 4588 count = cur_rx - tp->cur_rx;
4584 tp->cur_rx = cur_rx; 4589 tp->cur_rx = cur_rx;
4585 4590
4586 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 4591 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
4587 if (!delta && count) 4592 if (!delta && count)
4588 netif_info(tp, intr, dev, "no Rx buffer allocated\n"); 4593 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
4589 tp->dirty_rx += delta; 4594 tp->dirty_rx += delta;
@@ -4769,10 +4774,10 @@ static int rtl8169_close(struct net_device *dev)
4769 4774
4770 free_irq(dev->irq, dev); 4775 free_irq(dev->irq, dev);
4771 4776
4772 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 4777 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4773 tp->RxPhyAddr); 4778 tp->RxPhyAddr);
4774 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 4779 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4775 tp->TxPhyAddr); 4780 tp->TxPhyAddr);
4776 tp->TxDescArray = NULL; 4781 tp->TxDescArray = NULL;
4777 tp->RxDescArray = NULL; 4782 tp->RxDescArray = NULL;
4778 4783
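
Note: the r8169 conversion above is a mechanical move from the legacy pci_* DMA wrappers to the generic dma_* API. Each helper maps one-to-one once &pdev->dev is used, and the explicit gfp_t on allocations is what lets rtl8169_rx_fill() pass GFP_ATOMIC from the interrupt path. The replacements, shown on a hypothetical device and buffer:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void demo_dma(struct pci_dev *pdev, void *ptr, size_t len)
    {
        dma_addr_t map, ring_dma;
        void *ring;

        /* was pci_alloc_consistent(pdev, len, &ring_dma) */
        ring = dma_alloc_coherent(&pdev->dev, len, &ring_dma, GFP_KERNEL);

        /* was pci_map_single(pdev, ptr, len, PCI_DMA_TODEVICE) */
        map = dma_map_single(&pdev->dev, ptr, len, PCI_DMA_TODEVICE);

        dma_unmap_single(&pdev->dev, map, len, PCI_DMA_TODEVICE);
        dma_free_coherent(&pdev->dev, len, ring, ring_dma);
    }
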
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e7571..465ae7e84507 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,6 +43,7 @@
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/mii.h> 44#include <linux/mii.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/dmi.h>
46#include <asm/irq.h> 47#include <asm/irq.h>
47 48
48#include "skge.h" 49#include "skge.h"
@@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
3868 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); 3869 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3869} 3870}
3870 3871
3872static int only_32bit_dma;
3873
3871static int __devinit skge_probe(struct pci_dev *pdev, 3874static int __devinit skge_probe(struct pci_dev *pdev,
3872 const struct pci_device_id *ent) 3875 const struct pci_device_id *ent)
3873{ 3876{
@@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3889 3892
3890 pci_set_master(pdev); 3893 pci_set_master(pdev);
3891 3894
3892 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3895 if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3893 using_dac = 1; 3896 using_dac = 1;
3894 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3897 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3895 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3898 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = {
4147 .shutdown = skge_shutdown, 4150 .shutdown = skge_shutdown,
4148}; 4151};
4149 4152
4153static struct dmi_system_id skge_32bit_dma_boards[] = {
4154 {
4155 .ident = "Gigabyte nForce boards",
4156 .matches = {
4157 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
4158 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4159 },
4160 },
4161 {}
4162};
4163
4150static int __init skge_init_module(void) 4164static int __init skge_init_module(void)
4151{ 4165{
4166 if (dmi_check_system(skge_32bit_dma_boards))
4167 only_32bit_dma = 1;
4152 skge_debug_init(); 4168 skge_debug_init();
4153 return pci_register_driver(&skge_driver); 4169 return pci_register_driver(&skge_driver);
4154} 4170}
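
Note: the skge fix blacklists 64-bit DMA on the affected Gigabyte nForce boards with a DMI table consulted once at module init. The dmi_check_system() pattern in isolation; the vendor/board strings below are examples, not the skge table:

    #include <linux/dmi.h>
    #include <linux/kernel.h>

    static const struct dmi_system_id demo_dma_quirks[] = {
        {
            .ident = "Example board with broken 64-bit DMA",
            .matches = {
                DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
                DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
            },
        },
        { }     /* terminator */
    };

    static bool demo_force_32bit_dma(void)
    {
        return dmi_check_system(demo_dma_quirks) != 0;
    }
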
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869f..1ec4b9e0239a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4666,7 +4666,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4666 desc_idx, *post_ptr); 4666 desc_idx, *post_ptr);
4667 drop_it_no_recycle: 4667 drop_it_no_recycle:
4668 /* Other statistics kept track of by card. */ 4668 /* Other statistics kept track of by card. */
4669 tp->net_stats.rx_dropped++; 4669 tp->rx_dropped++;
4670 goto next_pkt; 4670 goto next_pkt;
4671 } 4671 }
4672 4672
@@ -4726,7 +4726,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4726 if (len > (tp->dev->mtu + ETH_HLEN) && 4726 if (len > (tp->dev->mtu + ETH_HLEN) &&
4727 skb->protocol != htons(ETH_P_8021Q)) { 4727 skb->protocol != htons(ETH_P_8021Q)) {
4728 dev_kfree_skb(skb); 4728 dev_kfree_skb(skb);
4729 goto next_pkt; 4729 goto drop_it_no_recycle;
4730 } 4730 }
4731 4731
4732 if (desc->type_flags & RXD_FLAG_VLAN && 4732 if (desc->type_flags & RXD_FLAG_VLAN &&
@@ -9240,6 +9240,8 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9240 stats->rx_missed_errors = old_stats->rx_missed_errors + 9240 stats->rx_missed_errors = old_stats->rx_missed_errors +
9241 get_stat64(&hw_stats->rx_discards); 9241 get_stat64(&hw_stats->rx_discards);
9242 9242
9243 stats->rx_dropped = tp->rx_dropped;
9244
9243 return stats; 9245 return stats;
9244} 9246}
9245 9247
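
Note: the tg3 change replaces a full driver-private rtnl_link_stats64 with a single soft rx_dropped counter that is bumped in the RX fast path and folded into the stats at query time, so software drops survive the hardware-counter merge. The shape of that pattern, with hypothetical names:

    #include <linux/netdevice.h>

    struct demo_priv {
        unsigned long rx_dropped;       /* bumped in the RX fast path */
    };

    static struct rtnl_link_stats64 *demo_get_stats64(struct net_device *dev,
                                            struct rtnl_link_stats64 *stats)
    {
        struct demo_priv *priv = netdev_priv(dev);

        /* Fold the soft counter in at query time. */
        stats->rx_dropped = priv->rx_dropped;
        return stats;
    }
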
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd190964..be7ff138a7f9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2759,7 +2759,7 @@ struct tg3 {
2759 2759
2760 2760
2761 /* begin "everything else" cacheline(s) section */ 2761 /* begin "everything else" cacheline(s) section */
2762 struct rtnl_link_stats64 net_stats; 2762 unsigned long rx_dropped;
2763 struct rtnl_link_stats64 net_stats_prev; 2763 struct rtnl_link_stats64 net_stats_prev;
2764 struct tg3_ethtool_stats estats; 2764 struct tg3_ethtool_stats estats;
2765 struct tg3_ethtool_stats estats_prev; 2765 struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 04c6cd4333f1..10bafd59f9c3 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -575,7 +575,7 @@ static int cosa_probe(int base, int irq, int dma)
575 575
576 /* Initialize the chardev data structures */ 576 /* Initialize the chardev data structures */
577 mutex_init(&chan->rlock); 577 mutex_init(&chan->rlock);
578 init_MUTEX(&chan->wsem); 578 sema_init(&chan->wsem, 1);
579 579
580 /* Register the network interface */ 580 /* Register the network interface */
581 if (!(chan->netdev = alloc_hdlcdev(chan))) { 581 if (!(chan->netdev = alloc_hdlcdev(chan))) {
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8cc9e319f435..1737d1488b35 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1244 int i, result; 1244 int i, result;
1245 struct device *dev = i2400m_dev(i2400m); 1245 struct device *dev = i2400m_dev(i2400m);
1246 const struct i2400m_msg_hdr *msg_hdr; 1246 const struct i2400m_msg_hdr *msg_hdr;
1247 size_t pl_itr, pl_size, skb_len; 1247 size_t pl_itr, pl_size;
1248 unsigned long flags; 1248 unsigned long flags;
1249 unsigned num_pls, single_last; 1249 unsigned num_pls, single_last, skb_len;
1250 1250
1251 skb_len = skb->len; 1251 skb_len = skb->len;
1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n", 1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
1253 i2400m, skb, skb_len); 1253 i2400m, skb, skb_len);
1254 result = -EIO; 1254 result = -EIO;
1255 msg_hdr = (void *) skb->data; 1255 msg_hdr = (void *) skb->data;
1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len); 1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
1257 if (result < 0) 1257 if (result < 0)
1258 goto error_msg_hdr_check; 1258 goto error_msg_hdr_check;
1259 result = -EIO; 1259 result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ 1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
1262 num_pls * sizeof(msg_hdr->pld[0]); 1262 num_pls * sizeof(msg_hdr->pld[0]);
1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); 1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
1264 if (pl_itr > skb->len) { /* got all the payload descriptors? */ 1264 if (pl_itr > skb_len) { /* got all the payload descriptors? */
1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " 1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
1266 "%u payload descriptors (%zu each, total %zu)\n", 1266 "%u payload descriptors (%zu each, total %zu)\n",
1267 skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); 1267 skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
1268 goto error_pl_descr_short; 1268 goto error_pl_descr_short;
1269 } 1269 }
 1270 /* Walk each payload--check we really got it */ 1270 /* Walk each payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1272 /* work around old gcc warnings */ 1272 /* work around old gcc warnings */
1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]); 1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], 1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
1275 pl_itr, skb->len); 1275 pl_itr, skb_len);
1276 if (result < 0) 1276 if (result < 0)
1277 goto error_pl_descr_check; 1277 goto error_pl_descr_check;
1278 single_last = num_pls == 1 || i == num_pls - 1; 1278 single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1290 if (i < i2400m->rx_pl_min) 1290 if (i < i2400m->rx_pl_min)
1291 i2400m->rx_pl_min = i; 1291 i2400m->rx_pl_min = i;
1292 i2400m->rx_num++; 1292 i2400m->rx_num++;
1293 i2400m->rx_size_acc += skb->len; 1293 i2400m->rx_size_acc += skb_len;
1294 if (skb->len < i2400m->rx_size_min) 1294 if (skb_len < i2400m->rx_size_min)
1295 i2400m->rx_size_min = skb->len; 1295 i2400m->rx_size_min = skb_len;
1296 if (skb->len > i2400m->rx_size_max) 1296 if (skb_len > i2400m->rx_size_max)
1297 i2400m->rx_size_max = skb->len; 1297 i2400m->rx_size_max = skb_len;
1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1299error_pl_descr_check: 1299error_pl_descr_check:
1300error_pl_descr_short: 1300error_pl_descr_short:
1301error_msg_hdr_check: 1301error_msg_hdr_check:
1302 d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n", 1302 d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
1303 i2400m, skb, skb_len, result); 1303 i2400m, skb, skb_len, result);
1304 return result; 1304 return result;
1305} 1305}
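
Note: the i2400m fix snapshots skb->len up front because payload delivery can consume the skb before the trailing statistics and trace messages run; reading skb->len afterwards would be a use-after-free. (The variable also becomes unsigned to match the %u format.) The pattern in miniature; demo_deliver() is hypothetical:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    void demo_deliver(struct net_device *dev, struct sk_buff *skb);

    static void demo_rx(struct net_device *dev, struct sk_buff *skb,
                        unsigned long *rx_bytes)
    {
        unsigned int skb_len = skb->len;    /* snapshot before delivery */

        demo_deliver(dev, skb);             /* may free or reuse the skb */
        *rx_bytes += skb_len;               /* safe: uses the snapshot */
    }
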
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index cc648b6ae31c..a3d95cca8f0c 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -543,7 +543,7 @@ static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
543 if (conf_is_ht40(conf)) 543 if (conf_is_ht40(conf))
544 return clockrate * 2; 544 return clockrate * 2;
545 545
546 return clockrate * 2; 546 return clockrate;
547} 547}
548 548
549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) 549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index dffa5d4fb298..a2d9d1e59260 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -306,7 +306,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
306 spin_lock_init(&tmp->pardevice_lock); 306 spin_lock_init(&tmp->pardevice_lock);
307 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT; 307 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
308 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; 308 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
309 init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */ 309 sema_init(&tmp->ieee1284.irq, 0);
310 tmp->spintime = parport_default_spintime; 310 tmp->spintime = parport_default_spintime;
311 atomic_set (&tmp->ref_count, 1); 311 atomic_set (&tmp->ref_count, 1);
312 INIT_LIST_HEAD(&tmp->full_list); 312 INIT_LIST_HEAD(&tmp->full_list);
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 9024480a8228..c44a5e8b8b82 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -51,7 +51,6 @@
51 * TODO: 51 * TODO:
52 * - handle CPU hotplug 52 * - handle CPU hotplug
53 * - provide turbo enable/disable api 53 * - provide turbo enable/disable api
54 * - make sure we can write turbo enable/disable reg based on MISC_EN
55 * 54 *
56 * Related documents: 55 * Related documents:
57 * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 56 * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
@@ -230,7 +229,7 @@
230#define THM_TC2 0xac 229#define THM_TC2 0xac
231#define THM_DTV 0xb0 230#define THM_DTV 0xb0
232#define THM_ITV 0xd8 231#define THM_ITV 0xd8
233#define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */ 232#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
234#define ITV_ME_SEQNO_SHIFT (16) 233#define ITV_ME_SEQNO_SHIFT (16)
235#define ITV_MCH_TEMP_MASK 0x0000ff00 234#define ITV_MCH_TEMP_MASK 0x0000ff00
236#define ITV_MCH_TEMP_SHIFT (8) 235#define ITV_MCH_TEMP_SHIFT (8)
@@ -325,6 +324,7 @@ struct ips_driver {
325 bool gpu_preferred; 324 bool gpu_preferred;
326 bool poll_turbo_status; 325 bool poll_turbo_status;
327 bool second_cpu; 326 bool second_cpu;
327 bool turbo_toggle_allowed;
328 struct ips_mcp_limits *limits; 328 struct ips_mcp_limits *limits;
329 329
330 /* Optional MCH interfaces for if i915 is in use */ 330 /* Optional MCH interfaces for if i915 is in use */
@@ -415,7 +415,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
415 new_limit = cur_limit - 8; /* 1W decrease */ 415 new_limit = cur_limit - 8; /* 1W decrease */
416 416
417 /* Clamp to SKU TDP limit */ 417 /* Clamp to SKU TDP limit */
418 if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK)) 418 if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
419 new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; 419 new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
420 420
421 thm_writew(THM_MPCPC, (new_limit * 10) / 8); 421 thm_writew(THM_MPCPC, (new_limit * 10) / 8);
@@ -461,7 +461,8 @@ static void ips_enable_cpu_turbo(struct ips_driver *ips)
461 if (ips->__cpu_turbo_on) 461 if (ips->__cpu_turbo_on)
462 return; 462 return;
463 463
464 on_each_cpu(do_enable_cpu_turbo, ips, 1); 464 if (ips->turbo_toggle_allowed)
465 on_each_cpu(do_enable_cpu_turbo, ips, 1);
465 466
466 ips->__cpu_turbo_on = true; 467 ips->__cpu_turbo_on = true;
467} 468}
@@ -498,7 +499,8 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips)
498 if (!ips->__cpu_turbo_on) 499 if (!ips->__cpu_turbo_on)
499 return; 500 return;
500 501
501 on_each_cpu(do_disable_cpu_turbo, ips, 1); 502 if (ips->turbo_toggle_allowed)
503 on_each_cpu(do_disable_cpu_turbo, ips, 1);
502 504
503 ips->__cpu_turbo_on = false; 505 ips->__cpu_turbo_on = false;
504} 506}
@@ -598,17 +600,29 @@ static bool mcp_exceeded(struct ips_driver *ips)
598{ 600{
599 unsigned long flags; 601 unsigned long flags;
600 bool ret = false; 602 bool ret = false;
603 u32 temp_limit;
604 u32 avg_power;
605 const char *msg = "MCP limit exceeded: ";
601 606
602 spin_lock_irqsave(&ips->turbo_status_lock, flags); 607 spin_lock_irqsave(&ips->turbo_status_lock, flags);
603 if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100)) 608
604 ret = true; 609 temp_limit = ips->mcp_temp_limit * 100;
605 if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit) 610 if (ips->mcp_avg_temp > temp_limit) {
611 dev_info(&ips->dev->dev,
612 "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
613 temp_limit);
606 ret = true; 614 ret = true;
607 spin_unlock_irqrestore(&ips->turbo_status_lock, flags); 615 }
608 616
609 if (ret) 617 avg_power = ips->cpu_avg_power + ips->mch_avg_power;
618 if (avg_power > ips->mcp_power_limit) {
610 dev_info(&ips->dev->dev, 619 dev_info(&ips->dev->dev,
611 "MCP power or thermal limit exceeded\n"); 620 "%sAvg power %u, limit %u\n", msg, avg_power,
621 ips->mcp_power_limit);
622 ret = true;
623 }
624
625 spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
612 626
613 return ret; 627 return ret;
614} 628}
@@ -663,6 +677,27 @@ static bool mch_exceeded(struct ips_driver *ips)
663} 677}
664 678
665/** 679/**
680 * verify_limits - verify BIOS provided limits
681 * @ips: IPS structure
682 *
683 * BIOS can optionally provide non-default limits for power and temp. Check
684 * them here and use the defaults if the BIOS values are not provided or
685 * are otherwise unusable.
686 */
687static void verify_limits(struct ips_driver *ips)
688{
689 if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
690 ips->mcp_power_limit > 35000)
691 ips->mcp_power_limit = ips->limits->mcp_power_limit;
692
693 if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
694 ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
695 ips->mcp_temp_limit > 150)
696 ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
697 ips->limits->mch_temp_limit);
698}
699
700/**
666 * update_turbo_limits - get various limits & settings from regs 701 * update_turbo_limits - get various limits & settings from regs
667 * @ips: IPS driver struct 702 * @ips: IPS driver struct
668 * 703 *
@@ -680,12 +715,21 @@ static void update_turbo_limits(struct ips_driver *ips)
680 u32 hts = thm_readl(THM_HTS); 715 u32 hts = thm_readl(THM_HTS);
681 716
682 ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); 717 ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
683 ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); 718 /*
719 * Disable turbo for now, until we can figure out why the power figures
720 * are wrong
721 */
722 ips->cpu_turbo_enabled = false;
723
724 if (ips->gpu_busy)
725 ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
726
684 ips->core_power_limit = thm_readw(THM_MPCPC); 727 ips->core_power_limit = thm_readw(THM_MPCPC);
685 ips->mch_power_limit = thm_readw(THM_MMGPC); 728 ips->mch_power_limit = thm_readw(THM_MMGPC);
686 ips->mcp_temp_limit = thm_readw(THM_PTL); 729 ips->mcp_temp_limit = thm_readw(THM_PTL);
687 ips->mcp_power_limit = thm_readw(THM_MPPC); 730 ips->mcp_power_limit = thm_readw(THM_MPPC);
688 731
732 verify_limits(ips);
689 /* Ignore BIOS CPU vs GPU pref */ 733 /* Ignore BIOS CPU vs GPU pref */
690} 734}
691 735
@@ -858,7 +902,7 @@ static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
858 ret = (ret * 1000) / 65535; 902 ret = (ret * 1000) / 65535;
859 *last = val; 903 *last = val;
860 904
861 return ret; 905 return 0;
862} 906}
863 907
864static const u16 temp_decay_factor = 2; 908static const u16 temp_decay_factor = 2;
@@ -940,7 +984,6 @@ static int ips_monitor(void *data)
940 kfree(mch_samples); 984 kfree(mch_samples);
941 kfree(cpu_samples); 985 kfree(cpu_samples);
942 kfree(mchp_samples); 986 kfree(mchp_samples);
943 kthread_stop(ips->adjust);
944 return -ENOMEM; 987 return -ENOMEM;
945 } 988 }
946 989
@@ -948,7 +991,7 @@ static int ips_monitor(void *data)
948 ITV_ME_SEQNO_SHIFT; 991 ITV_ME_SEQNO_SHIFT;
949 seqno_timestamp = get_jiffies_64(); 992 seqno_timestamp = get_jiffies_64();
950 993
951 old_cpu_power = thm_readl(THM_CEC) / 65535; 994 old_cpu_power = thm_readl(THM_CEC);
952 schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); 995 schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
953 996
954 /* Collect an initial average */ 997 /* Collect an initial average */
@@ -1150,11 +1193,18 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
1150 STS_GPL_SHIFT; 1193 STS_GPL_SHIFT;
1151 /* ignore EC CPU vs GPU pref */ 1194 /* ignore EC CPU vs GPU pref */
1152 ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); 1195 ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
1153 ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); 1196 /*
1197 * Disable turbo for now, until we can figure
1198 * out why the power figures are wrong
1199 */
1200 ips->cpu_turbo_enabled = false;
1201 if (ips->gpu_busy)
1202 ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
1154 ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> 1203 ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
1155 STS_PTL_SHIFT; 1204 STS_PTL_SHIFT;
1156 ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> 1205 ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
1157 STS_PPL_SHIFT; 1206 STS_PPL_SHIFT;
1207 verify_limits(ips);
1158 spin_unlock(&ips->turbo_status_lock); 1208 spin_unlock(&ips->turbo_status_lock);
1159 1209
1160 thm_writeb(THM_SEC, SEC_ACK); 1210 thm_writeb(THM_SEC, SEC_ACK);
@@ -1333,8 +1383,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
1333 * turbo manually or we'll get an illegal MSR access, even though 1383 * turbo manually or we'll get an illegal MSR access, even though
1334 * turbo will still be available. 1384 * turbo will still be available.
1335 */ 1385 */
1336 if (!(misc_en & IA32_MISC_TURBO_EN)) 1386 if (misc_en & IA32_MISC_TURBO_EN)
1337 ; /* add turbo MSR write allowed flag if necessary */ 1387 ips->turbo_toggle_allowed = true;
1388 else
1389 ips->turbo_toggle_allowed = false;
1338 1390
1339 if (strstr(boot_cpu_data.x86_model_id, "CPU M")) 1391 if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
1340 limits = &ips_sv_limits; 1392 limits = &ips_sv_limits;
@@ -1351,9 +1403,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
1351 tdp = turbo_power & TURBO_TDP_MASK; 1403 tdp = turbo_power & TURBO_TDP_MASK;
1352 1404
1353 /* Sanity check TDP against CPU */ 1405 /* Sanity check TDP against CPU */
1354 if (limits->mcp_power_limit != (tdp / 8) * 1000) { 1406 if (limits->core_power_limit != (tdp / 8) * 1000) {
1355 dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n", 1407 dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
1356 tdp / 8, limits->mcp_power_limit / 1000); 1408 tdp / 8, limits->core_power_limit / 1000);
1409 limits->core_power_limit = (tdp / 8) * 1000;
1357 } 1410 }
1358 1411
1359out: 1412out:
@@ -1390,7 +1443,7 @@ static bool ips_get_i915_syms(struct ips_driver *ips)
1390 return true; 1443 return true;
1391 1444
1392out_put_busy: 1445out_put_busy:
1393 symbol_put(i915_gpu_turbo_disable); 1446 symbol_put(i915_gpu_busy);
1394out_put_lower: 1447out_put_lower:
1395 symbol_put(i915_gpu_lower); 1448 symbol_put(i915_gpu_lower);
1396out_put_raise: 1449out_put_raise:
@@ -1532,22 +1585,27 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1532 /* Save turbo limits & ratios */ 1585 /* Save turbo limits & ratios */
1533 rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1586 rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
1534 1587
1535 ips_enable_cpu_turbo(ips); 1588 ips_disable_cpu_turbo(ips);
1536 ips->cpu_turbo_enabled = true; 1589 ips->cpu_turbo_enabled = false;
1537 1590
1538 /* Set up the work queue and monitor/adjust threads */ 1591 /* Create thermal adjust thread */
1539 ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); 1592 ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
1540 if (IS_ERR(ips->monitor)) { 1593 if (IS_ERR(ips->adjust)) {
1541 dev_err(&dev->dev, 1594 dev_err(&dev->dev,
1542 "failed to create thermal monitor thread, aborting\n"); 1595 "failed to create thermal adjust thread, aborting\n");
1543 ret = -ENOMEM; 1596 ret = -ENOMEM;
1544 goto error_free_irq; 1597 goto error_free_irq;
1598
1545 } 1599 }
1546 1600
1547 ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); 1601 /*
1548 if (IS_ERR(ips->adjust)) { 1602 * Set up the work queue and monitor thread. The monitor thread
1603 * will wake up ips_adjust thread.
1604 */
1605 ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
1606 if (IS_ERR(ips->monitor)) {
1549 dev_err(&dev->dev, 1607 dev_err(&dev->dev,
1550 "failed to create thermal adjust thread, aborting\n"); 1608 "failed to create thermal monitor thread, aborting\n");
1551 ret = -ENOMEM; 1609 ret = -ENOMEM;
1552 goto error_thread_cleanup; 1610 goto error_thread_cleanup;
1553 } 1611 }
@@ -1566,7 +1624,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1566 return ret; 1624 return ret;
1567 1625
1568error_thread_cleanup: 1626error_thread_cleanup:
1569 kthread_stop(ips->monitor); 1627 kthread_stop(ips->adjust);
1570error_free_irq: 1628error_free_irq:
1571 free_irq(ips->dev->irq, ips); 1629 free_irq(ips->dev->irq, ips);
1572error_unmap: 1630error_unmap:
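
The intel_ips hunks above flip the probe path: the passive ips-adjust thread is now created first with kthread_create() (which leaves it asleep until ips-monitor wakes it), ips-monitor is then started with kthread_run(), and the error labels unwind exactly what was created. A minimal sketch of that ordering, using hypothetical my_dev/my_adjust/my_monitor names rather than the driver's real ones:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    struct my_dev {
            struct task_struct *adjust;
            struct task_struct *monitor;
    };

    static int my_adjust(void *arg)
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);   /* sleeps until woken */
            return 0;
    }

    static int my_monitor(void *arg)
    {
            struct my_dev *md = arg;

            while (!kthread_should_stop()) {
                    wake_up_process(md->adjust);          /* monitor drives adjust */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }

    static int my_probe_threads(struct my_dev *md)
    {
            int ret;

            /* created but not started: nothing to do until monitor runs */
            md->adjust = kthread_create(my_adjust, md, "my-adjust");
            if (IS_ERR(md->adjust))
                    return PTR_ERR(md->adjust);

            /* created and immediately running */
            md->monitor = kthread_run(my_monitor, md, "my-monitor");
            if (IS_ERR(md->monitor)) {
                    ret = PTR_ERR(md->monitor);
                    goto err_stop_adjust;
            }
            return 0;

    err_stop_adjust:
            kthread_stop(md->adjust);     /* undo only what exists */
            return ret;
    }
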
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index df1fb53c09d2..a4be41614eeb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -256,7 +256,6 @@ static int __devexit ad5398_remove(struct i2c_client *client)
256 256
257 regulator_unregister(chip->rdev); 257 regulator_unregister(chip->rdev);
258 kfree(chip); 258 kfree(chip);
259 i2c_set_clientdata(client, NULL);
260 259
261 return 0; 260 return 0;
262} 261}
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index d61ecb885a8c..b8cc6389a541 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -191,8 +191,6 @@ static int __devexit isl6271a_remove(struct i2c_client *i2c)
191 struct isl_pmic *pmic = i2c_get_clientdata(i2c); 191 struct isl_pmic *pmic = i2c_get_clientdata(i2c);
192 int i; 192 int i;
193 193
194 i2c_set_clientdata(i2c, NULL);
195
196 for (i = 0; i < 3; i++) 194 for (i = 0; i < 3; i++)
197 regulator_unregister(pmic->rdev[i]); 195 regulator_unregister(pmic->rdev[i]);
198 196
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 9daed8db83d3..9de8516e3531 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -268,7 +268,6 @@ out_irq:
268 free_irq(client->irq, client); 268 free_irq(client->irq, client);
269 269
270out_free: 270out_free:
271 i2c_set_clientdata(client, NULL);
272 kfree(ds3232); 271 kfree(ds3232);
273 return ret; 272 return ret;
274} 273}
@@ -287,7 +286,6 @@ static int __devexit ds3232_remove(struct i2c_client *client)
287 } 286 }
288 287
289 rtc_device_unregister(ds3232->rtc); 288 rtc_device_unregister(ds3232->rtc);
290 i2c_set_clientdata(client, NULL);
291 kfree(ds3232); 289 kfree(ds3232);
292 return 0; 290 return 0;
293} 291}
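
The three hunks above (ad5398, isl6271a, ds3232) all drop i2c_set_clientdata(client, NULL) from remove()/error paths: the i2c core of this era clears the clientdata pointer itself once remove() returns, so drivers only free their private state. A minimal sketch, with a hypothetical my_chip/my_remove driver:

    #include <linux/i2c.h>
    #include <linux/slab.h>

    struct my_chip {
            int state;      /* whatever the driver hangs off the client */
    };

    static int my_remove(struct i2c_client *client)
    {
            struct my_chip *chip = i2c_get_clientdata(client);

            kfree(chip);
            /*
             * No i2c_set_clientdata(client, NULL) here: the i2c core
             * owns that pointer and resets it after remove() returns.
             */
            return 0;
    }
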
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ad0ed212db4a..348fba0a8976 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
1046 1046
1047 /* If the user actually wanted this page, we can skip the rest */ 1047 /* If the user actually wanted this page, we can skip the rest */
1048 if (page == 0) 1048 if (page == 0)
1049 return -EINVAL; 1049 return 0;
1050 1050
1051 for (i = 0; i < min((int)buf[3], buf_len - 4); i++) 1051 for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
1052 if (buf[i + 4] == page) 1052 if (buf[i + 4] == page)
1053 goto found; 1053 goto found;
1054 1054
1055 if (i < buf[3] && i > buf_len) 1055 if (i < buf[3] && i >= buf_len - 4)
1056 /* ran off the end of the buffer, give us benefit of doubt */ 1056 /* ran off the end of the buffer, give us benefit of doubt */
1057 goto found; 1057 goto found;
1058 /* The device claims it doesn't support the requested page */ 1058 /* The device claims it doesn't support the requested page */
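
Two fixes land in scsi_get_vpd_page() above: asking for page 0 (the supported-pages list itself) now returns 0, success, since that list was just fetched; and the ran-off-the-end test is corrected so the benefit of the doubt applies when the scan stopped at our buffer limit (i >= buf_len - 4) while the device still advertised more entries (i < buf[3]) — the old `i > buf_len` could never be true at loop exit. A standalone sketch of the corrected scan; page_supported() is a hypothetical wrapper, not the kernel function:

    #include <linux/kernel.h>       /* min() */
    #include <linux/types.h>

    static int page_supported(const unsigned char *buf, int buf_len, u8 page)
    {
            int i;

            /* buf[3] = number of page entries the device reports,
             * entries start at buf[4], buf_len = bytes actually read */
            for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
                    if (buf[i + 4] == page)
                            return 1;               /* listed */

            /* scan hit our buffer limit before the device's list
             * ended: give the page the benefit of the doubt */
            if (i < buf[3] && i >= buf_len - 4)
                    return 1;

            return 0;       /* device really doesn't list it */
    }
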
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 93de907b1208..800c54602339 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2044,6 +2044,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2044 if (!port) { 2044 if (!port) {
2045 printk(KERN_WARNING 2045 printk(KERN_WARNING
2046 "IOC3 serial memory not available for port\n"); 2046 "IOC3 serial memory not available for port\n");
2047 ret = -ENOMEM;
2047 goto out4; 2048 goto out4;
2048 } 2049 }
2049 spin_lock_init(&port->ip_lock); 2050 spin_lock_init(&port->ip_lock);
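
Before this hunk the allocation-failure branch jumped to out4 without assigning ret, so the probe returned whatever stale value ret happened to hold. A sketch of the convention being restored, with a hypothetical my_probe():

    #include <linux/slab.h>

    static int my_probe(void)
    {
            int ret = 0;
            void *port;

            port = kzalloc(128, GFP_KERNEL);  /* arbitrary size */
            if (!port) {
                    ret = -ENOMEM;    /* set the error before jumping */
                    goto out;
            }
            /* ... further setup ... */
            kfree(port);
    out:
            return ret;
    }
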
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
index c725356cc346..de7ebb99d8f6 100644
--- a/drivers/staging/tm6000/Kconfig
+++ b/drivers/staging/tm6000/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_TM6000 1config VIDEO_TM6000
2 tristate "TV Master TM5600/6000/6010 driver" 2 tristate "TV Master TM5600/6000/6010 driver"
3 depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL 3 depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select MEDIA_TUNER_XC2028 5 select MEDIA_TUNER_XC2028
6 select MEDIA_TUNER_XC5000 6 select MEDIA_TUNER_XC5000
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c
index 32f7a0af6938..54f7667cc706 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/staging/tm6000/tm6000-input.c
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable");
46 } 46 }
47 47
48struct tm6000_ir_poll_result { 48struct tm6000_ir_poll_result {
49 u8 rc_data[4]; 49 u16 rc_data;
50}; 50};
51 51
52struct tm6000_IR { 52struct tm6000_IR {
@@ -60,9 +60,9 @@ struct tm6000_IR {
60 int polling; 60 int polling;
61 struct delayed_work work; 61 struct delayed_work work;
62 u8 wait:1; 62 u8 wait:1;
63 u8 key:1;
63 struct urb *int_urb; 64 struct urb *int_urb;
64 u8 *urb_data; 65 u8 *urb_data;
65 u8 key:1;
66 66
67 int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *); 67 int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
68 68
@@ -122,13 +122,14 @@ static void tm6000_ir_urb_received(struct urb *urb)
122 122
123 if (urb->status != 0) 123 if (urb->status != 0)
124 printk(KERN_INFO "not ready\n"); 124 printk(KERN_INFO "not ready\n");
125 else if (urb->actual_length > 0) 125 else if (urb->actual_length > 0) {
126 memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length); 126 memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
127 127
128 dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], 128 dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
129 ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); 129 ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
130 130
131 ir->key = 1; 131 ir->key = 1;
132 }
132 133
133 rc = usb_submit_urb(urb, GFP_ATOMIC); 134 rc = usb_submit_urb(urb, GFP_ATOMIC);
134} 135}
@@ -140,30 +141,47 @@ static int default_polling_getkey(struct tm6000_IR *ir,
140 int rc; 141 int rc;
141 u8 buf[2]; 142 u8 buf[2];
142 143
143 if (ir->wait && !&dev->int_in) { 144 if (ir->wait && !&dev->int_in)
144 poll_result->rc_data[0] = 0xff;
145 return 0; 145 return 0;
146 }
147 146
148 if (&dev->int_in) { 147 if (&dev->int_in) {
149 poll_result->rc_data[0] = ir->urb_data[0]; 148 if (ir->ir.ir_type == IR_TYPE_RC5)
150 poll_result->rc_data[1] = ir->urb_data[1]; 149 poll_result->rc_data = ir->urb_data[0];
150 else
151 poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8;
151 } else { 152 } else {
152 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0); 153 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
153 msleep(10); 154 msleep(10);
154 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1); 155 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
155 msleep(10); 156 msleep(10);
156 157
157 rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | 158 if (ir->ir.ir_type == IR_TYPE_RC5) {
158 USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1); 159 rc = tm6000_read_write_usb(dev, USB_DIR_IN |
160 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
161 REQ_02_GET_IR_CODE, 0, 0, buf, 1);
159 162
160 msleep(10); 163 msleep(10);
161 164
162 dprintk("read data=%02x\n", buf[0]); 165 dprintk("read data=%02x\n", buf[0]);
163 if (rc < 0) 166 if (rc < 0)
164 return rc; 167 return rc;
165 168
166 poll_result->rc_data[0] = buf[0]; 169 poll_result->rc_data = buf[0];
170 } else {
171 rc = tm6000_read_write_usb(dev, USB_DIR_IN |
172 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
173 REQ_02_GET_IR_CODE, 0, 0, buf, 2);
174
175 msleep(10);
176
177 dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
178 if (rc < 0)
179 return rc;
180
181 poll_result->rc_data = buf[0] | buf[1] << 8;
182 }
183 if ((poll_result->rc_data & 0x00ff) != 0xff)
184 ir->key = 1;
167 } 185 }
168 return 0; 186 return 0;
169} 187}
@@ -180,12 +198,11 @@ static void tm6000_ir_handle_key(struct tm6000_IR *ir)
180 return; 198 return;
181 } 199 }
182 200
183 dprintk("ir->get_key result data=%02x %02x\n", 201 dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);
184 poll_result.rc_data[0], poll_result.rc_data[1]);
185 202
186 if (poll_result.rc_data[0] != 0xff && ir->key == 1) { 203 if (ir->key) {
187 ir_input_keydown(ir->input->input_dev, &ir->ir, 204 ir_input_keydown(ir->input->input_dev, &ir->ir,
188 poll_result.rc_data[0] | poll_result.rc_data[1] << 8); 205 (u32)poll_result.rc_data);
189 206
190 ir_input_nokey(ir->input->input_dev, &ir->ir); 207 ir_input_nokey(ir->input->input_dev, &ir->ir);
191 ir->key = 0; 208 ir->key = 0;
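
The tm6000 conversion above replaces the four-byte rc_data array with a single u16 scancode: RC5 polls report one byte, other protocols two, packed low byte first, and a keypress is only signalled when the low byte differs from the 0xff idle pattern. A sketch of that packing, with hypothetical pack_scancode()/key_pressed() helpers:

    #include <linux/types.h>

    static u16 pack_scancode(const u8 *buf, bool rc5)
    {
            if (rc5)
                    return buf[0];                  /* RC5: one byte */
            return buf[0] | (u16)buf[1] << 8;       /* two bytes, LE */
    }

    static bool key_pressed(u16 rc_data)
    {
            /* 0xff in the low byte means "no key pending" */
            return (rc_data & 0x00ff) != 0xff;
    }
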
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7c8008225ee3..17927b1f9334 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -127,7 +127,10 @@ static void handle_tx(struct vhost_net *net)
127 size_t len, total_len = 0; 127 size_t len, total_len = 0;
128 int err, wmem; 128 int err, wmem;
129 size_t hdr_size; 129 size_t hdr_size;
130 struct socket *sock = rcu_dereference(vq->private_data); 130 struct socket *sock;
131
132 sock = rcu_dereference_check(vq->private_data,
133 lockdep_is_held(&vq->mutex));
131 if (!sock) 134 if (!sock)
132 return; 135 return;
133 136
@@ -582,7 +585,10 @@ static void vhost_net_disable_vq(struct vhost_net *n,
582static void vhost_net_enable_vq(struct vhost_net *n, 585static void vhost_net_enable_vq(struct vhost_net *n,
583 struct vhost_virtqueue *vq) 586 struct vhost_virtqueue *vq)
584{ 587{
585 struct socket *sock = vq->private_data; 588 struct socket *sock;
589
590 sock = rcu_dereference_protected(vq->private_data,
591 lockdep_is_held(&vq->mutex));
586 if (!sock) 592 if (!sock)
587 return; 593 return;
588 if (vq == n->vqs + VHOST_NET_VQ_TX) { 594 if (vq == n->vqs + VHOST_NET_VQ_TX) {
@@ -598,7 +604,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
598 struct socket *sock; 604 struct socket *sock;
599 605
600 mutex_lock(&vq->mutex); 606 mutex_lock(&vq->mutex);
601 sock = vq->private_data; 607 sock = rcu_dereference_protected(vq->private_data,
608 lockdep_is_held(&vq->mutex));
602 vhost_net_disable_vq(n, vq); 609 vhost_net_disable_vq(n, vq);
603 rcu_assign_pointer(vq->private_data, NULL); 610 rcu_assign_pointer(vq->private_data, NULL);
604 mutex_unlock(&vq->mutex); 611 mutex_unlock(&vq->mutex);
@@ -736,7 +743,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
736 } 743 }
737 744
738 /* start polling new socket */ 745 /* start polling new socket */
739 oldsock = vq->private_data; 746 oldsock = rcu_dereference_protected(vq->private_data,
747 lockdep_is_held(&vq->mutex));
740 if (sock != oldsock) { 748 if (sock != oldsock) {
741 vhost_net_disable_vq(n, vq); 749 vhost_net_disable_vq(n, vq);
742 rcu_assign_pointer(vq->private_data, sock); 750 rcu_assign_pointer(vq->private_data, sock);
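
The vhost-net hunks above replace bare loads of vq->private_data with the lockdep-aware RCU accessors: rcu_dereference_check() for paths that may run either as an RCU-style reader or under vq->mutex, and rcu_dereference_protected() for pure writers that always hold the mutex and so need no read-side barriers. A minimal sketch of the two accessors, using a hypothetical vq_like structure:

    #include <linux/rcupdate.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct vq_like {
            struct mutex mutex;
            void __rcu *private_data;
    };

    /* reader: legal under rcu_read_lock() *or* with vq->mutex held */
    static void *vq_get(struct vq_like *vq)
    {
            return rcu_dereference_check(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
    }

    /* writer: must hold vq->mutex; skips the read-side machinery */
    static void *vq_get_locked(struct vq_like *vq)
    {
            return rcu_dereference_protected(vq->private_data,
                                             lockdep_is_held(&vq->mutex));
    }
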
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dd3d6f7406f8..8b5a1b33d0fe 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -320,7 +320,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
320 vhost_dev_cleanup(dev); 320 vhost_dev_cleanup(dev);
321 321
322 memory->nregions = 0; 322 memory->nregions = 0;
323 dev->memory = memory; 323 RCU_INIT_POINTER(dev->memory, memory);
324 return 0; 324 return 0;
325} 325}
326 326
@@ -352,8 +352,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
352 fput(dev->log_file); 352 fput(dev->log_file);
353 dev->log_file = NULL; 353 dev->log_file = NULL;
354 /* No one will access memory at this point */ 354 /* No one will access memory at this point */
355 kfree(dev->memory); 355 kfree(rcu_dereference_protected(dev->memory,
356 dev->memory = NULL; 356 lockdep_is_held(&dev->mutex)));
357 RCU_INIT_POINTER(dev->memory, NULL);
357 if (dev->mm) 358 if (dev->mm)
358 mmput(dev->mm); 359 mmput(dev->mm);
359 dev->mm = NULL; 360 dev->mm = NULL;
@@ -440,14 +441,22 @@ static int vq_access_ok(unsigned int num,
440/* Caller should have device mutex but not vq mutex */ 441/* Caller should have device mutex but not vq mutex */
441int vhost_log_access_ok(struct vhost_dev *dev) 442int vhost_log_access_ok(struct vhost_dev *dev)
442{ 443{
443 return memory_access_ok(dev, dev->memory, 1); 444 struct vhost_memory *mp;
445
446 mp = rcu_dereference_protected(dev->memory,
447 lockdep_is_held(&dev->mutex));
448 return memory_access_ok(dev, mp, 1);
444} 449}
445 450
446/* Verify access for write logging. */ 451/* Verify access for write logging. */
447/* Caller should have vq mutex and device mutex */ 452/* Caller should have vq mutex and device mutex */
448static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) 453static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
449{ 454{
450 return vq_memory_access_ok(log_base, vq->dev->memory, 455 struct vhost_memory *mp;
456
457 mp = rcu_dereference_protected(vq->dev->memory,
458 lockdep_is_held(&vq->mutex));
459 return vq_memory_access_ok(log_base, mp,
451 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && 460 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
452 (!vq->log_used || log_access_ok(log_base, vq->log_addr, 461 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
453 sizeof *vq->used + 462 sizeof *vq->used +
@@ -487,7 +496,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
487 kfree(newmem); 496 kfree(newmem);
488 return -EFAULT; 497 return -EFAULT;
489 } 498 }
490 oldmem = d->memory; 499 oldmem = rcu_dereference_protected(d->memory,
500 lockdep_is_held(&d->mutex));
491 rcu_assign_pointer(d->memory, newmem); 501 rcu_assign_pointer(d->memory, newmem);
492 synchronize_rcu(); 502 synchronize_rcu();
493 kfree(oldmem); 503 kfree(oldmem);
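
vhost_set_memory() above is the canonical RCU replace-and-retire sequence: snapshot the old pointer under the writer lock with rcu_dereference_protected(), publish the replacement with rcu_assign_pointer(), wait out every pre-existing reader with synchronize_rcu(), and only then free. A condensed sketch, assuming a hypothetical dev_like/blob pair in place of vhost_dev/vhost_memory:

    #include <linux/rcupdate.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct blob { int nregions; };

    struct dev_like {
            struct mutex mutex;
            struct blob __rcu *memory;
    };

    static void replace_memory(struct dev_like *d, struct blob *newmem)
    {
            struct blob *oldmem;

            mutex_lock(&d->mutex);
            oldmem = rcu_dereference_protected(d->memory,
                                               lockdep_is_held(&d->mutex));
            rcu_assign_pointer(d->memory, newmem);  /* publish */
            mutex_unlock(&d->mutex);

            synchronize_rcu();      /* wait for pre-existing readers */
            kfree(oldmem);          /* now nobody can still see it */
    }
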
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd77295971c..af3c11ded5fd 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -106,7 +106,7 @@ struct vhost_virtqueue {
106 * vhost_work execution acts instead of rcu_read_lock() and the end of 106 * vhost_work execution acts instead of rcu_read_lock() and the end of
107 * vhost_work execution acts instead of rcu_read_lock(). 107 * vhost_work execution acts instead of rcu_read_lock().
108 * Writers use virtqueue mutex. */ 108 * Writers use virtqueue mutex. */
109 void *private_data; 109 void __rcu *private_data;
110 /* Log write descriptors */ 110 /* Log write descriptors */
111 void __user *log_base; 111 void __user *log_base;
112 struct vhost_log log[VHOST_NET_MAX_SG]; 112 struct vhost_log log[VHOST_NET_MAX_SG];
@@ -116,7 +116,7 @@ struct vhost_dev {
116 /* Readers use RCU to access memory table pointer 116 /* Readers use RCU to access memory table pointer
117 * log base pointer and features. 117 * log base pointer and features.
118 * Writers use mutex below.*/ 118 * Writers use mutex below.*/
119 struct vhost_memory *memory; 119 struct vhost_memory __rcu *memory;
120 struct mm_struct *mm; 120 struct mm_struct *mm;
121 struct mutex mutex; 121 struct mutex mutex;
122 unsigned acked_features; 122 unsigned acked_features;
@@ -173,7 +173,11 @@ enum {
173 173
174static inline int vhost_has_feature(struct vhost_dev *dev, int bit) 174static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
175{ 175{
176 unsigned acked_features = rcu_dereference(dev->acked_features); 176 unsigned acked_features;
177
178 acked_features =
179 rcu_dereference_index_check(dev->acked_features,
180 lockdep_is_held(&dev->mutex));
177 return acked_features & (1 << bit); 181 return acked_features & (1 << bit);
178} 182}
179 183
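
The __rcu annotations added above are for sparse: a pointer so marked may only be touched through the rcu_dereference()/rcu_assign_pointer() family, and the checker flags bare loads. (acked_features gets the rcu_dereference_index_check() variant because it is an integer bitmask, not a pointer.) A sketch of the annotation on an ordinary pointer, with a hypothetical live_cfg:

    #include <linux/rcupdate.h>

    struct cfg { int x; };

    /* sparse: only rcu_* accessors may touch this pointer */
    static struct cfg __rcu *live_cfg;

    static int read_x(void)
    {
            struct cfg *c;
            int x;

            rcu_read_lock();
            c = rcu_dereference(live_cfg);  /* checked read-side access */
            x = c ? c->x : 0;
            rcu_read_unlock();
            return x;
    }
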
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 33c4e7eef470..9581ea94d5a1 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -109,8 +109,8 @@ static void init_once(void *foo)
109{ 109{
110 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 110 struct affs_inode_info *ei = (struct affs_inode_info *) foo;
111 111
112 init_MUTEX(&ei->i_link_lock); 112 sema_init(&ei->i_link_lock, 1);
113 init_MUTEX(&ei->i_ext_lock); 113 sema_init(&ei->i_ext_lock, 1);
114 inode_init_once(&ei->vfs_inode); 114 inode_init_once(&ei->vfs_inode);
115} 115}
116 116
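
init_MUTEX(sem) was a deprecated alias for sema_init(sem, 1); the affs hunk simply spells out the count. A count of 1 makes the semaphore binary, i.e. the mutex-like behaviour the old macro implied. A sketch:

    #include <linux/semaphore.h>

    static struct semaphore my_lock;

    static void my_init(void)
    {
            sema_init(&my_lock, 1);         /* count 1 == binary semaphore */
    }

    static void my_use(void)
    {
            down(&my_lock);                 /* may sleep */
            /* ... critical section ... */
            up(&my_lock);
    }
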
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index f96eff04e11a..a6395bdb26ae 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -134,10 +134,6 @@ static int aout_core_dump(struct coredump_params *cprm)
134 if (!dump_write(file, dump_start, dump_size)) 134 if (!dump_write(file, dump_start, dump_size))
135 goto end_coredump; 135 goto end_coredump;
136 } 136 }
137/* Finally dump the task struct. Not be used by gdb, but could be useful */
138 set_fs(KERNEL_DS);
139 if (!dump_write(file, current, sizeof(*current)))
140 goto end_coredump;
141end_coredump: 137end_coredump:
142 set_fs(fs); 138 set_fs(fs);
143 return has_dumped; 139 return has_dumped;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 0fcd2640c23f..9eb134ea6eb2 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -1,9 +1,11 @@
1config CEPH_FS 1config CEPH_FS
2 tristate "Ceph distributed file system (EXPERIMENTAL)" 2 tristate "Ceph distributed file system (EXPERIMENTAL)"
3 depends on INET && EXPERIMENTAL 3 depends on INET && EXPERIMENTAL
4 select CEPH_LIB
4 select LIBCRC32C 5 select LIBCRC32C
5 select CRYPTO_AES 6 select CRYPTO_AES
6 select CRYPTO 7 select CRYPTO
8 default n
7 help 9 help
8 Choose Y or M here to include support for mounting the 10 Choose Y or M here to include support for mounting the
9 experimental Ceph distributed file system. Ceph is an extremely 11 experimental Ceph distributed file system. Ceph is an extremely
@@ -14,15 +16,3 @@ config CEPH_FS
14 16
15 If unsure, say N. 17 If unsure, say N.
16 18
17config CEPH_FS_PRETTYDEBUG
18 bool "Include file:line in ceph debug output"
19 depends on CEPH_FS
20 default n
21 help
22 If you say Y here, debug output will include a filename and
23 line to aid debugging. This icnreases kernel size and slows
24 execution slightly when debug call sites are enabled (e.g.,
25 via CONFIG_DYNAMIC_DEBUG).
26
27 If unsure, say N.
28
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 278e1172600d..9e6c4f2e8ff1 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,15 +8,8 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
8 8
9ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ 9ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
10 export.o caps.o snap.o xattr.o \ 10 export.o caps.o snap.o xattr.o \
11 messenger.o msgpool.o buffer.o pagelist.o \ 11 mds_client.o mdsmap.o strings.o ceph_frag.o \
12 mds_client.o mdsmap.o \ 12 debugfs.o
13 mon_client.o \
14 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
15 debugfs.o \
16 auth.o auth_none.o \
17 crypto.o armor.o \
18 auth_x.o \
19 ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
20 13
21else 14else
22#Otherwise we were called directly from the command 15#Otherwise we were called directly from the command
diff --git a/fs/ceph/README b/fs/ceph/README
deleted file mode 100644
index 18352fab37c0..000000000000
--- a/fs/ceph/README
+++ /dev/null
@@ -1,20 +0,0 @@
1#
2# The following files are shared by (and manually synchronized
3# between) the Ceph userland and kernel client.
4#
5# userland kernel
6src/include/ceph_fs.h fs/ceph/ceph_fs.h
7src/include/ceph_fs.cc fs/ceph/ceph_fs.c
8src/include/msgr.h fs/ceph/msgr.h
9src/include/rados.h fs/ceph/rados.h
10src/include/ceph_strings.cc fs/ceph/ceph_strings.c
11src/include/ceph_frag.h fs/ceph/ceph_frag.h
12src/include/ceph_frag.cc fs/ceph/ceph_frag.c
13src/include/ceph_hash.h fs/ceph/ceph_hash.h
14src/include/ceph_hash.cc fs/ceph/ceph_hash.c
15src/crush/crush.c fs/ceph/crush/crush.c
16src/crush/crush.h fs/ceph/crush/crush.h
17src/crush/mapper.c fs/ceph/crush/mapper.c
18src/crush/mapper.h fs/ceph/crush/mapper.h
19src/crush/hash.h fs/ceph/crush/hash.h
20src/crush/hash.c fs/ceph/crush/hash.c
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index efbc604001c8..51bcc5ce3230 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/backing-dev.h> 3#include <linux/backing-dev.h>
4#include <linux/fs.h> 4#include <linux/fs.h>
@@ -10,7 +10,8 @@
10#include <linux/task_io_accounting_ops.h> 10#include <linux/task_io_accounting_ops.h>
11 11
12#include "super.h" 12#include "super.h"
13#include "osd_client.h" 13#include "mds_client.h"
14#include <linux/ceph/osd_client.h>
14 15
15/* 16/*
16 * Ceph address space ops. 17 * Ceph address space ops.
@@ -193,7 +194,8 @@ static int readpage_nounlock(struct file *filp, struct page *page)
193{ 194{
194 struct inode *inode = filp->f_dentry->d_inode; 195 struct inode *inode = filp->f_dentry->d_inode;
195 struct ceph_inode_info *ci = ceph_inode(inode); 196 struct ceph_inode_info *ci = ceph_inode(inode);
196 struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; 197 struct ceph_osd_client *osdc =
198 &ceph_inode_to_client(inode)->client->osdc;
197 int err = 0; 199 int err = 0;
198 u64 len = PAGE_CACHE_SIZE; 200 u64 len = PAGE_CACHE_SIZE;
199 201
@@ -265,7 +267,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
265{ 267{
266 struct inode *inode = file->f_dentry->d_inode; 268 struct inode *inode = file->f_dentry->d_inode;
267 struct ceph_inode_info *ci = ceph_inode(inode); 269 struct ceph_inode_info *ci = ceph_inode(inode);
268 struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; 270 struct ceph_osd_client *osdc =
271 &ceph_inode_to_client(inode)->client->osdc;
269 int rc = 0; 272 int rc = 0;
270 struct page **pages; 273 struct page **pages;
271 loff_t offset; 274 loff_t offset;
@@ -365,7 +368,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
365{ 368{
366 struct inode *inode; 369 struct inode *inode;
367 struct ceph_inode_info *ci; 370 struct ceph_inode_info *ci;
368 struct ceph_client *client; 371 struct ceph_fs_client *fsc;
369 struct ceph_osd_client *osdc; 372 struct ceph_osd_client *osdc;
370 loff_t page_off = page->index << PAGE_CACHE_SHIFT; 373 loff_t page_off = page->index << PAGE_CACHE_SHIFT;
371 int len = PAGE_CACHE_SIZE; 374 int len = PAGE_CACHE_SIZE;
@@ -383,8 +386,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
383 } 386 }
384 inode = page->mapping->host; 387 inode = page->mapping->host;
385 ci = ceph_inode(inode); 388 ci = ceph_inode(inode);
386 client = ceph_inode_to_client(inode); 389 fsc = ceph_inode_to_client(inode);
387 osdc = &client->osdc; 390 osdc = &fsc->client->osdc;
388 391
389 /* verify this is a writeable snap context */ 392 /* verify this is a writeable snap context */
390 snapc = (void *)page->private; 393 snapc = (void *)page->private;
@@ -414,10 +417,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
414 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", 417 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
415 inode, page, page->index, page_off, len, snapc); 418 inode, page, page->index, page_off, len, snapc);
416 419
417 writeback_stat = atomic_long_inc_return(&client->writeback_count); 420 writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
418 if (writeback_stat > 421 if (writeback_stat >
419 CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) 422 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
420 set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC); 423 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
421 424
422 set_page_writeback(page); 425 set_page_writeback(page);
423 err = ceph_osdc_writepages(osdc, ceph_vino(inode), 426 err = ceph_osdc_writepages(osdc, ceph_vino(inode),
@@ -496,7 +499,7 @@ static void writepages_finish(struct ceph_osd_request *req,
496 struct address_space *mapping = inode->i_mapping; 499 struct address_space *mapping = inode->i_mapping;
497 __s32 rc = -EIO; 500 __s32 rc = -EIO;
498 u64 bytes = 0; 501 u64 bytes = 0;
499 struct ceph_client *client = ceph_inode_to_client(inode); 502 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
500 long writeback_stat; 503 long writeback_stat;
501 unsigned issued = ceph_caps_issued(ci); 504 unsigned issued = ceph_caps_issued(ci);
502 505
@@ -529,10 +532,10 @@ static void writepages_finish(struct ceph_osd_request *req,
529 WARN_ON(!PageUptodate(page)); 532 WARN_ON(!PageUptodate(page));
530 533
531 writeback_stat = 534 writeback_stat =
532 atomic_long_dec_return(&client->writeback_count); 535 atomic_long_dec_return(&fsc->writeback_count);
533 if (writeback_stat < 536 if (writeback_stat <
534 CONGESTION_OFF_THRESH(client->mount_args->congestion_kb)) 537 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
535 clear_bdi_congested(&client->backing_dev_info, 538 clear_bdi_congested(&fsc->backing_dev_info,
536 BLK_RW_ASYNC); 539 BLK_RW_ASYNC);
537 540
538 ceph_put_snap_context((void *)page->private); 541 ceph_put_snap_context((void *)page->private);
@@ -569,13 +572,13 @@ static void writepages_finish(struct ceph_osd_request *req,
569 * mempool. we avoid the mempool if we can because req->r_num_pages 572 * mempool. we avoid the mempool if we can because req->r_num_pages
570 * may be less than the maximum write size. 573 * may be less than the maximum write size.
571 */ 574 */
572static void alloc_page_vec(struct ceph_client *client, 575static void alloc_page_vec(struct ceph_fs_client *fsc,
573 struct ceph_osd_request *req) 576 struct ceph_osd_request *req)
574{ 577{
575 req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, 578 req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
576 GFP_NOFS); 579 GFP_NOFS);
577 if (!req->r_pages) { 580 if (!req->r_pages) {
578 req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS); 581 req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
579 req->r_pages_from_pool = 1; 582 req->r_pages_from_pool = 1;
580 WARN_ON(!req->r_pages); 583 WARN_ON(!req->r_pages);
581 } 584 }
@@ -590,7 +593,7 @@ static int ceph_writepages_start(struct address_space *mapping,
590 struct inode *inode = mapping->host; 593 struct inode *inode = mapping->host;
591 struct backing_dev_info *bdi = mapping->backing_dev_info; 594 struct backing_dev_info *bdi = mapping->backing_dev_info;
592 struct ceph_inode_info *ci = ceph_inode(inode); 595 struct ceph_inode_info *ci = ceph_inode(inode);
593 struct ceph_client *client; 596 struct ceph_fs_client *fsc;
594 pgoff_t index, start, end; 597 pgoff_t index, start, end;
595 int range_whole = 0; 598 int range_whole = 0;
596 int should_loop = 1; 599 int should_loop = 1;
@@ -617,13 +620,13 @@ static int ceph_writepages_start(struct address_space *mapping,
617 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 620 wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
618 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 621 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
619 622
620 client = ceph_inode_to_client(inode); 623 fsc = ceph_inode_to_client(inode);
621 if (client->mount_state == CEPH_MOUNT_SHUTDOWN) { 624 if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
622 pr_warning("writepage_start %p on forced umount\n", inode); 625 pr_warning("writepage_start %p on forced umount\n", inode);
623 return -EIO; /* we're in a forced umount, don't write! */ 626 return -EIO; /* we're in a forced umount, don't write! */
624 } 627 }
625 if (client->mount_args->wsize && client->mount_args->wsize < wsize) 628 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
626 wsize = client->mount_args->wsize; 629 wsize = fsc->mount_options->wsize;
627 if (wsize < PAGE_CACHE_SIZE) 630 if (wsize < PAGE_CACHE_SIZE)
628 wsize = PAGE_CACHE_SIZE; 631 wsize = PAGE_CACHE_SIZE;
629 max_pages_ever = wsize >> PAGE_CACHE_SHIFT; 632 max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
@@ -769,7 +772,7 @@ get_more_pages:
769 offset = (unsigned long long)page->index 772 offset = (unsigned long long)page->index
770 << PAGE_CACHE_SHIFT; 773 << PAGE_CACHE_SHIFT;
771 len = wsize; 774 len = wsize;
772 req = ceph_osdc_new_request(&client->osdc, 775 req = ceph_osdc_new_request(&fsc->client->osdc,
773 &ci->i_layout, 776 &ci->i_layout,
774 ceph_vino(inode), 777 ceph_vino(inode),
775 offset, &len, 778 offset, &len,
@@ -782,7 +785,7 @@ get_more_pages:
782 &inode->i_mtime, true, 1); 785 &inode->i_mtime, true, 1);
783 max_pages = req->r_num_pages; 786 max_pages = req->r_num_pages;
784 787
785 alloc_page_vec(client, req); 788 alloc_page_vec(fsc, req);
786 req->r_callback = writepages_finish; 789 req->r_callback = writepages_finish;
787 req->r_inode = inode; 790 req->r_inode = inode;
788 } 791 }
@@ -794,10 +797,10 @@ get_more_pages:
794 inode, page, page->index); 797 inode, page, page->index);
795 798
796 writeback_stat = 799 writeback_stat =
797 atomic_long_inc_return(&client->writeback_count); 800 atomic_long_inc_return(&fsc->writeback_count);
798 if (writeback_stat > CONGESTION_ON_THRESH( 801 if (writeback_stat > CONGESTION_ON_THRESH(
799 client->mount_args->congestion_kb)) { 802 fsc->mount_options->congestion_kb)) {
800 set_bdi_congested(&client->backing_dev_info, 803 set_bdi_congested(&fsc->backing_dev_info,
801 BLK_RW_ASYNC); 804 BLK_RW_ASYNC);
802 } 805 }
803 806
@@ -846,7 +849,7 @@ get_more_pages:
846 op->payload_len = cpu_to_le32(len); 849 op->payload_len = cpu_to_le32(len);
847 req->r_request->hdr.data_len = cpu_to_le32(len); 850 req->r_request->hdr.data_len = cpu_to_le32(len);
848 851
849 ceph_osdc_start_request(&client->osdc, req, true); 852 ceph_osdc_start_request(&fsc->client->osdc, req, true);
850 req = NULL; 853 req = NULL;
851 854
852 /* continue? */ 855 /* continue? */
@@ -915,7 +918,7 @@ static int ceph_update_writeable_page(struct file *file,
915{ 918{
916 struct inode *inode = file->f_dentry->d_inode; 919 struct inode *inode = file->f_dentry->d_inode;
917 struct ceph_inode_info *ci = ceph_inode(inode); 920 struct ceph_inode_info *ci = ceph_inode(inode);
918 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; 921 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
919 loff_t page_off = pos & PAGE_CACHE_MASK; 922 loff_t page_off = pos & PAGE_CACHE_MASK;
920 int pos_in_page = pos & ~PAGE_CACHE_MASK; 923 int pos_in_page = pos & ~PAGE_CACHE_MASK;
921 int end_in_page = pos_in_page + len; 924 int end_in_page = pos_in_page + len;
@@ -1053,8 +1056,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
1053 struct page *page, void *fsdata) 1056 struct page *page, void *fsdata)
1054{ 1057{
1055 struct inode *inode = file->f_dentry->d_inode; 1058 struct inode *inode = file->f_dentry->d_inode;
1056 struct ceph_client *client = ceph_inode_to_client(inode); 1059 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1057 struct ceph_mds_client *mdsc = &client->mdsc; 1060 struct ceph_mds_client *mdsc = fsc->mdsc;
1058 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1061 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1059 int check_cap = 0; 1062 int check_cap = 0;
1060 1063
@@ -1123,7 +1126,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1123{ 1126{
1124 struct inode *inode = vma->vm_file->f_dentry->d_inode; 1127 struct inode *inode = vma->vm_file->f_dentry->d_inode;
1125 struct page *page = vmf->page; 1128 struct page *page = vmf->page;
1126 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; 1129 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1127 loff_t off = page->index << PAGE_CACHE_SHIFT; 1130 loff_t off = page->index << PAGE_CACHE_SHIFT;
1128 loff_t size, len; 1131 loff_t size, len;
1129 int ret; 1132 int ret;
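
The addr.c churn above is fallout from splitting the old monolithic struct ceph_client into a shared libceph core plus a filesystem wrapper, struct ceph_fs_client, that embeds a pointer to it: shared state such as the OSD client is now reached via fsc->client->osdc, while fs-only state (mount_options, writeback counters, the mdsc) lives on the wrapper. A toy sketch of the layering, with hypothetical trimmed-down types:

    /* shared core (libceph side), heavily trimmed for illustration */
    struct core_client {
            struct { int nr_requests; } osdc;
    };

    /* filesystem wrapper (fs/ceph side) */
    struct fs_client {
            struct core_client *client;     /* shared with rbd etc. */
            int congestion_kb;              /* fs-only mount option */
    };

    static int fs_osd_requests(struct fs_client *fsc)
    {
            return fsc->client->osdc.nr_requests;   /* via the core */
    }
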
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 73c153092f72..98ab13e2b71d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/fs.h> 3#include <linux/fs.h>
4#include <linux/kernel.h> 4#include <linux/kernel.h>
@@ -9,8 +9,9 @@
9#include <linux/writeback.h> 9#include <linux/writeback.h>
10 10
11#include "super.h" 11#include "super.h"
12#include "decode.h" 12#include "mds_client.h"
13#include "messenger.h" 13#include <linux/ceph/decode.h>
14#include <linux/ceph/messenger.h>
14 15
15/* 16/*
16 * Capability management 17 * Capability management
@@ -287,11 +288,11 @@ void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
287 spin_unlock(&mdsc->caps_list_lock); 288 spin_unlock(&mdsc->caps_list_lock);
288} 289}
289 290
290void ceph_reservation_status(struct ceph_client *client, 291void ceph_reservation_status(struct ceph_fs_client *fsc,
291 int *total, int *avail, int *used, int *reserved, 292 int *total, int *avail, int *used, int *reserved,
292 int *min) 293 int *min)
293{ 294{
294 struct ceph_mds_client *mdsc = &client->mdsc; 295 struct ceph_mds_client *mdsc = fsc->mdsc;
295 296
296 if (total) 297 if (total)
297 *total = mdsc->caps_total_count; 298 *total = mdsc->caps_total_count;
@@ -399,7 +400,7 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
399static void __cap_set_timeouts(struct ceph_mds_client *mdsc, 400static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
400 struct ceph_inode_info *ci) 401 struct ceph_inode_info *ci)
401{ 402{
402 struct ceph_mount_args *ma = mdsc->client->mount_args; 403 struct ceph_mount_options *ma = mdsc->fsc->mount_options;
403 404
404 ci->i_hold_caps_min = round_jiffies(jiffies + 405 ci->i_hold_caps_min = round_jiffies(jiffies +
405 ma->caps_wanted_delay_min * HZ); 406 ma->caps_wanted_delay_min * HZ);
@@ -515,7 +516,7 @@ int ceph_add_cap(struct inode *inode,
515 unsigned seq, unsigned mseq, u64 realmino, int flags, 516 unsigned seq, unsigned mseq, u64 realmino, int flags,
516 struct ceph_cap_reservation *caps_reservation) 517 struct ceph_cap_reservation *caps_reservation)
517{ 518{
518 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; 519 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
519 struct ceph_inode_info *ci = ceph_inode(inode); 520 struct ceph_inode_info *ci = ceph_inode(inode);
520 struct ceph_cap *new_cap = NULL; 521 struct ceph_cap *new_cap = NULL;
521 struct ceph_cap *cap; 522 struct ceph_cap *cap;
@@ -873,7 +874,7 @@ void __ceph_remove_cap(struct ceph_cap *cap)
873 struct ceph_mds_session *session = cap->session; 874 struct ceph_mds_session *session = cap->session;
874 struct ceph_inode_info *ci = cap->ci; 875 struct ceph_inode_info *ci = cap->ci;
875 struct ceph_mds_client *mdsc = 876 struct ceph_mds_client *mdsc =
876 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 877 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
877 int removed = 0; 878 int removed = 0;
878 879
879 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); 880 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
@@ -1210,7 +1211,7 @@ void __ceph_flush_snaps(struct ceph_inode_info *ci,
1210 int mds; 1211 int mds;
1211 struct ceph_cap_snap *capsnap; 1212 struct ceph_cap_snap *capsnap;
1212 u32 mseq; 1213 u32 mseq;
1213 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; 1214 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1214 struct ceph_mds_session *session = NULL; /* if session != NULL, we hold 1215 struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1215 session->s_mutex */ 1216 session->s_mutex */
1216 u64 next_follows = 0; /* keep track of how far we've gotten through the 1217 u64 next_follows = 0; /* keep track of how far we've gotten through the
@@ -1336,7 +1337,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
1336void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) 1337void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1337{ 1338{
1338 struct ceph_mds_client *mdsc = 1339 struct ceph_mds_client *mdsc =
1339 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 1340 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1340 struct inode *inode = &ci->vfs_inode; 1341 struct inode *inode = &ci->vfs_inode;
1341 int was = ci->i_dirty_caps; 1342 int was = ci->i_dirty_caps;
1342 int dirty = 0; 1343 int dirty = 0;
@@ -1378,7 +1379,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1378static int __mark_caps_flushing(struct inode *inode, 1379static int __mark_caps_flushing(struct inode *inode,
1379 struct ceph_mds_session *session) 1380 struct ceph_mds_session *session)
1380{ 1381{
1381 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 1382 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1382 struct ceph_inode_info *ci = ceph_inode(inode); 1383 struct ceph_inode_info *ci = ceph_inode(inode);
1383 int flushing; 1384 int flushing;
1384 1385
@@ -1416,17 +1417,6 @@ static int __mark_caps_flushing(struct inode *inode,
1416/* 1417/*
1417 * try to invalidate mapping pages without blocking. 1418 * try to invalidate mapping pages without blocking.
1418 */ 1419 */
1419static int mapping_is_empty(struct address_space *mapping)
1420{
1421 struct page *page = find_get_page(mapping, 0);
1422
1423 if (!page)
1424 return 1;
1425
1426 put_page(page);
1427 return 0;
1428}
1429
1430static int try_nonblocking_invalidate(struct inode *inode) 1420static int try_nonblocking_invalidate(struct inode *inode)
1431{ 1421{
1432 struct ceph_inode_info *ci = ceph_inode(inode); 1422 struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1436,7 +1426,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
1436 invalidate_mapping_pages(&inode->i_data, 0, -1); 1426 invalidate_mapping_pages(&inode->i_data, 0, -1);
1437 spin_lock(&inode->i_lock); 1427 spin_lock(&inode->i_lock);
1438 1428
1439 if (mapping_is_empty(&inode->i_data) && 1429 if (inode->i_data.nrpages == 0 &&
1440 invalidating_gen == ci->i_rdcache_gen) { 1430 invalidating_gen == ci->i_rdcache_gen) {
1441 /* success. */ 1431 /* success. */
1442 dout("try_nonblocking_invalidate %p success\n", inode); 1432 dout("try_nonblocking_invalidate %p success\n", inode);
@@ -1462,8 +1452,8 @@ static int try_nonblocking_invalidate(struct inode *inode)
1462void ceph_check_caps(struct ceph_inode_info *ci, int flags, 1452void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1463 struct ceph_mds_session *session) 1453 struct ceph_mds_session *session)
1464{ 1454{
1465 struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); 1455 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1466 struct ceph_mds_client *mdsc = &client->mdsc; 1456 struct ceph_mds_client *mdsc = fsc->mdsc;
1467 struct inode *inode = &ci->vfs_inode; 1457 struct inode *inode = &ci->vfs_inode;
1468 struct ceph_cap *cap; 1458 struct ceph_cap *cap;
1469 int file_wanted, used; 1459 int file_wanted, used;
@@ -1533,7 +1523,7 @@ retry_locked:
1533 */ 1523 */
1534 if ((!is_delayed || mdsc->stopping) && 1524 if ((!is_delayed || mdsc->stopping) &&
1535 ci->i_wrbuffer_ref == 0 && /* no dirty pages... */ 1525 ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
1536 ci->i_rdcache_gen && /* may have cached pages */ 1526 inode->i_data.nrpages && /* have cached pages */
1537 (file_wanted == 0 || /* no open files */ 1527 (file_wanted == 0 || /* no open files */
1538 (revoking & (CEPH_CAP_FILE_CACHE| 1528 (revoking & (CEPH_CAP_FILE_CACHE|
1539 CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */ 1529 CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
@@ -1706,7 +1696,7 @@ ack:
1706static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, 1696static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1707 unsigned *flush_tid) 1697 unsigned *flush_tid)
1708{ 1698{
1709 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 1699 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1710 struct ceph_inode_info *ci = ceph_inode(inode); 1700 struct ceph_inode_info *ci = ceph_inode(inode);
1711 int unlock_session = session ? 0 : 1; 1701 int unlock_session = session ? 0 : 1;
1712 int flushing = 0; 1702 int flushing = 0;
@@ -1872,7 +1862,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1872 caps_are_flushed(inode, flush_tid)); 1862 caps_are_flushed(inode, flush_tid));
1873 } else { 1863 } else {
1874 struct ceph_mds_client *mdsc = 1864 struct ceph_mds_client *mdsc =
1875 &ceph_sb_to_client(inode->i_sb)->mdsc; 1865 ceph_sb_to_client(inode->i_sb)->mdsc;
1876 1866
1877 spin_lock(&inode->i_lock); 1867 spin_lock(&inode->i_lock);
1878 if (__ceph_caps_dirty(ci)) 1868 if (__ceph_caps_dirty(ci))
@@ -2283,7 +2273,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2283{ 2273{
2284 struct ceph_inode_info *ci = ceph_inode(inode); 2274 struct ceph_inode_info *ci = ceph_inode(inode);
2285 int mds = session->s_mds; 2275 int mds = session->s_mds;
2286 int seq = le32_to_cpu(grant->seq); 2276 unsigned seq = le32_to_cpu(grant->seq);
2277 unsigned issue_seq = le32_to_cpu(grant->issue_seq);
2287 int newcaps = le32_to_cpu(grant->caps); 2278 int newcaps = le32_to_cpu(grant->caps);
2288 int issued, implemented, used, wanted, dirty; 2279 int issued, implemented, used, wanted, dirty;
2289 u64 size = le64_to_cpu(grant->size); 2280 u64 size = le64_to_cpu(grant->size);
@@ -2295,8 +2286,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2295 int revoked_rdcache = 0; 2286 int revoked_rdcache = 0;
2296 int queue_invalidate = 0; 2287 int queue_invalidate = 0;
2297 2288
2298 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 2289 dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n",
2299 inode, cap, mds, seq, ceph_cap_string(newcaps)); 2290 inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps));
2300 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 2291 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2301 inode->i_size); 2292 inode->i_size);
2302 2293
@@ -2392,6 +2383,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2392 } 2383 }
2393 2384
2394 cap->seq = seq; 2385 cap->seq = seq;
2386 cap->issue_seq = issue_seq;
2395 2387
2396 /* file layout may have changed */ 2388 /* file layout may have changed */
2397 ci->i_layout = grant->layout; 2389 ci->i_layout = grant->layout;
@@ -2463,7 +2455,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2463 __releases(inode->i_lock) 2455 __releases(inode->i_lock)
2464{ 2456{
2465 struct ceph_inode_info *ci = ceph_inode(inode); 2457 struct ceph_inode_info *ci = ceph_inode(inode);
2466 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 2458 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2467 unsigned seq = le32_to_cpu(m->seq); 2459 unsigned seq = le32_to_cpu(m->seq);
2468 int dirty = le32_to_cpu(m->dirty); 2460 int dirty = le32_to_cpu(m->dirty);
2469 int cleaned = 0; 2461 int cleaned = 0;
@@ -2711,7 +2703,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2711 struct ceph_msg *msg) 2703 struct ceph_msg *msg)
2712{ 2704{
2713 struct ceph_mds_client *mdsc = session->s_mdsc; 2705 struct ceph_mds_client *mdsc = session->s_mdsc;
2714 struct super_block *sb = mdsc->client->sb; 2706 struct super_block *sb = mdsc->fsc->sb;
2715 struct inode *inode; 2707 struct inode *inode;
2716 struct ceph_cap *cap; 2708 struct ceph_cap *cap;
2717 struct ceph_mds_caps *h; 2709 struct ceph_mds_caps *h;
@@ -2774,15 +2766,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2774 if (op == CEPH_CAP_OP_IMPORT) 2766 if (op == CEPH_CAP_OP_IMPORT)
2775 __queue_cap_release(session, vino.ino, cap_id, 2767 __queue_cap_release(session, vino.ino, cap_id,
2776 mseq, seq); 2768 mseq, seq);
2777 2769 goto flush_cap_releases;
2778 /*
2779 * send any full release message to try to move things
2780 * along for the mds (who clearly thinks we still have this
2781 * cap).
2782 */
2783 ceph_add_cap_releases(mdsc, session);
2784 ceph_send_cap_releases(mdsc, session);
2785 goto done;
2786 } 2770 }
2787 2771
2788 /* these will work even if we don't have a cap yet */ 2772 /* these will work even if we don't have a cap yet */
@@ -2810,7 +2794,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2810 dout(" no cap on %p ino %llx.%llx from mds%d\n", 2794 dout(" no cap on %p ino %llx.%llx from mds%d\n",
2811 inode, ceph_ino(inode), ceph_snap(inode), mds); 2795 inode, ceph_ino(inode), ceph_snap(inode), mds);
2812 spin_unlock(&inode->i_lock); 2796 spin_unlock(&inode->i_lock);
2813 goto done; 2797 goto flush_cap_releases;
2814 } 2798 }
2815 2799
2816 /* note that each of these drops i_lock for us */ 2800 /* note that each of these drops i_lock for us */
@@ -2834,6 +2818,17 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2834 ceph_cap_op_name(op)); 2818 ceph_cap_op_name(op));
2835 } 2819 }
2836 2820
2821 goto done;
2822
2823flush_cap_releases:
2824 /*
2825 * send any full release message to try to move things
2826 * along for the mds (who clearly thinks we still have this
2827 * cap).
2828 */
2829 ceph_add_cap_releases(mdsc, session);
2830 ceph_send_cap_releases(mdsc, session);
2831
2837done: 2832done:
2838 mutex_unlock(&session->s_mutex); 2833 mutex_unlock(&session->s_mutex);
2839done_unlocked: 2834done_unlocked:
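
Rather than duplicating the add/send cap-release calls at every early exit, the caps.c rewrite above funnels the no-inode and no-cap cases through one flush_cap_releases label ahead of done. A self-contained sketch of the exit-path consolidation; every helper here is a hypothetical stand-in, not a ceph function:

    #include <stdbool.h>

    struct session { int dummy; };
    struct msg { bool have_inode, have_cap; };

    static void add_cap_releases(struct session *s)  { (void)s; }
    static void send_cap_releases(struct session *s) { (void)s; }
    static void process(struct msg *m)               { (void)m; }

    static void handle_msg(struct session *s, struct msg *m)
    {
            if (!m->have_inode)
                    goto flush_cap_releases;  /* mds thinks we hold a cap */
            if (!m->have_cap)
                    goto flush_cap_releases;

            process(m);
            return;

    flush_cap_releases:
            /* one place to nudge the mds with queued release messages */
            add_cap_releases(s);
            send_cap_releases(s);
    }
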
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c
index ab6cf35c4091..bdce8b1fbd06 100644
--- a/fs/ceph/ceph_frag.c
+++ b/fs/ceph/ceph_frag.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Ceph 'frag' type 2 * Ceph 'frag' type
3 */ 3 */
4#include "types.h" 4#include <linux/module.h>
5#include <linux/ceph/types.h>
5 6
6int ceph_frag_compare(__u32 a, __u32 b) 7int ceph_frag_compare(__u32 a, __u32 b)
7{ 8{
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 6fd8b20a8611..7ae1b3d55b58 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/device.h> 3#include <linux/device.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
@@ -7,143 +7,49 @@
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/seq_file.h> 8#include <linux/seq_file.h>
9 9
10#include <linux/ceph/libceph.h>
11#include <linux/ceph/mon_client.h>
12#include <linux/ceph/auth.h>
13#include <linux/ceph/debugfs.h>
14
10#include "super.h" 15#include "super.h"
11#include "mds_client.h"
12#include "mon_client.h"
13#include "auth.h"
14 16
15#ifdef CONFIG_DEBUG_FS 17#ifdef CONFIG_DEBUG_FS
16 18
17/* 19#include "mds_client.h"
18 * Implement /sys/kernel/debug/ceph fun
19 *
20 * /sys/kernel/debug/ceph/client* - an instance of the ceph client
21 * .../osdmap - current osdmap
22 * .../mdsmap - current mdsmap
23 * .../monmap - current monmap
24 * .../osdc - active osd requests
25 * .../mdsc - active mds requests
26 * .../monc - mon client state
27 * .../dentry_lru - dump contents of dentry lru
28 * .../caps - expose cap (reservation) stats
29 * .../bdi - symlink to ../../bdi/something
30 */
31
32static struct dentry *ceph_debugfs_dir;
33
34static int monmap_show(struct seq_file *s, void *p)
35{
36 int i;
37 struct ceph_client *client = s->private;
38
39 if (client->monc.monmap == NULL)
40 return 0;
41
42 seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
43 for (i = 0; i < client->monc.monmap->num_mon; i++) {
44 struct ceph_entity_inst *inst =
45 &client->monc.monmap->mon_inst[i];
46
47 seq_printf(s, "\t%s%lld\t%s\n",
48 ENTITY_NAME(inst->name),
49 pr_addr(&inst->addr.in_addr));
50 }
51 return 0;
52}
53 20
54static int mdsmap_show(struct seq_file *s, void *p) 21static int mdsmap_show(struct seq_file *s, void *p)
55{ 22{
56 int i; 23 int i;
57 struct ceph_client *client = s->private; 24 struct ceph_fs_client *fsc = s->private;
58 25
59 if (client->mdsc.mdsmap == NULL) 26 if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
60 return 0; 27 return 0;
61 seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch); 28 seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
62 seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root); 29 seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
63 seq_printf(s, "session_timeout %d\n", 30 seq_printf(s, "session_timeout %d\n",
64 client->mdsc.mdsmap->m_session_timeout); 31 fsc->mdsc->mdsmap->m_session_timeout);
65 seq_printf(s, "session_autoclose %d\n", 32 seq_printf(s, "session_autoclose %d\n",
66 client->mdsc.mdsmap->m_session_autoclose); 33 fsc->mdsc->mdsmap->m_session_autoclose);
67 for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) { 34 for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
68 struct ceph_entity_addr *addr = 35 struct ceph_entity_addr *addr =
69 &client->mdsc.mdsmap->m_info[i].addr; 36 &fsc->mdsc->mdsmap->m_info[i].addr;
70 int state = client->mdsc.mdsmap->m_info[i].state; 37 int state = fsc->mdsc->mdsmap->m_info[i].state;
71 38
72 seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr), 39 seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
40 ceph_pr_addr(&addr->in_addr),
73 ceph_mds_state_name(state)); 41 ceph_mds_state_name(state));
74 } 42 }
75 return 0; 43 return 0;
76} 44}
77 45
78static int osdmap_show(struct seq_file *s, void *p) 46/*
79{ 47 * mdsc debugfs
80 int i; 48 */
81 struct ceph_client *client = s->private;
82 struct rb_node *n;
83
84 if (client->osdc.osdmap == NULL)
85 return 0;
86 seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
87 seq_printf(s, "flags%s%s\n",
88 (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
89 " NEARFULL" : "",
90 (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
91 " FULL" : "");
92 for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
93 struct ceph_pg_pool_info *pool =
94 rb_entry(n, struct ceph_pg_pool_info, node);
95 seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
96 pool->id, pool->v.pg_num, pool->pg_num_mask,
97 pool->v.lpg_num, pool->lpg_num_mask);
98 }
99 for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
100 struct ceph_entity_addr *addr =
101 &client->osdc.osdmap->osd_addr[i];
102 int state = client->osdc.osdmap->osd_state[i];
103 char sb[64];
104
105 seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
106 i, pr_addr(&addr->in_addr),
107 ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
108 ceph_osdmap_state_str(sb, sizeof(sb), state));
109 }
110 return 0;
111}
112
113static int monc_show(struct seq_file *s, void *p)
114{
115 struct ceph_client *client = s->private;
116 struct ceph_mon_generic_request *req;
117 struct ceph_mon_client *monc = &client->monc;
118 struct rb_node *rp;
119
120 mutex_lock(&monc->mutex);
121
122 if (monc->have_mdsmap)
123 seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
124 if (monc->have_osdmap)
125 seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
126 if (monc->want_next_osdmap)
127 seq_printf(s, "want next osdmap\n");
128
129 for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
130 __u16 op;
131 req = rb_entry(rp, struct ceph_mon_generic_request, node);
132 op = le16_to_cpu(req->request->hdr.type);
133 if (op == CEPH_MSG_STATFS)
134 seq_printf(s, "%lld statfs\n", req->tid);
135 else
136 seq_printf(s, "%lld unknown\n", req->tid);
137 }
138
139 mutex_unlock(&monc->mutex);
140 return 0;
141}
142
143static int mdsc_show(struct seq_file *s, void *p) 49static int mdsc_show(struct seq_file *s, void *p)
144{ 50{
145 struct ceph_client *client = s->private; 51 struct ceph_fs_client *fsc = s->private;
146 struct ceph_mds_client *mdsc = &client->mdsc; 52 struct ceph_mds_client *mdsc = fsc->mdsc;
147 struct ceph_mds_request *req; 53 struct ceph_mds_request *req;
148 struct rb_node *rp; 54 struct rb_node *rp;
149 int pathlen; 55 int pathlen;
@@ -214,61 +120,12 @@ static int mdsc_show(struct seq_file *s, void *p)
214 return 0; 120 return 0;
215} 121}
216 122
217static int osdc_show(struct seq_file *s, void *pp)
218{
219 struct ceph_client *client = s->private;
220 struct ceph_osd_client *osdc = &client->osdc;
221 struct rb_node *p;
222
223 mutex_lock(&osdc->request_mutex);
224 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
225 struct ceph_osd_request *req;
226 struct ceph_osd_request_head *head;
227 struct ceph_osd_op *op;
228 int num_ops;
229 int opcode, olen;
230 int i;
231
232 req = rb_entry(p, struct ceph_osd_request, r_node);
233
234 seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
235 req->r_osd ? req->r_osd->o_osd : -1,
236 le32_to_cpu(req->r_pgid.pool),
237 le16_to_cpu(req->r_pgid.ps));
238
239 head = req->r_request->front.iov_base;
240 op = (void *)(head + 1);
241
242 num_ops = le16_to_cpu(head->num_ops);
243 olen = le32_to_cpu(head->object_len);
244 seq_printf(s, "%.*s", olen,
245 (const char *)(head->ops + num_ops));
246
247 if (req->r_reassert_version.epoch)
248 seq_printf(s, "\t%u'%llu",
249 (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
250 le64_to_cpu(req->r_reassert_version.version));
251 else
252 seq_printf(s, "\t");
253
254 for (i = 0; i < num_ops; i++) {
255 opcode = le16_to_cpu(op->op);
256 seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
257 op++;
258 }
259
260 seq_printf(s, "\n");
261 }
262 mutex_unlock(&osdc->request_mutex);
263 return 0;
264}
265
266static int caps_show(struct seq_file *s, void *p) 123static int caps_show(struct seq_file *s, void *p)
267{ 124{
268 struct ceph_client *client = s->private; 125 struct ceph_fs_client *fsc = s->private;
269 int total, avail, used, reserved, min; 126 int total, avail, used, reserved, min;
270 127
271 ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); 128 ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
272 seq_printf(s, "total\t\t%d\n" 129 seq_printf(s, "total\t\t%d\n"
273 "avail\t\t%d\n" 130 "avail\t\t%d\n"
274 "used\t\t%d\n" 131 "used\t\t%d\n"
@@ -280,8 +137,8 @@ static int caps_show(struct seq_file *s, void *p)
280 137
281static int dentry_lru_show(struct seq_file *s, void *ptr) 138static int dentry_lru_show(struct seq_file *s, void *ptr)
282{ 139{
283 struct ceph_client *client = s->private; 140 struct ceph_fs_client *fsc = s->private;
284 struct ceph_mds_client *mdsc = &client->mdsc; 141 struct ceph_mds_client *mdsc = fsc->mdsc;
285 struct ceph_dentry_info *di; 142 struct ceph_dentry_info *di;
286 143
287 spin_lock(&mdsc->dentry_lru_lock); 144 spin_lock(&mdsc->dentry_lru_lock);
@@ -295,199 +152,124 @@ static int dentry_lru_show(struct seq_file *s, void *ptr)
295 return 0; 152 return 0;
296} 153}
297 154
298#define DEFINE_SHOW_FUNC(name) \ 155CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
299static int name##_open(struct inode *inode, struct file *file) \ 156CEPH_DEFINE_SHOW_FUNC(mdsc_show)
300{ \ 157CEPH_DEFINE_SHOW_FUNC(caps_show)
301 struct seq_file *sf; \ 158CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
302 int ret; \ 159
303 \
304 ret = single_open(file, name, NULL); \
305 sf = file->private_data; \
306 sf->private = inode->i_private; \
307 return ret; \
308} \
309 \
310static const struct file_operations name##_fops = { \
311 .open = name##_open, \
312 .read = seq_read, \
313 .llseek = seq_lseek, \
314 .release = single_release, \
315};
316
317DEFINE_SHOW_FUNC(monmap_show)
318DEFINE_SHOW_FUNC(mdsmap_show)
319DEFINE_SHOW_FUNC(osdmap_show)
320DEFINE_SHOW_FUNC(monc_show)
321DEFINE_SHOW_FUNC(mdsc_show)
322DEFINE_SHOW_FUNC(osdc_show)
323DEFINE_SHOW_FUNC(dentry_lru_show)
324DEFINE_SHOW_FUNC(caps_show)
325 160
161/*
162 * debugfs
163 */
326static int congestion_kb_set(void *data, u64 val) 164static int congestion_kb_set(void *data, u64 val)
327{ 165{
328 struct ceph_client *client = (struct ceph_client *)data; 166 struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
329
330 if (client)
331 client->mount_args->congestion_kb = (int)val;
332 167
168 fsc->mount_options->congestion_kb = (int)val;
333 return 0; 169 return 0;
334} 170}
335 171
336static int congestion_kb_get(void *data, u64 *val) 172static int congestion_kb_get(void *data, u64 *val)
337{ 173{
338 struct ceph_client *client = (struct ceph_client *)data; 174 struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
339
340 if (client)
341 *val = (u64)client->mount_args->congestion_kb;
342 175
176 *val = (u64)fsc->mount_options->congestion_kb;
343 return 0; 177 return 0;
344} 178}
345 179
346
347DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get, 180DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
348 congestion_kb_set, "%llu\n"); 181 congestion_kb_set, "%llu\n");
349 182
350int __init ceph_debugfs_init(void)
351{
352 ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
353 if (!ceph_debugfs_dir)
354 return -ENOMEM;
355 return 0;
356}
357 183
358void ceph_debugfs_cleanup(void) 184void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
359{ 185{
360 debugfs_remove(ceph_debugfs_dir); 186 dout("ceph_fs_debugfs_cleanup\n");
187 debugfs_remove(fsc->debugfs_bdi);
188 debugfs_remove(fsc->debugfs_congestion_kb);
189 debugfs_remove(fsc->debugfs_mdsmap);
190 debugfs_remove(fsc->debugfs_caps);
191 debugfs_remove(fsc->debugfs_mdsc);
192 debugfs_remove(fsc->debugfs_dentry_lru);
361} 193}
362 194
363int ceph_debugfs_client_init(struct ceph_client *client) 195int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
364{ 196{
365 int ret = 0; 197 char name[100];
366 char name[80]; 198 int err = -ENOMEM;
367
368 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
369 client->monc.auth->global_id);
370 199
371 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); 200 dout("ceph_fs_debugfs_init\n");
372 if (!client->debugfs_dir) 201 fsc->debugfs_congestion_kb =
373 goto out; 202 debugfs_create_file("writeback_congestion_kb",
374 203 0600,
375 client->monc.debugfs_file = debugfs_create_file("monc", 204 fsc->client->debugfs_dir,
376 0600, 205 fsc,
377 client->debugfs_dir, 206 &congestion_kb_fops);
378 client, 207 if (!fsc->debugfs_congestion_kb)
379 &monc_show_fops);
380 if (!client->monc.debugfs_file)
381 goto out; 208 goto out;
382 209
383 client->mdsc.debugfs_file = debugfs_create_file("mdsc", 210 dout("a\n");
384 0600,
385 client->debugfs_dir,
386 client,
387 &mdsc_show_fops);
388 if (!client->mdsc.debugfs_file)
389 goto out;
390 211
391 client->osdc.debugfs_file = debugfs_create_file("osdc", 212 snprintf(name, sizeof(name), "../../bdi/%s",
392 0600, 213 dev_name(fsc->backing_dev_info.dev));
393 client->debugfs_dir, 214 fsc->debugfs_bdi =
394 client, 215 debugfs_create_symlink("bdi",
395 &osdc_show_fops); 216 fsc->client->debugfs_dir,
396 if (!client->osdc.debugfs_file) 217 name);
218 if (!fsc->debugfs_bdi)
397 goto out; 219 goto out;
398 220
399 client->debugfs_monmap = debugfs_create_file("monmap", 221 dout("b\n");
222 fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
400 0600, 223 0600,
401 client->debugfs_dir, 224 fsc->client->debugfs_dir,
402 client, 225 fsc,
403 &monmap_show_fops);
404 if (!client->debugfs_monmap)
405 goto out;
406
407 client->debugfs_mdsmap = debugfs_create_file("mdsmap",
408 0600,
409 client->debugfs_dir,
410 client,
411 &mdsmap_show_fops); 226 &mdsmap_show_fops);
412 if (!client->debugfs_mdsmap) 227 if (!fsc->debugfs_mdsmap)
413 goto out;
414
415 client->debugfs_osdmap = debugfs_create_file("osdmap",
416 0600,
417 client->debugfs_dir,
418 client,
419 &osdmap_show_fops);
420 if (!client->debugfs_osdmap)
421 goto out; 228 goto out;
422 229
423 client->debugfs_dentry_lru = debugfs_create_file("dentry_lru", 230 dout("ca\n");
424 0600, 231 fsc->debugfs_mdsc = debugfs_create_file("mdsc",
425 client->debugfs_dir, 232 0600,
426 client, 233 fsc->client->debugfs_dir,
427 &dentry_lru_show_fops); 234 fsc,
428 if (!client->debugfs_dentry_lru) 235 &mdsc_show_fops);
236 if (!fsc->debugfs_mdsc)
429 goto out; 237 goto out;
430 238
431 client->debugfs_caps = debugfs_create_file("caps", 239 dout("da\n");
240 fsc->debugfs_caps = debugfs_create_file("caps",
432 0400, 241 0400,
433 client->debugfs_dir, 242 fsc->client->debugfs_dir,
434 client, 243 fsc,
435 &caps_show_fops); 244 &caps_show_fops);
436 if (!client->debugfs_caps) 245 if (!fsc->debugfs_caps)
437 goto out; 246 goto out;
438 247
439 client->debugfs_congestion_kb = 248 dout("ea\n");
440 debugfs_create_file("writeback_congestion_kb", 249 fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
441 0600, 250 0600,
442 client->debugfs_dir, 251 fsc->client->debugfs_dir,
443 client, 252 fsc,
444 &congestion_kb_fops); 253 &dentry_lru_show_fops);
445 if (!client->debugfs_congestion_kb) 254 if (!fsc->debugfs_dentry_lru)
446 goto out; 255 goto out;
447 256
448 sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev));
449 client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir,
450 name);
451
452 return 0; 257 return 0;
453 258
454out: 259out:
455 ceph_debugfs_client_cleanup(client); 260 ceph_fs_debugfs_cleanup(fsc);
456 return ret; 261 return err;
457} 262}
458 263
459void ceph_debugfs_client_cleanup(struct ceph_client *client)
460{
461 debugfs_remove(client->debugfs_bdi);
462 debugfs_remove(client->debugfs_caps);
463 debugfs_remove(client->debugfs_dentry_lru);
464 debugfs_remove(client->debugfs_osdmap);
465 debugfs_remove(client->debugfs_mdsmap);
466 debugfs_remove(client->debugfs_monmap);
467 debugfs_remove(client->osdc.debugfs_file);
468 debugfs_remove(client->mdsc.debugfs_file);
469 debugfs_remove(client->monc.debugfs_file);
470 debugfs_remove(client->debugfs_congestion_kb);
471 debugfs_remove(client->debugfs_dir);
472}
473 264
474#else /* CONFIG_DEBUG_FS */ 265#else /* CONFIG_DEBUG_FS */
475 266
476int __init ceph_debugfs_init(void) 267int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
477{
478 return 0;
479}
480
481void ceph_debugfs_cleanup(void)
482{
483}
484
485int ceph_debugfs_client_init(struct ceph_client *client)
486{ 268{
487 return 0; 269 return 0;
488} 270}
489 271
490void ceph_debugfs_client_cleanup(struct ceph_client *client) 272void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
491{ 273{
492} 274}
493 275
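
The per-file DEFINE_SHOW_FUNC macro removed above is the stock seq_file single_open idiom; the shared CEPH_DEFINE_SHOW_FUNC that replaces it (now provided by libceph) is assumed to expand the same way. A minimal sketch of one expansion, using caps_show as the example:

	static int caps_show_open(struct inode *inode, struct file *file)
	{
		struct seq_file *sf;
		int ret;

		ret = single_open(file, caps_show, NULL);
		sf = file->private_data;
		sf->private = inode->i_private;	/* the ceph_fs_client passed as the
						 * data argument of debugfs_create_file() */
		return ret;
	}

	static const struct file_operations caps_show_fops = {
		.open		= caps_show_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

The debugfs files created in ceph_fs_debugfs_init() hand the struct ceph_fs_client in as inode->i_private, which is how each show function's s->private ends up pointing at the fsc. (The writeback_congestion_kb file uses the companion DEFINE_SIMPLE_ATTRIBUTE idiom instead, which generates equivalent fops from the get/set pair shown above.)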
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index a1986eb52045..e0a2dc6fcafc 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/spinlock.h> 3#include <linux/spinlock.h>
4#include <linux/fs_struct.h> 4#include <linux/fs_struct.h>
@@ -7,6 +7,7 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8 8
9#include "super.h" 9#include "super.h"
10#include "mds_client.h"
10 11
11/* 12/*
12 * Directory operations: readdir, lookup, create, link, unlink, 13 * Directory operations: readdir, lookup, create, link, unlink,
@@ -94,10 +95,7 @@ static unsigned fpos_off(loff_t p)
94 */ 95 */
95static int __dcache_readdir(struct file *filp, 96static int __dcache_readdir(struct file *filp,
96 void *dirent, filldir_t filldir) 97 void *dirent, filldir_t filldir)
97 __releases(inode->i_lock)
98 __acquires(inode->i_lock)
99{ 98{
100 struct inode *inode = filp->f_dentry->d_inode;
101 struct ceph_file_info *fi = filp->private_data; 99 struct ceph_file_info *fi = filp->private_data;
102 struct dentry *parent = filp->f_dentry; 100 struct dentry *parent = filp->f_dentry;
103 struct inode *dir = parent->d_inode; 101 struct inode *dir = parent->d_inode;
@@ -153,7 +151,6 @@ more:
153 151
154 atomic_inc(&dentry->d_count); 152 atomic_inc(&dentry->d_count);
155 spin_unlock(&dcache_lock); 153 spin_unlock(&dcache_lock);
156 spin_unlock(&inode->i_lock);
157 154
158 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, 155 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
159 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 156 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -171,35 +168,30 @@ more:
171 } else { 168 } else {
172 dput(last); 169 dput(last);
173 } 170 }
174 last = NULL;
175 } 171 }
176
177 spin_lock(&inode->i_lock);
178 spin_lock(&dcache_lock);
179
180 last = dentry; 172 last = dentry;
181 173
182 if (err < 0) 174 if (err < 0)
183 goto out_unlock; 175 goto out;
184 176
185 p = p->prev;
186 filp->f_pos++; 177 filp->f_pos++;
187 178
188 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ 179 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
189 if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE)) 180 if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
190 goto more; 181 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
191 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); 182 err = -EAGAIN;
192 err = -EAGAIN; 183 goto out;
184 }
185
186 spin_lock(&dcache_lock);
187 p = p->prev; /* advance to next dentry */
188 goto more;
193 189
194out_unlock: 190out_unlock:
195 spin_unlock(&dcache_lock); 191 spin_unlock(&dcache_lock);
196 192out:
197 if (last) { 193 if (last)
198 spin_unlock(&inode->i_lock);
199 dput(last); 194 dput(last);
200 spin_lock(&inode->i_lock);
201 }
202
203 return err; 195 return err;
204} 196}
205 197
@@ -227,15 +219,15 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
227 struct ceph_file_info *fi = filp->private_data; 219 struct ceph_file_info *fi = filp->private_data;
228 struct inode *inode = filp->f_dentry->d_inode; 220 struct inode *inode = filp->f_dentry->d_inode;
229 struct ceph_inode_info *ci = ceph_inode(inode); 221 struct ceph_inode_info *ci = ceph_inode(inode);
230 struct ceph_client *client = ceph_inode_to_client(inode); 222 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
231 struct ceph_mds_client *mdsc = &client->mdsc; 223 struct ceph_mds_client *mdsc = fsc->mdsc;
232 unsigned frag = fpos_frag(filp->f_pos); 224 unsigned frag = fpos_frag(filp->f_pos);
233 int off = fpos_off(filp->f_pos); 225 int off = fpos_off(filp->f_pos);
234 int err; 226 int err;
235 u32 ftype; 227 u32 ftype;
236 struct ceph_mds_reply_info_parsed *rinfo; 228 struct ceph_mds_reply_info_parsed *rinfo;
237 const int max_entries = client->mount_args->max_readdir; 229 const int max_entries = fsc->mount_options->max_readdir;
238 const int max_bytes = client->mount_args->max_readdir_bytes; 230 const int max_bytes = fsc->mount_options->max_readdir_bytes;
239 231
240 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); 232 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
241 if (fi->at_end) 233 if (fi->at_end)
@@ -267,17 +259,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
267 /* can we use the dcache? */ 259 /* can we use the dcache? */
268 spin_lock(&inode->i_lock); 260 spin_lock(&inode->i_lock);
269 if ((filp->f_pos == 2 || fi->dentry) && 261 if ((filp->f_pos == 2 || fi->dentry) &&
270 !ceph_test_opt(client, NOASYNCREADDIR) && 262 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
271 ceph_snap(inode) != CEPH_SNAPDIR && 263 ceph_snap(inode) != CEPH_SNAPDIR &&
272 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 264 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
273 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 265 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
266 spin_unlock(&inode->i_lock);
274 err = __dcache_readdir(filp, dirent, filldir); 267 err = __dcache_readdir(filp, dirent, filldir);
275 if (err != -EAGAIN) { 268 if (err != -EAGAIN)
276 spin_unlock(&inode->i_lock);
277 return err; 269 return err;
278 } 270 } else {
271 spin_unlock(&inode->i_lock);
279 } 272 }
280 spin_unlock(&inode->i_lock);
281 if (fi->dentry) { 273 if (fi->dentry) {
282 err = note_last_dentry(fi, fi->dentry->d_name.name, 274 err = note_last_dentry(fi, fi->dentry->d_name.name,
283 fi->dentry->d_name.len); 275 fi->dentry->d_name.len);
@@ -487,14 +479,13 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
487struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, 479struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
488 struct dentry *dentry, int err) 480 struct dentry *dentry, int err)
489{ 481{
490 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 482 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
491 struct inode *parent = dentry->d_parent->d_inode; 483 struct inode *parent = dentry->d_parent->d_inode;
492 484
493 /* .snap dir? */ 485 /* .snap dir? */
494 if (err == -ENOENT && 486 if (err == -ENOENT &&
495 ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
496 strcmp(dentry->d_name.name, 487 strcmp(dentry->d_name.name,
497 client->mount_args->snapdir_name) == 0) { 488 fsc->mount_options->snapdir_name) == 0) {
498 struct inode *inode = ceph_get_snapdir(parent); 489 struct inode *inode = ceph_get_snapdir(parent);
499 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", 490 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
500 dentry, dentry->d_name.len, dentry->d_name.name, inode); 491 dentry, dentry->d_name.len, dentry->d_name.name, inode);
@@ -539,8 +530,8 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
539static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, 530static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
540 struct nameidata *nd) 531 struct nameidata *nd)
541{ 532{
542 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 533 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
543 struct ceph_mds_client *mdsc = &client->mdsc; 534 struct ceph_mds_client *mdsc = fsc->mdsc;
544 struct ceph_mds_request *req; 535 struct ceph_mds_request *req;
545 int op; 536 int op;
546 int err; 537 int err;
@@ -572,7 +563,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
572 spin_lock(&dir->i_lock); 563 spin_lock(&dir->i_lock);
573 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); 564 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
574 if (strncmp(dentry->d_name.name, 565 if (strncmp(dentry->d_name.name,
575 client->mount_args->snapdir_name, 566 fsc->mount_options->snapdir_name,
576 dentry->d_name.len) && 567 dentry->d_name.len) &&
577 !is_root_ceph_dentry(dir, dentry) && 568 !is_root_ceph_dentry(dir, dentry) &&
578 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 569 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
@@ -629,8 +620,8 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
629static int ceph_mknod(struct inode *dir, struct dentry *dentry, 620static int ceph_mknod(struct inode *dir, struct dentry *dentry,
630 int mode, dev_t rdev) 621 int mode, dev_t rdev)
631{ 622{
632 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 623 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
633 struct ceph_mds_client *mdsc = &client->mdsc; 624 struct ceph_mds_client *mdsc = fsc->mdsc;
634 struct ceph_mds_request *req; 625 struct ceph_mds_request *req;
635 int err; 626 int err;
636 627
@@ -685,8 +676,8 @@ static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
685static int ceph_symlink(struct inode *dir, struct dentry *dentry, 676static int ceph_symlink(struct inode *dir, struct dentry *dentry,
686 const char *dest) 677 const char *dest)
687{ 678{
688 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 679 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
689 struct ceph_mds_client *mdsc = &client->mdsc; 680 struct ceph_mds_client *mdsc = fsc->mdsc;
690 struct ceph_mds_request *req; 681 struct ceph_mds_request *req;
691 int err; 682 int err;
692 683
@@ -716,8 +707,8 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
716 707
717static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) 708static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
718{ 709{
719 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 710 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
720 struct ceph_mds_client *mdsc = &client->mdsc; 711 struct ceph_mds_client *mdsc = fsc->mdsc;
721 struct ceph_mds_request *req; 712 struct ceph_mds_request *req;
722 int err = -EROFS; 713 int err = -EROFS;
723 int op; 714 int op;
@@ -758,8 +749,8 @@ out:
758static int ceph_link(struct dentry *old_dentry, struct inode *dir, 749static int ceph_link(struct dentry *old_dentry, struct inode *dir,
759 struct dentry *dentry) 750 struct dentry *dentry)
760{ 751{
761 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 752 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
762 struct ceph_mds_client *mdsc = &client->mdsc; 753 struct ceph_mds_client *mdsc = fsc->mdsc;
763 struct ceph_mds_request *req; 754 struct ceph_mds_request *req;
764 int err; 755 int err;
765 756
@@ -813,8 +804,8 @@ static int drop_caps_for_unlink(struct inode *inode)
813 */ 804 */
814static int ceph_unlink(struct inode *dir, struct dentry *dentry) 805static int ceph_unlink(struct inode *dir, struct dentry *dentry)
815{ 806{
816 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 807 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
817 struct ceph_mds_client *mdsc = &client->mdsc; 808 struct ceph_mds_client *mdsc = fsc->mdsc;
818 struct inode *inode = dentry->d_inode; 809 struct inode *inode = dentry->d_inode;
819 struct ceph_mds_request *req; 810 struct ceph_mds_request *req;
820 int err = -EROFS; 811 int err = -EROFS;
@@ -854,8 +845,8 @@ out:
854static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, 845static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
855 struct inode *new_dir, struct dentry *new_dentry) 846 struct inode *new_dir, struct dentry *new_dentry)
856{ 847{
857 struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb); 848 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
858 struct ceph_mds_client *mdsc = &client->mdsc; 849 struct ceph_mds_client *mdsc = fsc->mdsc;
859 struct ceph_mds_request *req; 850 struct ceph_mds_request *req;
860 int err; 851 int err;
861 852
@@ -1076,7 +1067,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1076 struct ceph_inode_info *ci = ceph_inode(inode); 1067 struct ceph_inode_info *ci = ceph_inode(inode);
1077 int left; 1068 int left;
1078 1069
1079 if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1070 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1080 return -EISDIR; 1071 return -EISDIR;
1081 1072
1082 if (!cf->dir_info) { 1073 if (!cf->dir_info) {
@@ -1177,7 +1168,7 @@ void ceph_dentry_lru_add(struct dentry *dn)
1177 dout("dentry_lru_add %p %p '%.*s'\n", di, dn, 1168 dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
1178 dn->d_name.len, dn->d_name.name); 1169 dn->d_name.len, dn->d_name.name);
1179 if (di) { 1170 if (di) {
1180 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1171 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1181 spin_lock(&mdsc->dentry_lru_lock); 1172 spin_lock(&mdsc->dentry_lru_lock);
1182 list_add_tail(&di->lru, &mdsc->dentry_lru); 1173 list_add_tail(&di->lru, &mdsc->dentry_lru);
1183 mdsc->num_dentry++; 1174 mdsc->num_dentry++;
@@ -1193,7 +1184,7 @@ void ceph_dentry_lru_touch(struct dentry *dn)
1193 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn, 1184 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
1194 dn->d_name.len, dn->d_name.name, di->offset); 1185 dn->d_name.len, dn->d_name.name, di->offset);
1195 if (di) { 1186 if (di) {
1196 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1187 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1197 spin_lock(&mdsc->dentry_lru_lock); 1188 spin_lock(&mdsc->dentry_lru_lock);
1198 list_move_tail(&di->lru, &mdsc->dentry_lru); 1189 list_move_tail(&di->lru, &mdsc->dentry_lru);
1199 spin_unlock(&mdsc->dentry_lru_lock); 1190 spin_unlock(&mdsc->dentry_lru_lock);
@@ -1208,7 +1199,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
1208 dout("dentry_lru_del %p %p '%.*s'\n", di, dn, 1199 dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
1209 dn->d_name.len, dn->d_name.name); 1200 dn->d_name.len, dn->d_name.name);
1210 if (di) { 1201 if (di) {
1211 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1202 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1212 spin_lock(&mdsc->dentry_lru_lock); 1203 spin_lock(&mdsc->dentry_lru_lock);
1213 list_del_init(&di->lru); 1204 list_del_init(&di->lru);
1214 mdsc->num_dentry--; 1205 mdsc->num_dentry--;
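
The rewritten __dcache_readdir no longer holds i_lock across the dcache walk; instead it re-checks I_COMPLETE afterwards through ceph_i_test(). That helper is not part of this diff; a plausible sketch, assuming it is an inline in super.h:

	/* sketch (assumed helper, not shown in this diff): test i_ceph_flags
	 * bits under i_lock so the caller need not hold the lock itself */
	static inline bool ceph_i_test(struct inode *inode, unsigned mask)
	{
		struct ceph_inode_info *ci = ceph_inode(inode);
		bool r;

		spin_lock(&inode->i_lock);
		r = (ci->i_ceph_flags & mask) == mask;
		spin_unlock(&inode->i_lock);
		return r;
	}

If the flag was lost while the locks were dropped, the caller falls back to a normal MDS readdir by returning -EAGAIN, as the hunk above shows.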
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 4480cb1c63e7..2297d9426992 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -1,10 +1,11 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/exportfs.h> 3#include <linux/exportfs.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <asm/unaligned.h> 5#include <asm/unaligned.h>
6 6
7#include "super.h" 7#include "super.h"
8#include "mds_client.h"
8 9
9/* 10/*
10 * NFS export support 11 * NFS export support
@@ -42,32 +43,37 @@ struct ceph_nfs_confh {
42static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, 43static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
43 int connectable) 44 int connectable)
44{ 45{
46 int type;
45 struct ceph_nfs_fh *fh = (void *)rawfh; 47 struct ceph_nfs_fh *fh = (void *)rawfh;
46 struct ceph_nfs_confh *cfh = (void *)rawfh; 48 struct ceph_nfs_confh *cfh = (void *)rawfh;
47 struct dentry *parent = dentry->d_parent; 49 struct dentry *parent = dentry->d_parent;
48 struct inode *inode = dentry->d_inode; 50 struct inode *inode = dentry->d_inode;
49 int type; 51 int connected_handle_length = sizeof(*cfh)/4;
52 int handle_length = sizeof(*fh)/4;
50 53
51 /* don't re-export snaps */ 54 /* don't re-export snaps */
52 if (ceph_snap(inode) != CEPH_NOSNAP) 55 if (ceph_snap(inode) != CEPH_NOSNAP)
53 return -EINVAL; 56 return -EINVAL;
54 57
55 if (*max_len >= sizeof(*cfh)) { 58 if (*max_len >= connected_handle_length) {
56 dout("encode_fh %p connectable\n", dentry); 59 dout("encode_fh %p connectable\n", dentry);
57 cfh->ino = ceph_ino(dentry->d_inode); 60 cfh->ino = ceph_ino(dentry->d_inode);
58 cfh->parent_ino = ceph_ino(parent->d_inode); 61 cfh->parent_ino = ceph_ino(parent->d_inode);
59 cfh->parent_name_hash = parent->d_name.hash; 62 cfh->parent_name_hash = parent->d_name.hash;
60 *max_len = sizeof(*cfh); 63 *max_len = connected_handle_length;
61 type = 2; 64 type = 2;
62 } else if (*max_len > sizeof(*fh)) { 65 } else if (*max_len >= handle_length) {
63 if (connectable) 66 if (connectable) {
64 return -ENOSPC; 67 *max_len = connected_handle_length;
68 return 255;
69 }
65 dout("encode_fh %p\n", dentry); 70 dout("encode_fh %p\n", dentry);
66 fh->ino = ceph_ino(dentry->d_inode); 71 fh->ino = ceph_ino(dentry->d_inode);
67 *max_len = sizeof(*fh); 72 *max_len = handle_length;
68 type = 1; 73 type = 1;
69 } else { 74 } else {
70 return -ENOSPC; 75 *max_len = handle_length;
76 return 255;
71 } 77 }
72 return type; 78 return type;
73} 79}
@@ -115,7 +121,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
115static struct dentry *__cfh_to_dentry(struct super_block *sb, 121static struct dentry *__cfh_to_dentry(struct super_block *sb,
116 struct ceph_nfs_confh *cfh) 122 struct ceph_nfs_confh *cfh)
117{ 123{
118 struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc; 124 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
119 struct inode *inode; 125 struct inode *inode;
120 struct dentry *dentry; 126 struct dentry *dentry;
121 struct ceph_vino vino; 127 struct ceph_vino vino;
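
The encode_fh rework above adopts the exportfs convention: when the caller's buffer is too small, write the required size (in 4-byte words, since rawfh is a u32 array) back through *max_len and return 255 instead of -ENOSPC, so the caller can retry with a larger buffer. The same negotiation is visible from user space through the name_to_handle_at() syscall (added to Linux after this patch); a hedged sketch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		struct file_handle *fh = malloc(sizeof(*fh));
		int mnt_id;

		fh->handle_bytes = 0;			/* deliberately too small */
		if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mnt_id, 0) < 0 &&
		    errno == EOVERFLOW)			/* kernel wrote back the size */
			printf("handle needs %u bytes\n", fh->handle_bytes);
		free(fh);
		return 0;
	}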
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 8c044a4f0457..e77c28cf3690 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1,5 +1,6 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/module.h>
3#include <linux/sched.h> 4#include <linux/sched.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
5#include <linux/file.h> 6#include <linux/file.h>
@@ -38,8 +39,8 @@
38static struct ceph_mds_request * 39static struct ceph_mds_request *
39prepare_open_request(struct super_block *sb, int flags, int create_mode) 40prepare_open_request(struct super_block *sb, int flags, int create_mode)
40{ 41{
41 struct ceph_client *client = ceph_sb_to_client(sb); 42 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
42 struct ceph_mds_client *mdsc = &client->mdsc; 43 struct ceph_mds_client *mdsc = fsc->mdsc;
43 struct ceph_mds_request *req; 44 struct ceph_mds_request *req;
44 int want_auth = USE_ANY_MDS; 45 int want_auth = USE_ANY_MDS;
45 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 46 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
@@ -117,8 +118,8 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
117int ceph_open(struct inode *inode, struct file *file) 118int ceph_open(struct inode *inode, struct file *file)
118{ 119{
119 struct ceph_inode_info *ci = ceph_inode(inode); 120 struct ceph_inode_info *ci = ceph_inode(inode);
120 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 121 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
121 struct ceph_mds_client *mdsc = &client->mdsc; 122 struct ceph_mds_client *mdsc = fsc->mdsc;
122 struct ceph_mds_request *req; 123 struct ceph_mds_request *req;
123 struct ceph_file_info *cf = file->private_data; 124 struct ceph_file_info *cf = file->private_data;
124 struct inode *parent_inode = file->f_dentry->d_parent->d_inode; 125 struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
@@ -216,8 +217,8 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
216 struct nameidata *nd, int mode, 217 struct nameidata *nd, int mode,
217 int locked_dir) 218 int locked_dir)
218{ 219{
219 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 220 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
220 struct ceph_mds_client *mdsc = &client->mdsc; 221 struct ceph_mds_client *mdsc = fsc->mdsc;
221 struct file *file = nd->intent.open.file; 222 struct file *file = nd->intent.open.file;
222 struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); 223 struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
223 struct ceph_mds_request *req; 224 struct ceph_mds_request *req;
@@ -270,163 +271,6 @@ int ceph_release(struct inode *inode, struct file *file)
270} 271}
271 272
272/* 273/*
273 * build a vector of user pages
274 */
275static struct page **get_direct_page_vector(const char __user *data,
276 int num_pages,
277 loff_t off, size_t len)
278{
279 struct page **pages;
280 int rc;
281
282 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
283 if (!pages)
284 return ERR_PTR(-ENOMEM);
285
286 down_read(&current->mm->mmap_sem);
287 rc = get_user_pages(current, current->mm, (unsigned long)data,
288 num_pages, 0, 0, pages, NULL);
289 up_read(&current->mm->mmap_sem);
290 if (rc < 0)
291 goto fail;
292 return pages;
293
294fail:
295 kfree(pages);
296 return ERR_PTR(rc);
297}
298
299static void put_page_vector(struct page **pages, int num_pages)
300{
301 int i;
302
303 for (i = 0; i < num_pages; i++)
304 put_page(pages[i]);
305 kfree(pages);
306}
307
308void ceph_release_page_vector(struct page **pages, int num_pages)
309{
310 int i;
311
312 for (i = 0; i < num_pages; i++)
313 __free_pages(pages[i], 0);
314 kfree(pages);
315}
316
317/*
318 * allocate a vector of new pages

319 */
320static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
321{
322 struct page **pages;
323 int i;
324
325 pages = kmalloc(sizeof(*pages) * num_pages, flags);
326 if (!pages)
327 return ERR_PTR(-ENOMEM);
328 for (i = 0; i < num_pages; i++) {
329 pages[i] = __page_cache_alloc(flags);
330 if (pages[i] == NULL) {
331 ceph_release_page_vector(pages, i);
332 return ERR_PTR(-ENOMEM);
333 }
334 }
335 return pages;
336}
337
338/*
339 * copy user data into a page vector
340 */
341static int copy_user_to_page_vector(struct page **pages,
342 const char __user *data,
343 loff_t off, size_t len)
344{
345 int i = 0;
346 int po = off & ~PAGE_CACHE_MASK;
347 int left = len;
348 int l, bad;
349
350 while (left > 0) {
351 l = min_t(int, PAGE_CACHE_SIZE-po, left);
352 bad = copy_from_user(page_address(pages[i]) + po, data, l);
353 if (bad == l)
354 return -EFAULT;
355 data += l - bad;
356 left -= l - bad;
357 po += l - bad;
358 if (po == PAGE_CACHE_SIZE) {
359 po = 0;
360 i++;
361 }
362 }
363 return len;
364}
365
366/*
367 * copy user data from a page vector into a user pointer
368 */
369static int copy_page_vector_to_user(struct page **pages, char __user *data,
370 loff_t off, size_t len)
371{
372 int i = 0;
373 int po = off & ~PAGE_CACHE_MASK;
374 int left = len;
375 int l, bad;
376
377 while (left > 0) {
378 l = min_t(int, left, PAGE_CACHE_SIZE-po);
379 bad = copy_to_user(data, page_address(pages[i]) + po, l);
380 if (bad == l)
381 return -EFAULT;
382 data += l - bad;
383 left -= l - bad;
384 if (po) {
385 po += l - bad;
386 if (po == PAGE_CACHE_SIZE)
387 po = 0;
388 }
389 i++;
390 }
391 return len;
392}
393
394/*
395 * Zero an extent within a page vector. Offset is relative to the
396 * start of the first page.
397 */
398static void zero_page_vector_range(int off, int len, struct page **pages)
399{
400 int i = off >> PAGE_CACHE_SHIFT;
401
402 off &= ~PAGE_CACHE_MASK;
403
404 dout("zero_page_vector_page %u~%u\n", off, len);
405
406 /* leading partial page? */
407 if (off) {
408 int end = min((int)PAGE_CACHE_SIZE, off + len);
409 dout("zeroing %d %p head from %d\n", i, pages[i],
410 (int)off);
411 zero_user_segment(pages[i], off, end);
412 len -= (end - off);
413 i++;
414 }
415 while (len >= PAGE_CACHE_SIZE) {
416 dout("zeroing %d %p len=%d\n", i, pages[i], len);
417 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
418 len -= PAGE_CACHE_SIZE;
419 i++;
420 }
421 /* trailing partial page? */
422 if (len) {
423 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
424 zero_user_segment(pages[i], 0, len);
425 }
426}
427
428
429/*
430 * Read a range of bytes striped over one or more objects. Iterate over 274 * Read a range of bytes striped over one or more objects. Iterate over
431 * objects we stripe over. (That's not atomic, but good enough for now.) 275 * objects we stripe over. (That's not atomic, but good enough for now.)
432 * 276 *
@@ -438,7 +282,7 @@ static int striped_read(struct inode *inode,
438 struct page **pages, int num_pages, 282 struct page **pages, int num_pages,
439 int *checkeof) 283 int *checkeof)
440{ 284{
441 struct ceph_client *client = ceph_inode_to_client(inode); 285 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
442 struct ceph_inode_info *ci = ceph_inode(inode); 286 struct ceph_inode_info *ci = ceph_inode(inode);
443 u64 pos, this_len; 287 u64 pos, this_len;
444 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ 288 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
@@ -459,7 +303,7 @@ static int striped_read(struct inode *inode,
459 303
460more: 304more:
461 this_len = left; 305 this_len = left;
462 ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode), 306 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
463 &ci->i_layout, pos, &this_len, 307 &ci->i_layout, pos, &this_len,
464 ci->i_truncate_seq, 308 ci->i_truncate_seq,
465 ci->i_truncate_size, 309 ci->i_truncate_size,
@@ -477,8 +321,8 @@ more:
477 321
478 if (read < pos - off) { 322 if (read < pos - off) {
479 dout(" zero gap %llu to %llu\n", off + read, pos); 323 dout(" zero gap %llu to %llu\n", off + read, pos);
480 zero_page_vector_range(page_off + read, 324 ceph_zero_page_vector_range(page_off + read,
481 pos - off - read, pages); 325 pos - off - read, pages);
482 } 326 }
483 pos += ret; 327 pos += ret;
484 read = pos - off; 328 read = pos - off;
@@ -495,8 +339,8 @@ more:
495 /* was original extent fully inside i_size? */ 339 /* was original extent fully inside i_size? */
496 if (pos + left <= inode->i_size) { 340 if (pos + left <= inode->i_size) {
497 dout("zero tail\n"); 341 dout("zero tail\n");
498 zero_page_vector_range(page_off + read, len - read, 342 ceph_zero_page_vector_range(page_off + read, len - read,
499 pages); 343 pages);
500 read = len; 344 read = len;
501 goto out; 345 goto out;
502 } 346 }
@@ -531,7 +375,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
531 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 375 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
532 376
533 if (file->f_flags & O_DIRECT) { 377 if (file->f_flags & O_DIRECT) {
534 pages = get_direct_page_vector(data, num_pages, off, len); 378 pages = ceph_get_direct_page_vector(data, num_pages, off, len);
535 379
536 /* 380 /*
537 * flush any page cache pages in this range. this 381 * flush any page cache pages in this range. this
@@ -552,13 +396,13 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
552 ret = striped_read(inode, off, len, pages, num_pages, checkeof); 396 ret = striped_read(inode, off, len, pages, num_pages, checkeof);
553 397
554 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) 398 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
555 ret = copy_page_vector_to_user(pages, data, off, ret); 399 ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
556 if (ret >= 0) 400 if (ret >= 0)
557 *poff = off + ret; 401 *poff = off + ret;
558 402
559done: 403done:
560 if (file->f_flags & O_DIRECT) 404 if (file->f_flags & O_DIRECT)
561 put_page_vector(pages, num_pages); 405 ceph_put_page_vector(pages, num_pages);
562 else 406 else
563 ceph_release_page_vector(pages, num_pages); 407 ceph_release_page_vector(pages, num_pages);
564 dout("sync_read result %d\n", ret); 408 dout("sync_read result %d\n", ret);
@@ -594,7 +438,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
594{ 438{
595 struct inode *inode = file->f_dentry->d_inode; 439 struct inode *inode = file->f_dentry->d_inode;
596 struct ceph_inode_info *ci = ceph_inode(inode); 440 struct ceph_inode_info *ci = ceph_inode(inode);
597 struct ceph_client *client = ceph_inode_to_client(inode); 441 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
598 struct ceph_osd_request *req; 442 struct ceph_osd_request *req;
599 struct page **pages; 443 struct page **pages;
600 int num_pages; 444 int num_pages;
@@ -642,7 +486,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
642 */ 486 */
643more: 487more:
644 len = left; 488 len = left;
645 req = ceph_osdc_new_request(&client->osdc, &ci->i_layout, 489 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
646 ceph_vino(inode), pos, &len, 490 ceph_vino(inode), pos, &len,
647 CEPH_OSD_OP_WRITE, flags, 491 CEPH_OSD_OP_WRITE, flags,
648 ci->i_snap_realm->cached_context, 492 ci->i_snap_realm->cached_context,
@@ -655,7 +499,7 @@ more:
655 num_pages = calc_pages_for(pos, len); 499 num_pages = calc_pages_for(pos, len);
656 500
657 if (file->f_flags & O_DIRECT) { 501 if (file->f_flags & O_DIRECT) {
658 pages = get_direct_page_vector(data, num_pages, pos, len); 502 pages = ceph_get_direct_page_vector(data, num_pages, pos, len);
659 if (IS_ERR(pages)) { 503 if (IS_ERR(pages)) {
660 ret = PTR_ERR(pages); 504 ret = PTR_ERR(pages);
661 goto out; 505 goto out;
@@ -673,7 +517,7 @@ more:
673 ret = PTR_ERR(pages); 517 ret = PTR_ERR(pages);
674 goto out; 518 goto out;
675 } 519 }
676 ret = copy_user_to_page_vector(pages, data, pos, len); 520 ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
677 if (ret < 0) { 521 if (ret < 0) {
678 ceph_release_page_vector(pages, num_pages); 522 ceph_release_page_vector(pages, num_pages);
679 goto out; 523 goto out;
@@ -689,7 +533,7 @@ more:
689 req->r_num_pages = num_pages; 533 req->r_num_pages = num_pages;
690 req->r_inode = inode; 534 req->r_inode = inode;
691 535
692 ret = ceph_osdc_start_request(&client->osdc, req, false); 536 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
693 if (!ret) { 537 if (!ret) {
694 if (req->r_safe_callback) { 538 if (req->r_safe_callback) {
695 /* 539 /*
@@ -697,15 +541,15 @@ more:
697 * start_request so that a tid has been assigned. 541 * start_request so that a tid has been assigned.
698 */ 542 */
699 spin_lock(&ci->i_unsafe_lock); 543 spin_lock(&ci->i_unsafe_lock);
700 list_add(&ci->i_unsafe_writes, &req->r_unsafe_item); 544 list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
701 spin_unlock(&ci->i_unsafe_lock); 545 spin_unlock(&ci->i_unsafe_lock);
702 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 546 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
703 } 547 }
704 ret = ceph_osdc_wait_request(&client->osdc, req); 548 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
705 } 549 }
706 550
707 if (file->f_flags & O_DIRECT) 551 if (file->f_flags & O_DIRECT)
708 put_page_vector(pages, num_pages); 552 ceph_put_page_vector(pages, num_pages);
709 else if (file->f_flags & O_SYNC) 553 else if (file->f_flags & O_SYNC)
710 ceph_release_page_vector(pages, num_pages); 554 ceph_release_page_vector(pages, num_pages);
711 555
@@ -814,7 +658,8 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
814 struct ceph_file_info *fi = file->private_data; 658 struct ceph_file_info *fi = file->private_data;
815 struct inode *inode = file->f_dentry->d_inode; 659 struct inode *inode = file->f_dentry->d_inode;
816 struct ceph_inode_info *ci = ceph_inode(inode); 660 struct ceph_inode_info *ci = ceph_inode(inode);
817 struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; 661 struct ceph_osd_client *osdc =
662 &ceph_sb_to_client(inode->i_sb)->client->osdc;
818 loff_t endoff = pos + iov->iov_len; 663 loff_t endoff = pos + iov->iov_len;
819 int want, got = 0; 664 int want, got = 0;
820 int ret, err; 665 int ret, err;
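
All of the page-vector helpers deleted above move into libceph (the ceph_-prefixed calls that replace them), and they size their arrays with calc_pages_for(), which this diff does not show. A sketch of the usual definition, assuming the libceph copy matches:

	/* how many pages does an off/len byte range touch? (sketch) */
	static inline int calc_pages_for(u64 off, u64 len)
	{
		return ((off + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		       (off >> PAGE_CACHE_SHIFT);
	}

For example, off = 4094 and len = 8 straddles a page boundary and yields 2 on a 4 KB-page system.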
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 62377ec37edf..1d6a45b5a04c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/fs.h> 4#include <linux/fs.h>
@@ -13,7 +13,8 @@
13#include <linux/pagevec.h> 13#include <linux/pagevec.h>
14 14
15#include "super.h" 15#include "super.h"
16#include "decode.h" 16#include "mds_client.h"
17#include <linux/ceph/decode.h>
17 18
18/* 19/*
19 * Ceph inode operations 20 * Ceph inode operations
@@ -384,7 +385,7 @@ void ceph_destroy_inode(struct inode *inode)
384 */ 385 */
385 if (ci->i_snap_realm) { 386 if (ci->i_snap_realm) {
386 struct ceph_mds_client *mdsc = 387 struct ceph_mds_client *mdsc =
387 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 388 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
388 struct ceph_snap_realm *realm = ci->i_snap_realm; 389 struct ceph_snap_realm *realm = ci->i_snap_realm;
389 390
390 dout(" dropping residual ref to snap realm %p\n", realm); 391 dout(" dropping residual ref to snap realm %p\n", realm);
@@ -685,7 +686,7 @@ static int fill_inode(struct inode *inode,
685 } 686 }
686 687
687 /* it may be better to set st_size in getattr instead? */ 688 /* it may be better to set st_size in getattr instead? */
688 if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) 689 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
689 inode->i_size = ci->i_rbytes; 690 inode->i_size = ci->i_rbytes;
690 break; 691 break;
691 default: 692 default:
@@ -901,7 +902,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
901 struct inode *in = NULL; 902 struct inode *in = NULL;
902 struct ceph_mds_reply_inode *ininfo; 903 struct ceph_mds_reply_inode *ininfo;
903 struct ceph_vino vino; 904 struct ceph_vino vino;
904 struct ceph_client *client = ceph_sb_to_client(sb); 905 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
905 int i = 0; 906 int i = 0;
906 int err = 0; 907 int err = 0;
907 908
@@ -965,7 +966,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
965 */ 966 */
966 if (rinfo->head->is_dentry && !req->r_aborted && 967 if (rinfo->head->is_dentry && !req->r_aborted &&
967 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 968 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
968 client->mount_args->snapdir_name, 969 fsc->mount_options->snapdir_name,
969 req->r_dentry->d_name.len))) { 970 req->r_dentry->d_name.len))) {
970 /* 971 /*
971 * lookup link rename : null -> possibly existing inode 972 * lookup link rename : null -> possibly existing inode
@@ -1533,7 +1534,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1533 struct inode *parent_inode = dentry->d_parent->d_inode; 1534 struct inode *parent_inode = dentry->d_parent->d_inode;
1534 const unsigned int ia_valid = attr->ia_valid; 1535 const unsigned int ia_valid = attr->ia_valid;
1535 struct ceph_mds_request *req; 1536 struct ceph_mds_request *req;
1536 struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc; 1537 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1537 int issued; 1538 int issued;
1538 int release = 0, dirtied = 0; 1539 int release = 0, dirtied = 0;
1539 int mask = 0; 1540 int mask = 0;
@@ -1728,8 +1729,8 @@ out:
1728 */ 1729 */
1729int ceph_do_getattr(struct inode *inode, int mask) 1730int ceph_do_getattr(struct inode *inode, int mask)
1730{ 1731{
1731 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 1732 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1732 struct ceph_mds_client *mdsc = &client->mdsc; 1733 struct ceph_mds_client *mdsc = fsc->mdsc;
1733 struct ceph_mds_request *req; 1734 struct ceph_mds_request *req;
1734 int err; 1735 int err;
1735 1736
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 76e307d2aba1..8888c9ba68db 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -1,8 +1,10 @@
1#include <linux/in.h> 1#include <linux/in.h>
2 2
3#include "ioctl.h"
4#include "super.h" 3#include "super.h"
5#include "ceph_debug.h" 4#include "mds_client.h"
5#include <linux/ceph/ceph_debug.h>
6
7#include "ioctl.h"
6 8
7 9
8/* 10/*
@@ -37,7 +39,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
37{ 39{
38 struct inode *inode = file->f_dentry->d_inode; 40 struct inode *inode = file->f_dentry->d_inode;
39 struct inode *parent_inode = file->f_dentry->d_parent->d_inode; 41 struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
40 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 42 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
41 struct ceph_mds_request *req; 43 struct ceph_mds_request *req;
42 struct ceph_ioctl_layout l; 44 struct ceph_ioctl_layout l;
43 int err, i; 45 int err, i;
@@ -90,6 +92,68 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
90} 92}
91 93
92/* 94/*
95 * Set a layout policy on a directory inode. All items in the tree
96 * rooted at this inode will inherit this layout on creation
97 * (it doesn't apply retroactively), unless a subdirectory has
98 * its own layout policy.
99 */
100static long ceph_ioctl_set_layout_policy(struct file *file, void __user *arg)
101{
102 struct inode *inode = file->f_dentry->d_inode;
103 struct ceph_mds_request *req;
104 struct ceph_ioctl_layout l;
105 int err, i;
106 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
107
108 /* copy and validate */
109 if (copy_from_user(&l, arg, sizeof(l)))
110 return -EFAULT;
111
112 if ((l.object_size & ~PAGE_MASK) ||
113 (l.stripe_unit & ~PAGE_MASK) ||
114 !l.stripe_unit ||
115 (l.object_size &&
116 (unsigned)l.object_size % (unsigned)l.stripe_unit))
117 return -EINVAL;
118
119 /* make sure it's a valid data pool */
120 if (l.data_pool > 0) {
121 mutex_lock(&mdsc->mutex);
122 err = -EINVAL;
123 for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
124 if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
125 err = 0;
126 break;
127 }
128 mutex_unlock(&mdsc->mutex);
129 if (err)
130 return err;
131 }
132
133 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
134 USE_AUTH_MDS);
135
136 if (IS_ERR(req))
137 return PTR_ERR(req);
138 req->r_inode = igrab(inode);
139
140 req->r_args.setlayout.layout.fl_stripe_unit =
141 cpu_to_le32(l.stripe_unit);
142 req->r_args.setlayout.layout.fl_stripe_count =
143 cpu_to_le32(l.stripe_count);
144 req->r_args.setlayout.layout.fl_object_size =
145 cpu_to_le32(l.object_size);
146 req->r_args.setlayout.layout.fl_pg_pool =
147 cpu_to_le32(l.data_pool);
148 req->r_args.setlayout.layout.fl_pg_preferred =
149 cpu_to_le32(l.preferred_osd);
150
151 err = ceph_mdsc_do_request(mdsc, inode, req);
152 ceph_mdsc_put_request(req);
153 return err;
154}
155
156/*
93 * Return object name, size/offset information, and location (OSD 157 * Return object name, size/offset information, and location (OSD
94 * number, network address) for a given file offset. 158 * number, network address) for a given file offset.
95 */ 159 */
@@ -98,7 +162,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
98 struct ceph_ioctl_dataloc dl; 162 struct ceph_ioctl_dataloc dl;
99 struct inode *inode = file->f_dentry->d_inode; 163 struct inode *inode = file->f_dentry->d_inode;
100 struct ceph_inode_info *ci = ceph_inode(inode); 164 struct ceph_inode_info *ci = ceph_inode(inode);
101 struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; 165 struct ceph_osd_client *osdc =
166 &ceph_sb_to_client(inode->i_sb)->client->osdc;
102 u64 len = 1, olen; 167 u64 len = 1, olen;
103 u64 tmp; 168 u64 tmp;
104 struct ceph_object_layout ol; 169 struct ceph_object_layout ol;
@@ -174,11 +239,15 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
174 case CEPH_IOC_SET_LAYOUT: 239 case CEPH_IOC_SET_LAYOUT:
175 return ceph_ioctl_set_layout(file, (void __user *)arg); 240 return ceph_ioctl_set_layout(file, (void __user *)arg);
176 241
242 case CEPH_IOC_SET_LAYOUT_POLICY:
243 return ceph_ioctl_set_layout_policy(file, (void __user *)arg);
244
177 case CEPH_IOC_GET_DATALOC: 245 case CEPH_IOC_GET_DATALOC:
178 return ceph_ioctl_get_dataloc(file, (void __user *)arg); 246 return ceph_ioctl_get_dataloc(file, (void __user *)arg);
179 247
180 case CEPH_IOC_LAZYIO: 248 case CEPH_IOC_LAZYIO:
181 return ceph_ioctl_lazyio(file); 249 return ceph_ioctl_lazyio(file);
182 } 250 }
251
183 return -ENOTTY; 252 return -ENOTTY;
184} 253}
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index 88451a3b6857..a6ce54e94eb5 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -4,7 +4,7 @@
4#include <linux/ioctl.h> 4#include <linux/ioctl.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7#define CEPH_IOCTL_MAGIC 0x97 7#define CEPH_IOCTL_MAGIC 0x98
8 8
9/* just use u64 to align sanely on all archs */ 9/* just use u64 to align sanely on all archs */
10struct ceph_ioctl_layout { 10struct ceph_ioctl_layout {
@@ -17,6 +17,8 @@ struct ceph_ioctl_layout {
17 struct ceph_ioctl_layout) 17 struct ceph_ioctl_layout)
18#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \ 18#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \
19 struct ceph_ioctl_layout) 19 struct ceph_ioctl_layout)
20#define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5, \
21 struct ceph_ioctl_layout)
20 22
21/* 23/*
22 * Extract identity, address of the OSD and object storing a given 24 * Extract identity, address of the OSD and object storing a given
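
Taken together with the ioctl.c hunk above, the new CEPH_IOC_SET_LAYOUT_POLICY lets user space set a default layout on a directory. A hedged sketch of a caller, assuming the ceph_ioctl_layout fields referenced in ceph_ioctl_set_layout_policy() (stripe_unit, stripe_count, object_size, data_pool, preferred_osd):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "ioctl.h"	/* CEPH_IOC_SET_LAYOUT_POLICY, struct ceph_ioctl_layout */

	int set_dir_layout_policy(const char *dir)
	{
		struct ceph_ioctl_layout l = {
			.stripe_unit  = 65536,		/* must be page-aligned and nonzero */
			.stripe_count = 4,
			.object_size  = 4 << 20,	/* must be a multiple of stripe_unit */
			.data_pool    = -1,		/* <= 0 skips the pool validation */
		};
		int fd = open(dir, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, CEPH_IOC_SET_LAYOUT_POLICY, &l);
		close(fd);
		return ret;
	}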
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ff4e753aae92..40abde93c345 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -1,11 +1,11 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/file.h> 3#include <linux/file.h>
4#include <linux/namei.h> 4#include <linux/namei.h>
5 5
6#include "super.h" 6#include "super.h"
7#include "mds_client.h" 7#include "mds_client.h"
8#include "pagelist.h" 8#include <linux/ceph/pagelist.h>
9 9
10/** 10/**
11 * Implement fcntl and flock locking functions. 11 * Implement fcntl and flock locking functions.
@@ -16,7 +16,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
16{ 16{
17 struct inode *inode = file->f_dentry->d_inode; 17 struct inode *inode = file->f_dentry->d_inode;
18 struct ceph_mds_client *mdsc = 18 struct ceph_mds_client *mdsc =
19 &ceph_sb_to_client(inode->i_sb)->mdsc; 19 ceph_sb_to_client(inode->i_sb)->mdsc;
20 struct ceph_mds_request *req; 20 struct ceph_mds_request *req;
21 int err; 21 int err;
22 22
@@ -181,8 +181,9 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
181 * Encode the flock and fcntl locks for the given inode into the pagelist. 181 * Encode the flock and fcntl locks for the given inode into the pagelist.
182 * Format is: #fcntl locks, sequential fcntl locks, #flock locks, 182 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
183 * sequential flock locks. 183 * sequential flock locks.
184 * Must be called with BLK already held, and the lock numbers should have 184 * Must be called with lock_flocks() already held.
185 * been gathered under the same lock holding window. 185 * If we encounter more of a specific lock type than expected,
186 * we return the value 1.
186 */ 187 */
187int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, 188int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
188 int num_fcntl_locks, int num_flock_locks) 189 int num_fcntl_locks, int num_flock_locks)
@@ -190,6 +191,8 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
190 struct file_lock *lock; 191 struct file_lock *lock;
191 struct ceph_filelock cephlock; 192 struct ceph_filelock cephlock;
192 int err = 0; 193 int err = 0;
194 int seen_fcntl = 0;
195 int seen_flock = 0;
193 196
194 dout("encoding %d flock and %d fcntl locks", num_flock_locks, 197 dout("encoding %d flock and %d fcntl locks", num_flock_locks,
195 num_fcntl_locks); 198 num_fcntl_locks);
@@ -198,6 +201,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
198 goto fail; 201 goto fail;
199 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 202 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
200 if (lock->fl_flags & FL_POSIX) { 203 if (lock->fl_flags & FL_POSIX) {
204 ++seen_fcntl;
205 if (seen_fcntl > num_fcntl_locks) {
206 err = -ENOSPC;
207 goto fail;
208 }
201 err = lock_to_ceph_filelock(lock, &cephlock); 209 err = lock_to_ceph_filelock(lock, &cephlock);
202 if (err) 210 if (err)
203 goto fail; 211 goto fail;
@@ -213,6 +221,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
213 goto fail; 221 goto fail;
214 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 222 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
215 if (lock->fl_flags & FL_FLOCK) { 223 if (lock->fl_flags & FL_FLOCK) {
224 ++seen_flock;
225 if (seen_flock > num_flock_locks) {
226 err = -ENOSPC;
227 goto fail;
228 }
216 err = lock_to_ceph_filelock(lock, &cephlock); 229 err = lock_to_ceph_filelock(lock, &cephlock);
217 if (err) 230 if (err)
218 goto fail; 231 goto fail;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fad95f8f2608..3142b15940c2 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1,17 +1,21 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/fs.h>
3#include <linux/wait.h> 4#include <linux/wait.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
5#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/debugfs.h>
8#include <linux/seq_file.h>
6#include <linux/smp_lock.h> 9#include <linux/smp_lock.h>
7 10
8#include "mds_client.h"
9#include "mon_client.h"
10#include "super.h" 11#include "super.h"
11#include "messenger.h" 12#include "mds_client.h"
12#include "decode.h" 13
13#include "auth.h" 14#include <linux/ceph/messenger.h>
14#include "pagelist.h" 15#include <linux/ceph/decode.h>
16#include <linux/ceph/pagelist.h>
17#include <linux/ceph/auth.h>
18#include <linux/ceph/debugfs.h>
15 19
16/* 20/*
17 * A cluster of MDS (metadata server) daemons is responsible for 21 * A cluster of MDS (metadata server) daemons is responsible for
@@ -286,8 +290,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
286 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 290 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
287 if (atomic_dec_and_test(&s->s_ref)) { 291 if (atomic_dec_and_test(&s->s_ref)) {
288 if (s->s_authorizer) 292 if (s->s_authorizer)
289 s->s_mdsc->client->monc.auth->ops->destroy_authorizer( 293 s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
290 s->s_mdsc->client->monc.auth, s->s_authorizer); 294 s->s_mdsc->fsc->client->monc.auth,
295 s->s_authorizer);
291 kfree(s); 296 kfree(s);
292 } 297 }
293} 298}
@@ -344,7 +349,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
344 s->s_seq = 0; 349 s->s_seq = 0;
345 mutex_init(&s->s_mutex); 350 mutex_init(&s->s_mutex);
346 351
347 ceph_con_init(mdsc->client->msgr, &s->s_con); 352 ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
348 s->s_con.private = s; 353 s->s_con.private = s;
349 s->s_con.ops = &mds_con_ops; 354 s->s_con.ops = &mds_con_ops;
350 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; 355 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
@@ -599,7 +604,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
599 } else if (req->r_dentry) { 604 } else if (req->r_dentry) {
600 struct inode *dir = req->r_dentry->d_parent->d_inode; 605 struct inode *dir = req->r_dentry->d_parent->d_inode;
601 606
602 if (dir->i_sb != mdsc->client->sb) { 607 if (dir->i_sb != mdsc->fsc->sb) {
603 /* not this fs! */ 608 /* not this fs! */
604 inode = req->r_dentry->d_inode; 609 inode = req->r_dentry->d_inode;
605 } else if (ceph_snap(dir) != CEPH_NOSNAP) { 610 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
@@ -884,7 +889,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
884 __ceph_remove_cap(cap); 889 __ceph_remove_cap(cap);
885 if (!__ceph_is_any_real_caps(ci)) { 890 if (!__ceph_is_any_real_caps(ci)) {
886 struct ceph_mds_client *mdsc = 891 struct ceph_mds_client *mdsc =
887 &ceph_sb_to_client(inode->i_sb)->mdsc; 892 ceph_sb_to_client(inode->i_sb)->mdsc;
888 893
889 spin_lock(&mdsc->cap_dirty_lock); 894 spin_lock(&mdsc->cap_dirty_lock);
890 if (!list_empty(&ci->i_dirty_item)) { 895 if (!list_empty(&ci->i_dirty_item)) {
@@ -1146,7 +1151,7 @@ int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
1146 struct ceph_msg *msg, *partial = NULL; 1151 struct ceph_msg *msg, *partial = NULL;
1147 struct ceph_mds_cap_release *head; 1152 struct ceph_mds_cap_release *head;
1148 int err = -ENOMEM; 1153 int err = -ENOMEM;
1149 int extra = mdsc->client->mount_args->cap_release_safety; 1154 int extra = mdsc->fsc->mount_options->cap_release_safety;
1150 int num; 1155 int num;
1151 1156
1152 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, 1157 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
@@ -2085,7 +2090,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2085 2090
2086 /* insert trace into our cache */ 2091 /* insert trace into our cache */
2087 mutex_lock(&req->r_fill_mutex); 2092 mutex_lock(&req->r_fill_mutex);
2088 err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); 2093 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2089 if (err == 0) { 2094 if (err == 0) {
2090 if (result == 0 && rinfo->dir_nr) 2095 if (result == 0 && rinfo->dir_nr)
2091 ceph_readdir_prepopulate(req, req->r_session); 2096 ceph_readdir_prepopulate(req, req->r_session);
@@ -2361,19 +2366,35 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2361 2366
2362 if (recon_state->flock) { 2367 if (recon_state->flock) {
2363 int num_fcntl_locks, num_flock_locks; 2368 int num_fcntl_locks, num_flock_locks;
2364 2369 struct ceph_pagelist_cursor trunc_point;
2365 lock_kernel(); 2370
2366 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2371 ceph_pagelist_set_cursor(pagelist, &trunc_point);
2367 rec.v2.flock_len = (2*sizeof(u32) + 2372 do {
2368 (num_fcntl_locks+num_flock_locks) * 2373 lock_flocks();
2369 sizeof(struct ceph_filelock)); 2374 ceph_count_locks(inode, &num_fcntl_locks,
2370 2375 &num_flock_locks);
2371 err = ceph_pagelist_append(pagelist, &rec, reclen); 2376 rec.v2.flock_len = (2*sizeof(u32) +
2372 if (!err) 2377 (num_fcntl_locks+num_flock_locks) *
2373 err = ceph_encode_locks(inode, pagelist, 2378 sizeof(struct ceph_filelock));
2374 num_fcntl_locks, 2379 unlock_flocks();
2375 num_flock_locks); 2380
2376 unlock_kernel(); 2381 /* pre-alloc pagelist */
2382 ceph_pagelist_truncate(pagelist, &trunc_point);
2383 err = ceph_pagelist_append(pagelist, &rec, reclen);
2384 if (!err)
2385 err = ceph_pagelist_reserve(pagelist,
2386 rec.v2.flock_len);
2387
2388 /* encode locks */
2389 if (!err) {
2390 lock_flocks();
2391 err = ceph_encode_locks(inode,
2392 pagelist,
2393 num_fcntl_locks,
2394 num_flock_locks);
2395 unlock_flocks();
2396 }
2397 } while (err == -ENOSPC);
2377 } else { 2398 } else {
2378 err = ceph_pagelist_append(pagelist, &rec, reclen); 2399 err = ceph_pagelist_append(pagelist, &rec, reclen);
2379 } 2400 }
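
The do { ... } while (err == -ENOSPC) above replaces the old single pass under the BKL. Because lock_flocks() is dropped between counting and encoding, the lock lists can grow in the window; ceph_encode_locks() now detects that (the seen_* guards added in locks.c) and the caller rewinds the pagelist and retries. The distilled shape of the pattern, with illustrative names only (buf_*, count_items, encode_items are not real kernel APIs), is:

	do {
		lock();
		n = count_items();		/* snapshot under the lock */
		unlock();

		buf_truncate(buf, mark);	/* discard any failed attempt */
		err = buf_reserve(buf, bytes_for(n));
		if (!err) {
			lock();
			err = encode_items(buf, n); /* -ENOSPC if the list grew */
			unlock();
		}
	} while (err == -ENOSPC);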
@@ -2613,7 +2634,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2613 struct ceph_mds_session *session, 2634 struct ceph_mds_session *session,
2614 struct ceph_msg *msg) 2635 struct ceph_msg *msg)
2615{ 2636{
2616 struct super_block *sb = mdsc->client->sb; 2637 struct super_block *sb = mdsc->fsc->sb;
2617 struct inode *inode; 2638 struct inode *inode;
2618 struct ceph_inode_info *ci; 2639 struct ceph_inode_info *ci;
2619 struct dentry *parent, *dentry; 2640 struct dentry *parent, *dentry;
@@ -2891,10 +2912,16 @@ static void delayed_work(struct work_struct *work)
2891 schedule_delayed(mdsc); 2912 schedule_delayed(mdsc);
2892} 2913}
2893 2914
2915int ceph_mdsc_init(struct ceph_fs_client *fsc)
2894 2916
2895int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2896{ 2917{
2897 mdsc->client = client; 2918 struct ceph_mds_client *mdsc;
2919
2920 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
2921 if (!mdsc)
2922 return -ENOMEM;
2923 mdsc->fsc = fsc;
2924 fsc->mdsc = mdsc;
2898 mutex_init(&mdsc->mutex); 2925 mutex_init(&mdsc->mutex);
2899 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); 2926 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2900 if (mdsc->mdsmap == NULL) 2927 if (mdsc->mdsmap == NULL)
@@ -2927,7 +2954,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2927 INIT_LIST_HEAD(&mdsc->dentry_lru); 2954 INIT_LIST_HEAD(&mdsc->dentry_lru);
2928 2955
2929 ceph_caps_init(mdsc); 2956 ceph_caps_init(mdsc);
2930 ceph_adjust_min_caps(mdsc, client->min_caps); 2957 ceph_adjust_min_caps(mdsc, fsc->min_caps);
2931 2958
2932 return 0; 2959 return 0;
2933} 2960}
@@ -2939,7 +2966,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2939static void wait_requests(struct ceph_mds_client *mdsc) 2966static void wait_requests(struct ceph_mds_client *mdsc)
2940{ 2967{
2941 struct ceph_mds_request *req; 2968 struct ceph_mds_request *req;
2942 struct ceph_client *client = mdsc->client; 2969 struct ceph_fs_client *fsc = mdsc->fsc;
2943 2970
2944 mutex_lock(&mdsc->mutex); 2971 mutex_lock(&mdsc->mutex);
2945 if (__get_oldest_req(mdsc)) { 2972 if (__get_oldest_req(mdsc)) {
@@ -2947,7 +2974,7 @@ static void wait_requests(struct ceph_mds_client *mdsc)
2947 2974
2948 dout("wait_requests waiting for requests\n"); 2975 dout("wait_requests waiting for requests\n");
2949 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 2976 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
2950 client->mount_args->mount_timeout * HZ); 2977 fsc->client->options->mount_timeout * HZ);
2951 2978
2952 /* tear down remaining requests */ 2979 /* tear down remaining requests */
2953 mutex_lock(&mdsc->mutex); 2980 mutex_lock(&mdsc->mutex);
@@ -3030,7 +3057,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3030{ 3057{
3031 u64 want_tid, want_flush; 3058 u64 want_tid, want_flush;
3032 3059
3033 if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) 3060 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3034 return; 3061 return;
3035 3062
3036 dout("sync\n"); 3063 dout("sync\n");
@@ -3053,7 +3080,7 @@ bool done_closing_sessions(struct ceph_mds_client *mdsc)
3053{ 3080{
3054 int i, n = 0; 3081 int i, n = 0;
3055 3082
3056 if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) 3083 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3057 return true; 3084 return true;
3058 3085
3059 mutex_lock(&mdsc->mutex); 3086 mutex_lock(&mdsc->mutex);
@@ -3071,8 +3098,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3071{ 3098{
3072 struct ceph_mds_session *session; 3099 struct ceph_mds_session *session;
3073 int i; 3100 int i;
3074 struct ceph_client *client = mdsc->client; 3101 struct ceph_fs_client *fsc = mdsc->fsc;
3075 unsigned long timeout = client->mount_args->mount_timeout * HZ; 3102 unsigned long timeout = fsc->client->options->mount_timeout * HZ;
3076 3103
3077 dout("close_sessions\n"); 3104 dout("close_sessions\n");
3078 3105
@@ -3119,7 +3146,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3119 dout("stopped\n"); 3146 dout("stopped\n");
3120} 3147}
3121 3148
3122void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3149static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3123{ 3150{
3124 dout("stop\n"); 3151 dout("stop\n");
3125 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3152 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
@@ -3129,6 +3156,15 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3129 ceph_caps_finalize(mdsc); 3156 ceph_caps_finalize(mdsc);
3130} 3157}
3131 3158
3159void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3160{
3161 struct ceph_mds_client *mdsc = fsc->mdsc;
3162
3163 ceph_mdsc_stop(mdsc);
3164 fsc->mdsc = NULL;
3165 kfree(mdsc);
3166}
3167
3132 3168
3133/* 3169/*
3134 * handle mds map update. 3170 * handle mds map update.
@@ -3145,14 +3181,14 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3145 3181
3146 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); 3182 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3147 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 3183 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3148 if (ceph_check_fsid(mdsc->client, &fsid) < 0) 3184 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3149 return; 3185 return;
3150 epoch = ceph_decode_32(&p); 3186 epoch = ceph_decode_32(&p);
3151 maplen = ceph_decode_32(&p); 3187 maplen = ceph_decode_32(&p);
3152 dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 3188 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3153 3189
3154 /* do we need it? */ 3190 /* do we need it? */
3155 ceph_monc_got_mdsmap(&mdsc->client->monc, epoch); 3191 ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
3156 mutex_lock(&mdsc->mutex); 3192 mutex_lock(&mdsc->mutex);
3157 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { 3193 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3158 dout("handle_map epoch %u <= our %u\n", 3194 dout("handle_map epoch %u <= our %u\n",
@@ -3176,7 +3212,7 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3176 } else { 3212 } else {
3177 mdsc->mdsmap = newmap; /* first mds map */ 3213 mdsc->mdsmap = newmap; /* first mds map */
3178 } 3214 }
3179 mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; 3215 mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3180 3216
3181 __wake_requests(mdsc, &mdsc->waiting_for_map); 3217 __wake_requests(mdsc, &mdsc->waiting_for_map);
3182 3218
@@ -3277,7 +3313,7 @@ static int get_authorizer(struct ceph_connection *con,
3277{ 3313{
3278 struct ceph_mds_session *s = con->private; 3314 struct ceph_mds_session *s = con->private;
3279 struct ceph_mds_client *mdsc = s->s_mdsc; 3315 struct ceph_mds_client *mdsc = s->s_mdsc;
3280 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3316 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3281 int ret = 0; 3317 int ret = 0;
3282 3318
3283 if (force_new && s->s_authorizer) { 3319 if (force_new && s->s_authorizer) {
@@ -3311,7 +3347,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
3311{ 3347{
3312 struct ceph_mds_session *s = con->private; 3348 struct ceph_mds_session *s = con->private;
3313 struct ceph_mds_client *mdsc = s->s_mdsc; 3349 struct ceph_mds_client *mdsc = s->s_mdsc;
3314 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3350 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3315 3351
3316 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); 3352 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
3317} 3353}
@@ -3320,12 +3356,12 @@ static int invalidate_authorizer(struct ceph_connection *con)
3320{ 3356{
3321 struct ceph_mds_session *s = con->private; 3357 struct ceph_mds_session *s = con->private;
3322 struct ceph_mds_client *mdsc = s->s_mdsc; 3358 struct ceph_mds_client *mdsc = s->s_mdsc;
3323 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3359 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3324 3360
3325 if (ac->ops->invalidate_authorizer) 3361 if (ac->ops->invalidate_authorizer)
3326 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 3362 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3327 3363
3328 return ceph_monc_validate_auth(&mdsc->client->monc); 3364 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3329} 3365}
3330 3366
3331static const struct ceph_connection_operations mds_con_ops = { 3367static const struct ceph_connection_operations mds_con_ops = {
@@ -3338,7 +3374,4 @@ static const struct ceph_connection_operations mds_con_ops = {
3338 .peer_reset = peer_reset, 3374 .peer_reset = peer_reset,
3339}; 3375};
3340 3376
3341
3342
3343
3344/* eof */ 3377/* eof */
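
With this patch the mds client is no longer embedded in the client struct by value: ceph_mdsc_init() now takes the new ceph_fs_client, kzallocs the ceph_mds_client itself, and cross-links the two, while teardown splits into a now-static ceph_mdsc_stop() plus an exported ceph_mdsc_destroy() that also frees the allocation. A hedged sketch of the resulting caller-side lifecycle (the real call sites live in fs/ceph/super.c, outside this hunk):

	int err = ceph_mdsc_init(fsc);  /* kzallocs fsc->mdsc and wires it to fsc */
	if (err)
		return err;             /* -ENOMEM on allocation failure */

	/* ... mount and operate via fsc->mdsc ... */

	ceph_mdsc_destroy(fsc);         /* ceph_mdsc_stop() + kfree(), clears fsc->mdsc */
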
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c98267ce6d2a..d66d63c72355 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -8,9 +8,9 @@
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10 10
11#include "types.h" 11#include <linux/ceph/types.h>
12#include "messenger.h" 12#include <linux/ceph/messenger.h>
13#include "mdsmap.h" 13#include <linux/ceph/mdsmap.h>
14 14
15/* 15/*
16 * Some lock dependencies: 16 * Some lock dependencies:
@@ -26,7 +26,7 @@
26 * 26 *
27 */ 27 */
28 28
29struct ceph_client; 29struct ceph_fs_client;
30struct ceph_cap; 30struct ceph_cap;
31 31
32/* 32/*
@@ -230,7 +230,7 @@ struct ceph_mds_request {
230 * mds client state 230 * mds client state
231 */ 231 */
232struct ceph_mds_client { 232struct ceph_mds_client {
233 struct ceph_client *client; 233 struct ceph_fs_client *fsc;
234 struct mutex mutex; /* all nested structures */ 234 struct mutex mutex; /* all nested structures */
235 235
236 struct ceph_mdsmap *mdsmap; 236 struct ceph_mdsmap *mdsmap;
@@ -289,11 +289,6 @@ struct ceph_mds_client {
289 int caps_avail_count; /* unused, unreserved */ 289 int caps_avail_count; /* unused, unreserved */
290 int caps_min_count; /* keep at least this many 290 int caps_min_count; /* keep at least this many
291 (unreserved) */ 291 (unreserved) */
292
293#ifdef CONFIG_DEBUG_FS
294 struct dentry *debugfs_file;
295#endif
296
297 spinlock_t dentry_lru_lock; 292 spinlock_t dentry_lru_lock;
298 struct list_head dentry_lru; 293 struct list_head dentry_lru;
299 int num_dentry; 294 int num_dentry;
@@ -316,10 +311,9 @@ extern void ceph_put_mds_session(struct ceph_mds_session *s);
316extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, 311extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
317 struct ceph_msg *msg, int mds); 312 struct ceph_msg *msg, int mds);
318 313
319extern int ceph_mdsc_init(struct ceph_mds_client *mdsc, 314extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
320 struct ceph_client *client);
321extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); 315extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
322extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc); 316extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
323 317
324extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); 318extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
325 319
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 040be6d1150b..73b7d44e8a35 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/bug.h> 3#include <linux/bug.h>
4#include <linux/err.h> 4#include <linux/err.h>
@@ -6,9 +6,9 @@
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/types.h> 7#include <linux/types.h>
8 8
9#include "mdsmap.h" 9#include <linux/ceph/mdsmap.h>
10#include "messenger.h" 10#include <linux/ceph/messenger.h>
11#include "decode.h" 11#include <linux/ceph/decode.h>
12 12
13#include "super.h" 13#include "super.h"
14 14
@@ -117,7 +117,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
117 } 117 }
118 118
119 dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n", 119 dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
120 i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr), 120 i+1, n, global_id, mds, inc,
121 ceph_pr_addr(&addr.in_addr),
121 ceph_mds_state_name(state)); 122 ceph_mds_state_name(state));
122 if (mds >= 0 && mds < m->m_max_mds && state > 0) { 123 if (mds >= 0 && mds < m->m_max_mds && state > 0) {
123 m->m_info[mds].global_id = global_id; 124 m->m_info[mds].global_id = global_id;
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
deleted file mode 100644
index 46a368b6dce5..000000000000
--- a/fs/ceph/pagelist.c
+++ /dev/null
@@ -1,63 +0,0 @@
1
2#include <linux/gfp.h>
3#include <linux/pagemap.h>
4#include <linux/highmem.h>
5
6#include "pagelist.h"
7
8static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
9{
10 struct page *page = list_entry(pl->head.prev, struct page,
11 lru);
12 kunmap(page);
13}
14
15int ceph_pagelist_release(struct ceph_pagelist *pl)
16{
17 if (pl->mapped_tail)
18 ceph_pagelist_unmap_tail(pl);
19
20 while (!list_empty(&pl->head)) {
21 struct page *page = list_first_entry(&pl->head, struct page,
22 lru);
23 list_del(&page->lru);
24 __free_page(page);
25 }
26 return 0;
27}
28
29static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
30{
31 struct page *page = __page_cache_alloc(GFP_NOFS);
32 if (!page)
33 return -ENOMEM;
34 pl->room += PAGE_SIZE;
35 list_add_tail(&page->lru, &pl->head);
36 if (pl->mapped_tail)
37 ceph_pagelist_unmap_tail(pl);
38 pl->mapped_tail = kmap(page);
39 return 0;
40}
41
42int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len)
43{
44 while (pl->room < len) {
45 size_t bit = pl->room;
46 int ret;
47
48 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
49 buf, bit);
50 pl->length += bit;
51 pl->room -= bit;
52 buf += bit;
53 len -= bit;
54 ret = ceph_pagelist_addpage(pl);
55 if (ret)
56 return ret;
57 }
58
59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
60 pl->length += len;
61 pl->room -= len;
62 return 0;
63}
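
fs/ceph/pagelist.c is deleted here because, as with the header moves above, the pagelist code relocates into the shared libceph library; the relocated version is also where the reserve/truncate/cursor helpers used by the reconnect hunk earlier in this patch live. The structure itself is simple: a list of pages with the tail page kmapped, filled by memcpy in page-sized chunks. A minimal usage sketch, assuming ceph_pagelist_init() remains available from the relocated header:

	#include <linux/ceph/pagelist.h>  /* new location, inferred from the include moves above */

	struct ceph_pagelist pl;
	__le32 val = cpu_to_le32(42);
	int err;

	ceph_pagelist_init(&pl);          /* empty list, no tail page mapped yet */
	err = ceph_pagelist_append(&pl, &val, sizeof(val));  /* allocates/kmaps pages as needed */
	/* on success, hand pl off to a ceph_msg, etc. */
	ceph_pagelist_release(&pl);       /* kunmaps the tail and frees every page */
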
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 190b6c4a6f2b..39c243acd062 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,10 +1,12 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/sort.h> 3#include <linux/sort.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5 5
6#include "super.h" 6#include "super.h"
7#include "decode.h" 7#include "mds_client.h"
8
9#include <linux/ceph/decode.h>
8 10
9/* 11/*
10 * Snapshots in ceph are driven in large part by cooperation from the 12 * Snapshots in ceph are driven in large part by cooperation from the
@@ -526,7 +528,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
526 struct ceph_cap_snap *capsnap) 528 struct ceph_cap_snap *capsnap)
527{ 529{
528 struct inode *inode = &ci->vfs_inode; 530 struct inode *inode = &ci->vfs_inode;
529 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 531 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
530 532
531 BUG_ON(capsnap->writing); 533 BUG_ON(capsnap->writing);
532 capsnap->size = inode->i_size; 534 capsnap->size = inode->i_size;
@@ -747,7 +749,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
747 struct ceph_mds_session *session, 749 struct ceph_mds_session *session,
748 struct ceph_msg *msg) 750 struct ceph_msg *msg)
749{ 751{
750 struct super_block *sb = mdsc->client->sb; 752 struct super_block *sb = mdsc->fsc->sb;
751 int mds = session->s_mds; 753 int mds = session->s_mds;
752 u64 split; 754 u64 split;
753 int op; 755 int op;
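
The dropped '&' in the mdsc expressions above is the visible consequence of a layout change: the mds client used to be an embedded member of the old client struct and is now a separately allocated member of struct ceph_fs_client (see ceph_mdsc_init() earlier in this patch). Schematically, with unrelated members elided:

	struct ceph_client {              /* before: embedded by value */
		/* ... */
		struct ceph_mds_client mdsc;
	};

	struct ceph_fs_client {           /* after: allocated in ceph_mdsc_init() */
		/* ... */
		struct ceph_mds_client *mdsc;
	};
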
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/strings.c
index c6179d3a26a2..cd5097d7c804 100644
--- a/fs/ceph/ceph_strings.c
+++ b/fs/ceph/strings.c
@@ -1,71 +1,9 @@
1/* 1/*
2 * Ceph string constants 2 * Ceph fs string constants
3 */ 3 */
4#include "types.h" 4#include <linux/module.h>
5#include <linux/ceph/types.h>
5 6
6const char *ceph_entity_type_name(int type)
7{
8 switch (type) {
9 case CEPH_ENTITY_TYPE_MDS: return "mds";
10 case CEPH_ENTITY_TYPE_OSD: return "osd";
11 case CEPH_ENTITY_TYPE_MON: return "mon";
12 case CEPH_ENTITY_TYPE_CLIENT: return "client";
13 case CEPH_ENTITY_TYPE_AUTH: return "auth";
14 default: return "unknown";
15 }
16}
17
18const char *ceph_osd_op_name(int op)
19{
20 switch (op) {
21 case CEPH_OSD_OP_READ: return "read";
22 case CEPH_OSD_OP_STAT: return "stat";
23
24 case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
25
26 case CEPH_OSD_OP_WRITE: return "write";
27 case CEPH_OSD_OP_DELETE: return "delete";
28 case CEPH_OSD_OP_TRUNCATE: return "truncate";
29 case CEPH_OSD_OP_ZERO: return "zero";
30 case CEPH_OSD_OP_WRITEFULL: return "writefull";
31 case CEPH_OSD_OP_ROLLBACK: return "rollback";
32
33 case CEPH_OSD_OP_APPEND: return "append";
34 case CEPH_OSD_OP_STARTSYNC: return "startsync";
35 case CEPH_OSD_OP_SETTRUNC: return "settrunc";
36 case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
37
38 case CEPH_OSD_OP_TMAPUP: return "tmapup";
39 case CEPH_OSD_OP_TMAPGET: return "tmapget";
40 case CEPH_OSD_OP_TMAPPUT: return "tmapput";
41
42 case CEPH_OSD_OP_GETXATTR: return "getxattr";
43 case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
44 case CEPH_OSD_OP_SETXATTR: return "setxattr";
45 case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
46 case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
47 case CEPH_OSD_OP_RMXATTR: return "rmxattr";
48 case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
49
50 case CEPH_OSD_OP_PULL: return "pull";
51 case CEPH_OSD_OP_PUSH: return "push";
52 case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
53 case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
54 case CEPH_OSD_OP_SCRUB: return "scrub";
55
56 case CEPH_OSD_OP_WRLOCK: return "wrlock";
57 case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
58 case CEPH_OSD_OP_RDLOCK: return "rdlock";
59 case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
60 case CEPH_OSD_OP_UPLOCK: return "uplock";
61 case CEPH_OSD_OP_DNLOCK: return "dnlock";
62
63 case CEPH_OSD_OP_CALL: return "call";
64
65 case CEPH_OSD_OP_PGLS: return "pgls";
66 }
67 return "???";
68}
69 7
70const char *ceph_mds_state_name(int s) 8const char *ceph_mds_state_name(int s)
71{ 9{
@@ -177,17 +115,3 @@ const char *ceph_snap_op_name(int o)
177 } 115 }
178 return "???"; 116 return "???";
179} 117}
180
181const char *ceph_pool_op_name(int op)
182{
183 switch (op) {
184 case POOL_OP_CREATE: return "create";
185 case POOL_OP_DELETE: return "delete";
186 case POOL_OP_AUID_CHANGE: return "auid change";
187 case POOL_OP_CREATE_SNAP: return "create snap";
188 case POOL_OP_DELETE_SNAP: return "delete snap";
189 case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
190 case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
191 }
192 return "???";
193}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9922628532b2..d6e0e0421891 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1,5 +1,5 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/backing-dev.h> 4#include <linux/backing-dev.h>
5#include <linux/ctype.h> 5#include <linux/ctype.h>
@@ -15,10 +15,13 @@
15#include <linux/statfs.h> 15#include <linux/statfs.h>
16#include <linux/string.h> 16#include <linux/string.h>
17 17
18#include "decode.h"
19#include "super.h" 18#include "super.h"
20#include "mon_client.h" 19#include "mds_client.h"
21#include "auth.h" 20
21#include <linux/ceph/decode.h>
22#include <linux/ceph/mon_client.h>
23#include <linux/ceph/auth.h>
24#include <linux/ceph/debugfs.h>
22 25
23/* 26/*
24 * Ceph superblock operations 27 * Ceph superblock operations
@@ -26,36 +29,22 @@
26 * Handle the basics of mounting, unmounting. 29 * Handle the basics of mounting, unmounting.
27 */ 30 */
28 31
29
30/*
31 * find filename portion of a path (/foo/bar/baz -> baz)
32 */
33const char *ceph_file_part(const char *s, int len)
34{
35 const char *e = s + len;
36
37 while (e != s && *(e-1) != '/')
38 e--;
39 return e;
40}
41
42
43/* 32/*
44 * super ops 33 * super ops
45 */ 34 */
46static void ceph_put_super(struct super_block *s) 35static void ceph_put_super(struct super_block *s)
47{ 36{
48 struct ceph_client *client = ceph_sb_to_client(s); 37 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
49 38
50 dout("put_super\n"); 39 dout("put_super\n");
51 ceph_mdsc_close_sessions(&client->mdsc); 40 ceph_mdsc_close_sessions(fsc->mdsc);
52 41
53 /* 42 /*
54 * ensure we release the bdi before put_anon_super releases 43 * ensure we release the bdi before put_anon_super releases
55 * the device name. 44 * the device name.
56 */ 45 */
57 if (s->s_bdi == &client->backing_dev_info) { 46 if (s->s_bdi == &fsc->backing_dev_info) {
58 bdi_unregister(&client->backing_dev_info); 47 bdi_unregister(&fsc->backing_dev_info);
59 s->s_bdi = NULL; 48 s->s_bdi = NULL;
60 } 49 }
61 50
@@ -64,14 +53,14 @@ static void ceph_put_super(struct super_block *s)
64 53
65static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) 54static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
66{ 55{
67 struct ceph_client *client = ceph_inode_to_client(dentry->d_inode); 56 struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
68 struct ceph_monmap *monmap = client->monc.monmap; 57 struct ceph_monmap *monmap = fsc->client->monc.monmap;
69 struct ceph_statfs st; 58 struct ceph_statfs st;
70 u64 fsid; 59 u64 fsid;
71 int err; 60 int err;
72 61
73 dout("statfs\n"); 62 dout("statfs\n");
74 err = ceph_monc_do_statfs(&client->monc, &st); 63 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
75 if (err < 0) 64 if (err < 0)
76 return err; 65 return err;
77 66
@@ -104,238 +93,28 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
104 93
105static int ceph_sync_fs(struct super_block *sb, int wait) 94static int ceph_sync_fs(struct super_block *sb, int wait)
106{ 95{
107 struct ceph_client *client = ceph_sb_to_client(sb); 96 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
108 97
109 if (!wait) { 98 if (!wait) {
110 dout("sync_fs (non-blocking)\n"); 99 dout("sync_fs (non-blocking)\n");
111 ceph_flush_dirty_caps(&client->mdsc); 100 ceph_flush_dirty_caps(fsc->mdsc);
112 dout("sync_fs (non-blocking) done\n"); 101 dout("sync_fs (non-blocking) done\n");
113 return 0; 102 return 0;
114 } 103 }
115 104
116 dout("sync_fs (blocking)\n"); 105 dout("sync_fs (blocking)\n");
117 ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc); 106 ceph_osdc_sync(&fsc->client->osdc);
118 ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc); 107 ceph_mdsc_sync(fsc->mdsc);
119 dout("sync_fs (blocking) done\n"); 108 dout("sync_fs (blocking) done\n");
120 return 0; 109 return 0;
121} 110}
122 111
123static int default_congestion_kb(void)
124{
125 int congestion_kb;
126
127 /*
128 * Copied from NFS
129 *
130 * congestion size, scale with available memory.
131 *
132 * 64MB: 8192k
133 * 128MB: 11585k
134 * 256MB: 16384k
135 * 512MB: 23170k
136 * 1GB: 32768k
137 * 2GB: 46340k
138 * 4GB: 65536k
139 * 8GB: 92681k
140 * 16GB: 131072k
141 *
142 * This allows larger machines to have larger/more transfers.
143 * Limit the default to 256M
144 */
145 congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
146 if (congestion_kb > 256*1024)
147 congestion_kb = 256*1024;
148
149 return congestion_kb;
150}
151
152/**
153 * ceph_show_options - Show mount options in /proc/mounts
154 * @m: seq_file to write to
155 * @mnt: mount descriptor
156 */
157static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
158{
159 struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
160 struct ceph_mount_args *args = client->mount_args;
161
162 if (args->flags & CEPH_OPT_FSID)
163 seq_printf(m, ",fsid=%pU", &args->fsid);
164 if (args->flags & CEPH_OPT_NOSHARE)
165 seq_puts(m, ",noshare");
166 if (args->flags & CEPH_OPT_DIRSTAT)
167 seq_puts(m, ",dirstat");
168 if ((args->flags & CEPH_OPT_RBYTES) == 0)
169 seq_puts(m, ",norbytes");
170 if (args->flags & CEPH_OPT_NOCRC)
171 seq_puts(m, ",nocrc");
172 if (args->flags & CEPH_OPT_NOASYNCREADDIR)
173 seq_puts(m, ",noasyncreaddir");
174
175 if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
176 seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
177 if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
178 seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
179 if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
180 seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
181 if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
182 seq_printf(m, ",osdkeepalivetimeout=%d",
183 args->osd_keepalive_timeout);
184 if (args->wsize)
185 seq_printf(m, ",wsize=%d", args->wsize);
186 if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
187 seq_printf(m, ",rsize=%d", args->rsize);
188 if (args->congestion_kb != default_congestion_kb())
189 seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
190 if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
191 seq_printf(m, ",caps_wanted_delay_min=%d",
192 args->caps_wanted_delay_min);
193 if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
194 seq_printf(m, ",caps_wanted_delay_max=%d",
195 args->caps_wanted_delay_max);
196 if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
197 seq_printf(m, ",cap_release_safety=%d",
198 args->cap_release_safety);
199 if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
200 seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
201 if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
202 seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes);
203 if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
204 seq_printf(m, ",snapdirname=%s", args->snapdir_name);
205 if (args->name)
206 seq_printf(m, ",name=%s", args->name);
207 if (args->secret)
208 seq_puts(m, ",secret=<hidden>");
209 return 0;
210}
211
212/*
213 * caches
214 */
215struct kmem_cache *ceph_inode_cachep;
216struct kmem_cache *ceph_cap_cachep;
217struct kmem_cache *ceph_dentry_cachep;
218struct kmem_cache *ceph_file_cachep;
219
220static void ceph_inode_init_once(void *foo)
221{
222 struct ceph_inode_info *ci = foo;
223 inode_init_once(&ci->vfs_inode);
224}
225
226static int __init init_caches(void)
227{
228 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
229 sizeof(struct ceph_inode_info),
230 __alignof__(struct ceph_inode_info),
231 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
232 ceph_inode_init_once);
233 if (ceph_inode_cachep == NULL)
234 return -ENOMEM;
235
236 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
237 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
238 if (ceph_cap_cachep == NULL)
239 goto bad_cap;
240
241 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
242 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
243 if (ceph_dentry_cachep == NULL)
244 goto bad_dentry;
245
246 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
247 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
248 if (ceph_file_cachep == NULL)
249 goto bad_file;
250
251 return 0;
252
253bad_file:
254 kmem_cache_destroy(ceph_dentry_cachep);
255bad_dentry:
256 kmem_cache_destroy(ceph_cap_cachep);
257bad_cap:
258 kmem_cache_destroy(ceph_inode_cachep);
259 return -ENOMEM;
260}
261
262static void destroy_caches(void)
263{
264 kmem_cache_destroy(ceph_inode_cachep);
265 kmem_cache_destroy(ceph_cap_cachep);
266 kmem_cache_destroy(ceph_dentry_cachep);
267 kmem_cache_destroy(ceph_file_cachep);
268}
269
270
271/*
272 * ceph_umount_begin - initiate forced umount. Tear down down the
273 * mount, skipping steps that may hang while waiting for server(s).
274 */
275static void ceph_umount_begin(struct super_block *sb)
276{
277 struct ceph_client *client = ceph_sb_to_client(sb);
278
279 dout("ceph_umount_begin - starting forced umount\n");
280 if (!client)
281 return;
282 client->mount_state = CEPH_MOUNT_SHUTDOWN;
283 return;
284}
285
286static const struct super_operations ceph_super_ops = {
287 .alloc_inode = ceph_alloc_inode,
288 .destroy_inode = ceph_destroy_inode,
289 .write_inode = ceph_write_inode,
290 .sync_fs = ceph_sync_fs,
291 .put_super = ceph_put_super,
292 .show_options = ceph_show_options,
293 .statfs = ceph_statfs,
294 .umount_begin = ceph_umount_begin,
295};
296
297
298const char *ceph_msg_type_name(int type)
299{
300 switch (type) {
301 case CEPH_MSG_SHUTDOWN: return "shutdown";
302 case CEPH_MSG_PING: return "ping";
303 case CEPH_MSG_AUTH: return "auth";
304 case CEPH_MSG_AUTH_REPLY: return "auth_reply";
305 case CEPH_MSG_MON_MAP: return "mon_map";
306 case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
307 case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
308 case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
309 case CEPH_MSG_STATFS: return "statfs";
310 case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
311 case CEPH_MSG_MDS_MAP: return "mds_map";
312 case CEPH_MSG_CLIENT_SESSION: return "client_session";
313 case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
314 case CEPH_MSG_CLIENT_REQUEST: return "client_request";
315 case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
316 case CEPH_MSG_CLIENT_REPLY: return "client_reply";
317 case CEPH_MSG_CLIENT_CAPS: return "client_caps";
318 case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
319 case CEPH_MSG_CLIENT_SNAP: return "client_snap";
320 case CEPH_MSG_CLIENT_LEASE: return "client_lease";
321 case CEPH_MSG_OSD_MAP: return "osd_map";
322 case CEPH_MSG_OSD_OP: return "osd_op";
323 case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
324 default: return "unknown";
325 }
326}
327
328
329/* 112/*
330 * mount options 113 * mount options
331 */ 114 */
332enum { 115enum {
333 Opt_wsize, 116 Opt_wsize,
334 Opt_rsize, 117 Opt_rsize,
335 Opt_osdtimeout,
336 Opt_osdkeepalivetimeout,
337 Opt_mount_timeout,
338 Opt_osd_idle_ttl,
339 Opt_caps_wanted_delay_min, 118 Opt_caps_wanted_delay_min,
340 Opt_caps_wanted_delay_max, 119 Opt_caps_wanted_delay_max,
341 Opt_cap_release_safety, 120 Opt_cap_release_safety,
@@ -344,29 +123,19 @@ enum {
344 Opt_congestion_kb, 123 Opt_congestion_kb,
345 Opt_last_int, 124 Opt_last_int,
346 /* int args above */ 125 /* int args above */
347 Opt_fsid,
348 Opt_snapdirname, 126 Opt_snapdirname,
349 Opt_name,
350 Opt_secret,
351 Opt_last_string, 127 Opt_last_string,
352 /* string args above */ 128 /* string args above */
353 Opt_ip,
354 Opt_noshare,
355 Opt_dirstat, 129 Opt_dirstat,
356 Opt_nodirstat, 130 Opt_nodirstat,
357 Opt_rbytes, 131 Opt_rbytes,
358 Opt_norbytes, 132 Opt_norbytes,
359 Opt_nocrc,
360 Opt_noasyncreaddir, 133 Opt_noasyncreaddir,
361}; 134};
362 135
363static match_table_t arg_tokens = { 136static match_table_t fsopt_tokens = {
364 {Opt_wsize, "wsize=%d"}, 137 {Opt_wsize, "wsize=%d"},
365 {Opt_rsize, "rsize=%d"}, 138 {Opt_rsize, "rsize=%d"},
366 {Opt_osdtimeout, "osdtimeout=%d"},
367 {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
368 {Opt_mount_timeout, "mount_timeout=%d"},
369 {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
370 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, 139 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
371 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, 140 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
372 {Opt_cap_release_safety, "cap_release_safety=%d"}, 141 {Opt_cap_release_safety, "cap_release_safety=%d"},
@@ -374,403 +143,459 @@ static match_table_t arg_tokens = {
374 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"}, 143 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
375 {Opt_congestion_kb, "write_congestion_kb=%d"}, 144 {Opt_congestion_kb, "write_congestion_kb=%d"},
376 /* int args above */ 145 /* int args above */
377 {Opt_fsid, "fsid=%s"},
378 {Opt_snapdirname, "snapdirname=%s"}, 146 {Opt_snapdirname, "snapdirname=%s"},
379 {Opt_name, "name=%s"},
380 {Opt_secret, "secret=%s"},
381 /* string args above */ 147 /* string args above */
382 {Opt_ip, "ip=%s"},
383 {Opt_noshare, "noshare"},
384 {Opt_dirstat, "dirstat"}, 148 {Opt_dirstat, "dirstat"},
385 {Opt_nodirstat, "nodirstat"}, 149 {Opt_nodirstat, "nodirstat"},
386 {Opt_rbytes, "rbytes"}, 150 {Opt_rbytes, "rbytes"},
387 {Opt_norbytes, "norbytes"}, 151 {Opt_norbytes, "norbytes"},
388 {Opt_nocrc, "nocrc"},
389 {Opt_noasyncreaddir, "noasyncreaddir"}, 152 {Opt_noasyncreaddir, "noasyncreaddir"},
390 {-1, NULL} 153 {-1, NULL}
391}; 154};
392 155
393static int parse_fsid(const char *str, struct ceph_fsid *fsid) 156static int parse_fsopt_token(char *c, void *private)
394{ 157{
395 int i = 0; 158 struct ceph_mount_options *fsopt = private;
396 char tmp[3]; 159 substring_t argstr[MAX_OPT_ARGS];
397 int err = -EINVAL; 160 int token, intval, ret;
398 int d; 161
399 162 token = match_token((char *)c, fsopt_tokens, argstr);
400 dout("parse_fsid '%s'\n", str); 163 if (token < 0)
401 tmp[2] = 0; 164 return -EINVAL;
402 while (*str && i < 16) { 165
403 if (ispunct(*str)) { 166 if (token < Opt_last_int) {
404 str++; 167 ret = match_int(&argstr[0], &intval);
405 continue; 168 if (ret < 0) {
169 pr_err("bad mount option arg (not int) "
170 "at '%s'\n", c);
171 return ret;
406 } 172 }
407 if (!isxdigit(str[0]) || !isxdigit(str[1])) 173 dout("got int token %d val %d\n", token, intval);
408 break; 174 } else if (token > Opt_last_int && token < Opt_last_string) {
409 tmp[0] = str[0]; 175 dout("got string token %d val %s\n", token,
410 tmp[1] = str[1]; 176 argstr[0].from);
411 if (sscanf(tmp, "%x", &d) < 1) 177 } else {
412 break; 178 dout("got token %d\n", token);
413 fsid->fsid[i] = d & 0xff;
414 i++;
415 str += 2;
416 } 179 }
417 180
418 if (i == 16) 181 switch (token) {
419 err = 0; 182 case Opt_snapdirname:
420 dout("parse_fsid ret %d got fsid %pU", err, fsid); 183 kfree(fsopt->snapdir_name);
421 return err; 184 fsopt->snapdir_name = kstrndup(argstr[0].from,
185 argstr[0].to-argstr[0].from,
186 GFP_KERNEL);
187 if (!fsopt->snapdir_name)
188 return -ENOMEM;
189 break;
190
191 /* misc */
192 case Opt_wsize:
193 fsopt->wsize = intval;
194 break;
195 case Opt_rsize:
196 fsopt->rsize = intval;
197 break;
198 case Opt_caps_wanted_delay_min:
199 fsopt->caps_wanted_delay_min = intval;
200 break;
201 case Opt_caps_wanted_delay_max:
202 fsopt->caps_wanted_delay_max = intval;
203 break;
204 case Opt_readdir_max_entries:
205 fsopt->max_readdir = intval;
206 break;
207 case Opt_readdir_max_bytes:
208 fsopt->max_readdir_bytes = intval;
209 break;
210 case Opt_congestion_kb:
211 fsopt->congestion_kb = intval;
212 break;
213 case Opt_dirstat:
214 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
215 break;
216 case Opt_nodirstat:
217 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
218 break;
219 case Opt_rbytes:
220 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
221 break;
222 case Opt_norbytes:
223 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
224 break;
225 case Opt_noasyncreaddir:
226 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
227 break;
228 default:
229 BUG_ON(token);
230 }
231 return 0;
422} 232}
423 233
424static struct ceph_mount_args *parse_mount_args(int flags, char *options, 234static void destroy_mount_options(struct ceph_mount_options *args)
425 const char *dev_name,
426 const char **path)
427{ 235{
428 struct ceph_mount_args *args; 236 dout("destroy_mount_options %p\n", args);
429 const char *c; 237 kfree(args->snapdir_name);
430 int err = -ENOMEM; 238 kfree(args);
431 substring_t argstr[MAX_OPT_ARGS]; 239}
432 240
433 args = kzalloc(sizeof(*args), GFP_KERNEL); 241static int strcmp_null(const char *s1, const char *s2)
434 if (!args) 242{
435 return ERR_PTR(-ENOMEM); 243 if (!s1 && !s2)
436 args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr), 244 return 0;
437 GFP_KERNEL); 245 if (s1 && !s2)
438 if (!args->mon_addr) 246 return -1;
439 goto out; 247 if (!s1 && s2)
248 return 1;
249 return strcmp(s1, s2);
250}
440 251
441 dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name); 252static int compare_mount_options(struct ceph_mount_options *new_fsopt,
442 253 struct ceph_options *new_opt,
443 /* start with defaults */ 254 struct ceph_fs_client *fsc)
444 args->sb_flags = flags; 255{
445 args->flags = CEPH_OPT_DEFAULT; 256 struct ceph_mount_options *fsopt1 = new_fsopt;
446 args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; 257 struct ceph_mount_options *fsopt2 = fsc->mount_options;
447 args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 258 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
448 args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ 259 int ret;
449 args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
450 args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
451 args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
452 args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
453 args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
454 args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
455 args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
456 args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
457 args->congestion_kb = default_congestion_kb();
458
459 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
460 err = -EINVAL;
461 if (!dev_name)
462 goto out;
463 *path = strstr(dev_name, ":/");
464 if (*path == NULL) {
465 pr_err("device name is missing path (no :/ in %s)\n",
466 dev_name);
467 goto out;
468 }
469 260
470 /* get mon ip(s) */ 261 ret = memcmp(fsopt1, fsopt2, ofs);
471 err = ceph_parse_ips(dev_name, *path, args->mon_addr, 262 if (ret)
472 CEPH_MAX_MON, &args->num_mon); 263 return ret;
473 if (err < 0) 264
474 goto out; 265 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
266 if (ret)
267 return ret;
268
269 return ceph_compare_options(new_opt, fsc->client);
270}
271
272static int parse_mount_options(struct ceph_mount_options **pfsopt,
273 struct ceph_options **popt,
274 int flags, char *options,
275 const char *dev_name,
276 const char **path)
277{
278 struct ceph_mount_options *fsopt;
279 const char *dev_name_end;
280 int err = -ENOMEM;
281
282 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
283 if (!fsopt)
284 return -ENOMEM;
285
286 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
287
288 fsopt->sb_flags = flags;
289 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
290
291 fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
292 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
293 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
294 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
295 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
296 fsopt->congestion_kb = default_congestion_kb();
297
298 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
299 err = -EINVAL;
300 if (!dev_name)
301 goto out;
302 *path = strstr(dev_name, ":/");
303 if (*path == NULL) {
304 pr_err("device name is missing path (no :/ in %s)\n",
305 dev_name);
306 goto out;
307 }
308 dev_name_end = *path;
309 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
475 310
476 /* path on server */ 311 /* path on server */
477 *path += 2; 312 *path += 2;
478 dout("server path '%s'\n", *path); 313 dout("server path '%s'\n", *path);
479 314
480 /* parse mount options */ 315 err = ceph_parse_options(popt, options, dev_name, dev_name_end,
481 while ((c = strsep(&options, ",")) != NULL) { 316 parse_fsopt_token, (void *)fsopt);
482 int token, intval, ret; 317 if (err)
483 if (!*c) 318 goto out;
484 continue; 319
485 err = -EINVAL; 320 /* success */
486 token = match_token((char *)c, arg_tokens, argstr); 321 *pfsopt = fsopt;
487 if (token < 0) { 322 return 0;
488 pr_err("bad mount option at '%s'\n", c);
489 goto out;
490 }
491 if (token < Opt_last_int) {
492 ret = match_int(&argstr[0], &intval);
493 if (ret < 0) {
494 pr_err("bad mount option arg (not int) "
495 "at '%s'\n", c);
496 continue;
497 }
498 dout("got int token %d val %d\n", token, intval);
499 } else if (token > Opt_last_int && token < Opt_last_string) {
500 dout("got string token %d val %s\n", token,
501 argstr[0].from);
502 } else {
503 dout("got token %d\n", token);
504 }
505 switch (token) {
506 case Opt_ip:
507 err = ceph_parse_ips(argstr[0].from,
508 argstr[0].to,
509 &args->my_addr,
510 1, NULL);
511 if (err < 0)
512 goto out;
513 args->flags |= CEPH_OPT_MYIP;
514 break;
515
516 case Opt_fsid:
517 err = parse_fsid(argstr[0].from, &args->fsid);
518 if (err == 0)
519 args->flags |= CEPH_OPT_FSID;
520 break;
521 case Opt_snapdirname:
522 kfree(args->snapdir_name);
523 args->snapdir_name = kstrndup(argstr[0].from,
524 argstr[0].to-argstr[0].from,
525 GFP_KERNEL);
526 break;
527 case Opt_name:
528 args->name = kstrndup(argstr[0].from,
529 argstr[0].to-argstr[0].from,
530 GFP_KERNEL);
531 break;
532 case Opt_secret:
533 args->secret = kstrndup(argstr[0].from,
534 argstr[0].to-argstr[0].from,
535 GFP_KERNEL);
536 break;
537
538 /* misc */
539 case Opt_wsize:
540 args->wsize = intval;
541 break;
542 case Opt_rsize:
543 args->rsize = intval;
544 break;
545 case Opt_osdtimeout:
546 args->osd_timeout = intval;
547 break;
548 case Opt_osdkeepalivetimeout:
549 args->osd_keepalive_timeout = intval;
550 break;
551 case Opt_osd_idle_ttl:
552 args->osd_idle_ttl = intval;
553 break;
554 case Opt_mount_timeout:
555 args->mount_timeout = intval;
556 break;
557 case Opt_caps_wanted_delay_min:
558 args->caps_wanted_delay_min = intval;
559 break;
560 case Opt_caps_wanted_delay_max:
561 args->caps_wanted_delay_max = intval;
562 break;
563 case Opt_readdir_max_entries:
564 args->max_readdir = intval;
565 break;
566 case Opt_readdir_max_bytes:
567 args->max_readdir_bytes = intval;
568 break;
569 case Opt_congestion_kb:
570 args->congestion_kb = intval;
571 break;
572
573 case Opt_noshare:
574 args->flags |= CEPH_OPT_NOSHARE;
575 break;
576
577 case Opt_dirstat:
578 args->flags |= CEPH_OPT_DIRSTAT;
579 break;
580 case Opt_nodirstat:
581 args->flags &= ~CEPH_OPT_DIRSTAT;
582 break;
583 case Opt_rbytes:
584 args->flags |= CEPH_OPT_RBYTES;
585 break;
586 case Opt_norbytes:
587 args->flags &= ~CEPH_OPT_RBYTES;
588 break;
589 case Opt_nocrc:
590 args->flags |= CEPH_OPT_NOCRC;
591 break;
592 case Opt_noasyncreaddir:
593 args->flags |= CEPH_OPT_NOASYNCREADDIR;
594 break;
595
596 default:
597 BUG_ON(token);
598 }
599 }
600 return args;
601 323
602out: 324out:
603 kfree(args->mon_addr); 325 destroy_mount_options(fsopt);
604 kfree(args); 326 return err;
605 return ERR_PTR(err);
606} 327}
607 328
608static void destroy_mount_args(struct ceph_mount_args *args) 329/**
330 * ceph_show_options - Show mount options in /proc/mounts
331 * @m: seq_file to write to
332 * @mnt: mount descriptor
333 */
334static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
609{ 335{
610 dout("destroy_mount_args %p\n", args); 336 struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
611 kfree(args->snapdir_name); 337 struct ceph_mount_options *fsopt = fsc->mount_options;
612 args->snapdir_name = NULL; 338 struct ceph_options *opt = fsc->client->options;
613 kfree(args->name); 339
614 args->name = NULL; 340 if (opt->flags & CEPH_OPT_FSID)
615 kfree(args->secret); 341 seq_printf(m, ",fsid=%pU", &opt->fsid);
616 args->secret = NULL; 342 if (opt->flags & CEPH_OPT_NOSHARE)
617 kfree(args); 343 seq_puts(m, ",noshare");
344 if (opt->flags & CEPH_OPT_NOCRC)
345 seq_puts(m, ",nocrc");
346
347 if (opt->name)
348 seq_printf(m, ",name=%s", opt->name);
349 if (opt->secret)
350 seq_puts(m, ",secret=<hidden>");
351
352 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
353 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
354 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
355 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
356 if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
357 seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
358 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
359 seq_printf(m, ",osdkeepalivetimeout=%d",
360 opt->osd_keepalive_timeout);
361
362 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
363 seq_puts(m, ",dirstat");
364 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
365 seq_puts(m, ",norbytes");
366 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
367 seq_puts(m, ",noasyncreaddir");
368
369 if (fsopt->wsize)
370 seq_printf(m, ",wsize=%d", fsopt->wsize);
371 if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
372 seq_printf(m, ",rsize=%d", fsopt->rsize);
373 if (fsopt->congestion_kb != default_congestion_kb())
374 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
375 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
376 seq_printf(m, ",caps_wanted_delay_min=%d",
377 fsopt->caps_wanted_delay_min);
378 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
379 seq_printf(m, ",caps_wanted_delay_max=%d",
380 fsopt->caps_wanted_delay_max);
381 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
382 seq_printf(m, ",cap_release_safety=%d",
383 fsopt->cap_release_safety);
384 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
385 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
386 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
387 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
388 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
389 seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
390 return 0;
618} 391}
619 392
620/* 393/*
621 * create a fresh client instance 394 * handle any mon messages the standard library doesn't understand.
395 * return an error if we don't understand it either.
622 */ 396 */
623static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) 397static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
624{ 398{
625 struct ceph_client *client; 399 struct ceph_fs_client *fsc = client->private;
400 int type = le16_to_cpu(msg->hdr.type);
401
402 switch (type) {
403 case CEPH_MSG_MDS_MAP:
404 ceph_mdsc_handle_map(fsc->mdsc, msg);
405 return 0;
406
407 default:
408 return -1;
409 }
410}
411
412/*
413 * create a new fs client
414 */
415struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
416 struct ceph_options *opt)
417{
418 struct ceph_fs_client *fsc;
626 int err = -ENOMEM; 419 int err = -ENOMEM;
627 420
628 client = kzalloc(sizeof(*client), GFP_KERNEL); 421 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
629 if (client == NULL) 422 if (!fsc)
630 return ERR_PTR(-ENOMEM); 423 return ERR_PTR(-ENOMEM);
631 424
632 mutex_init(&client->mount_mutex); 425 fsc->client = ceph_create_client(opt, fsc);
633 426 if (IS_ERR(fsc->client)) {
634 init_waitqueue_head(&client->auth_wq); 427 err = PTR_ERR(fsc->client);
428 goto fail;
429 }
430 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
431 fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
432 fsc->client->monc.want_mdsmap = 1;
635 433
636 client->sb = NULL; 434 fsc->mount_options = fsopt;
637 client->mount_state = CEPH_MOUNT_MOUNTING;
638 client->mount_args = args;
639 435
640 client->msgr = NULL; 436 fsc->sb = NULL;
437 fsc->mount_state = CEPH_MOUNT_MOUNTING;
641 438
642 client->auth_err = 0; 439 atomic_long_set(&fsc->writeback_count, 0);
643 atomic_long_set(&client->writeback_count, 0);
644 440
645 err = bdi_init(&client->backing_dev_info); 441 err = bdi_init(&fsc->backing_dev_info);
646 if (err < 0) 442 if (err < 0)
647 goto fail; 443 goto fail_client;
648 444
649 err = -ENOMEM; 445 err = -ENOMEM;
650 client->wb_wq = create_workqueue("ceph-writeback"); 446 fsc->wb_wq = create_workqueue("ceph-writeback");
651 if (client->wb_wq == NULL) 447 if (fsc->wb_wq == NULL)
652 goto fail_bdi; 448 goto fail_bdi;
653 client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); 449 fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
654 if (client->pg_inv_wq == NULL) 450 if (fsc->pg_inv_wq == NULL)
655 goto fail_wb_wq; 451 goto fail_wb_wq;
656 client->trunc_wq = create_singlethread_workqueue("ceph-trunc"); 452 fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
657 if (client->trunc_wq == NULL) 453 if (fsc->trunc_wq == NULL)
658 goto fail_pg_inv_wq; 454 goto fail_pg_inv_wq;
659 455
660 /* set up mempools */ 456 /* set up mempools */
661 err = -ENOMEM; 457 err = -ENOMEM;
662 client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, 458 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
663 client->mount_args->wsize >> PAGE_CACHE_SHIFT); 459 fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
664 if (!client->wb_pagevec_pool) 460 if (!fsc->wb_pagevec_pool)
665 goto fail_trunc_wq; 461 goto fail_trunc_wq;
666 462
667 /* caps */ 463 /* caps */
668 client->min_caps = args->max_readdir; 464 fsc->min_caps = fsopt->max_readdir;
465
466 return fsc;
669 467
670 /* subsystems */
671 err = ceph_monc_init(&client->monc, client);
672 if (err < 0)
673 goto fail_mempool;
674 err = ceph_osdc_init(&client->osdc, client);
675 if (err < 0)
676 goto fail_monc;
677 err = ceph_mdsc_init(&client->mdsc, client);
678 if (err < 0)
679 goto fail_osdc;
680 return client;
681
682fail_osdc:
683 ceph_osdc_stop(&client->osdc);
684fail_monc:
685 ceph_monc_stop(&client->monc);
686fail_mempool:
687 mempool_destroy(client->wb_pagevec_pool);
688fail_trunc_wq: 468fail_trunc_wq:
689 destroy_workqueue(client->trunc_wq); 469 destroy_workqueue(fsc->trunc_wq);
690fail_pg_inv_wq: 470fail_pg_inv_wq:
691 destroy_workqueue(client->pg_inv_wq); 471 destroy_workqueue(fsc->pg_inv_wq);
692fail_wb_wq: 472fail_wb_wq:
693 destroy_workqueue(client->wb_wq); 473 destroy_workqueue(fsc->wb_wq);
694fail_bdi: 474fail_bdi:
695 bdi_destroy(&client->backing_dev_info); 475 bdi_destroy(&fsc->backing_dev_info);
476fail_client:
477 ceph_destroy_client(fsc->client);
696fail: 478fail:
697 kfree(client); 479 kfree(fsc);
698 return ERR_PTR(err); 480 return ERR_PTR(err);
699} 481}
700 482
701static void ceph_destroy_client(struct ceph_client *client) 483void destroy_fs_client(struct ceph_fs_client *fsc)
702{ 484{
703 dout("destroy_client %p\n", client); 485 dout("destroy_fs_client %p\n", fsc);
704 486
705 /* unmount */ 487 destroy_workqueue(fsc->wb_wq);
706 ceph_mdsc_stop(&client->mdsc); 488 destroy_workqueue(fsc->pg_inv_wq);
707 ceph_osdc_stop(&client->osdc); 489 destroy_workqueue(fsc->trunc_wq);
708 490
709 /* 491 bdi_destroy(&fsc->backing_dev_info);
710 * make sure mds and osd connections close out before destroying
711 * the auth module, which is needed to free those connections'
712 * ceph_authorizers.
713 */
714 ceph_msgr_flush();
715
716 ceph_monc_stop(&client->monc);
717 492
718 ceph_debugfs_client_cleanup(client); 493 mempool_destroy(fsc->wb_pagevec_pool);
719 destroy_workqueue(client->wb_wq);
720 destroy_workqueue(client->pg_inv_wq);
721 destroy_workqueue(client->trunc_wq);
722 494
723 bdi_destroy(&client->backing_dev_info); 495 destroy_mount_options(fsc->mount_options);
724 496
725 if (client->msgr) 497 ceph_fs_debugfs_cleanup(fsc);
726 ceph_messenger_destroy(client->msgr);
727 mempool_destroy(client->wb_pagevec_pool);
728 498
729 destroy_mount_args(client->mount_args); 499 ceph_destroy_client(fsc->client);
730 500
731 kfree(client); 501 kfree(fsc);
732 dout("destroy_client %p done\n", client); 502 dout("destroy_fs_client %p done\n", fsc);
733} 503}
734 504
735/* 505/*
736 * Initially learn our fsid, or verify an fsid matches. 506 * caches
737 */ 507 */
738int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) 508struct kmem_cache *ceph_inode_cachep;
509struct kmem_cache *ceph_cap_cachep;
510struct kmem_cache *ceph_dentry_cachep;
511struct kmem_cache *ceph_file_cachep;
512
513static void ceph_inode_init_once(void *foo)
739{ 514{
740 if (client->have_fsid) { 515 struct ceph_inode_info *ci = foo;
741 if (ceph_fsid_compare(&client->fsid, fsid)) { 516 inode_init_once(&ci->vfs_inode);
742 pr_err("bad fsid, had %pU got %pU", 517}
743 &client->fsid, fsid); 518
744 return -1; 519static int __init init_caches(void)
745 } 520{
746 } else { 521 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
747 pr_info("client%lld fsid %pU\n", client->monc.auth->global_id, 522 sizeof(struct ceph_inode_info),
748 fsid); 523 __alignof__(struct ceph_inode_info),
749 memcpy(&client->fsid, fsid, sizeof(*fsid)); 524 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
750 ceph_debugfs_client_init(client); 525 ceph_inode_init_once);
751 client->have_fsid = true; 526 if (ceph_inode_cachep == NULL)
752 } 527 return -ENOMEM;
528
529 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
530 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
531 if (ceph_cap_cachep == NULL)
532 goto bad_cap;
533
534 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
535 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
536 if (ceph_dentry_cachep == NULL)
537 goto bad_dentry;
538
539 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
540 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
541 if (ceph_file_cachep == NULL)
542 goto bad_file;
543
753 return 0; 544 return 0;
545
546bad_file:
547 kmem_cache_destroy(ceph_dentry_cachep);
548bad_dentry:
549 kmem_cache_destroy(ceph_cap_cachep);
550bad_cap:
551 kmem_cache_destroy(ceph_inode_cachep);
552 return -ENOMEM;
754} 553}
755 554
555static void destroy_caches(void)
556{
557 kmem_cache_destroy(ceph_inode_cachep);
558 kmem_cache_destroy(ceph_cap_cachep);
559 kmem_cache_destroy(ceph_dentry_cachep);
560 kmem_cache_destroy(ceph_file_cachep);
561}
562
563
756/* 564/*
757 * true if we have the mon map (and have thus joined the cluster) 565 * ceph_umount_begin - initiate forced umount. Tear down the
566 * mount, skipping steps that may hang while waiting for server(s).
758 */ 567 */
759static int have_mon_and_osd_map(struct ceph_client *client) 568static void ceph_umount_begin(struct super_block *sb)
760{ 569{
761 return client->monc.monmap && client->monc.monmap->epoch && 570 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
762 client->osdc.osdmap && client->osdc.osdmap->epoch; 571
572 dout("ceph_umount_begin - starting forced umount\n");
573 if (!fsc)
574 return;
575 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
576 return;
763} 577}
764 578
579static const struct super_operations ceph_super_ops = {
580 .alloc_inode = ceph_alloc_inode,
581 .destroy_inode = ceph_destroy_inode,
582 .write_inode = ceph_write_inode,
583 .sync_fs = ceph_sync_fs,
584 .put_super = ceph_put_super,
585 .show_options = ceph_show_options,
586 .statfs = ceph_statfs,
587 .umount_begin = ceph_umount_begin,
588};
589
765/* 590/*
766 * Bootstrap mount by opening the root directory. Note the mount 591 * Bootstrap mount by opening the root directory. Note the mount
767 * @started time from caller, and time out if this takes too long. 592 * @started time from caller, and time out if this takes too long.
768 */ 593 */
769static struct dentry *open_root_dentry(struct ceph_client *client, 594static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
770 const char *path, 595 const char *path,
771 unsigned long started) 596 unsigned long started)
772{ 597{
773 struct ceph_mds_client *mdsc = &client->mdsc; 598 struct ceph_mds_client *mdsc = fsc->mdsc;
774 struct ceph_mds_request *req = NULL; 599 struct ceph_mds_request *req = NULL;
775 int err; 600 int err;
776 struct dentry *root; 601 struct dentry *root;
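
The rewritten option handling above is where the fs/libceph split shows most clearly: cluster-level options (fsid, name, secret, ip, noshare, nocrc, the osd and mount timeouts) drop out of this file, and parse_mount_options() keeps only the filesystem-side defaults, delegating the actual string walk to the shared ceph_parse_options(), which calls parse_fsopt_token() back for any token it does not itself recognize (a token unknown to both sides yields -EINVAL). A condensed sketch of that division of labor, using the call exactly as it appears above:

	struct ceph_mount_options *fsopt;  /* fs-side options, defaults already set */
	struct ceph_options *opt;
	int err;

	/*
	 * libceph consumes what it knows (fsid=, name=, secret=, ip=,
	 * osdtimeout=, mount_timeout=, ...) and bounces the rest
	 * (wsize=, snapdirname=, dirstat, ...) to the callback.
	 */
	err = ceph_parse_options(&opt, options, dev_name, dev_name_end,
				 parse_fsopt_token, (void *)fsopt);
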
@@ -784,14 +609,14 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
784 req->r_ino1.ino = CEPH_INO_ROOT; 609 req->r_ino1.ino = CEPH_INO_ROOT;
785 req->r_ino1.snap = CEPH_NOSNAP; 610 req->r_ino1.snap = CEPH_NOSNAP;
786 req->r_started = started; 611 req->r_started = started;
787 req->r_timeout = client->mount_args->mount_timeout * HZ; 612 req->r_timeout = fsc->client->options->mount_timeout * HZ;
788 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); 613 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
789 req->r_num_caps = 2; 614 req->r_num_caps = 2;
790 err = ceph_mdsc_do_request(mdsc, NULL, req); 615 err = ceph_mdsc_do_request(mdsc, NULL, req);
791 if (err == 0) { 616 if (err == 0) {
792 dout("open_root_inode success\n"); 617 dout("open_root_inode success\n");
793 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && 618 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
794 client->sb->s_root == NULL) 619 fsc->sb->s_root == NULL)
795 root = d_alloc_root(req->r_target_inode); 620 root = d_alloc_root(req->r_target_inode);
796 else 621 else
797 root = d_obtain_alias(req->r_target_inode); 622 root = d_obtain_alias(req->r_target_inode);
@@ -804,105 +629,86 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
804 return root; 629 return root;
805} 630}
806 631
632
633
634
807/* 635/*
808 * mount: join the ceph cluster, and open root directory. 636 * mount: join the ceph cluster, and open root directory.
809 */ 637 */
810static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, 638static int ceph_mount(struct ceph_fs_client *fsc, struct vfsmount *mnt,
811 const char *path) 639 const char *path)
812{ 640{
813 struct ceph_entity_addr *myaddr = NULL;
814 int err; 641 int err;
815 unsigned long timeout = client->mount_args->mount_timeout * HZ;
816 unsigned long started = jiffies; /* note the start time */ 642 unsigned long started = jiffies; /* note the start time */
817 struct dentry *root; 643 struct dentry *root;
644 int first = 0; /* first vfsmount for this super_block */
818 645
819 dout("mount start\n"); 646 dout("mount start\n");
820 mutex_lock(&client->mount_mutex); 647 mutex_lock(&fsc->client->mount_mutex);
821
822 /* initialize the messenger */
823 if (client->msgr == NULL) {
824 if (ceph_test_opt(client, MYIP))
825 myaddr = &client->mount_args->my_addr;
826 client->msgr = ceph_messenger_create(myaddr);
827 if (IS_ERR(client->msgr)) {
828 err = PTR_ERR(client->msgr);
829 client->msgr = NULL;
830 goto out;
831 }
832 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
833 }
834 648
835 /* open session, and wait for mon, mds, and osd maps */ 649 err = __ceph_open_session(fsc->client, started);
836 err = ceph_monc_open_session(&client->monc);
837 if (err < 0) 650 if (err < 0)
838 goto out; 651 goto out;
839 652
840 while (!have_mon_and_osd_map(client)) {
841 err = -EIO;
842 if (timeout && time_after_eq(jiffies, started + timeout))
843 goto out;
844
845 /* wait */
846 dout("mount waiting for mon_map\n");
847 err = wait_event_interruptible_timeout(client->auth_wq,
848 have_mon_and_osd_map(client) || (client->auth_err < 0),
849 timeout);
850 if (err == -EINTR || err == -ERESTARTSYS)
851 goto out;
852 if (client->auth_err < 0) {
853 err = client->auth_err;
854 goto out;
855 }
856 }
857
858 dout("mount opening root\n"); 653 dout("mount opening root\n");
859 root = open_root_dentry(client, "", started); 654 root = open_root_dentry(fsc, "", started);
860 if (IS_ERR(root)) { 655 if (IS_ERR(root)) {
861 err = PTR_ERR(root); 656 err = PTR_ERR(root);
862 goto out; 657 goto out;
863 } 658 }
864 if (client->sb->s_root) 659 if (fsc->sb->s_root) {
865 dput(root); 660 dput(root);
866 else 661 } else {
867 client->sb->s_root = root; 662 fsc->sb->s_root = root;
663 first = 1;
664
665 err = ceph_fs_debugfs_init(fsc);
666 if (err < 0)
667 goto fail;
668 }
868 669
869 if (path[0] == 0) { 670 if (path[0] == 0) {
870 dget(root); 671 dget(root);
871 } else { 672 } else {
872 dout("mount opening base mountpoint\n"); 673 dout("mount opening base mountpoint\n");
873 root = open_root_dentry(client, path, started); 674 root = open_root_dentry(fsc, path, started);
874 if (IS_ERR(root)) { 675 if (IS_ERR(root)) {
875 err = PTR_ERR(root); 676 err = PTR_ERR(root);
876 dput(client->sb->s_root); 677 goto fail;
877 client->sb->s_root = NULL;
878 goto out;
879 } 678 }
880 } 679 }
881 680
882 mnt->mnt_root = root; 681 mnt->mnt_root = root;
883 mnt->mnt_sb = client->sb; 682 mnt->mnt_sb = fsc->sb;
884 683
885 client->mount_state = CEPH_MOUNT_MOUNTED; 684 fsc->mount_state = CEPH_MOUNT_MOUNTED;
886 dout("mount success\n"); 685 dout("mount success\n");
887 err = 0; 686 err = 0;
888 687
889out: 688out:
890 mutex_unlock(&client->mount_mutex); 689 mutex_unlock(&fsc->client->mount_mutex);
891 return err; 690 return err;
691
692fail:
693 if (first) {
694 dput(fsc->sb->s_root);
695 fsc->sb->s_root = NULL;
696 }
697 goto out;
892} 698}
893 699
894static int ceph_set_super(struct super_block *s, void *data) 700static int ceph_set_super(struct super_block *s, void *data)
895{ 701{
896 struct ceph_client *client = data; 702 struct ceph_fs_client *fsc = data;
897 int ret; 703 int ret;
898 704
899 dout("set_super %p data %p\n", s, data); 705 dout("set_super %p data %p\n", s, data);
900 706
901 s->s_flags = client->mount_args->sb_flags; 707 s->s_flags = fsc->mount_options->sb_flags;
902 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 708 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
903 709
904 s->s_fs_info = client; 710 s->s_fs_info = fsc;
905 client->sb = s; 711 fsc->sb = s;
906 712
907 s->s_op = &ceph_super_ops; 713 s->s_op = &ceph_super_ops;
908 s->s_export_op = &ceph_export_ops; 714 s->s_export_op = &ceph_export_ops;
@@ -917,7 +723,7 @@ static int ceph_set_super(struct super_block *s, void *data)
917 723
918fail: 724fail:
919 s->s_fs_info = NULL; 725 s->s_fs_info = NULL;
920 client->sb = NULL; 726 fsc->sb = NULL;
921 return ret; 727 return ret;
922} 728}
923 729
@@ -926,30 +732,23 @@ fail:
926 */ 732 */
927static int ceph_compare_super(struct super_block *sb, void *data) 733static int ceph_compare_super(struct super_block *sb, void *data)
928{ 734{
929 struct ceph_client *new = data; 735 struct ceph_fs_client *new = data;
930 struct ceph_mount_args *args = new->mount_args; 736 struct ceph_mount_options *fsopt = new->mount_options;
931 struct ceph_client *other = ceph_sb_to_client(sb); 737 struct ceph_options *opt = new->client->options;
932 int i; 738 struct ceph_fs_client *other = ceph_sb_to_client(sb);
933 739
934 dout("ceph_compare_super %p\n", sb); 740 dout("ceph_compare_super %p\n", sb);
935 if (args->flags & CEPH_OPT_FSID) { 741
936 if (ceph_fsid_compare(&args->fsid, &other->fsid)) { 742 if (compare_mount_options(fsopt, opt, other)) {
937 dout("fsid doesn't match\n"); 743 dout("monitor(s)/mount options don't match\n");
938 return 0; 744 return 0;
939 }
940 } else {
941 /* do we share (a) monitor? */
942 for (i = 0; i < new->monc.monmap->num_mon; i++)
943 if (ceph_monmap_contains(other->monc.monmap,
944 &new->monc.monmap->mon_inst[i].addr))
945 break;
946 if (i == new->monc.monmap->num_mon) {
947 dout("mon ip not part of monmap\n");
948 return 0;
949 }
950 dout("mon ip matches existing sb %p\n", sb);
951 } 745 }
952 if (args->sb_flags != other->mount_args->sb_flags) { 746 if ((opt->flags & CEPH_OPT_FSID) &&
747 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
748 dout("fsid doesn't match\n");
749 return 0;
750 }
751 if (fsopt->sb_flags != other->mount_options->sb_flags) {
953 dout("flags differ\n"); 752 dout("flags differ\n");
954 return 0; 753 return 0;
955 } 754 }
@@ -961,19 +760,20 @@ static int ceph_compare_super(struct super_block *sb, void *data)
961 */ 760 */
962static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 761static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
963 762
964static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) 763static int ceph_register_bdi(struct super_block *sb,
764 struct ceph_fs_client *fsc)
965{ 765{
966 int err; 766 int err;
967 767
968 /* set ra_pages based on rsize mount option? */ 768 /* set ra_pages based on rsize mount option? */
969 if (client->mount_args->rsize >= PAGE_CACHE_SIZE) 769 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
970 client->backing_dev_info.ra_pages = 770 fsc->backing_dev_info.ra_pages =
971 (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) 771 (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
972 >> PAGE_SHIFT; 772 >> PAGE_SHIFT;
973 err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d", 773 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
974 atomic_long_inc_return(&bdi_seq)); 774 atomic_long_inc_return(&bdi_seq));
975 if (!err) 775 if (!err)
976 sb->s_bdi = &client->backing_dev_info; 776 sb->s_bdi = &fsc->backing_dev_info;
977 return err; 777 return err;
978} 778}
979 779
@@ -982,46 +782,52 @@ static int ceph_get_sb(struct file_system_type *fs_type,
982 struct vfsmount *mnt) 782 struct vfsmount *mnt)
983{ 783{
984 struct super_block *sb; 784 struct super_block *sb;
985 struct ceph_client *client; 785 struct ceph_fs_client *fsc;
986 int err; 786 int err;
987 int (*compare_super)(struct super_block *, void *) = ceph_compare_super; 787 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
988 const char *path = NULL; 788 const char *path = NULL;
989 struct ceph_mount_args *args; 789 struct ceph_mount_options *fsopt = NULL;
790 struct ceph_options *opt = NULL;
990 791
991 dout("ceph_get_sb\n"); 792 dout("ceph_get_sb\n");
992 args = parse_mount_args(flags, data, dev_name, &path); 793 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
993 if (IS_ERR(args)) { 794 if (err < 0)
994 err = PTR_ERR(args);
995 goto out_final; 795 goto out_final;
996 }
997 796
998 /* create client (which we may/may not use) */ 797 /* create client (which we may/may not use) */
999 client = ceph_create_client(args); 798 fsc = create_fs_client(fsopt, opt);
1000 if (IS_ERR(client)) { 799 if (IS_ERR(fsc)) {
1001 err = PTR_ERR(client); 800 err = PTR_ERR(fsc);
801 kfree(fsopt);
802 kfree(opt);
1002 goto out_final; 803 goto out_final;
1003 } 804 }
1004 805
1005 if (client->mount_args->flags & CEPH_OPT_NOSHARE) 806 err = ceph_mdsc_init(fsc);
807 if (err < 0)
808 goto out;
809
810 if (ceph_test_opt(fsc->client, NOSHARE))
1006 compare_super = NULL; 811 compare_super = NULL;
1007 sb = sget(fs_type, compare_super, ceph_set_super, client); 812 sb = sget(fs_type, compare_super, ceph_set_super, fsc);
1008 if (IS_ERR(sb)) { 813 if (IS_ERR(sb)) {
1009 err = PTR_ERR(sb); 814 err = PTR_ERR(sb);
1010 goto out; 815 goto out;
1011 } 816 }
1012 817
1013 if (ceph_sb_to_client(sb) != client) { 818 if (ceph_sb_to_client(sb) != fsc) {
1014 ceph_destroy_client(client); 819 ceph_mdsc_destroy(fsc);
1015 client = ceph_sb_to_client(sb); 820 destroy_fs_client(fsc);
1016 dout("get_sb got existing client %p\n", client); 821 fsc = ceph_sb_to_client(sb);
822 dout("get_sb got existing client %p\n", fsc);
1017 } else { 823 } else {
1018 dout("get_sb using new client %p\n", client); 824 dout("get_sb using new client %p\n", fsc);
1019 err = ceph_register_bdi(sb, client); 825 err = ceph_register_bdi(sb, fsc);
1020 if (err < 0) 826 if (err < 0)
1021 goto out_splat; 827 goto out_splat;
1022 } 828 }
1023 829
1024 err = ceph_mount(client, mnt, path); 830 err = ceph_mount(fsc, mnt, path);
1025 if (err < 0) 831 if (err < 0)
1026 goto out_splat; 832 goto out_splat;
1027 dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root, 833 dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
@@ -1029,12 +835,13 @@ static int ceph_get_sb(struct file_system_type *fs_type,
1029 return 0; 835 return 0;
1030 836
1031out_splat: 837out_splat:
1032 ceph_mdsc_close_sessions(&client->mdsc); 838 ceph_mdsc_close_sessions(fsc->mdsc);
1033 deactivate_locked_super(sb); 839 deactivate_locked_super(sb);
1034 goto out_final; 840 goto out_final;
1035 841
1036out: 842out:
1037 ceph_destroy_client(client); 843 ceph_mdsc_destroy(fsc);
844 destroy_fs_client(fsc);
1038out_final: 845out_final:
1039 dout("ceph_get_sb fail %d\n", err); 846 dout("ceph_get_sb fail %d\n", err);
1040 return err; 847 return err;
@@ -1042,11 +849,12 @@ out_final:
1042 849
1043static void ceph_kill_sb(struct super_block *s) 850static void ceph_kill_sb(struct super_block *s)
1044{ 851{
1045 struct ceph_client *client = ceph_sb_to_client(s); 852 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1046 dout("kill_sb %p\n", s); 853 dout("kill_sb %p\n", s);
1047 ceph_mdsc_pre_umount(&client->mdsc); 854 ceph_mdsc_pre_umount(fsc->mdsc);
1048 kill_anon_super(s); /* will call put_super after sb is r/o */ 855 kill_anon_super(s); /* will call put_super after sb is r/o */
1049 ceph_destroy_client(client); 856 ceph_mdsc_destroy(fsc);
857 destroy_fs_client(fsc);
1050} 858}
1051 859
1052static struct file_system_type ceph_fs_type = { 860static struct file_system_type ceph_fs_type = {
@@ -1062,36 +870,20 @@ static struct file_system_type ceph_fs_type = {
1062 870
1063static int __init init_ceph(void) 871static int __init init_ceph(void)
1064{ 872{
1065 int ret = 0; 873 int ret = init_caches();
1066
1067 ret = ceph_debugfs_init();
1068 if (ret < 0)
1069 goto out;
1070
1071 ret = ceph_msgr_init();
1072 if (ret < 0)
1073 goto out_debugfs;
1074
1075 ret = init_caches();
1076 if (ret) 874 if (ret)
1077 goto out_msgr; 875 goto out;
1078 876
1079 ret = register_filesystem(&ceph_fs_type); 877 ret = register_filesystem(&ceph_fs_type);
1080 if (ret) 878 if (ret)
1081 goto out_icache; 879 goto out_icache;
1082 880
1083 pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n", 881 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1084 CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL, 882
1085 CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
1086 CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
1087 return 0; 883 return 0;
1088 884
1089out_icache: 885out_icache:
1090 destroy_caches(); 886 destroy_caches();
1091out_msgr:
1092 ceph_msgr_exit();
1093out_debugfs:
1094 ceph_debugfs_cleanup();
1095out: 887out:
1096 return ret; 888 return ret;
1097} 889}
@@ -1101,8 +893,6 @@ static void __exit exit_ceph(void)
1101 dout("exit_ceph\n"); 893 dout("exit_ceph\n");
1102 unregister_filesystem(&ceph_fs_type); 894 unregister_filesystem(&ceph_fs_type);
1103 destroy_caches(); 895 destroy_caches();
1104 ceph_msgr_exit();
1105 ceph_debugfs_cleanup();
1106} 896}
1107 897
1108module_init(init_ceph); 898module_init(init_ceph);
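
The reworked ceph_mount() above only tears down sb->s_root on failure when this mount attempt was the one that installed it, tracked with a "first" flag and a fail: label that rejoins the common unlock path at out:. A minimal userspace sketch of that goto-unwind pattern follows; the resource and function names are hypothetical, not Ceph's.

/* Sketch of the "first owner cleans up" goto pattern used in ceph_mount(). */
#include <stdio.h>
#include <stdlib.h>

static char *shared_root;               /* stands in for sb->s_root */

static int do_mount(const char *path, int fail_late)
{
        int err = 0;
        int first = 0;                  /* did this call install shared_root? */

        (void)path;
        if (!shared_root) {
                shared_root = malloc(16);
                if (!shared_root)
                        return -1;
                first = 1;
        }

        if (fail_late) {                /* a later mount step failed */
                err = -1;
                goto fail;
        }
out:
        return err;

fail:
        if (first) {                    /* only the installer tears down */
                free(shared_root);
                shared_root = NULL;
        }
        goto out;
}

int main(void)
{
        printf("first mount fails: %d, root cleaned: %d\n",
               do_mount("/", 1), shared_root == NULL);
        printf("mount ok: %d\n", do_mount("/", 0));
        printf("later mount fails: %d, root kept: %d\n",
               do_mount("/", 1), shared_root != NULL);
        return 0;
}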
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b87638e84c4b..1886294e12f7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1,7 +1,7 @@
1#ifndef _FS_CEPH_SUPER_H 1#ifndef _FS_CEPH_SUPER_H
2#define _FS_CEPH_SUPER_H 2#define _FS_CEPH_SUPER_H
3 3
4#include "ceph_debug.h" 4#include <linux/ceph/ceph_debug.h>
5 5
6#include <asm/unaligned.h> 6#include <asm/unaligned.h>
7#include <linux/backing-dev.h> 7#include <linux/backing-dev.h>
@@ -14,13 +14,7 @@
14#include <linux/writeback.h> 14#include <linux/writeback.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include "types.h" 17#include <linux/ceph/libceph.h>
18#include "messenger.h"
19#include "msgpool.h"
20#include "mon_client.h"
21#include "mds_client.h"
22#include "osd_client.h"
23#include "ceph_fs.h"
24 18
25/* f_type in struct statfs */ 19/* f_type in struct statfs */
26#define CEPH_SUPER_MAGIC 0x00c36400 20#define CEPH_SUPER_MAGIC 0x00c36400
@@ -30,42 +24,25 @@
30#define CEPH_BLOCK_SHIFT 20 /* 1 MB */ 24#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
31#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) 25#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
32 26
33/* 27#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
34 * Supported features 28#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
35 */ 29#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
36#define CEPH_FEATURE_SUPPORTED CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK
37#define CEPH_FEATURE_REQUIRED CEPH_FEATURE_NOSRCADDR
38 30
39/* 31#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
40 * mount options
41 */
42#define CEPH_OPT_FSID (1<<0)
43#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
44#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
45#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */
46#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
47#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */
48#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
49 32
50#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES) 33#define ceph_set_mount_opt(fsc, opt) \
34 (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
35#define ceph_test_mount_opt(fsc, opt) \
36 (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
51 37
52#define ceph_set_opt(client, opt) \ 38#define CEPH_MAX_READDIR_DEFAULT 1024
53 (client)->mount_args->flags |= CEPH_OPT_##opt; 39#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
54#define ceph_test_opt(client, opt) \ 40#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
55 (!!((client)->mount_args->flags & CEPH_OPT_##opt))
56 41
57 42struct ceph_mount_options {
58struct ceph_mount_args {
59 int sb_flags;
60 int flags; 43 int flags;
61 struct ceph_fsid fsid; 44 int sb_flags;
62 struct ceph_entity_addr my_addr; 45
63 int num_mon;
64 struct ceph_entity_addr *mon_addr;
65 int mount_timeout;
66 int osd_idle_ttl;
67 int osd_timeout;
68 int osd_keepalive_timeout;
69 int wsize; 46 int wsize;
70 int rsize; /* max readahead */ 47 int rsize; /* max readahead */
71 int congestion_kb; /* max writeback in flight */ 48 int congestion_kb; /* max writeback in flight */
@@ -73,82 +50,25 @@ struct ceph_mount_args {
73 int cap_release_safety; 50 int cap_release_safety;
74 int max_readdir; /* max readdir result (entries) */ 51 int max_readdir; /* max readdir result (entries) */
75 int max_readdir_bytes; /* max readdir result (bytes) */ 52 int max_readdir_bytes; /* max readdir result (bytes) */
76 char *snapdir_name; /* default ".snap" */
77 char *name;
78 char *secret;
79};
80 53
81/* 54 /*
82 * defaults 55 * everything above this point can be memcmp'd; everything below
83 */ 56 * is handled in compare_mount_options()
84#define CEPH_MOUNT_TIMEOUT_DEFAULT 60 57 */
85#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
86#define CEPH_OSD_KEEPALIVE_DEFAULT 5
87#define CEPH_OSD_IDLE_TTL_DEFAULT 60
88#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
89#define CEPH_MAX_READDIR_DEFAULT 1024
90#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
91
92#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
93#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
94
95#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
96#define CEPH_AUTH_NAME_DEFAULT "guest"
97/*
98 * Delay telling the MDS we no longer want caps, in case we reopen
99 * the file. Delay a minimum amount of time, even if we send a cap
100 * message for some other reason. Otherwise, take the opportunity to
101 * update the mds to avoid sending another message later.
102 */
103#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
104#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
105
106#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
107
108/* mount state */
109enum {
110 CEPH_MOUNT_MOUNTING,
111 CEPH_MOUNT_MOUNTED,
112 CEPH_MOUNT_UNMOUNTING,
113 CEPH_MOUNT_UNMOUNTED,
114 CEPH_MOUNT_SHUTDOWN,
115};
116
117/*
118 * subtract jiffies
119 */
120static inline unsigned long time_sub(unsigned long a, unsigned long b)
121{
122 BUG_ON(time_after(b, a));
123 return (long)a - (long)b;
124}
125
126/*
127 * per-filesystem client state
128 *
129 * possibly shared by multiple mount points, if they are
130 * mounting the same ceph filesystem/cluster.
131 */
132struct ceph_client {
133 struct ceph_fsid fsid;
134 bool have_fsid;
135 58
136 struct mutex mount_mutex; /* serialize mount attempts */ 59 char *snapdir_name; /* default ".snap" */
137 struct ceph_mount_args *mount_args; 60};
138 61
62struct ceph_fs_client {
139 struct super_block *sb; 63 struct super_block *sb;
140 64
141 unsigned long mount_state; 65 struct ceph_mount_options *mount_options;
142 wait_queue_head_t auth_wq; 66 struct ceph_client *client;
143
144 int auth_err;
145 67
68 unsigned long mount_state;
146 int min_caps; /* min caps i added */ 69 int min_caps; /* min caps i added */
147 70
148 struct ceph_messenger *msgr; /* messenger instance */ 71 struct ceph_mds_client *mdsc;
149 struct ceph_mon_client monc;
150 struct ceph_mds_client mdsc;
151 struct ceph_osd_client osdc;
152 72
153 /* writeback */ 73 /* writeback */
154 mempool_t *wb_pagevec_pool; 74 mempool_t *wb_pagevec_pool;
@@ -160,14 +80,14 @@ struct ceph_client {
160 struct backing_dev_info backing_dev_info; 80 struct backing_dev_info backing_dev_info;
161 81
162#ifdef CONFIG_DEBUG_FS 82#ifdef CONFIG_DEBUG_FS
163 struct dentry *debugfs_monmap; 83 struct dentry *debugfs_dentry_lru, *debugfs_caps;
164 struct dentry *debugfs_mdsmap, *debugfs_osdmap;
165 struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
166 struct dentry *debugfs_congestion_kb; 84 struct dentry *debugfs_congestion_kb;
167 struct dentry *debugfs_bdi; 85 struct dentry *debugfs_bdi;
86 struct dentry *debugfs_mdsc, *debugfs_mdsmap;
168#endif 87#endif
169}; 88};
170 89
90
171/* 91/*
172 * File i/o capability. This tracks shared state with the metadata 92 * File i/o capability. This tracks shared state with the metadata
173 * server that allows us to cache or writeback attributes or to read 93 * server that allows us to cache or writeback attributes or to read
@@ -275,6 +195,20 @@ struct ceph_inode_xattr {
275 int should_free_val; 195 int should_free_val;
276}; 196};
277 197
198/*
199 * Ceph dentry state
200 */
201struct ceph_dentry_info {
202 struct ceph_mds_session *lease_session;
203 u32 lease_gen, lease_shared_gen;
204 u32 lease_seq;
205 unsigned long lease_renew_after, lease_renew_from;
206 struct list_head lru;
207 struct dentry *dentry;
208 u64 time;
209 u64 offset;
210};
211
278struct ceph_inode_xattrs_info { 212struct ceph_inode_xattrs_info {
279 /* 213 /*
280 * (still encoded) xattr blob. we avoid the overhead of parsing 214 * (still encoded) xattr blob. we avoid the overhead of parsing
@@ -296,11 +230,6 @@ struct ceph_inode_xattrs_info {
296/* 230/*
297 * Ceph inode. 231 * Ceph inode.
298 */ 232 */
299#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
300#define CEPH_I_NODELAY 4 /* do not delay cap release */
301#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
302#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
303
304struct ceph_inode_info { 233struct ceph_inode_info {
305 struct ceph_vino i_vino; /* ceph ino + snap */ 234 struct ceph_vino i_vino; /* ceph ino + snap */
306 235
@@ -391,6 +320,63 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
391 return container_of(inode, struct ceph_inode_info, vfs_inode); 320 return container_of(inode, struct ceph_inode_info, vfs_inode);
392} 321}
393 322
323static inline struct ceph_vino ceph_vino(struct inode *inode)
324{
325 return ceph_inode(inode)->i_vino;
326}
327
328/*
329 * ino_t is <64 bits on many architectures, blech.
330 *
331 * don't include snap in ino hash, at least for now.
332 */
333static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
334{
335 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
336#if BITS_PER_LONG == 32
337 ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
338 if (!ino)
339 ino = 1;
340#endif
341 return ino;
342}
343
344/* for printf-style formatting */
345#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
346
347static inline u64 ceph_ino(struct inode *inode)
348{
349 return ceph_inode(inode)->i_vino.ino;
350}
351static inline u64 ceph_snap(struct inode *inode)
352{
353 return ceph_inode(inode)->i_vino.snap;
354}
355
356static inline int ceph_ino_compare(struct inode *inode, void *data)
357{
358 struct ceph_vino *pvino = (struct ceph_vino *)data;
359 struct ceph_inode_info *ci = ceph_inode(inode);
360 return ci->i_vino.ino == pvino->ino &&
361 ci->i_vino.snap == pvino->snap;
362}
363
364static inline struct inode *ceph_find_inode(struct super_block *sb,
365 struct ceph_vino vino)
366{
367 ino_t t = ceph_vino_to_ino(vino);
368 return ilookup5(sb, t, ceph_ino_compare, &vino);
369}
370
371
372/*
373 * Ceph inode.
374 */
375#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
376#define CEPH_I_NODELAY 4 /* do not delay cap release */
377#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
378#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
379
394static inline void ceph_i_clear(struct inode *inode, unsigned mask) 380static inline void ceph_i_clear(struct inode *inode, unsigned mask)
395{ 381{
396 struct ceph_inode_info *ci = ceph_inode(inode); 382 struct ceph_inode_info *ci = ceph_inode(inode);
@@ -414,8 +400,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
414 struct ceph_inode_info *ci = ceph_inode(inode); 400 struct ceph_inode_info *ci = ceph_inode(inode);
415 bool r; 401 bool r;
416 402
417 smp_mb(); 403 spin_lock(&inode->i_lock);
418 r = (ci->i_ceph_flags & mask) == mask; 404 r = (ci->i_ceph_flags & mask) == mask;
405 spin_unlock(&inode->i_lock);
419 return r; 406 return r;
420} 407}
421 408
@@ -432,20 +419,6 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
432 struct ceph_inode_frag *pfrag, 419 struct ceph_inode_frag *pfrag,
433 int *found); 420 int *found);
434 421
435/*
436 * Ceph dentry state
437 */
438struct ceph_dentry_info {
439 struct ceph_mds_session *lease_session;
440 u32 lease_gen, lease_shared_gen;
441 u32 lease_seq;
442 unsigned long lease_renew_after, lease_renew_from;
443 struct list_head lru;
444 struct dentry *dentry;
445 u64 time;
446 u64 offset;
447};
448
449static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry) 422static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
450{ 423{
451 return (struct ceph_dentry_info *)dentry->d_fsdata; 424 return (struct ceph_dentry_info *)dentry->d_fsdata;
@@ -456,22 +429,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
456 return ((loff_t)frag << 32) | (loff_t)off; 429 return ((loff_t)frag << 32) | (loff_t)off;
457} 430}
458 431
459/*
460 * ino_t is <64 bits on many architectures, blech.
461 *
462 * don't include snap in ino hash, at least for now.
463 */
464static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
465{
466 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
467#if BITS_PER_LONG == 32
468 ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
469 if (!ino)
470 ino = 1;
471#endif
472 return ino;
473}
474
475static inline int ceph_set_ino_cb(struct inode *inode, void *data) 432static inline int ceph_set_ino_cb(struct inode *inode, void *data)
476{ 433{
477 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; 434 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
@@ -479,39 +436,6 @@ static inline int ceph_set_ino_cb(struct inode *inode, void *data)
479 return 0; 436 return 0;
480} 437}
481 438
482static inline struct ceph_vino ceph_vino(struct inode *inode)
483{
484 return ceph_inode(inode)->i_vino;
485}
486
487/* for printf-style formatting */
488#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
489
490static inline u64 ceph_ino(struct inode *inode)
491{
492 return ceph_inode(inode)->i_vino.ino;
493}
494static inline u64 ceph_snap(struct inode *inode)
495{
496 return ceph_inode(inode)->i_vino.snap;
497}
498
499static inline int ceph_ino_compare(struct inode *inode, void *data)
500{
501 struct ceph_vino *pvino = (struct ceph_vino *)data;
502 struct ceph_inode_info *ci = ceph_inode(inode);
503 return ci->i_vino.ino == pvino->ino &&
504 ci->i_vino.snap == pvino->snap;
505}
506
507static inline struct inode *ceph_find_inode(struct super_block *sb,
508 struct ceph_vino vino)
509{
510 ino_t t = ceph_vino_to_ino(vino);
511 return ilookup5(sb, t, ceph_ino_compare, &vino);
512}
513
514
515/* 439/*
516 * caps helpers 440 * caps helpers
517 */ 441 */
@@ -576,18 +500,18 @@ extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
576 struct ceph_cap_reservation *ctx, int need); 500 struct ceph_cap_reservation *ctx, int need);
577extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc, 501extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
578 struct ceph_cap_reservation *ctx); 502 struct ceph_cap_reservation *ctx);
579extern void ceph_reservation_status(struct ceph_client *client, 503extern void ceph_reservation_status(struct ceph_fs_client *client,
580 int *total, int *avail, int *used, 504 int *total, int *avail, int *used,
581 int *reserved, int *min); 505 int *reserved, int *min);
582 506
583static inline struct ceph_client *ceph_inode_to_client(struct inode *inode) 507static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
584{ 508{
585 return (struct ceph_client *)inode->i_sb->s_fs_info; 509 return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
586} 510}
587 511
588static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb) 512static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
589{ 513{
590 return (struct ceph_client *)sb->s_fs_info; 514 return (struct ceph_fs_client *)sb->s_fs_info;
591} 515}
592 516
593 517
@@ -617,51 +541,6 @@ struct ceph_file_info {
617 541
618 542
619/* 543/*
620 * snapshots
621 */
622
623/*
624 * A "snap context" is the set of existing snapshots when we
625 * write data. It is used by the OSD to guide its COW behavior.
626 *
627 * The ceph_snap_context is refcounted, and attached to each dirty
628 * page, indicating which context the dirty data belonged to when it was
629 * dirtied.
630 */
631struct ceph_snap_context {
632 atomic_t nref;
633 u64 seq;
634 int num_snaps;
635 u64 snaps[];
636};
637
638static inline struct ceph_snap_context *
639ceph_get_snap_context(struct ceph_snap_context *sc)
640{
641 /*
642 printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
643 atomic_read(&sc->nref)+1);
644 */
645 if (sc)
646 atomic_inc(&sc->nref);
647 return sc;
648}
649
650static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
651{
652 if (!sc)
653 return;
654 /*
655 printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
656 atomic_read(&sc->nref)-1);
657 */
658 if (atomic_dec_and_test(&sc->nref)) {
659 /*printk(" deleting snap_context %p\n", sc);*/
660 kfree(sc);
661 }
662}
663
664/*
665 * A "snap realm" describes a subset of the file hierarchy sharing 544 * A "snap realm" describes a subset of the file hierarchy sharing
666 * the same set of snapshots that apply to it. The realms themselves 545 * the same set of snapshots that apply to it. The realms themselves
667 * are organized into a hierarchy, such that children inherit (some of) 546 * are organized into a hierarchy, such that children inherit (some of)
@@ -699,16 +578,33 @@ struct ceph_snap_realm {
699 spinlock_t inodes_with_caps_lock; 578 spinlock_t inodes_with_caps_lock;
700}; 579};
701 580
702 581static inline int default_congestion_kb(void)
703
704/*
705 * calculate the number of pages a given length and offset map onto,
706 * if we align the data.
707 */
708static inline int calc_pages_for(u64 off, u64 len)
709{ 582{
710 return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - 583 int congestion_kb;
711 (off >> PAGE_CACHE_SHIFT); 584
585 /*
586 * Copied from NFS
587 *
588 * congestion size, scale with available memory.
589 *
590 * 64MB: 8192k
591 * 128MB: 11585k
592 * 256MB: 16384k
593 * 512MB: 23170k
594 * 1GB: 32768k
595 * 2GB: 46340k
596 * 4GB: 65536k
597 * 8GB: 92681k
598 * 16GB: 131072k
599 *
600 * This allows larger machines to have larger/more transfers.
601 * Limit the default to 256M
602 */
603 congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
604 if (congestion_kb > 256*1024)
605 congestion_kb = 256*1024;
606
607 return congestion_kb;
712} 608}
713 609
714 610
@@ -741,16 +637,6 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
741 ci_item)->writing; 637 ci_item)->writing;
742} 638}
743 639
744
745/* super.c */
746extern struct kmem_cache *ceph_inode_cachep;
747extern struct kmem_cache *ceph_cap_cachep;
748extern struct kmem_cache *ceph_dentry_cachep;
749extern struct kmem_cache *ceph_file_cachep;
750
751extern const char *ceph_msg_type_name(int type);
752extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
753
754/* inode.c */ 640/* inode.c */
755extern const struct inode_operations ceph_file_iops; 641extern const struct inode_operations ceph_file_iops;
756 642
@@ -857,12 +743,18 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
857/* file.c */ 743/* file.c */
858extern const struct file_operations ceph_file_fops; 744extern const struct file_operations ceph_file_fops;
859extern const struct address_space_operations ceph_aops; 745extern const struct address_space_operations ceph_aops;
746extern int ceph_copy_to_page_vector(struct page **pages,
747 const char *data,
748 loff_t off, size_t len);
749extern int ceph_copy_from_page_vector(struct page **pages,
750 char *data,
751 loff_t off, size_t len);
752extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
860extern int ceph_open(struct inode *inode, struct file *file); 753extern int ceph_open(struct inode *inode, struct file *file);
861extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, 754extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
862 struct nameidata *nd, int mode, 755 struct nameidata *nd, int mode,
863 int locked_dir); 756 int locked_dir);
864extern int ceph_release(struct inode *inode, struct file *filp); 757extern int ceph_release(struct inode *inode, struct file *filp);
865extern void ceph_release_page_vector(struct page **pages, int num_pages);
866 758
867/* dir.c */ 759/* dir.c */
868extern const struct file_operations ceph_dir_fops; 760extern const struct file_operations ceph_dir_fops;
@@ -892,12 +784,6 @@ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
892/* export.c */ 784/* export.c */
893extern const struct export_operations ceph_export_ops; 785extern const struct export_operations ceph_export_ops;
894 786
895/* debugfs.c */
896extern int ceph_debugfs_init(void);
897extern void ceph_debugfs_cleanup(void);
898extern int ceph_debugfs_client_init(struct ceph_client *client);
899extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
900
901/* locks.c */ 787/* locks.c */
902extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); 788extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
903extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); 789extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
@@ -914,4 +800,8 @@ static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
914 return NULL; 800 return NULL;
915} 801}
916 802
803/* debugfs.c */
804extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
805extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
806
917#endif /* _FS_CEPH_SUPER_H */ 807#endif /* _FS_CEPH_SUPER_H */
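
default_congestion_kb() above scales the writeback congestion threshold with available memory (16 * int_sqrt(totalram_pages) pages, capped at 256 MB), following the table copied from NFS. A userspace sketch of the same arithmetic, assuming 4 KB pages so that PAGE_SHIFT - 10 == 2; the truncating integer square root puts the results within a rounding step of the table in the comment.

#include <stdio.h>

/* truncating integer sqrt, standing in for the kernel's int_sqrt() */
static unsigned long int_sqrt(unsigned long x)
{
        unsigned long r = 0;
        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

static unsigned long congestion_kb(unsigned long totalram_pages)
{
        /* 16*sqrt(pages) pages -> KB; assumes 4 KB pages (PAGE_SHIFT == 12) */
        unsigned long kb = (16 * int_sqrt(totalram_pages)) << (12 - 10);
        if (kb > 256 * 1024)            /* limit the default to 256 MB */
                kb = 256 * 1024;
        return kb;
}

int main(void)
{
        unsigned long mb;
        for (mb = 64; mb <= 16384; mb *= 2)     /* 64 MB .. 16 GB */
                printf("%6luMB: %luk\n", mb, congestion_kb(mb * 256));
        return 0;
}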
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 9578af610b73..6e12a6ba5f79 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1,6 +1,9 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2
2#include "super.h" 3#include "super.h"
3#include "decode.h" 4#include "mds_client.h"
5
6#include <linux/ceph/decode.h>
4 7
5#include <linux/xattr.h> 8#include <linux/xattr.h>
6#include <linux/slab.h> 9#include <linux/slab.h>
@@ -620,12 +623,12 @@ out:
620static int ceph_sync_setxattr(struct dentry *dentry, const char *name, 623static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
621 const char *value, size_t size, int flags) 624 const char *value, size_t size, int flags)
622{ 625{
623 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 626 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
624 struct inode *inode = dentry->d_inode; 627 struct inode *inode = dentry->d_inode;
625 struct ceph_inode_info *ci = ceph_inode(inode); 628 struct ceph_inode_info *ci = ceph_inode(inode);
626 struct inode *parent_inode = dentry->d_parent->d_inode; 629 struct inode *parent_inode = dentry->d_parent->d_inode;
627 struct ceph_mds_request *req; 630 struct ceph_mds_request *req;
628 struct ceph_mds_client *mdsc = &client->mdsc; 631 struct ceph_mds_client *mdsc = fsc->mdsc;
629 int err; 632 int err;
630 int i, nr_pages; 633 int i, nr_pages;
631 struct page **pages = NULL; 634 struct page **pages = NULL;
@@ -713,10 +716,9 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
713 716
714 /* preallocate memory for xattr name, value, index node */ 717 /* preallocate memory for xattr name, value, index node */
715 err = -ENOMEM; 718 err = -ENOMEM;
716 newname = kmalloc(name_len + 1, GFP_NOFS); 719 newname = kmemdup(name, name_len + 1, GFP_NOFS);
717 if (!newname) 720 if (!newname)
718 goto out; 721 goto out;
719 memcpy(newname, name, name_len + 1);
720 722
721 if (val_len) { 723 if (val_len) {
722 newval = kmalloc(val_len + 1, GFP_NOFS); 724 newval = kmalloc(val_len + 1, GFP_NOFS);
@@ -777,8 +779,8 @@ out:
777 779
778static int ceph_send_removexattr(struct dentry *dentry, const char *name) 780static int ceph_send_removexattr(struct dentry *dentry, const char *name)
779{ 781{
780 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 782 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
781 struct ceph_mds_client *mdsc = &client->mdsc; 783 struct ceph_mds_client *mdsc = fsc->mdsc;
782 struct inode *inode = dentry->d_inode; 784 struct inode *inode = dentry->d_inode;
783 struct inode *parent_inode = dentry->d_parent->d_inode; 785 struct inode *parent_inode = dentry->d_parent->d_inode;
784 struct ceph_mds_request *req; 786 struct ceph_mds_request *req;
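
The setxattr hunk above swaps an open-coded kmalloc() + memcpy() for kmemdup(), which allocates and copies in one call. A userspace sketch of the same idiom, using a hypothetical memdup() helper in place of the kernel API:

#include <stdlib.h>
#include <string.h>

/* userspace stand-in for the kernel's kmemdup(src, len, gfp) */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        const char name[] = "user.comment";
        /* one call replaces malloc() + NULL check + memcpy() */
        char *newname = memdup(name, sizeof(name));

        if (!newname)
                return 1;
        free(newname);
        return 0;
}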
diff --git a/fs/exec.c b/fs/exec.c
index 828dd2461d6b..6d2b6f936858 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2014,3 +2014,43 @@ fail_creds:
2014fail: 2014fail:
2015 return; 2015 return;
2016} 2016}
2017
2018/*
2019 * Core dumping helper functions. These are the only things you should
2020 * do on a core-file: use only these functions to write out all the
2021 * necessary info.
2022 */
2023int dump_write(struct file *file, const void *addr, int nr)
2024{
2025 return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
2026}
2027EXPORT_SYMBOL(dump_write);
2028
2029int dump_seek(struct file *file, loff_t off)
2030{
2031 int ret = 1;
2032
2033 if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
2034 if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
2035 return 0;
2036 } else {
2037 char *buf = (char *)get_zeroed_page(GFP_KERNEL);
2038
2039 if (!buf)
2040 return 0;
2041 while (off > 0) {
2042 unsigned long n = off;
2043
2044 if (n > PAGE_SIZE)
2045 n = PAGE_SIZE;
2046 if (!dump_write(file, buf, n)) {
2047 ret = 0;
2048 break;
2049 }
2050 off -= n;
2051 }
2052 free_page((unsigned long)buf);
2053 }
2054 return ret;
2055}
2056EXPORT_SYMBOL(dump_seek);
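
dump_write() bounds-checks the source and writes it out; dump_seek() skips a hole, falling back to writing zero-filled pages when the target (a pipe, for instance) cannot llseek(). A userspace sketch of that fallback follows; the names are hypothetical and this illustrates the pattern, not the kernel code itself.

#include <stdio.h>
#include <string.h>

/* If the target can seek, seek; otherwise emit zero-filled chunks,
 * mirroring dump_seek()'s get_zeroed_page() fallback above. */
static int dump_skip(FILE *f, long off, int seekable)
{
        if (seekable)
                return fseek(f, off, SEEK_CUR) == 0;

        char zeros[4096];
        memset(zeros, 0, sizeof(zeros));
        while (off > 0) {
                size_t n = off > (long)sizeof(zeros) ? sizeof(zeros)
                                                     : (size_t)off;
                if (fwrite(zeros, 1, n, f) != n)
                        return 0;
                off -= n;
        }
        return 1;
}

int main(void)
{
        FILE *f = fopen("core.sketch", "wb");

        if (!f)
                return 1;
        fwrite("HDR", 1, 3, f);         /* header, then a hole */
        dump_skip(f, 8192, 0);          /* pretend the target can't seek */
        fwrite("SEG", 1, 3, f);
        fclose(f);
        return 0;
}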
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index eb7368ebd8cd..3eadd97324b1 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -54,6 +54,9 @@ struct page_collect {
54 unsigned nr_pages; 54 unsigned nr_pages;
55 unsigned long length; 55 unsigned long length;
56 loff_t pg_first; /* keep 64bit also in 32-arches */ 56 loff_t pg_first; /* keep 64bit also in 32-arches */
57 bool read_4_write; /* This means two things: that the read is sync
58 * and the pages should not be unlocked.
59 */
57}; 60};
58 61
59static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, 62static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
@@ -71,6 +74,7 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
71 pcol->nr_pages = 0; 74 pcol->nr_pages = 0;
72 pcol->length = 0; 75 pcol->length = 0;
73 pcol->pg_first = -1; 76 pcol->pg_first = -1;
77 pcol->read_4_write = false;
74} 78}
75 79
76static void _pcol_reset(struct page_collect *pcol) 80static void _pcol_reset(struct page_collect *pcol)
@@ -347,7 +351,8 @@ static int readpage_strip(void *data, struct page *page)
347 if (PageError(page)) 351 if (PageError(page))
348 ClearPageError(page); 352 ClearPageError(page);
349 353
350 unlock_page(page); 354 if (!pcol->read_4_write)
355 unlock_page(page);
351 EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page," 356 EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
352 " splitting\n", inode->i_ino, page->index); 357 " splitting\n", inode->i_ino, page->index);
353 358
@@ -428,6 +433,7 @@ static int _readpage(struct page *page, bool is_sync)
428 /* readpage_strip might call read_exec(,is_sync==false) at several 433 /* readpage_strip might call read_exec(,is_sync==false) at several
429 * places but not if we have a single page. 434 * places but not if we have a single page.
430 */ 435 */
436 pcol.read_4_write = is_sync;
431 ret = readpage_strip(&pcol, page); 437 ret = readpage_strip(&pcol, page);
432 if (ret) { 438 if (ret) {
433 EXOFS_ERR("_readpage => %d\n", ret); 439 EXOFS_ERR("_readpage => %d\n", ret);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index cc9665522148..c465ae066c62 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,6 @@
1config GFS2_FS 1config GFS2_FS
2 tristate "GFS2 file system support" 2 tristate "GFS2 file system support"
3 depends on EXPERIMENTAL && (64BIT || LBDAF) 3 depends on (64BIT || LBDAF)
4 select DLM if GFS2_FS_LOCKING_DLM 4 select DLM if GFS2_FS_LOCKING_DLM
5 select CONFIGFS_FS if GFS2_FS_LOCKING_DLM 5 select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
6 select SYSFS if GFS2_FS_LOCKING_DLM 6 select SYSFS if GFS2_FS_LOCKING_DLM
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 194fe16d8418..6b24afb96aae 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -36,8 +36,8 @@
36#include "glops.h" 36#include "glops.h"
37 37
38 38
39static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, 39void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
40 unsigned int from, unsigned int to) 40 unsigned int from, unsigned int to)
41{ 41{
42 struct buffer_head *head = page_buffers(page); 42 struct buffer_head *head = page_buffers(page);
43 unsigned int bsize = head->b_size; 43 unsigned int bsize = head->b_size;
@@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
615 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 615 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
616 int alloc_required; 616 int alloc_required;
617 int error = 0; 617 int error = 0;
618 struct gfs2_alloc *al; 618 struct gfs2_alloc *al = NULL;
619 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 619 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
620 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 620 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
621 unsigned to = from + len; 621 unsigned to = from + len;
@@ -663,6 +663,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
663 rblocks += RES_STATFS + RES_QUOTA; 663 rblocks += RES_STATFS + RES_QUOTA;
664 if (&ip->i_inode == sdp->sd_rindex) 664 if (&ip->i_inode == sdp->sd_rindex)
665 rblocks += 2 * RES_STATFS; 665 rblocks += 2 * RES_STATFS;
666 if (alloc_required)
667 rblocks += gfs2_rg_blocks(al);
666 668
667 error = gfs2_trans_begin(sdp, rblocks, 669 error = gfs2_trans_begin(sdp, rblocks,
668 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 670 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -696,13 +698,11 @@ out:
696 698
697 page_cache_release(page); 699 page_cache_release(page);
698 700
699 /* 701 gfs2_trans_end(sdp);
700 * XXX(truncate): the call below should probably be replaced with
701 * a call to the gfs2-specific truncate blocks helper to actually
702 * release disk blocks..
703 */
704 if (pos + len > ip->i_inode.i_size) 702 if (pos + len > ip->i_inode.i_size)
705 truncate_setsize(&ip->i_inode, ip->i_inode.i_size); 703 gfs2_trim_blocks(&ip->i_inode);
704 goto out_trans_fail;
705
706out_endtrans: 706out_endtrans:
707 gfs2_trans_end(sdp); 707 gfs2_trans_end(sdp);
708out_trans_fail: 708out_trans_fail:
@@ -802,10 +802,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
802 page_cache_release(page); 802 page_cache_release(page);
803 803
804 if (copied) { 804 if (copied) {
805 if (inode->i_size < to) { 805 if (inode->i_size < to)
806 i_size_write(inode, to); 806 i_size_write(inode, to);
807 ip->i_disksize = inode->i_size;
808 }
809 gfs2_dinode_out(ip, di); 807 gfs2_dinode_out(ip, di);
810 mark_inode_dirty(inode); 808 mark_inode_dirty(inode);
811 } 809 }
@@ -876,8 +874,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
876 874
877 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 875 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
878 if (ret > 0) { 876 if (ret > 0) {
879 if (inode->i_size > ip->i_disksize)
880 ip->i_disksize = inode->i_size;
881 gfs2_dinode_out(ip, dibh->b_data); 877 gfs2_dinode_out(ip, dibh->b_data);
882 mark_inode_dirty(inode); 878 mark_inode_dirty(inode);
883 } 879 }
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6f482809d1a3..5476c066d4ee 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -50,7 +50,7 @@ struct strip_mine {
50 * @ip: the inode 50 * @ip: the inode
51 * @dibh: the dinode buffer 51 * @dibh: the dinode buffer
52 * @block: the block number that was allocated 52 * @block: the block number that was allocated
53 * @private: any locked page held by the caller process 53 * @page: The (optional) page. This is looked up if @page is NULL
54 * 54 *
55 * Returns: errno 55 * Returns: errno
56 */ 56 */
@@ -109,8 +109,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
109/** 109/**
110 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big 110 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
111 * @ip: The GFS2 inode to unstuff 111 * @ip: The GFS2 inode to unstuff
112 * @unstuffer: the routine that handles unstuffing a non-zero length file 112 * @page: The (optional) page. This is looked up if the @page is NULL
113 * @private: private data for the unstuffer
114 * 113 *
115 * This routine unstuffs a dinode and returns it to a "normal" state such 114 * This routine unstuffs a dinode and returns it to a "normal" state such
116 * that the height can be grown in the traditional way. 115 * that the height can be grown in the traditional way.
@@ -132,7 +131,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
132 if (error) 131 if (error)
133 goto out; 132 goto out;
134 133
135 if (ip->i_disksize) { 134 if (i_size_read(&ip->i_inode)) {
136 /* Get a free block, fill it with the stuffed data, 135 /* Get a free block, fill it with the stuffed data,
137 and write it out to disk */ 136 and write it out to disk */
138 137
@@ -161,7 +160,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
161 di = (struct gfs2_dinode *)dibh->b_data; 160 di = (struct gfs2_dinode *)dibh->b_data;
162 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 161 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
163 162
164 if (ip->i_disksize) { 163 if (i_size_read(&ip->i_inode)) {
165 *(__be64 *)(di + 1) = cpu_to_be64(block); 164 *(__be64 *)(di + 1) = cpu_to_be64(block);
166 gfs2_add_inode_blocks(&ip->i_inode, 1); 165 gfs2_add_inode_blocks(&ip->i_inode, 1);
167 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 166 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -885,83 +884,14 @@ out:
885} 884}
886 885
887/** 886/**
888 * do_grow - Make a file look bigger than it is
889 * @ip: the inode
890 * @size: the size to set the file to
891 *
892 * Called with an exclusive lock on @ip.
893 *
894 * Returns: errno
895 */
896
897static int do_grow(struct gfs2_inode *ip, u64 size)
898{
899 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
900 struct gfs2_alloc *al;
901 struct buffer_head *dibh;
902 int error;
903
904 al = gfs2_alloc_get(ip);
905 if (!al)
906 return -ENOMEM;
907
908 error = gfs2_quota_lock_check(ip);
909 if (error)
910 goto out;
911
912 al->al_requested = sdp->sd_max_height + RES_DATA;
913
914 error = gfs2_inplace_reserve(ip);
915 if (error)
916 goto out_gunlock_q;
917
918 error = gfs2_trans_begin(sdp,
919 sdp->sd_max_height + al->al_rgd->rd_length +
920 RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0);
921 if (error)
922 goto out_ipres;
923
924 error = gfs2_meta_inode_buffer(ip, &dibh);
925 if (error)
926 goto out_end_trans;
927
928 if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
929 if (gfs2_is_stuffed(ip)) {
930 error = gfs2_unstuff_dinode(ip, NULL);
931 if (error)
932 goto out_brelse;
933 }
934 }
935
936 ip->i_disksize = size;
937 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
938 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
939 gfs2_dinode_out(ip, dibh->b_data);
940
941out_brelse:
942 brelse(dibh);
943out_end_trans:
944 gfs2_trans_end(sdp);
945out_ipres:
946 gfs2_inplace_release(ip);
947out_gunlock_q:
948 gfs2_quota_unlock(ip);
949out:
950 gfs2_alloc_put(ip);
951 return error;
952}
953
954
955/**
956 * gfs2_block_truncate_page - Deal with zeroing out data for truncate 887 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
957 * 888 *
958 * This is partly borrowed from ext3. 889 * This is partly borrowed from ext3.
959 */ 890 */
960static int gfs2_block_truncate_page(struct address_space *mapping) 891static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
961{ 892{
962 struct inode *inode = mapping->host; 893 struct inode *inode = mapping->host;
963 struct gfs2_inode *ip = GFS2_I(inode); 894 struct gfs2_inode *ip = GFS2_I(inode);
964 loff_t from = inode->i_size;
965 unsigned long index = from >> PAGE_CACHE_SHIFT; 895 unsigned long index = from >> PAGE_CACHE_SHIFT;
966 unsigned offset = from & (PAGE_CACHE_SIZE-1); 896 unsigned offset = from & (PAGE_CACHE_SIZE-1);
967 unsigned blocksize, iblock, length, pos; 897 unsigned blocksize, iblock, length, pos;
@@ -1023,9 +953,11 @@ unlock:
1023 return err; 953 return err;
1024} 954}
1025 955
1026static int trunc_start(struct gfs2_inode *ip, u64 size) 956static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
1027{ 957{
1028 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 958 struct gfs2_inode *ip = GFS2_I(inode);
959 struct gfs2_sbd *sdp = GFS2_SB(inode);
960 struct address_space *mapping = inode->i_mapping;
1029 struct buffer_head *dibh; 961 struct buffer_head *dibh;
1030 int journaled = gfs2_is_jdata(ip); 962 int journaled = gfs2_is_jdata(ip);
1031 int error; 963 int error;
@@ -1039,31 +971,26 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
1039 if (error) 971 if (error)
1040 goto out; 972 goto out;
1041 973
974 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
975
1042 if (gfs2_is_stuffed(ip)) { 976 if (gfs2_is_stuffed(ip)) {
1043 u64 dsize = size + sizeof(struct gfs2_dinode); 977 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1044 ip->i_disksize = size;
1045 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1046 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1047 gfs2_dinode_out(ip, dibh->b_data);
1048 if (dsize > dibh->b_size)
1049 dsize = dibh->b_size;
1050 gfs2_buffer_clear_tail(dibh, dsize);
1051 error = 1;
1052 } else { 978 } else {
1053 if (size & (u64)(sdp->sd_sb.sb_bsize - 1)) 979 if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
1054 error = gfs2_block_truncate_page(ip->i_inode.i_mapping); 980 error = gfs2_block_truncate_page(mapping, newsize);
1055 981 if (error)
1056 if (!error) { 982 goto out_brelse;
1057 ip->i_disksize = size;
1058 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1059 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1060 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1061 gfs2_dinode_out(ip, dibh->b_data);
1062 } 983 }
984 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1063 } 985 }
1064 986
1065 brelse(dibh); 987 i_size_write(inode, newsize);
988 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
989 gfs2_dinode_out(ip, dibh->b_data);
1066 990
991 truncate_pagecache(inode, oldsize, newsize);
992out_brelse:
993 brelse(dibh);
1067out: 994out:
1068 gfs2_trans_end(sdp); 995 gfs2_trans_end(sdp);
1069 return error; 996 return error;
@@ -1123,7 +1050,7 @@ static int trunc_end(struct gfs2_inode *ip)
1123 if (error) 1050 if (error)
1124 goto out; 1051 goto out;
1125 1052
1126 if (!ip->i_disksize) { 1053 if (!i_size_read(&ip->i_inode)) {
1127 ip->i_height = 0; 1054 ip->i_height = 0;
1128 ip->i_goal = ip->i_no_addr; 1055 ip->i_goal = ip->i_no_addr;
1129 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1056 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
@@ -1143,92 +1070,154 @@ out:
1143 1070
1144/** 1071/**
1145 * do_shrink - make a file smaller 1072 * do_shrink - make a file smaller
1146 * @ip: the inode 1073 * @inode: the inode
1147 * @size: the size to make the file 1074 * @oldsize: the current inode size
1148 * @truncator: function to truncate the last partial block 1075 * @newsize: the size to make the file
1149 * 1076 *
1150 * Called with an exclusive lock on @ip. 1077 * Called with an exclusive lock on @inode. The @newsize must
1078 * be equal to or smaller than the current inode size.
1151 * 1079 *
1152 * Returns: errno 1080 * Returns: errno
1153 */ 1081 */
1154 1082
1155static int do_shrink(struct gfs2_inode *ip, u64 size) 1083static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1156{ 1084{
1085 struct gfs2_inode *ip = GFS2_I(inode);
1157 int error; 1086 int error;
1158 1087
1159 error = trunc_start(ip, size); 1088 error = trunc_start(inode, oldsize, newsize);
1160 if (error < 0) 1089 if (error < 0)
1161 return error; 1090 return error;
1162 if (error > 0) 1091 if (gfs2_is_stuffed(ip))
1163 return 0; 1092 return 0;
1164 1093
1165 error = trunc_dealloc(ip, size); 1094 error = trunc_dealloc(ip, newsize);
1166 if (!error) 1095 if (error == 0)
1167 error = trunc_end(ip); 1096 error = trunc_end(ip);
1168 1097
1169 return error; 1098 return error;
1170} 1099}
1171 1100
1172static int do_touch(struct gfs2_inode *ip, u64 size) 1101void gfs2_trim_blocks(struct inode *inode)
1173{ 1102{
1174 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1103 u64 size = inode->i_size;
1104 int ret;
1105
1106 ret = do_shrink(inode, size, size);
1107 WARN_ON(ret != 0);
1108}
1109
1110/**
1111 * do_grow - Touch and update inode size
1112 * @inode: The inode
1113 * @size: The new size
1114 *
1115 * This function updates the timestamps on the inode and
1116 * may also increase the size of the inode. This function
1117 * must not be called with @size any smaller than the current
1118 * inode size.
1119 *
1120 * Although it is not strictly required to unstuff files here,
1121 * earlier versions of GFS2 have a bug in the stuffed file reading
1122 * code which will result in a buffer overrun if the size is larger
1123 * than the max stuffed file size. In order to prevent this from
1124 * occurring, such files are unstuffed, but in other cases we can
1125 * just update the inode size directly.
1126 *
1127 * Returns: 0 on success, or -ve on error
1128 */
1129
1130static int do_grow(struct inode *inode, u64 size)
1131{
1132 struct gfs2_inode *ip = GFS2_I(inode);
1133 struct gfs2_sbd *sdp = GFS2_SB(inode);
1175 struct buffer_head *dibh; 1134 struct buffer_head *dibh;
1135 struct gfs2_alloc *al = NULL;
1176 int error; 1136 int error;
1177 1137
1178 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 1138 if (gfs2_is_stuffed(ip) &&
1139 (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
1140 al = gfs2_alloc_get(ip);
1141 if (al == NULL)
1142 return -ENOMEM;
1143
1144 error = gfs2_quota_lock_check(ip);
1145 if (error)
1146 goto do_grow_alloc_put;
1147
1148 al->al_requested = 1;
1149 error = gfs2_inplace_reserve(ip);
1150 if (error)
1151 goto do_grow_qunlock;
1152 }
1153
1154 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
1179 if (error) 1155 if (error)
1180 return error; 1156 goto do_grow_release;
1181 1157
1182 down_write(&ip->i_rw_mutex); 1158 if (al) {
1159 error = gfs2_unstuff_dinode(ip, NULL);
1160 if (error)
1161 goto do_end_trans;
1162 }
1183 1163
1184 error = gfs2_meta_inode_buffer(ip, &dibh); 1164 error = gfs2_meta_inode_buffer(ip, &dibh);
1185 if (error) 1165 if (error)
1186 goto do_touch_out; 1166 goto do_end_trans;
1187 1167
1168 i_size_write(inode, size);
1188 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 1169 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1189 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1170 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1190 gfs2_dinode_out(ip, dibh->b_data); 1171 gfs2_dinode_out(ip, dibh->b_data);
1191 brelse(dibh); 1172 brelse(dibh);
1192 1173
1193do_touch_out: 1174do_end_trans:
1194 up_write(&ip->i_rw_mutex);
1195 gfs2_trans_end(sdp); 1175 gfs2_trans_end(sdp);
1176do_grow_release:
1177 if (al) {
1178 gfs2_inplace_release(ip);
1179do_grow_qunlock:
1180 gfs2_quota_unlock(ip);
1181do_grow_alloc_put:
1182 gfs2_alloc_put(ip);
1183 }
1196 return error; 1184 return error;
1197} 1185}
1198 1186
1199/** 1187/**
1200 * gfs2_truncatei - make a file a given size 1188 * gfs2_setattr_size - make a file a given size
1201 * @ip: the inode 1189 * @inode: the inode
1202 * @size: the size to make the file 1190 * @newsize: the size to make the file
1203 * @truncator: function to truncate the last partial block
1204 * 1191 *
1205 * The file size can grow, shrink, or stay the same size. 1192 * The file size can grow, shrink, or stay the same size. This
1193 * is called holding i_mutex and an exclusive glock on the inode
1194 * in question.
1206 * 1195 *
1207 * Returns: errno 1196 * Returns: errno
1208 */ 1197 */
1209 1198
1210int gfs2_truncatei(struct gfs2_inode *ip, u64 size) 1199int gfs2_setattr_size(struct inode *inode, u64 newsize)
1211{ 1200{
1212 int error; 1201 int ret;
1202 u64 oldsize;
1213 1203
1214 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode))) 1204 BUG_ON(!S_ISREG(inode->i_mode));
1215 return -EINVAL;
1216 1205
1217 if (size > ip->i_disksize) 1206 ret = inode_newsize_ok(inode, newsize);
1218 error = do_grow(ip, size); 1207 if (ret)
1219 else if (size < ip->i_disksize) 1208 return ret;
1220 error = do_shrink(ip, size);
1221 else
1222 /* update time stamps */
1223 error = do_touch(ip, size);
1224 1209
1225 return error; 1210 oldsize = inode->i_size;
1211 if (newsize >= oldsize)
1212 return do_grow(inode, newsize);
1213
1214 return do_shrink(inode, oldsize, newsize);
1226} 1215}
1227 1216
1228int gfs2_truncatei_resume(struct gfs2_inode *ip) 1217int gfs2_truncatei_resume(struct gfs2_inode *ip)
1229{ 1218{
1230 int error; 1219 int error;
1231 error = trunc_dealloc(ip, ip->i_disksize); 1220 error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1232 if (!error) 1221 if (!error)
1233 error = trunc_end(ip); 1222 error = trunc_end(ip);
1234 return error; 1223 return error;
@@ -1269,7 +1258,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1269 1258
1270 shift = sdp->sd_sb.sb_bsize_shift; 1259 shift = sdp->sd_sb.sb_bsize_shift;
1271 BUG_ON(gfs2_is_dir(ip)); 1260 BUG_ON(gfs2_is_dir(ip));
1272 end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; 1261 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
1273 lblock = offset >> shift; 1262 lblock = offset >> shift;
1274 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 1263 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1275 if (lblock_stop > end_of_file) 1264 if (lblock_stop > end_of_file)
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index a20a5213135a..42fea03e2bd9 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -44,14 +44,16 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
44 } 44 }
45} 45}
46 46
47int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page); 47extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
48int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create); 48extern int gfs2_block_map(struct inode *inode, sector_t lblock,
49int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen); 49 struct buffer_head *bh, int create);
50 50extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
51int gfs2_truncatei(struct gfs2_inode *ip, u64 size); 51 u64 *dblock, unsigned *extlen);
52int gfs2_truncatei_resume(struct gfs2_inode *ip); 52extern int gfs2_setattr_size(struct inode *inode, u64 size);
53int gfs2_file_dealloc(struct gfs2_inode *ip); 53extern void gfs2_trim_blocks(struct inode *inode);
54int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 54extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
55 unsigned int len); 55extern int gfs2_file_dealloc(struct gfs2_inode *ip);
56extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
57 unsigned int len);
56 58
57#endif /* __BMAP_DOT_H__ */ 59#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index bb7907bde3d8..6798755b3858 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -49,7 +49,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
49 ip = GFS2_I(inode); 49 ip = GFS2_I(inode);
50 } 50 }
51 51
52 if (sdp->sd_args.ar_localcaching) 52 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
53 goto valid; 53 goto valid;
54 54
55 had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); 55 had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index b9dd88a78dd4..5c356d09c321 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -79,6 +79,9 @@
79#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1) 79#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
80#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1)) 80#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
81 81
82struct qstr gfs2_qdot __read_mostly;
83struct qstr gfs2_qdotdot __read_mostly;
84
82typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len, 85typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
83 u64 leaf_no, void *data); 86 u64 leaf_no, void *data);
84typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent, 87typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
@@ -127,8 +130,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
127 130
128 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 131 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
129 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 132 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
130 if (ip->i_disksize < offset + size) 133 if (ip->i_inode.i_size < offset + size)
131 ip->i_disksize = offset + size; 134 i_size_write(&ip->i_inode, offset + size);
132 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 135 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
133 gfs2_dinode_out(ip, dibh->b_data); 136 gfs2_dinode_out(ip, dibh->b_data);
134 137
@@ -225,8 +228,8 @@ out:
225 if (error) 228 if (error)
226 return error; 229 return error;
227 230
228 if (ip->i_disksize < offset + copied) 231 if (ip->i_inode.i_size < offset + copied)
229 ip->i_disksize = offset + copied; 232 i_size_write(&ip->i_inode, offset + copied);
230 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; 233 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
231 234
232 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 235 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
@@ -275,12 +278,13 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
275 unsigned int o; 278 unsigned int o;
276 int copied = 0; 279 int copied = 0;
277 int error = 0; 280 int error = 0;
281 u64 disksize = i_size_read(&ip->i_inode);
278 282
279 if (offset >= ip->i_disksize) 283 if (offset >= disksize)
280 return 0; 284 return 0;
281 285
282 if (offset + size > ip->i_disksize) 286 if (offset + size > disksize)
283 size = ip->i_disksize - offset; 287 size = disksize - offset;
284 288
285 if (!size) 289 if (!size)
286 return 0; 290 return 0;
@@ -727,7 +731,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
727 unsigned hsize = 1 << ip->i_depth; 731 unsigned hsize = 1 << ip->i_depth;
728 unsigned index; 732 unsigned index;
729 u64 ln; 733 u64 ln;
730 if (hsize * sizeof(u64) != ip->i_disksize) { 734 if (hsize * sizeof(u64) != i_size_read(inode)) {
731 gfs2_consist_inode(ip); 735 gfs2_consist_inode(ip);
732 return ERR_PTR(-EIO); 736 return ERR_PTR(-EIO);
733 } 737 }
@@ -879,7 +883,7 @@ static int dir_make_exhash(struct inode *inode)
879 for (x = sdp->sd_hash_ptrs; x--; lp++) 883 for (x = sdp->sd_hash_ptrs; x--; lp++)
880 *lp = cpu_to_be64(bn); 884 *lp = cpu_to_be64(bn);
881 885
882 dip->i_disksize = sdp->sd_sb.sb_bsize / 2; 886 i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
883 gfs2_add_inode_blocks(&dip->i_inode, 1); 887 gfs2_add_inode_blocks(&dip->i_inode, 1);
884 dip->i_diskflags |= GFS2_DIF_EXHASH; 888 dip->i_diskflags |= GFS2_DIF_EXHASH;
885 889
@@ -1057,11 +1061,12 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1057 u64 *buf; 1061 u64 *buf;
1058 u64 *from, *to; 1062 u64 *from, *to;
1059 u64 block; 1063 u64 block;
1064 u64 disksize = i_size_read(&dip->i_inode);
1060 int x; 1065 int x;
1061 int error = 0; 1066 int error = 0;
1062 1067
1063 hsize = 1 << dip->i_depth; 1068 hsize = 1 << dip->i_depth;
1064 if (hsize * sizeof(u64) != dip->i_disksize) { 1069 if (hsize * sizeof(u64) != disksize) {
1065 gfs2_consist_inode(dip); 1070 gfs2_consist_inode(dip);
1066 return -EIO; 1071 return -EIO;
1067 } 1072 }
@@ -1072,7 +1077,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1072 if (!buf) 1077 if (!buf)
1073 return -ENOMEM; 1078 return -ENOMEM;
1074 1079
1075 for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { 1080 for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) {
1076 error = gfs2_dir_read_data(dip, (char *)buf, 1081 error = gfs2_dir_read_data(dip, (char *)buf,
1077 block * sdp->sd_hash_bsize, 1082 block * sdp->sd_hash_bsize,
1078 sdp->sd_hash_bsize, 1); 1083 sdp->sd_hash_bsize, 1);
@@ -1370,7 +1375,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1370 unsigned depth = 0; 1375 unsigned depth = 0;
1371 1376
1372 hsize = 1 << dip->i_depth; 1377 hsize = 1 << dip->i_depth;
1373 if (hsize * sizeof(u64) != dip->i_disksize) { 1378 if (hsize * sizeof(u64) != i_size_read(inode)) {
1374 gfs2_consist_inode(dip); 1379 gfs2_consist_inode(dip);
1375 return -EIO; 1380 return -EIO;
1376 } 1381 }
@@ -1784,7 +1789,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1784 int error = 0; 1789 int error = 0;
1785 1790
1786 hsize = 1 << dip->i_depth; 1791 hsize = 1 << dip->i_depth;
1787 if (hsize * sizeof(u64) != dip->i_disksize) { 1792 if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
1788 gfs2_consist_inode(dip); 1793 gfs2_consist_inode(dip);
1789 return -EIO; 1794 return -EIO;
1790 } 1795 }
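
The i_disksize conversions above leave the exhash invariant itself intact:
a directory at depth d stores exactly 1 << d leaf pointers of sizeof(u64)
bytes each, so the hash table must occupy precisely that many bytes of the
inode, and dir_double_exhash() writes every pointer out twice while
bumping the depth. A standalone model of both (illustrative C only;
on-disk GFS2 stores the pointers big-endian and copies them block by
block):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	unsigned int depth = 2;
	uint64_t hsize = 1ULL << depth;            /* 4 slots */
	uint64_t *tab = calloc(hsize, sizeof(*tab));
	uint64_t *big = calloc(hsize * 2, sizeof(*big));
	uint64_t i;

	if (!tab || !big)
		return 1;

	for (i = 0; i < hsize; i++)
		tab[i] = 100 + i;                  /* fake leaf block numbers */

	/* the consistency check: table bytes must equal the inode size */
	assert(hsize * sizeof(uint64_t) == 32);

	/* the doubling step: every slot is duplicated, depth grows by one */
	for (i = 0; i < hsize; i++) {
		big[2 * i] = tab[i];
		big[2 * i + 1] = tab[i];
	}
	depth++;
	assert((1ULL << depth) * sizeof(uint64_t) ==
	       2 * hsize * sizeof(uint64_t));

	free(tab);
	free(big);
	return 0;
}
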
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 4f919440c3be..a98f644bd3df 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -17,23 +17,24 @@ struct inode;
17struct gfs2_inode; 17struct gfs2_inode;
18struct gfs2_inum; 18struct gfs2_inum;
19 19
20struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *filename); 20extern struct inode *gfs2_dir_search(struct inode *dir,
21int gfs2_dir_check(struct inode *dir, const struct qstr *filename, 21 const struct qstr *filename);
22 const struct gfs2_inode *ip); 22extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
23int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 23 const struct gfs2_inode *ip);
24 const struct gfs2_inode *ip, unsigned int type); 24extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
25int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 25 const struct gfs2_inode *ip, unsigned int type);
26int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, 26extern int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
27 filldir_t filldir); 27extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
28int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 28 filldir_t filldir);
29 const struct gfs2_inode *nip, unsigned int new_type); 29extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
30 const struct gfs2_inode *nip, unsigned int new_type);
30 31
31int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); 32extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
32 33
33int gfs2_diradd_alloc_required(struct inode *dir, 34extern int gfs2_diradd_alloc_required(struct inode *dir,
34 const struct qstr *filename); 35 const struct qstr *filename);
35int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, 36extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
36 struct buffer_head **bhp); 37 struct buffer_head **bhp);
37 38
38static inline u32 gfs2_disk_hash(const char *data, int len) 39static inline u32 gfs2_disk_hash(const char *data, int len)
39{ 40{
@@ -61,4 +62,7 @@ static inline void gfs2_qstr2dirent(const struct qstr *name, u16 reclen, struct
61 memcpy(dent + 1, name->name, name->len); 62 memcpy(dent + 1, name->name, name->len);
62} 63}
63 64
65extern struct qstr gfs2_qdot;
66extern struct qstr gfs2_qdotdot;
67
64#endif /* __DIR_DOT_H__ */ 68#endif /* __DIR_DOT_H__ */
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index dfe237a3f8ad..06d582732d34 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -126,16 +126,9 @@ static int gfs2_get_name(struct dentry *parent, char *name,
126 126
127static struct dentry *gfs2_get_parent(struct dentry *child) 127static struct dentry *gfs2_get_parent(struct dentry *child)
128{ 128{
129 struct qstr dotdot;
130 struct dentry *dentry; 129 struct dentry *dentry;
131 130
132 /* 131 dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
133 * XXX(hch): it would be a good idea to keep this around as a
134 * static variable.
135 */
136 gfs2_str2qstr(&dotdot, "..");
137
138 dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &dotdot, 1));
139 if (!IS_ERR(dentry)) 132 if (!IS_ERR(dentry))
140 dentry->d_op = &gfs2_dops; 133 dentry->d_op = &gfs2_dops;
141 return dentry; 134 return dentry;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4edd662c8232..237ee6a940df 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -382,8 +382,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
382 rblocks = RES_DINODE + ind_blocks; 382 rblocks = RES_DINODE + ind_blocks;
383 if (gfs2_is_jdata(ip)) 383 if (gfs2_is_jdata(ip))
384 rblocks += data_blocks ? data_blocks : 1; 384 rblocks += data_blocks ? data_blocks : 1;
385 if (ind_blocks || data_blocks) 385 if (ind_blocks || data_blocks) {
386 rblocks += RES_STATFS + RES_QUOTA; 386 rblocks += RES_STATFS + RES_QUOTA;
387 rblocks += gfs2_rg_blocks(al);
388 }
387 ret = gfs2_trans_begin(sdp, rblocks, 0); 389 ret = gfs2_trans_begin(sdp, rblocks, 0);
388 if (ret) 390 if (ret)
389 goto out_trans_fail; 391 goto out_trans_fail;
@@ -491,7 +493,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
491 goto fail; 493 goto fail;
492 494
493 if (!(file->f_flags & O_LARGEFILE) && 495 if (!(file->f_flags & O_LARGEFILE) &&
494 ip->i_disksize > MAX_NON_LFS) { 496 i_size_read(inode) > MAX_NON_LFS) {
495 error = -EOVERFLOW; 497 error = -EOVERFLOW;
496 goto fail_gunlock; 498 goto fail_gunlock;
497 } 499 }
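
For the gfs2_page_mkwrite() hunk, the reservation now includes the
resource-group blocks whenever anything is being allocated. The shape of
that computation in plain C, with placeholder RES_* constants (the
kernel's values differ) and gfs2_rg_blocks(al) modelled as a parameter:

#include <assert.h>

#define RES_DINODE 1	/* placeholder, not the kernel's value */
#define RES_STATFS 1	/* placeholder */
#define RES_QUOTA  2	/* placeholder */

static unsigned int reserve_blocks(unsigned int data_blocks,
				   unsigned int ind_blocks,
				   int is_jdata, unsigned int rg_blocks)
{
	unsigned int rblocks = RES_DINODE + ind_blocks;

	if (is_jdata)
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)	/* allocating: add alloc overhead */
		rblocks += RES_STATFS + RES_QUOTA + rg_blocks;
	return rblocks;
}

int main(void)
{
	/* nothing to allocate: only the dinode is journalled */
	assert(reserve_blocks(0, 0, 0, 5) == RES_DINODE);
	/* ordered-mode write allocating 4 data + 2 indirect blocks */
	assert(reserve_blocks(4, 2, 0, 5) == 1 + 2 + 1 + 2 + 5);
	return 0;
}
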
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9adf8f924e08..87778857f099 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -441,6 +441,8 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
441 else 441 else
442 gfs2_glock_put_nolock(gl); 442 gfs2_glock_put_nolock(gl);
443 } 443 }
444 if (held1 && held2 && list_empty(&gl->gl_holders))
445 clear_bit(GLF_QUEUED, &gl->gl_flags);
444 446
445 gl->gl_state = new_state; 447 gl->gl_state = new_state;
446 gl->gl_tchange = jiffies; 448 gl->gl_tchange = jiffies;
@@ -1012,6 +1014,7 @@ fail:
1012 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1014 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1013 insert_pt = &gh2->gh_list; 1015 insert_pt = &gh2->gh_list;
1014 } 1016 }
1017 set_bit(GLF_QUEUED, &gl->gl_flags);
1015 if (likely(insert_pt == NULL)) { 1018 if (likely(insert_pt == NULL)) {
1016 list_add_tail(&gh->gh_list, &gl->gl_holders); 1019 list_add_tail(&gh->gh_list, &gl->gl_holders);
1017 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1020 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
@@ -1310,10 +1313,12 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1310 1313
1311 gfs2_glock_hold(gl); 1314 gfs2_glock_hold(gl);
1312 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 1315 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1313 if (time_before(now, holdtime)) 1316 if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
1314 delay = holdtime - now; 1317 if (time_before(now, holdtime))
1315 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1318 delay = holdtime - now;
1316 delay = gl->gl_ops->go_min_hold_time; 1319 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1320 delay = gl->gl_ops->go_min_hold_time;
1321 }
1317 1322
1318 spin_lock(&gl->gl_spin); 1323 spin_lock(&gl->gl_spin);
1319 handle_callback(gl, state, delay); 1324 handle_callback(gl, state, delay);
@@ -1512,7 +1517,7 @@ static void clear_glock(struct gfs2_glock *gl)
1512 spin_unlock(&lru_lock); 1517 spin_unlock(&lru_lock);
1513 1518
1514 spin_lock(&gl->gl_spin); 1519 spin_lock(&gl->gl_spin);
1515 if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) 1520 if (gl->gl_state != LM_ST_UNLOCKED)
1516 handle_callback(gl, LM_ST_UNLOCKED, 0); 1521 handle_callback(gl, LM_ST_UNLOCKED, 0);
1517 spin_unlock(&gl->gl_spin); 1522 spin_unlock(&gl->gl_spin);
1518 gfs2_glock_hold(gl); 1523 gfs2_glock_hold(gl);
@@ -1660,6 +1665,8 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
1660 *p++ = 'I'; 1665 *p++ = 'I';
1661 if (test_bit(GLF_FROZEN, gflags)) 1666 if (test_bit(GLF_FROZEN, gflags))
1662 *p++ = 'F'; 1667 *p++ = 'F';
1668 if (test_bit(GLF_QUEUED, gflags))
1669 *p++ = 'q';
1663 *p = 0; 1670 *p = 0;
1664 return buf; 1671 return buf;
1665} 1672}
@@ -1776,10 +1783,12 @@ int __init gfs2_glock_init(void)
1776 } 1783 }
1777#endif 1784#endif
1778 1785
1779 glock_workqueue = create_workqueue("glock_workqueue"); 1786 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER |
1787 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
1780 if (IS_ERR(glock_workqueue)) 1788 if (IS_ERR(glock_workqueue))
1781 return PTR_ERR(glock_workqueue); 1789 return PTR_ERR(glock_workqueue);
1782 gfs2_delete_workqueue = create_workqueue("delete_workqueue"); 1790 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER |
1791 WQ_FREEZEABLE, 0);
1783 if (IS_ERR(gfs2_delete_workqueue)) { 1792 if (IS_ERR(gfs2_delete_workqueue)) {
1784 destroy_workqueue(glock_workqueue); 1793 destroy_workqueue(glock_workqueue);
1785 return PTR_ERR(gfs2_delete_workqueue); 1794 return PTR_ERR(gfs2_delete_workqueue);
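
The new GLF_QUEUED flag records whether a glock has had holders queued, so
that a remote callback only honours the minimum hold time for glocks that
were actually in use. A single-threaded userspace model of the
set/clear/test flow (the kernel uses atomic set_bit/clear_bit/test_bit on
gl_flags; this is only an analogue):

#include <assert.h>
#include <stdbool.h>

#define GLF_QUEUED (1UL << 12)

struct glock_model {
	unsigned long flags;
	int nholders;
};

static void add_holder(struct glock_model *gl)
{
	gl->flags |= GLF_QUEUED;		/* set_bit() in add_to_queue() */
	gl->nholders++;
}

static void state_change(struct glock_model *gl)
{
	if (gl->nholders == 0)			/* list_empty(&gl->gl_holders) */
		gl->flags &= ~GLF_QUEUED;	/* clear_bit() */
}

static bool callback_wants_delay(const struct glock_model *gl)
{
	return gl->flags & GLF_QUEUED;		/* test_bit() in gfs2_glock_cb() */
}

int main(void)
{
	struct glock_model gl = { 0, 0 };

	assert(!callback_wants_delay(&gl));	/* never queued: demote at once */
	add_holder(&gl);
	assert(callback_wants_delay(&gl));	/* in use: apply hold time */
	gl.nholders--;
	state_change(&gl);
	assert(!callback_wants_delay(&gl));
	return 0;
}
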
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2bda1911b156..db1c26d6d220 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -215,7 +215,7 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
215void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...); 215void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
216 216
217/** 217/**
218 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock 218 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
219 * @gl: the glock 219 * @gl: the glock
220 * @state: the state we're requesting 220 * @state: the state we're requesting
221 * @flags: the modifier flags 221 * @flags: the modifier flags
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 49f97d3bb690..0d149dcc04e5 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -262,13 +262,12 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
262 const struct gfs2_inode *ip = gl->gl_object; 262 const struct gfs2_inode *ip = gl->gl_object;
263 if (ip == NULL) 263 if (ip == NULL)
264 return 0; 264 return 0;
265 gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n", 265 gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
266 (unsigned long long)ip->i_no_formal_ino, 266 (unsigned long long)ip->i_no_formal_ino,
267 (unsigned long long)ip->i_no_addr, 267 (unsigned long long)ip->i_no_addr,
268 IF2DT(ip->i_inode.i_mode), ip->i_flags, 268 IF2DT(ip->i_inode.i_mode), ip->i_flags,
269 (unsigned int)ip->i_diskflags, 269 (unsigned int)ip->i_diskflags,
270 (unsigned long long)ip->i_inode.i_size, 270 (unsigned long long)i_size_read(&ip->i_inode));
271 (unsigned long long)ip->i_disksize);
272 return 0; 271 return 0;
273} 272}
274 273
@@ -453,7 +452,6 @@ const struct gfs2_glock_operations *gfs2_glops_list[] = {
453 [LM_TYPE_META] = &gfs2_meta_glops, 452 [LM_TYPE_META] = &gfs2_meta_glops,
454 [LM_TYPE_INODE] = &gfs2_inode_glops, 453 [LM_TYPE_INODE] = &gfs2_inode_glops,
455 [LM_TYPE_RGRP] = &gfs2_rgrp_glops, 454 [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
456 [LM_TYPE_NONDISK] = &gfs2_trans_glops,
457 [LM_TYPE_IOPEN] = &gfs2_iopen_glops, 455 [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
458 [LM_TYPE_FLOCK] = &gfs2_flock_glops, 456 [LM_TYPE_FLOCK] = &gfs2_flock_glops,
459 [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, 457 [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
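
The glops table hunk deletes a duplicated [LM_TYPE_NONDISK] designator. C
accepts a repeated designated initializer and the last one wins, so the
&gfs2_trans_glops entry was already dead code; removing it only removes
the ambiguity (and the warning under -Woverride-init). A standalone
demonstration:

#include <assert.h>
#include <string.h>

int main(void)
{
	const char *ops[4] = {
		[1] = "inode",
		[2] = "trans",		/* duplicated designator ... */
		[2] = "nondisk",	/* ... the later entry wins */
	};

	assert(strcmp(ops[2], "nondisk") == 0);
	assert(ops[0] == NULL && ops[3] == NULL);	/* untouched slots */
	return 0;
}
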
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index fdbf4b366fa5..764fbb49efc8 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -196,6 +196,7 @@ enum {
196 GLF_REPLY_PENDING = 9, 196 GLF_REPLY_PENDING = 9,
197 GLF_INITIAL = 10, 197 GLF_INITIAL = 10,
198 GLF_FROZEN = 11, 198 GLF_FROZEN = 11,
199 GLF_QUEUED = 12,
199}; 200};
200 201
201struct gfs2_glock { 202struct gfs2_glock {
@@ -267,7 +268,6 @@ struct gfs2_inode {
267 u64 i_no_formal_ino; 268 u64 i_no_formal_ino;
268 u64 i_generation; 269 u64 i_generation;
269 u64 i_eattr; 270 u64 i_eattr;
270 loff_t i_disksize;
271 unsigned long i_flags; /* GIF_... */ 271 unsigned long i_flags; /* GIF_... */
272 struct gfs2_glock *i_gl; /* Move into i_gh? */ 272 struct gfs2_glock *i_gl; /* Move into i_gh? */
273 struct gfs2_holder i_iopen_gh; 273 struct gfs2_holder i_iopen_gh;
@@ -416,11 +416,8 @@ struct gfs2_args {
416 char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */ 416 char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
417 char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */ 417 char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
418 unsigned int ar_spectator:1; /* Don't get a journal */ 418 unsigned int ar_spectator:1; /* Don't get a journal */
419 unsigned int ar_ignore_local_fs:1; /* Ignore optimisations */
420 unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */ 419 unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */
421 unsigned int ar_localcaching:1; /* Local caching */
422 unsigned int ar_debug:1; /* Oops on errors */ 420 unsigned int ar_debug:1; /* Oops on errors */
423 unsigned int ar_upgrade:1; /* Upgrade ondisk format */
424 unsigned int ar_posix_acl:1; /* Enable posix acls */ 421 unsigned int ar_posix_acl:1; /* Enable posix acls */
425 unsigned int ar_quota:2; /* off/account/on */ 422 unsigned int ar_quota:2; /* off/account/on */
426 unsigned int ar_suiddir:1; /* suiddir support */ 423 unsigned int ar_suiddir:1; /* suiddir support */
@@ -497,7 +494,7 @@ struct gfs2_sb_host {
497 */ 494 */
498 495
499struct lm_lockstruct { 496struct lm_lockstruct {
500 unsigned int ls_jid; 497 int ls_jid;
501 unsigned int ls_first; 498 unsigned int ls_first;
502 unsigned int ls_first_done; 499 unsigned int ls_first_done;
503 unsigned int ls_nodir; 500 unsigned int ls_nodir;
@@ -572,6 +569,7 @@ struct gfs2_sbd {
572 struct list_head sd_rindex_mru_list; 569 struct list_head sd_rindex_mru_list;
573 struct gfs2_rgrpd *sd_rindex_forward; 570 struct gfs2_rgrpd *sd_rindex_forward;
574 unsigned int sd_rgrps; 571 unsigned int sd_rgrps;
572 unsigned int sd_max_rg_data;
575 573
576 /* Journal index stuff */ 574 /* Journal index stuff */
577 575
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 08140f185a37..06370f8bd8cf 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -359,8 +359,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
359 * to do that. 359 * to do that.
360 */ 360 */
361 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); 361 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
362 ip->i_disksize = be64_to_cpu(str->di_size); 362 i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
363 i_size_write(&ip->i_inode, ip->i_disksize);
364 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); 363 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
365 atime.tv_sec = be64_to_cpu(str->di_atime); 364 atime.tv_sec = be64_to_cpu(str->di_atime);
366 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 365 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
@@ -1055,7 +1054,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1055 str->di_uid = cpu_to_be32(ip->i_inode.i_uid); 1054 str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
1056 str->di_gid = cpu_to_be32(ip->i_inode.i_gid); 1055 str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1057 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); 1056 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1058 str->di_size = cpu_to_be64(ip->i_disksize); 1057 str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
1059 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 1058 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
1060 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); 1059 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1061 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); 1060 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
@@ -1085,8 +1084,8 @@ void gfs2_dinode_print(const struct gfs2_inode *ip)
1085 (unsigned long long)ip->i_no_formal_ino); 1084 (unsigned long long)ip->i_no_formal_ino);
1086 printk(KERN_INFO " no_addr = %llu\n", 1085 printk(KERN_INFO " no_addr = %llu\n",
1087 (unsigned long long)ip->i_no_addr); 1086 (unsigned long long)ip->i_no_addr);
1088 printk(KERN_INFO " i_disksize = %llu\n", 1087 printk(KERN_INFO " i_size = %llu\n",
1089 (unsigned long long)ip->i_disksize); 1088 (unsigned long long)i_size_read(&ip->i_inode));
1090 printk(KERN_INFO " blocks = %llu\n", 1089 printk(KERN_INFO " blocks = %llu\n",
1091 (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); 1090 (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
1092 printk(KERN_INFO " i_goal = %llu\n", 1091 printk(KERN_INFO " i_goal = %llu\n",
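
The switch from ip->i_disksize to i_size_read()/i_size_write() is not just
renaming: loff_t is 64 bits, a bare load can tear on 32-bit SMP, and the
VFS accessors protect i_size with a sequence counter there. A simplified
seqcount-style model in C11 (the kernel's implementation differs; the
plain field access below is precisely what the counter protects, so treat
this as a teaching sketch rather than a race-free library):

#include <stdatomic.h>
#include <stdint.h>

struct sized {
	atomic_uint seq;	/* even: stable, odd: write in progress */
	uint64_t size;
};

static void size_write(struct sized *s, uint64_t v)
{
	atomic_fetch_add(&s->seq, 1);	/* becomes odd */
	s->size = v;
	atomic_fetch_add(&s->seq, 1);	/* becomes even again */
}

static uint64_t size_read(struct sized *s)
{
	unsigned int start;
	uint64_t v;

	do {				/* retry across concurrent writes */
		start = atomic_load(&s->seq);
		v = s->size;
	} while ((start & 1) || start != atomic_load(&s->seq));
	return v;
}

int main(void)
{
	struct sized s = { 0 };

	size_write(&s, 1ULL << 40);
	return size_read(&s) == (1ULL << 40) ? 0 : 1;
}
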
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 300ada3f21de..6720d7d5fbc6 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -19,6 +19,8 @@ extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
19extern int gfs2_internal_read(struct gfs2_inode *ip, 19extern int gfs2_internal_read(struct gfs2_inode *ip,
20 struct file_ra_state *ra_state, 20 struct file_ra_state *ra_state,
21 char *buf, loff_t *pos, unsigned size); 21 char *buf, loff_t *pos, unsigned size);
22extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
23 unsigned int from, unsigned int to);
22extern void gfs2_set_aops(struct inode *inode); 24extern void gfs2_set_aops(struct inode *inode);
23 25
24static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) 26static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
@@ -80,6 +82,19 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
80 dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr); 82 dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr);
81} 83}
82 84
85static inline int gfs2_check_internal_file_size(struct inode *inode,
86 u64 minsize, u64 maxsize)
87{
88 u64 size = i_size_read(inode);
89 if (size < minsize || size > maxsize)
90 goto err;
91 if (size & ((1 << inode->i_blkbits) - 1))
92 goto err;
93 return 0;
94err:
95 gfs2_consist_inode(GFS2_I(inode));
96 return -EIO;
97}
83 98
84extern void gfs2_set_iop(struct inode *inode); 99extern void gfs2_set_iop(struct inode *inode);
85extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 100extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
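
gfs2_check_internal_file_size() folds the repeated "within range and block
aligned" tests on internal files into one helper. The same checks in
standalone form, using the quota-change file's 1 byte .. 64 MiB limits as
the example (the blkbits value is hypothetical):

#include <assert.h>
#include <stdint.h>

static int check_internal_size(uint64_t size, uint64_t minsize,
			       uint64_t maxsize, unsigned int blkbits)
{
	if (size < minsize || size > maxsize)
		return -1;
	if (size & ((1ULL << blkbits) - 1))	/* low bits set: misaligned */
		return -1;
	return 0;
}

int main(void)
{
	/* 4096-byte blocks (blkbits == 12) */
	assert(check_internal_size(8192, 1, 64 << 20, 12) == 0);
	assert(check_internal_size(4097, 1, 64 << 20, 12) != 0); /* unaligned */
	assert(check_internal_size(0, 1, 64 << 20, 12) != 0);    /* too small */
	return 0;
}
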
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e0470ed34c2..1c09425b45fd 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -42,9 +42,9 @@ static void gdlm_ast(void *arg)
42 ret |= LM_OUT_CANCELED; 42 ret |= LM_OUT_CANCELED;
43 goto out; 43 goto out;
44 case -EAGAIN: /* Try lock fails */ 44 case -EAGAIN: /* Try lock fails */
45 case -EDEADLK: /* Deadlock detected */
45 goto out; 46 goto out;
46 case -EINVAL: /* Invalid */ 47 case -ETIMEDOUT: /* Canceled due to timeout */
47 case -ENOMEM: /* Out of memory */
48 ret |= LM_OUT_ERROR; 48 ret |= LM_OUT_ERROR;
49 goto out; 49 goto out;
50 case 0: /* Success */ 50 case 0: /* Success */
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index b1e9630eb46a..d7eb1e209aa8 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -24,6 +24,7 @@
24#include "glock.h" 24#include "glock.h"
25#include "quota.h" 25#include "quota.h"
26#include "recovery.h" 26#include "recovery.h"
27#include "dir.h"
27 28
28static struct shrinker qd_shrinker = { 29static struct shrinker qd_shrinker = {
29 .shrink = gfs2_shrink_qd_memory, 30 .shrink = gfs2_shrink_qd_memory,
@@ -78,6 +79,9 @@ static int __init init_gfs2_fs(void)
78{ 79{
79 int error; 80 int error;
80 81
82 gfs2_str2qstr(&gfs2_qdot, ".");
83 gfs2_str2qstr(&gfs2_qdotdot, "..");
84
81 error = gfs2_sys_init(); 85 error = gfs2_sys_init();
82 if (error) 86 if (error)
83 return error; 87 return error;
@@ -140,7 +144,7 @@ static int __init init_gfs2_fs(void)
140 144
141 error = -ENOMEM; 145 error = -ENOMEM;
142 gfs_recovery_wq = alloc_workqueue("gfs_recovery", 146 gfs_recovery_wq = alloc_workqueue("gfs_recovery",
143 WQ_NON_REENTRANT | WQ_RESCUER, 0); 147 WQ_RESCUER | WQ_FREEZEABLE, 0);
144 if (!gfs_recovery_wq) 148 if (!gfs_recovery_wq)
145 goto fail_wq; 149 goto fail_wq;
146 150
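
Setting up gfs2_qdot and gfs2_qdotdot once in init_gfs2_fs() replaces the
gfs2_str2qstr() calls previously scattered through dir.c, export.c and
ops_inode.c. The compute-once, read-mostly pattern in plain C (the hash
function here is a stand-in, not the kernel's dentry name hash):

#include <assert.h>

struct qstr_model {
	const char *name;
	unsigned int len;
	unsigned int hash;
};

static struct qstr_model qdot, qdotdot;	/* filled once, then read-only */

static unsigned int toy_hash(const char *s, unsigned int len)
{
	unsigned int h = 0;

	while (len--)
		h = h * 31 + (unsigned char)*s++;
	return h;
}

static void str2qstr(struct qstr_model *q, const char *s, unsigned int len)
{
	q->name = s;
	q->len = len;
	q->hash = toy_hash(s, len);
}

static void module_init_names(void)
{
	str2qstr(&qdot, ".", 1);
	str2qstr(&qdotdot, "..", 2);
}

int main(void)
{
	module_init_names();
	/* later lookups just pass &qdot / &qdotdot, no per-call setup */
	assert(qdot.len == 1 && qdotdot.len == 2);
	assert(qdot.hash == toy_hash(".", 1));
	return 0;
}
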
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4d4b1e8ac64c..aeafc233dc89 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -38,14 +38,6 @@
38#define DO 0 38#define DO 0
39#define UNDO 1 39#define UNDO 1
40 40
41static const u32 gfs2_old_fs_formats[] = {
42 0
43};
44
45static const u32 gfs2_old_multihost_formats[] = {
46 0
47};
48
49/** 41/**
50 * gfs2_tune_init - Fill a gfs2_tune structure with default values 42 * gfs2_tune_init - Fill a gfs2_tune structure with default values
51 * @gt: tune 43 * @gt: tune
@@ -135,8 +127,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
135 127
136static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent) 128static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
137{ 129{
138 unsigned int x;
139
140 if (sb->sb_magic != GFS2_MAGIC || 130 if (sb->sb_magic != GFS2_MAGIC ||
141 sb->sb_type != GFS2_METATYPE_SB) { 131 sb->sb_type != GFS2_METATYPE_SB) {
142 if (!silent) 132 if (!silent)
@@ -150,55 +140,9 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int sile
150 sb->sb_multihost_format == GFS2_FORMAT_MULTI) 140 sb->sb_multihost_format == GFS2_FORMAT_MULTI)
151 return 0; 141 return 0;
152 142
153 if (sb->sb_fs_format != GFS2_FORMAT_FS) { 143 fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
154 for (x = 0; gfs2_old_fs_formats[x]; x++)
155 if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
156 break;
157 144
158 if (!gfs2_old_fs_formats[x]) { 145 return -EINVAL;
159 printk(KERN_WARNING
160 "GFS2: code version (%u, %u) is incompatible "
161 "with ondisk format (%u, %u)\n",
162 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
163 sb->sb_fs_format, sb->sb_multihost_format);
164 printk(KERN_WARNING
165 "GFS2: I don't know how to upgrade this FS\n");
166 return -EINVAL;
167 }
168 }
169
170 if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
171 for (x = 0; gfs2_old_multihost_formats[x]; x++)
172 if (gfs2_old_multihost_formats[x] ==
173 sb->sb_multihost_format)
174 break;
175
176 if (!gfs2_old_multihost_formats[x]) {
177 printk(KERN_WARNING
178 "GFS2: code version (%u, %u) is incompatible "
179 "with ondisk format (%u, %u)\n",
180 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
181 sb->sb_fs_format, sb->sb_multihost_format);
182 printk(KERN_WARNING
183 "GFS2: I don't know how to upgrade this FS\n");
184 return -EINVAL;
185 }
186 }
187
188 if (!sdp->sd_args.ar_upgrade) {
189 printk(KERN_WARNING
190 "GFS2: code version (%u, %u) is incompatible "
191 "with ondisk format (%u, %u)\n",
192 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
193 sb->sb_fs_format, sb->sb_multihost_format);
194 printk(KERN_INFO
195 "GFS2: Use the \"upgrade\" mount option to upgrade "
196 "the FS\n");
197 printk(KERN_INFO "GFS2: See the manual for more details\n");
198 return -EINVAL;
199 }
200
201 return 0;
202} 146}
203 147
204static void end_bio_io_page(struct bio *bio, int error) 148static void end_bio_io_page(struct bio *bio, int error)
@@ -586,7 +530,7 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
586 530
587 prev_db = 0; 531 prev_db = 0;
588 532
589 for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) { 533 for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) {
590 bh.b_state = 0; 534 bh.b_state = 0;
591 bh.b_blocknr = 0; 535 bh.b_blocknr = 0;
592 bh.b_size = 1 << ip->i_inode.i_blkbits; 536 bh.b_size = 1 << ip->i_inode.i_blkbits;
@@ -1022,7 +966,6 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
1022 if (!strcmp("lock_nolock", proto)) { 966 if (!strcmp("lock_nolock", proto)) {
1023 lm = &nolock_ops; 967 lm = &nolock_ops;
1024 sdp->sd_args.ar_localflocks = 1; 968 sdp->sd_args.ar_localflocks = 1;
1025 sdp->sd_args.ar_localcaching = 1;
1026#ifdef CONFIG_GFS2_FS_LOCKING_DLM 969#ifdef CONFIG_GFS2_FS_LOCKING_DLM
1027 } else if (!strcmp("lock_dlm", proto)) { 970 } else if (!strcmp("lock_dlm", proto)) {
1028 lm = &gfs2_dlm_ops; 971 lm = &gfs2_dlm_ops;
@@ -1113,8 +1056,6 @@ static int gfs2_journalid_wait(void *word)
1113 1056
1114static int wait_on_journal(struct gfs2_sbd *sdp) 1057static int wait_on_journal(struct gfs2_sbd *sdp)
1115{ 1058{
1116 if (sdp->sd_args.ar_spectator)
1117 return 0;
1118 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 1059 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1119 return 0; 1060 return 0;
1120 1061
@@ -1217,6 +1158,20 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
1217 if (error) 1158 if (error)
1218 goto fail_sb; 1159 goto fail_sb;
1219 1160
1161 /*
1162 * If user space has failed to join the cluster or some similar
1163 * failure has occurred, then the journal id will contain a
1164 * negative (error) number. This will then be returned to the
1165 * caller (of the mount syscall). We do this even for spectator
 1166 * mounts (which just write a jid of 0 to indicate "ok" even though
 1167 * the jid is unused in the spectator case).
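 1167a * (in which case it is simply discarded).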
1168 */
1169 if (sdp->sd_lockstruct.ls_jid < 0) {
1170 error = sdp->sd_lockstruct.ls_jid;
1171 sdp->sd_lockstruct.ls_jid = 0;
1172 goto fail_sb;
1173 }
1174
1220 error = init_inodes(sdp, DO); 1175 error = init_inodes(sdp, DO);
1221 if (error) 1176 if (error)
1222 goto fail_sb; 1177 goto fail_sb;
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1009be2c9737..0534510200d5 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,6 +18,8 @@
18#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
19#include <linux/crc32.h> 19#include <linux/crc32.h>
20#include <linux/fiemap.h> 20#include <linux/fiemap.h>
21#include <linux/swap.h>
22#include <linux/falloc.h>
21#include <asm/uaccess.h> 23#include <asm/uaccess.h>
22 24
23#include "gfs2.h" 25#include "gfs2.h"
@@ -217,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
217 goto out_gunlock_q; 219 goto out_gunlock_q;
218 220
219 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 221 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
220 al->al_rgd->rd_length + 222 gfs2_rg_blocks(al) +
221 2 * RES_DINODE + RES_STATFS + 223 2 * RES_DINODE + RES_STATFS +
222 RES_QUOTA, 0); 224 RES_QUOTA, 0);
223 if (error) 225 if (error)
@@ -406,7 +408,6 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
406 408
407 ip = ghs[1].gh_gl->gl_object; 409 ip = ghs[1].gh_gl->gl_object;
408 410
409 ip->i_disksize = size;
410 i_size_write(inode, size); 411 i_size_write(inode, size);
411 412
412 error = gfs2_meta_inode_buffer(ip, &dibh); 413 error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -461,7 +462,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
461 ip = ghs[1].gh_gl->gl_object; 462 ip = ghs[1].gh_gl->gl_object;
462 463
463 ip->i_inode.i_nlink = 2; 464 ip->i_inode.i_nlink = 2;
464 ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); 465 i_size_write(inode, sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode));
465 ip->i_diskflags |= GFS2_DIF_JDATA; 466 ip->i_diskflags |= GFS2_DIF_JDATA;
466 ip->i_entries = 2; 467 ip->i_entries = 2;
467 468
@@ -470,18 +471,15 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
470 if (!gfs2_assert_withdraw(sdp, !error)) { 471 if (!gfs2_assert_withdraw(sdp, !error)) {
471 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; 472 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
472 struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1); 473 struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
473 struct qstr str;
474 474
475 gfs2_str2qstr(&str, ".");
476 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 475 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
477 gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent); 476 gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
478 dent->de_inum = di->di_num; /* already GFS2 endian */ 477 dent->de_inum = di->di_num; /* already GFS2 endian */
479 dent->de_type = cpu_to_be16(DT_DIR); 478 dent->de_type = cpu_to_be16(DT_DIR);
480 di->di_entries = cpu_to_be32(1); 479 di->di_entries = cpu_to_be32(1);
481 480
482 gfs2_str2qstr(&str, "..");
483 dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1)); 481 dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
484 gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent); 482 gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
485 483
486 gfs2_inum_out(dip, dent); 484 gfs2_inum_out(dip, dent);
487 dent->de_type = cpu_to_be16(DT_DIR); 485 dent->de_type = cpu_to_be16(DT_DIR);
@@ -522,7 +520,6 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
522static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, 520static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
523 struct gfs2_inode *ip) 521 struct gfs2_inode *ip)
524{ 522{
525 struct qstr dotname;
526 int error; 523 int error;
527 524
528 if (ip->i_entries != 2) { 525 if (ip->i_entries != 2) {
@@ -539,13 +536,11 @@ static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
539 if (error) 536 if (error)
540 return error; 537 return error;
541 538
542 gfs2_str2qstr(&dotname, "."); 539 error = gfs2_dir_del(ip, &gfs2_qdot);
543 error = gfs2_dir_del(ip, &dotname);
544 if (error) 540 if (error)
545 return error; 541 return error;
546 542
547 gfs2_str2qstr(&dotname, ".."); 543 error = gfs2_dir_del(ip, &gfs2_qdotdot);
548 error = gfs2_dir_del(ip, &dotname);
549 if (error) 544 if (error)
550 return error; 545 return error;
551 546
@@ -694,11 +689,8 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
694 struct inode *dir = &to->i_inode; 689 struct inode *dir = &to->i_inode;
695 struct super_block *sb = dir->i_sb; 690 struct super_block *sb = dir->i_sb;
696 struct inode *tmp; 691 struct inode *tmp;
697 struct qstr dotdot;
698 int error = 0; 692 int error = 0;
699 693
700 gfs2_str2qstr(&dotdot, "..");
701
702 igrab(dir); 694 igrab(dir);
703 695
704 for (;;) { 696 for (;;) {
@@ -711,7 +703,7 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
711 break; 703 break;
712 } 704 }
713 705
714 tmp = gfs2_lookupi(dir, &dotdot, 1); 706 tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1);
715 if (IS_ERR(tmp)) { 707 if (IS_ERR(tmp)) {
716 error = PTR_ERR(tmp); 708 error = PTR_ERR(tmp);
717 break; 709 break;
@@ -744,7 +736,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
744 struct gfs2_inode *ip = GFS2_I(odentry->d_inode); 736 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
745 struct gfs2_inode *nip = NULL; 737 struct gfs2_inode *nip = NULL;
746 struct gfs2_sbd *sdp = GFS2_SB(odir); 738 struct gfs2_sbd *sdp = GFS2_SB(odir);
747 struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }; 739 struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }, ri_gh;
748 struct gfs2_rgrpd *nrgd; 740 struct gfs2_rgrpd *nrgd;
749 unsigned int num_gh; 741 unsigned int num_gh;
750 int dir_rename = 0; 742 int dir_rename = 0;
@@ -758,6 +750,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
758 return 0; 750 return 0;
759 } 751 }
760 752
753 error = gfs2_rindex_hold(sdp, &ri_gh);
754 if (error)
755 return error;
761 756
762 if (odip != ndip) { 757 if (odip != ndip) {
763 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 758 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
@@ -887,12 +882,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
887 882
888 al->al_requested = sdp->sd_max_dirres; 883 al->al_requested = sdp->sd_max_dirres;
889 884
890 error = gfs2_inplace_reserve(ndip); 885 error = gfs2_inplace_reserve_ri(ndip);
891 if (error) 886 if (error)
892 goto out_gunlock_q; 887 goto out_gunlock_q;
893 888
894 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 889 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
895 al->al_rgd->rd_length + 890 gfs2_rg_blocks(al) +
896 4 * RES_DINODE + 4 * RES_LEAF + 891 4 * RES_DINODE + 4 * RES_LEAF +
897 RES_STATFS + RES_QUOTA + 4, 0); 892 RES_STATFS + RES_QUOTA + 4, 0);
898 if (error) 893 if (error)
@@ -920,9 +915,6 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
920 } 915 }
921 916
922 if (dir_rename) { 917 if (dir_rename) {
923 struct qstr name;
924 gfs2_str2qstr(&name, "..");
925
926 error = gfs2_change_nlink(ndip, +1); 918 error = gfs2_change_nlink(ndip, +1);
927 if (error) 919 if (error)
928 goto out_end_trans; 920 goto out_end_trans;
@@ -930,7 +922,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
930 if (error) 922 if (error)
931 goto out_end_trans; 923 goto out_end_trans;
932 924
933 error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR); 925 error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
934 if (error) 926 if (error)
935 goto out_end_trans; 927 goto out_end_trans;
936 } else { 928 } else {
@@ -972,6 +964,7 @@ out_gunlock_r:
972 if (r_gh.gh_gl) 964 if (r_gh.gh_gl)
973 gfs2_glock_dq_uninit(&r_gh); 965 gfs2_glock_dq_uninit(&r_gh);
974out: 966out:
967 gfs2_glock_dq_uninit(&ri_gh);
975 return error; 968 return error;
976} 969}
977 970
@@ -990,7 +983,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
990 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 983 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
991 struct gfs2_holder i_gh; 984 struct gfs2_holder i_gh;
992 struct buffer_head *dibh; 985 struct buffer_head *dibh;
993 unsigned int x; 986 unsigned int x, size;
994 char *buf; 987 char *buf;
995 int error; 988 int error;
996 989
@@ -1002,7 +995,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
1002 return NULL; 995 return NULL;
1003 } 996 }
1004 997
1005 if (!ip->i_disksize) { 998 size = (unsigned int)i_size_read(&ip->i_inode);
999 if (size == 0) {
1006 gfs2_consist_inode(ip); 1000 gfs2_consist_inode(ip);
1007 buf = ERR_PTR(-EIO); 1001 buf = ERR_PTR(-EIO);
1008 goto out; 1002 goto out;
@@ -1014,7 +1008,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
1014 goto out; 1008 goto out;
1015 } 1009 }
1016 1010
1017 x = ip->i_disksize + 1; 1011 x = size + 1;
1018 buf = kmalloc(x, GFP_NOFS); 1012 buf = kmalloc(x, GFP_NOFS);
1019 if (!buf) 1013 if (!buf)
1020 buf = ERR_PTR(-ENOMEM); 1014 buf = ERR_PTR(-ENOMEM);
@@ -1071,30 +1065,6 @@ int gfs2_permission(struct inode *inode, int mask)
1071 return error; 1065 return error;
1072} 1066}
1073 1067
1074/*
1075 * XXX(truncate): the truncate_setsize calls should be moved to the end.
1076 */
1077static int setattr_size(struct inode *inode, struct iattr *attr)
1078{
1079 struct gfs2_inode *ip = GFS2_I(inode);
1080 struct gfs2_sbd *sdp = GFS2_SB(inode);
1081 int error;
1082
1083 if (attr->ia_size != ip->i_disksize) {
1084 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1085 if (error)
1086 return error;
1087 truncate_setsize(inode, attr->ia_size);
1088 gfs2_trans_end(sdp);
1089 }
1090
1091 error = gfs2_truncatei(ip, attr->ia_size);
1092 if (error && (inode->i_size != ip->i_disksize))
1093 i_size_write(inode, ip->i_disksize);
1094
1095 return error;
1096}
1097
1098static int setattr_chown(struct inode *inode, struct iattr *attr) 1068static int setattr_chown(struct inode *inode, struct iattr *attr)
1099{ 1069{
1100 struct gfs2_inode *ip = GFS2_I(inode); 1070 struct gfs2_inode *ip = GFS2_I(inode);
@@ -1195,7 +1165,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
1195 goto out; 1165 goto out;
1196 1166
1197 if (attr->ia_valid & ATTR_SIZE) 1167 if (attr->ia_valid & ATTR_SIZE)
1198 error = setattr_size(inode, attr); 1168 error = gfs2_setattr_size(inode, attr->ia_size);
1199 else if (attr->ia_valid & (ATTR_UID | ATTR_GID)) 1169 else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
1200 error = setattr_chown(inode, attr); 1170 error = setattr_chown(inode, attr);
1201 else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode)) 1171 else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode))
@@ -1301,6 +1271,257 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
1301 return ret; 1271 return ret;
1302} 1272}
1303 1273
1274static void empty_write_end(struct page *page, unsigned from,
1275 unsigned to)
1276{
1277 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
1278
1279 page_zero_new_buffers(page, from, to);
1280 flush_dcache_page(page);
1281 mark_page_accessed(page);
1282
1283 if (!gfs2_is_writeback(ip))
1284 gfs2_page_add_databufs(ip, page, from, to);
1285
1286 block_commit_write(page, from, to);
1287}
1288
1289
1290static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
1291{
1292 unsigned start, end, next;
1293 struct buffer_head *bh, *head;
1294 int error;
1295
1296 if (!page_has_buffers(page)) {
1297 error = block_prepare_write(page, from, to, gfs2_block_map);
1298 if (unlikely(error))
1299 return error;
1300
1301 empty_write_end(page, from, to);
1302 return 0;
1303 }
1304
1305 bh = head = page_buffers(page);
1306 next = end = 0;
1307 while (next < from) {
1308 next += bh->b_size;
1309 bh = bh->b_this_page;
1310 }
1311 start = next;
1312 do {
1313 next += bh->b_size;
1314 if (buffer_mapped(bh)) {
1315 if (end) {
1316 error = block_prepare_write(page, start, end,
1317 gfs2_block_map);
1318 if (unlikely(error))
1319 return error;
1320 empty_write_end(page, start, end);
1321 end = 0;
1322 }
1323 start = next;
1324 }
1325 else
1326 end = next;
1327 bh = bh->b_this_page;
1328 } while (next < to);
1329
1330 if (end) {
1331 error = block_prepare_write(page, start, end, gfs2_block_map);
1332 if (unlikely(error))
1333 return error;
1334 empty_write_end(page, start, end);
1335 }
1336
1337 return 0;
1338}
1339
1340static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
1341 int mode)
1342{
1343 struct gfs2_inode *ip = GFS2_I(inode);
1344 struct buffer_head *dibh;
1345 int error;
1346 u64 start = offset >> PAGE_CACHE_SHIFT;
1347 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
1348 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
1349 pgoff_t curr;
1350 struct page *page;
1351 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
1352 unsigned int from, to;
1353
1354 if (!end_offset)
1355 end_offset = PAGE_CACHE_SIZE;
1356
1357 error = gfs2_meta_inode_buffer(ip, &dibh);
1358 if (unlikely(error))
1359 goto out;
1360
1361 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1362
1363 if (gfs2_is_stuffed(ip)) {
1364 error = gfs2_unstuff_dinode(ip, NULL);
1365 if (unlikely(error))
1366 goto out;
1367 }
1368
1369 curr = start;
1370 offset = start << PAGE_CACHE_SHIFT;
1371 from = start_offset;
1372 to = PAGE_CACHE_SIZE;
1373 while (curr <= end) {
1374 page = grab_cache_page_write_begin(inode->i_mapping, curr,
1375 AOP_FLAG_NOFS);
1376 if (unlikely(!page)) {
1377 error = -ENOMEM;
1378 goto out;
1379 }
1380
1381 if (curr == end)
1382 to = end_offset;
1383 error = write_empty_blocks(page, from, to);
1384 if (!error && offset + to > inode->i_size &&
1385 !(mode & FALLOC_FL_KEEP_SIZE)) {
1386 i_size_write(inode, offset + to);
1387 }
1388 unlock_page(page);
1389 page_cache_release(page);
1390 if (error)
1391 goto out;
1392 curr++;
1393 offset += PAGE_CACHE_SIZE;
1394 from = 0;
1395 }
1396
1397 gfs2_dinode_out(ip, dibh->b_data);
1398 mark_inode_dirty(inode);
1399
1400 brelse(dibh);
1401
1402out:
1403 return error;
1404}
1405
1406static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
1407 unsigned int *data_blocks, unsigned int *ind_blocks)
1408{
1409 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1410 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
1411 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
1412
1413 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
1414 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
1415 max_data -= tmp;
1416 }
 1417 /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
1418 so it might end up with fewer data blocks */
1419 if (max_data <= *data_blocks)
1420 return;
1421 *data_blocks = max_data;
1422 *ind_blocks = max_blocks - max_data;
1423 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
1424 if (*len > max) {
1425 *len = max;
1426 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
1427 }
1428}
1429
1430static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1431 loff_t len)
1432{
1433 struct gfs2_sbd *sdp = GFS2_SB(inode);
1434 struct gfs2_inode *ip = GFS2_I(inode);
1435 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
1436 loff_t bytes, max_bytes;
1437 struct gfs2_alloc *al;
1438 int error;
1439 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
1440 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
1441
1442 offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
1443 sdp->sd_sb.sb_bsize_shift;
1444
1445 len = next - offset;
1446 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
1447 if (!bytes)
1448 bytes = UINT_MAX;
1449
1450 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
1451 error = gfs2_glock_nq(&ip->i_gh);
1452 if (unlikely(error))
1453 goto out_uninit;
1454
1455 if (!gfs2_write_alloc_required(ip, offset, len))
1456 goto out_unlock;
1457
1458 while (len > 0) {
1459 if (len < bytes)
1460 bytes = len;
1461 al = gfs2_alloc_get(ip);
1462 if (!al) {
1463 error = -ENOMEM;
1464 goto out_unlock;
1465 }
1466
1467 error = gfs2_quota_lock_check(ip);
1468 if (error)
1469 goto out_alloc_put;
1470
1471retry:
1472 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
1473
1474 al->al_requested = data_blocks + ind_blocks;
1475 error = gfs2_inplace_reserve(ip);
1476 if (error) {
1477 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
1478 bytes >>= 1;
1479 goto retry;
1480 }
1481 goto out_qunlock;
1482 }
1483 max_bytes = bytes;
1484 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
1485 al->al_requested = data_blocks + ind_blocks;
1486
1487 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
1488 RES_RG_HDR + gfs2_rg_blocks(al);
1489 if (gfs2_is_jdata(ip))
1490 rblocks += data_blocks ? data_blocks : 1;
1491
1492 error = gfs2_trans_begin(sdp, rblocks,
1493 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
1494 if (error)
1495 goto out_trans_fail;
1496
1497 error = fallocate_chunk(inode, offset, max_bytes, mode);
1498 gfs2_trans_end(sdp);
1499
1500 if (error)
1501 goto out_trans_fail;
1502
1503 len -= max_bytes;
1504 offset += max_bytes;
1505 gfs2_inplace_release(ip);
1506 gfs2_quota_unlock(ip);
1507 gfs2_alloc_put(ip);
1508 }
1509 goto out_unlock;
1510
1511out_trans_fail:
1512 gfs2_inplace_release(ip);
1513out_qunlock:
1514 gfs2_quota_unlock(ip);
1515out_alloc_put:
1516 gfs2_alloc_put(ip);
1517out_unlock:
1518 gfs2_glock_dq(&ip->i_gh);
1519out_uninit:
1520 gfs2_holder_uninit(&ip->i_gh);
1521 return error;
1522}
1523
1524
1304static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1525static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1305 u64 start, u64 len) 1526 u64 start, u64 len)
1306{ 1527{
@@ -1351,6 +1572,7 @@ const struct inode_operations gfs2_file_iops = {
1351 .getxattr = gfs2_getxattr, 1572 .getxattr = gfs2_getxattr,
1352 .listxattr = gfs2_listxattr, 1573 .listxattr = gfs2_listxattr,
1353 .removexattr = gfs2_removexattr, 1574 .removexattr = gfs2_removexattr,
1575 .fallocate = gfs2_fallocate,
1354 .fiemap = gfs2_fiemap, 1576 .fiemap = gfs2_fiemap,
1355}; 1577};
1356 1578
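
fallocate_chunk() above walks the byte range one page at a time: the first
page is written from start_offset, intermediate pages in full, and the
last page up to end_offset, where a remainder of zero means "the whole
final page". The index arithmetic in standalone form (PAGE_SIZE stands in
for PAGE_CACHE_SIZE; they are equal on these kernels):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t offset = 5000, len = 10000;	/* arbitrary test range */
	uint64_t start = offset >> PAGE_SHIFT;
	uint64_t end = (offset + len - 1) >> PAGE_SHIFT;
	unsigned int from = offset & ~PAGE_MASK;
	unsigned int end_offset = (offset + len) & ~PAGE_MASK;
	uint64_t curr, total = 0;

	if (!end_offset)
		end_offset = PAGE_SIZE;		/* range ends on a boundary */

	for (curr = start; curr <= end; curr++) {
		unsigned int to = (curr == end) ? end_offset : PAGE_SIZE;

		printf("page %llu: bytes [%u, %u)\n",
		       (unsigned long long)curr, from, to);
		total += to - from;
		from = 0;	/* pages after the first start at offset 0 */
	}
	assert(start == 1 && end == 3);	/* 5000..14999 spans pages 1-3 */
	assert(total == len);		/* every byte covered exactly once */
	return 0;
}
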
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 1bc6b5695e6d..58a9b9998b42 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -735,10 +735,8 @@ get_a_page:
735 goto out; 735 goto out;
736 736
737 size = loc + sizeof(struct gfs2_quota); 737 size = loc + sizeof(struct gfs2_quota);
738 if (size > inode->i_size) { 738 if (size > inode->i_size)
739 ip->i_disksize = size;
740 i_size_write(inode, size); 739 i_size_write(inode, size);
741 }
742 inode->i_mtime = inode->i_atime = CURRENT_TIME; 740 inode->i_mtime = inode->i_atime = CURRENT_TIME;
743 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 741 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
744 gfs2_dinode_out(ip, dibh->b_data); 742 gfs2_dinode_out(ip, dibh->b_data);
@@ -817,7 +815,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
817 goto out_alloc; 815 goto out_alloc;
818 816
819 if (nalloc) 817 if (nalloc)
820 blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS; 818 blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
821 819
822 error = gfs2_trans_begin(sdp, blocks, 0); 820 error = gfs2_trans_begin(sdp, blocks, 0);
823 if (error) 821 if (error)
@@ -1190,18 +1188,17 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
1190int gfs2_quota_init(struct gfs2_sbd *sdp) 1188int gfs2_quota_init(struct gfs2_sbd *sdp)
1191{ 1189{
1192 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 1190 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1193 unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 1191 u64 size = i_size_read(sdp->sd_qc_inode);
1192 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1194 unsigned int x, slot = 0; 1193 unsigned int x, slot = 0;
1195 unsigned int found = 0; 1194 unsigned int found = 0;
1196 u64 dblock; 1195 u64 dblock;
1197 u32 extlen = 0; 1196 u32 extlen = 0;
1198 int error; 1197 int error;
1199 1198
1200 if (!ip->i_disksize || ip->i_disksize > (64 << 20) || 1199 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1201 ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
1202 gfs2_consist_inode(ip);
1203 return -EIO; 1200 return -EIO;
1204 } 1201
1205 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; 1202 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1206 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); 1203 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1207 1204
@@ -1589,6 +1586,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1589 error = gfs2_inplace_reserve(ip); 1586 error = gfs2_inplace_reserve(ip);
1590 if (error) 1587 if (error)
1591 goto out_alloc; 1588 goto out_alloc;
1589 blocks += gfs2_rg_blocks(al);
1592 } 1590 }
1593 1591
1594 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); 1592 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
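
gfs2_quota_init() sizes its tracking structures from the quota-change
file: blocks from the (now helper-validated) file size, slots from the
per-block capacity, and one page-sized bitmap chunk per 8 * PAGE_SIZE
slots. The arithmetic with assumed example numbers (the 1 MiB file size
and qc_per_block = 170 are hypothetical):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long size = 1UL << 20;			/* 1 MiB qc file */
	unsigned int bsize_shift = 12;			/* 4096-byte blocks */
	unsigned long blocks = size >> bsize_shift;	/* 256 */
	unsigned long qc_per_block = 170;		/* hypothetical */
	unsigned long slots = blocks * qc_per_block;	/* 43520 */
	unsigned long chunks = DIV_ROUND_UP(slots, 8 * PAGE_SIZE);

	assert(blocks == 256 && slots == 43520 && chunks == 2);
	return 0;
}
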
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index f7f89a94a5a4..f2a02edcac8f 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -455,11 +455,13 @@ void gfs2_recover_func(struct work_struct *work)
455 int ro = 0; 455 int ro = 0;
456 unsigned int pass; 456 unsigned int pass;
457 int error; 457 int error;
458 int jlocked = 0;
458 459
459 if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) { 460 if (sdp->sd_args.ar_spectator ||
461 (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
460 fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n", 462 fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
461 jd->jd_jid); 463 jd->jd_jid);
462 464 jlocked = 1;
463 /* Acquire the journal lock so we can do recovery */ 465 /* Acquire the journal lock so we can do recovery */
464 466
465 error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops, 467 error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
@@ -554,13 +556,12 @@ void gfs2_recover_func(struct work_struct *work)
554 jd->jd_jid, t); 556 jd->jd_jid, t);
555 } 557 }
556 558
557 if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
558 gfs2_glock_dq_uninit(&ji_gh);
559
560 gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); 559 gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
561 560
562 if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) 561 if (jlocked) {
562 gfs2_glock_dq_uninit(&ji_gh);
563 gfs2_glock_dq_uninit(&j_gh); 563 gfs2_glock_dq_uninit(&j_gh);
564 }
564 565
565 fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); 566 fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
566 goto done; 567 goto done;
@@ -568,7 +569,7 @@ void gfs2_recover_func(struct work_struct *work)
568fail_gunlock_tr: 569fail_gunlock_tr:
569 gfs2_glock_dq_uninit(&t_gh); 570 gfs2_glock_dq_uninit(&t_gh);
570fail_gunlock_ji: 571fail_gunlock_ji:
571 if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) { 572 if (jlocked) {
572 gfs2_glock_dq_uninit(&ji_gh); 573 gfs2_glock_dq_uninit(&ji_gh);
573fail_gunlock_j: 574fail_gunlock_j:
574 gfs2_glock_dq_uninit(&j_gh); 575 gfs2_glock_dq_uninit(&j_gh);
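
The jlocked variable is the standard fix for cleanup paths that re-derive
"did we take the lock?" from state that may not match what held at
acquisition time (here, spectator mounts now also take the journal lock).
Recording the fact once and testing the flag on every exit path avoids
the mismatch entirely; a generic sketch:

#include <stdio.h>

static int acquire(const char *what)
{
	printf("lock %s\n", what);
	return 0;			/* pretend it always succeeds */
}

static void release(const char *what)
{
	printf("unlock %s\n", what);
}

static int do_recovery(int need_journal_lock)
{
	int jlocked = 0;
	int err = 0;

	if (need_journal_lock) {
		err = acquire("journal");
		if (err)
			return err;
		jlocked = 1;		/* set once, trusted everywhere below */
	}

	/* ... journal replay would happen here ... */

	if (jlocked)			/* not: re-evaluating the condition */
		release("journal");
	return err;
}

int main(void)
{
	return do_recovery(1);
}
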
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 171a744f8e45..fb67f593f408 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -500,7 +500,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
500 for (rgrps = 0;; rgrps++) { 500 for (rgrps = 0;; rgrps++) {
501 loff_t pos = rgrps * sizeof(struct gfs2_rindex); 501 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
502 502
503 if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize) 503 if (pos + sizeof(struct gfs2_rindex) >= i_size_read(inode))
504 break; 504 break;
505 error = gfs2_internal_read(ip, &ra_state, buf, &pos, 505 error = gfs2_internal_read(ip, &ra_state, buf, &pos,
506 sizeof(struct gfs2_rindex)); 506 sizeof(struct gfs2_rindex));
@@ -588,7 +588,9 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
588 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 588 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
589 struct inode *inode = &ip->i_inode; 589 struct inode *inode = &ip->i_inode;
590 struct file_ra_state ra_state; 590 struct file_ra_state ra_state;
591 u64 rgrp_count = ip->i_disksize; 591 u64 rgrp_count = i_size_read(inode);
592 struct gfs2_rgrpd *rgd;
593 unsigned int max_data = 0;
592 int error; 594 int error;
593 595
594 do_div(rgrp_count, sizeof(struct gfs2_rindex)); 596 do_div(rgrp_count, sizeof(struct gfs2_rindex));
@@ -603,6 +605,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
603 } 605 }
604 } 606 }
605 607
608 list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
609 if (rgd->rd_data > max_data)
610 max_data = rgd->rd_data;
611 sdp->sd_max_rg_data = max_data;
606 sdp->sd_rindex_uptodate = 1; 612 sdp->sd_rindex_uptodate = 1;
607 return 0; 613 return 0;
608} 614}
@@ -622,13 +628,15 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct inode *inode = &ip->i_inode;
 	struct file_ra_state ra_state;
+	struct gfs2_rgrpd *rgd;
+	unsigned int max_data = 0;
 	int error;
 
 	file_ra_state_init(&ra_state, inode->i_mapping);
 	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
 		/* Ignore partials */
 		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
-		    ip->i_disksize)
+		    i_size_read(inode))
 			break;
 		error = read_rindex_entry(ip, &ra_state);
 		if (error) {
@@ -636,6 +644,10 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
 			return error;
 		}
 	}
+	list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
+		if (rgd->rd_data > max_data)
+			max_data = rgd->rd_data;
+	sdp->sd_max_rg_data = max_data;
 
 	sdp->sd_rindex_uptodate = 1;
 	return 0;
@@ -1188,7 +1200,8 @@ out:
  * Returns: errno
  */
 
-int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
+int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+			   char *file, unsigned int line)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_alloc *al = ip->i_alloc;
@@ -1199,12 +1212,15 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
 		return -EINVAL;
 
 try_again:
-	/* We need to hold the rindex unless the inode we're using is
-	   the rindex itself, in which case it's already held. */
-	if (ip != GFS2_I(sdp->sd_rindex))
-		error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
-	else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
-		error = gfs2_ri_update_special(ip);
+	if (hold_rindex) {
+		/* We need to hold the rindex unless the inode we're using is
+		   the rindex itself, in which case it's already held. */
+		if (ip != GFS2_I(sdp->sd_rindex))
+			error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
+		else if (!sdp->sd_rgrps) /* We may not have the rindex read
+					    in, so: */
+			error = gfs2_ri_update_special(ip);
+	}
 
 	if (error)
 		return error;
@@ -1215,7 +1231,7 @@ try_again:
 	   try to free it, and try the allocation again. */
 	error = get_local_rgrp(ip, &unlinked, &last_unlinked);
 	if (error) {
-		if (ip != GFS2_I(sdp->sd_rindex))
+		if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
 			gfs2_glock_dq_uninit(&al->al_ri_gh);
 		if (error != -EAGAIN)
 			return error;
@@ -1257,7 +1273,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
 	al->al_rgd = NULL;
 	if (al->al_rgd_gh.gh_gl)
 		gfs2_glock_dq_uninit(&al->al_rgd_gh);
-	if (ip != GFS2_I(sdp->sd_rindex))
+	if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl)
 		gfs2_glock_dq_uninit(&al->al_ri_gh);
 }
 
@@ -1496,11 +1512,19 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *dibh;
 	struct gfs2_alloc *al = ip->i_alloc;
-	struct gfs2_rgrpd *rgd = al->al_rgd;
+	struct gfs2_rgrpd *rgd;
 	u32 goal, blk;
 	u64 block;
 	int error;
 
+	/* Only happens if there is a bug in gfs2, return something distinctive
+	 * to ensure that it is noticed.
+	 */
+	if (al == NULL)
+		return -ECANCELED;
+
+	rgd = al->al_rgd;
+
 	if (rgrp_contains_block(rgd, ip->i_goal))
 		goal = ip->i_goal - rgd->rd_data0;
 	else
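Note on the gfs2_alloc_block() hunk above: a state that should be impossible is guarded with a distinctive errno rather than left to oops on a NULL dereference. A minimal sketch of the same guard (alloc_block_like() is a hypothetical wrapper, the structs are the real GFS2 ones):

	/* Sketch: make a "can't happen" state loud instead of crashing. */
	static int alloc_block_like(struct gfs2_inode *ip)
	{
		struct gfs2_alloc *al = ip->i_alloc;

		if (al == NULL)
			return -ECANCELED;	/* only reachable via a gfs2 bug */

		/* ... proceed using al->al_rgd as usual ... */
		return 0;
	}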
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index f07119d89557..0e35c0466f9a 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -39,10 +39,12 @@ static inline void gfs2_alloc_put(struct gfs2_inode *ip)
 	ip->i_alloc = NULL;
 }
 
-extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file,
-				  unsigned int line);
+extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+				  char *file, unsigned int line);
 #define gfs2_inplace_reserve(ip) \
-gfs2_inplace_reserve_i((ip), __FILE__, __LINE__)
+	gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__)
+#define gfs2_inplace_reserve_ri(ip) \
+	gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__)
 
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
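The rgrp.h hunk shows the usual GFS2 idiom: one _i() worker takes __FILE__/__LINE__ plus a behaviour flag, and one thin macro per flag value bakes the flag in at the call site. A generic sketch of the same idiom with hypothetical names:

	/* Sketch: one worker, several macro front ends baking in a flag. */
	extern int reserve_i(struct ctx *c, int hold_index,
			     char *file, unsigned int line);

	#define reserve(c)	reserve_i((c), 1, __FILE__, __LINE__)
	#define reserve_ri(c)	reserve_i((c), 0, __FILE__, __LINE__)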
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 77cb9f830ee4..047d1176096c 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -85,6 +85,7 @@ static const match_table_t tokens = {
 	{Opt_locktable, "locktable=%s"},
 	{Opt_hostdata, "hostdata=%s"},
 	{Opt_spectator, "spectator"},
+	{Opt_spectator, "norecovery"},
 	{Opt_ignore_local_fs, "ignore_local_fs"},
 	{Opt_localflocks, "localflocks"},
 	{Opt_localcaching, "localcaching"},
@@ -159,13 +160,13 @@ int gfs2_mount_args(struct gfs2_args *args, char *options)
 			args->ar_spectator = 1;
 			break;
 		case Opt_ignore_local_fs:
-			args->ar_ignore_local_fs = 1;
+			/* Retained for backwards compat only */
 			break;
 		case Opt_localflocks:
 			args->ar_localflocks = 1;
 			break;
 		case Opt_localcaching:
-			args->ar_localcaching = 1;
+			/* Retained for backwards compat only */
 			break;
 		case Opt_debug:
 			if (args->ar_errors == GFS2_ERRORS_PANIC) {
@@ -179,7 +180,7 @@ int gfs2_mount_args(struct gfs2_args *args, char *options)
 			args->ar_debug = 0;
 			break;
 		case Opt_upgrade:
-			args->ar_upgrade = 1;
+			/* Retained for backwards compat only */
 			break;
 		case Opt_acl:
 			args->ar_posix_acl = 1;
@@ -342,15 +343,14 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 {
 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+	u64 size = i_size_read(jd->jd_inode);
 
-	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
-	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
-		gfs2_consist_inode(ip);
+	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30))
 		return -EIO;
-	}
-	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
 
-	if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) {
+	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
+
+	if (gfs2_write_alloc_required(ip, 0, size)) {
 		gfs2_consist_inode(ip);
 		return -EIO;
 	}
@@ -1129,9 +1129,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
 	/* Some flags must not be changed */
 	if (args_neq(&args, &sdp->sd_args, spectator) ||
-	    args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
 	    args_neq(&args, &sdp->sd_args, localflocks) ||
-	    args_neq(&args, &sdp->sd_args, localcaching) ||
 	    args_neq(&args, &sdp->sd_args, meta))
 		return -EINVAL;
 
@@ -1234,16 +1232,10 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
 		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
 	if (args->ar_spectator)
 		seq_printf(s, ",spectator");
-	if (args->ar_ignore_local_fs)
-		seq_printf(s, ",ignore_local_fs");
 	if (args->ar_localflocks)
 		seq_printf(s, ",localflocks");
-	if (args->ar_localcaching)
-		seq_printf(s, ",localcaching");
 	if (args->ar_debug)
 		seq_printf(s, ",debug");
-	if (args->ar_upgrade)
-		seq_printf(s, ",upgrade");
 	if (args->ar_posix_acl)
 		seq_printf(s, ",acl");
 	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
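The super.c hunks retire ignore_local_fs, localcaching and upgrade without breaking old fstab lines: the tokens stay in the match table, but their handlers become no-ops and they are no longer reported or remount-checked. A sketch of the pattern, with parse_token() hypothetical and the Opt_* tokens taken from the diff:

	/* Sketch: a deprecated option parses successfully but sets nothing. */
	static int parse_token(struct gfs2_args *args, int token)
	{
		switch (token) {
		case Opt_localflocks:
			args->ar_localflocks = 1;
			break;
		case Opt_localcaching:	/* deprecated: accepted, ignored */
		case Opt_upgrade:	/* deprecated: accepted, ignored */
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}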
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index ccacffd2faaa..748ccb557c18 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -230,7 +230,10 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
 
 	if (gltype > LM_TYPE_JOURNAL)
 		return -EINVAL;
-	glops = gfs2_glops_list[gltype];
+	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK)
+		glops = &gfs2_trans_glops;
+	else
+		glops = gfs2_glops_list[gltype];
 	if (glops == NULL)
 		return -EINVAL;
 	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
@@ -399,31 +402,32 @@ static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
 
 static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
 {
-	return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid);
+	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
 }
 
 static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 {
-	unsigned jid;
+	int jid;
 	int rv;
 
-	rv = sscanf(buf, "%u", &jid);
+	rv = sscanf(buf, "%d", &jid);
 	if (rv != 1)
 		return -EINVAL;
 
 	spin_lock(&sdp->sd_jindex_spin);
 	rv = -EINVAL;
-	if (sdp->sd_args.ar_spectator)
-		goto out;
 	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
 		goto out;
 	rv = -EBUSY;
-	if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
 		goto out;
+	rv = 0;
+	if (sdp->sd_args.ar_spectator && jid > 0)
+		rv = jid = -EINVAL;
 	sdp->sd_lockstruct.ls_jid = jid;
+	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
 	smp_mb__after_clear_bit();
 	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
-	rv = 0;
 out:
 	spin_unlock(&sdp->sd_jindex_spin);
 	return rv ? rv : len;
@@ -617,7 +621,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
 	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
 	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
-		add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
+		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
 	if (gfs2_uuid_valid(uuid))
 		add_uevent_var(env, "UUID=%pUB", uuid);
 	return 0;
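Note on jid_store() above: the jid is published first, then the wait bit is cleared with a barrier before waking sleepers, so anyone woken on SDF_NOJOURNALID is guaranteed to see the value. A sketch of just that ordering (publish_jid() is a hypothetical wrapper around the real bitops):

	/* Sketch: publish the value, then clear the bit and wake waiters. */
	static void publish_jid(struct gfs2_sbd *sdp, int jid)
	{
		sdp->sd_lockstruct.ls_jid = jid;
		clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
		smp_mb__after_clear_bit();	/* order the clear before the wake-up */
		wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	}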
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 148d55c14171..cedb0bb96d96 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -39,7 +39,8 @@
 	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
 	{(1UL << GLF_REPLY_PENDING),		"r" },		\
 	{(1UL << GLF_INITIAL),			"I" },		\
-	{(1UL << GLF_FROZEN),			"F" })
+	{(1UL << GLF_FROZEN),			"F" },		\
+	{(1UL << GLF_QUEUED),			"q" })
 
 #ifndef NUMPTY
 #define NUMPTY
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index edf9d4bd908e..fb56b783e028 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -20,11 +20,20 @@ struct gfs2_glock;
 #define RES_JDATA	1
 #define RES_DATA	1
 #define RES_LEAF	1
+#define RES_RG_HDR	1
 #define RES_RG_BIT	2
 #define RES_EATTR	1
 #define RES_STATFS	1
 #define RES_QUOTA	2
 
+/* reserve either the number of blocks to be allocated plus the rg header
+ * block, or all of the blocks in the rg, whichever is smaller */
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al)
+{
+	return (al->al_requested < al->al_rgd->rd_length)?
+	       al->al_requested + 1 : al->al_rgd->rd_length;
+}
+
 int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 		     unsigned int revokes);
 
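gfs2_rg_blocks() clamps the journal reservation: the requested blocks plus one rg header, but never more than the whole resource group. The xattr.c hunk below is one caller converted from the worst-case rd_length. A sketch of the arithmetic in use (trans_blocks_like() is a hypothetical wrapper; the RES_* constants and gfs2_rg_blocks() are the real ones from this diff): with al_requested = 8 and rd_length = 100 the helper yields 9 (8 data blocks + 1 header), while a tiny 6-block rgrp caps it at 6.

	/* Sketch: size a transaction with the clamped rgrp reservation. */
	static unsigned int trans_blocks_like(const struct gfs2_alloc *al,
					      unsigned int blks)
	{
		return blks + gfs2_rg_blocks(al) +
			RES_DINODE + RES_STATFS + RES_QUOTA;
	}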
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 776af6eb4bcb..30b58f07c8a6 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -734,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
-				 blks + al->al_rgd->rd_length +
+				 blks + gfs2_rg_blocks(al) +
 				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
 	if (error)
 		goto out_ipres;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4129cdb3f0d8..571abe97b42a 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->search_key = ptr;
 	fd->key = ptr + tree->max_key_len + 2;
 	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
-	down(&tree->tree_lock);
+	mutex_lock(&tree->tree_lock);
 	return 0;
 }
29 29
@@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
32 hfs_bnode_put(fd->bnode); 32 hfs_bnode_put(fd->bnode);
33 kfree(fd->search_key); 33 kfree(fd->search_key);
34 dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); 34 dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
35 up(&fd->tree->tree_lock); 35 mutex_unlock(&fd->tree->tree_lock);
36 fd->tree = NULL; 36 fd->tree = NULL;
37} 37}
38 38
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 38a0a9917d7f..3ebc437736fe 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -27,7 +27,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	if (!tree)
 		return NULL;
 
-	init_MUTEX(&tree->tree_lock);
+	mutex_init(&tree->tree_lock);
 	spin_lock_init(&tree->hash_lock);
 	/* Set the correct compare function */
 	tree->sb = sb;
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index cc51905ac21d..2a1d712f85dc 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -33,7 +33,7 @@ struct hfs_btree {
 	unsigned int depth;
 
 	//unsigned int map1_size, map_size;
-	struct semaphore tree_lock;
+	struct mutex tree_lock;
 
 	unsigned int pages_per_bnode;
 	spinlock_t hash_lock;
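The hfs and hfsplus hunks around here are one mechanical conversion: a semaphore used purely as a binary lock becomes a mutex (init_MUTEX -> mutex_init, down -> mutex_lock, up -> mutex_unlock), which gains lockdep coverage and owner checking. A minimal sketch of the before/after shape (hfs_btree_like is a hypothetical stand-in for struct hfs_btree):

	#include <linux/mutex.h>

	struct hfs_btree_like {
		struct mutex tree_lock;	/* was: struct semaphore tree_lock */
	};

	static void tree_op(struct hfs_btree_like *t)
	{
		mutex_lock(&t->tree_lock);	/* was: down(&t->tree_lock) */
		/* ... operate on the tree ... */
		mutex_unlock(&t->tree_lock);	/* was: up(&t->tree_lock) */
	}

	/* Initialisation: mutex_init(&t->tree_lock) replaces init_MUTEX(). */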
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 5007a41f1be9..d182438c7ae4 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->search_key = ptr;
 	fd->key = ptr + tree->max_key_len + 2;
 	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
-	down(&tree->tree_lock);
+	mutex_lock(&tree->tree_lock);
 	return 0;
 }
 
@@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
 	hfs_bnode_put(fd->bnode);
 	kfree(fd->search_key);
 	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
-	up(&fd->tree->tree_lock);
+	mutex_unlock(&fd->tree->tree_lock);
 	fd->tree = NULL;
 }
 
@@ -52,6 +52,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
 		rec = (e + b) / 2;
 		len = hfs_brec_lenoff(bnode, rec, &off);
 		keylen = hfs_brec_keylen(bnode, rec);
+		if (keylen == 0) {
+			res = -EINVAL;
+			goto fail;
+		}
 		hfs_bnode_read(bnode, fd->key, off, keylen);
 		cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
 		if (!cmpval) {
@@ -67,6 +71,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
 	if (rec != e && e >= 0) {
 		len = hfs_brec_lenoff(bnode, e, &off);
 		keylen = hfs_brec_keylen(bnode, e);
+		if (keylen == 0) {
+			res = -EINVAL;
+			goto fail;
+		}
 		hfs_bnode_read(bnode, fd->key, off, keylen);
 	}
 done:
@@ -75,6 +83,7 @@ done:
 	fd->keylength = keylen;
 	fd->entryoffset = off + keylen;
 	fd->entrylength = len - keylen;
+fail:
 	return res;
 }
 
@@ -198,6 +207,10 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
 
 	len = hfs_brec_lenoff(bnode, fd->record, &off);
 	keylen = hfs_brec_keylen(bnode, fd->record);
+	if (keylen == 0) {
+		res = -EINVAL;
+		goto out;
+	}
 	fd->keyoffset = off;
 	fd->keylength = keylen;
 	fd->entryoffset = off + keylen;
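The bfind.c hunks above all apply one rule: an on-disk key length is untrusted input, so a zero from hfs_brec_keylen() (which now also rejects oversized lengths, see the brec.c hunk below) is treated as corruption and routed to a single error label instead of being passed to hfs_bnode_read(). A sketch of the validation pattern, with hypothetical helper names (read_key_length, read_key, MAX_KEYLEN):

	/* Sketch: validate an untrusted on-disk length before using it. */
	static int read_record_key(struct node *n, int rec, void *buf)
	{
		int keylen = read_key_length(n, rec);	/* hypothetical */

		if (keylen == 0 || keylen > MAX_KEYLEN)
			return -EINVAL;		/* corruption: refuse to read */

		read_key(n, rec, buf, keylen);		/* hypothetical */
		return keylen;
	}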
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index ea30afc2a03c..ad57f5991eb1 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -17,6 +17,7 @@
 
 int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
 {
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct page *page;
 	struct address_space *mapping;
 	__be32 *pptr, *curr, *end;
@@ -29,8 +30,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 		return size;
 
 	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
-	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
-	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+	mutex_lock(&sbi->alloc_mutex);
+	mapping = sbi->alloc_file->i_mapping;
 	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
 	if (IS_ERR(page)) {
 		start = size;
@@ -150,16 +151,17 @@ done:
 	set_page_dirty(page);
 	kunmap(page);
 	*max = offset + (curr - pptr) * 32 + i - start;
-	HFSPLUS_SB(sb).free_blocks -= *max;
+	sbi->free_blocks -= *max;
 	sb->s_dirt = 1;
 	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
 out:
-	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+	mutex_unlock(&sbi->alloc_mutex);
 	return start;
 }
 
 int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 {
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct page *page;
 	struct address_space *mapping;
 	__be32 *pptr, *curr, *end;
@@ -172,11 +174,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 
 	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
 	/* are all of the bits in range? */
-	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
+	if ((offset + count) > sbi->total_blocks)
 		return -2;
 
-	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
-	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+	mutex_lock(&sbi->alloc_mutex);
+	mapping = sbi->alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
 	page = read_mapping_page(mapping, pnr, NULL);
 	pptr = kmap(page);
@@ -224,9 +226,9 @@ done:
 out:
 	set_page_dirty(page);
 	kunmap(page);
-	HFSPLUS_SB(sb).free_blocks += len;
+	sbi->free_blocks += len;
 	sb->s_dirt = 1;
-	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+	mutex_unlock(&sbi->alloc_mutex);
 
 	return 0;
 }
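Two things happen in the bitmap.c hunks: the repeated HFSPLUS_SB(sb) lookup is hoisted into a local sbi pointer (which also absorbs the ".field" to "->field" conversion running through this series), and the allocation bitmap gets its own alloc_mutex instead of borrowing the allocation file's i_mutex. A sketch of the resulting shape (block_op() is hypothetical; HFSPLUS_SB and alloc_mutex are the real names from the diff):

	/* Sketch: cache the sb-info pointer once, lock the dedicated mutex. */
	static int block_op(struct super_block *sb)
	{
		struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
		int err = 0;

		mutex_lock(&sbi->alloc_mutex);	/* dedicated lock, not i_mutex */
		/* ... manipulate sbi->free_blocks and the bitmap pages ... */
		mutex_unlock(&sbi->alloc_mutex);
		return err;
	}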
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index c88e5d72a402..2f39d05443e1 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -42,10 +42,13 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
 		recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
 		if (!recoff)
 			return 0;
-		if (node->tree->attributes & HFS_TREE_BIGKEYS)
-			retval = hfs_bnode_read_u16(node, recoff) + 2;
-		else
-			retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
+
+		retval = hfs_bnode_read_u16(node, recoff) + 2;
+		if (retval > node->tree->max_key_len + 2) {
+			printk(KERN_ERR "hfs: keylen %d too large\n",
+				retval);
+			retval = 0;
+		}
 	}
 	return retval;
 }
@@ -216,7 +219,7 @@ skip:
 static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 {
 	struct hfs_btree *tree;
-	struct hfs_bnode *node, *new_node;
+	struct hfs_bnode *node, *new_node, *next_node;
 	struct hfs_bnode_desc node_desc;
 	int num_recs, new_rec_off, new_off, old_rec_off;
 	int data_start, data_end, size;
@@ -235,6 +238,17 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 	new_node->type = node->type;
 	new_node->height = node->height;
 
+	if (node->next)
+		next_node = hfs_bnode_find(tree, node->next);
+	else
+		next_node = NULL;
+
+	if (IS_ERR(next_node)) {
+		hfs_bnode_put(node);
+		hfs_bnode_put(new_node);
+		return next_node;
+	}
+
 	size = tree->node_size / 2 - node->num_recs * 2 - 14;
 	old_rec_off = tree->node_size - 4;
 	num_recs = 1;
@@ -248,6 +262,8 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 			/* panic? */
 			hfs_bnode_put(node);
 			hfs_bnode_put(new_node);
+			if (next_node)
+				hfs_bnode_put(next_node);
 			return ERR_PTR(-ENOSPC);
 		}
 
@@ -302,8 +318,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 	hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
 
 	/* update next bnode header */
-	if (new_node->next) {
-		struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+	if (next_node) {
 		next_node->prev = new_node->this;
 		hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
 		node_desc.prev = cpu_to_be32(next_node->prev);
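The hfs_bnode_split() change above moves the next-node lookup to the top of the function: everything that can fail (hfs_bnode_find() returns an ERR_PTR on I/O error) is resolved before any on-disk linkage is modified, so the error path only has references to drop. A sketch of the ordering, with hypothetical helper names (find_node, put_node):

	/* Sketch: resolve fallible lookups before mutating shared state. */
	static struct node *split_like(struct tree *t, struct node *n,
				       struct node *nn)
	{
		struct node *next = n->next ? find_node(t, n->next) : NULL;

		if (IS_ERR(next)) {
			put_node(n);
			put_node(nn);
			return next;	/* propagate the lookup error */
		}

		/* ... now it is safe to relink n, nn and next ... */
		return nn;
	}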
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index e49fcee1e293..22e4d4e32999 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -30,7 +30,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 	if (!tree)
 		return NULL;
 
-	init_MUTEX(&tree->tree_lock);
+	mutex_init(&tree->tree_lock);
 	spin_lock_init(&tree->hash_lock);
 	tree->sb = sb;
 	tree->cnid = id;
@@ -39,10 +39,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 		goto free_tree;
 	tree->inode = inode;
 
+	if (!HFSPLUS_I(tree->inode)->first_blocks) {
+		printk(KERN_ERR
+		       "hfs: invalid btree extent records (0 size).\n");
+		goto free_inode;
+	}
+
 	mapping = tree->inode->i_mapping;
 	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
-		goto free_tree;
+		goto free_inode;
 
 	/* Load the header */
 	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
@@ -57,27 +63,56 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 	tree->max_key_len = be16_to_cpu(head->max_key_len);
 	tree->depth = be16_to_cpu(head->depth);
 
-	/* Set the correct compare function */
-	if (id == HFSPLUS_EXT_CNID) {
+	/* Verify the tree and set the correct compare function */
+	switch (id) {
+	case HFSPLUS_EXT_CNID:
+		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
+			printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
+				tree->max_key_len);
+			goto fail_page;
+		}
+		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
+			printk(KERN_ERR "hfs: invalid extent btree flag\n");
+			goto fail_page;
+		}
+
 		tree->keycmp = hfsplus_ext_cmp_key;
-	} else if (id == HFSPLUS_CAT_CNID) {
-		if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
+		break;
+	case HFSPLUS_CAT_CNID:
+		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
+			printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
+				tree->max_key_len);
+			goto fail_page;
+		}
+		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+			printk(KERN_ERR "hfs: invalid catalog btree flag\n");
+			goto fail_page;
+		}
+
+		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
 		    (head->key_type == HFSPLUS_KEY_BINARY))
 			tree->keycmp = hfsplus_cat_bin_cmp_key;
 		else {
 			tree->keycmp = hfsplus_cat_case_cmp_key;
-			HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
+			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
 		}
-	} else {
+		break;
+	default:
 		printk(KERN_ERR "hfs: unknown B*Tree requested\n");
 		goto fail_page;
 	}
 
+	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
+		printk(KERN_ERR "hfs: invalid btree flag\n");
+		goto fail_page;
+	}
+
 	size = tree->node_size;
 	if (!is_power_of_2(size))
 		goto fail_page;
 	if (!tree->node_count)
 		goto fail_page;
+
 	tree->node_size_shift = ffs(size) - 1;
 
 	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -87,10 +122,11 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 	return tree;
 
  fail_page:
-	tree->inode->i_mapping->a_ops = &hfsplus_aops;
 	page_cache_release(page);
- free_tree:
+ free_inode:
+	tree->inode->i_mapping->a_ops = &hfsplus_aops;
 	iput(tree->inode);
+ free_tree:
 	kfree(tree);
 	return NULL;
 }
@@ -192,17 +228,18 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
 	while (!tree->free_nodes) {
 		struct inode *inode = tree->inode;
+		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
 		u32 count;
 		int res;
 
 		res = hfsplus_file_extend(inode);
 		if (res)
 			return ERR_PTR(res);
-		HFSPLUS_I(inode).phys_size = inode->i_size =
-			(loff_t)HFSPLUS_I(inode).alloc_blocks <<
-				HFSPLUS_SB(tree->sb).alloc_blksz_shift;
-		HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
-			HFSPLUS_SB(tree->sb).fs_shift;
+		hip->phys_size = inode->i_size =
+			(loff_t)hip->alloc_blocks <<
+				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
+		hip->fs_blocks =
+			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
 		inode_set_bytes(inode, inode->i_size);
 		count = inode->i_size >> tree->node_size_shift;
 		tree->free_nodes = count - tree->node_count;
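The hfs_btree_open() rework above also untangles its error unwinding: the labels are ordered inverse to acquisition (fail_page, then the new free_inode, then free_tree), so each failure point jumps to exactly the cleanup it needs and nothing is released twice. A sketch of the pattern, with hypothetical helpers (grab_inode, map_page, release_inode return/behave as named):

	/* Sketch: unwind labels ordered inverse to acquisition. */
	struct thing { struct inode *inode; };

	static struct thing *open_thing(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return NULL;
		if (grab_inode(t))	/* hypothetical, nonzero on failure */
			goto free_tree;
		if (map_page(t))	/* hypothetical, nonzero on failure */
			goto free_inode;
		return t;

	free_inode:
		release_inode(t);	/* hypothetical */
	free_tree:
		kfree(t);
		return NULL;
	}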
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index f6874acb2cf2..8af45fc5b051 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -67,7 +67,7 @@ static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
 	key->key_len = cpu_to_be16(6 + ustrlen);
 }
 
-static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
+void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
 {
 	if (inode->i_flags & S_IMMUTABLE)
 		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
@@ -77,15 +77,24 @@ static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
 		perms->rootflags |= HFSPLUS_FLG_APPEND;
 	else
 		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
-	HFSPLUS_I(inode).rootflags = perms->rootflags;
-	HFSPLUS_I(inode).userflags = perms->userflags;
+
+	perms->userflags = HFSPLUS_I(inode)->userflags;
 	perms->mode = cpu_to_be16(inode->i_mode);
 	perms->owner = cpu_to_be32(inode->i_uid);
 	perms->group = cpu_to_be32(inode->i_gid);
+
+	if (S_ISREG(inode->i_mode))
+		perms->dev = cpu_to_be32(inode->i_nlink);
+	else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode))
+		perms->dev = cpu_to_be32(inode->i_rdev);
+	else
+		perms->dev = 0;
 }
 
 static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode)
 {
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+
 	if (S_ISDIR(inode->i_mode)) {
 		struct hfsplus_cat_folder *folder;
 
@@ -93,13 +102,13 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
 		memset(folder, 0, sizeof(*folder));
 		folder->type = cpu_to_be16(HFSPLUS_FOLDER);
 		folder->id = cpu_to_be32(inode->i_ino);
-		HFSPLUS_I(inode).create_date =
+		HFSPLUS_I(inode)->create_date =
 			folder->create_date =
 			folder->content_mod_date =
 			folder->attribute_mod_date =
 			folder->access_date = hfsp_now2mt();
-		hfsplus_set_perms(inode, &folder->permissions);
-		if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
+		hfsplus_cat_set_perms(inode, &folder->permissions);
+		if (inode == sbi->hidden_dir)
 			/* invisible and namelocked */
 			folder->user_info.frFlags = cpu_to_be16(0x5000);
 		return sizeof(*folder);
@@ -111,19 +120,19 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
 		file->type = cpu_to_be16(HFSPLUS_FILE);
 		file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
 		file->id = cpu_to_be32(cnid);
-		HFSPLUS_I(inode).create_date =
+		HFSPLUS_I(inode)->create_date =
 			file->create_date =
 			file->content_mod_date =
 			file->attribute_mod_date =
 			file->access_date = hfsp_now2mt();
 		if (cnid == inode->i_ino) {
-			hfsplus_set_perms(inode, &file->permissions);
+			hfsplus_cat_set_perms(inode, &file->permissions);
 			if (S_ISLNK(inode->i_mode)) {
 				file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
 				file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
 			} else {
-				file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
-				file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+				file->user_info.fdType = cpu_to_be32(sbi->type);
+				file->user_info.fdCreator = cpu_to_be32(sbi->creator);
 			}
 			if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
 				file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
@@ -131,8 +140,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
 			file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
 			file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
 			file->user_info.fdFlags = cpu_to_be16(0x100);
-			file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date;
-			file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
+			file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date;
+			file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid);
 		}
 		return sizeof(*file);
 	}
@@ -180,15 +189,14 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
 
 int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
 {
+	struct super_block *sb = dir->i_sb;
 	struct hfs_find_data fd;
-	struct super_block *sb;
 	hfsplus_cat_entry entry;
 	int entry_size;
 	int err;
 
 	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
-	sb = dir->i_sb;
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
 	hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
 	entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
@@ -234,7 +242,7 @@ err2:
 
 int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 {
-	struct super_block *sb;
+	struct super_block *sb = dir->i_sb;
 	struct hfs_find_data fd;
 	struct hfsplus_fork_raw fork;
 	struct list_head *pos;
@@ -242,8 +250,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 	u16 type;
 
 	dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
-	sb = dir->i_sb;
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
 	if (!str) {
 		int len;
@@ -279,7 +286,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
 	}
 
-	list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) {
+	list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
 		struct hfsplus_readdir_data *rd =
 			list_entry(pos, struct hfsplus_readdir_data, list);
 		if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
@@ -312,7 +319,7 @@ int hfsplus_rename_cat(u32 cnid,
 		       struct inode *src_dir, struct qstr *src_name,
 		       struct inode *dst_dir, struct qstr *dst_name)
 {
-	struct super_block *sb;
+	struct super_block *sb = src_dir->i_sb;
 	struct hfs_find_data src_fd, dst_fd;
 	hfsplus_cat_entry entry;
 	int entry_size, type;
@@ -320,8 +327,7 @@ int hfsplus_rename_cat(u32 cnid,
 
 	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
 		dst_dir->i_ino, dst_name->name);
-	sb = src_dir->i_sb;
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd);
+	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
 	dst_fd = src_fd;
 
 	/* find the old dir entry and read the data */
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 764fd1bdca88..d236d85ec9d7 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -39,7 +39,7 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
 
 	dentry->d_op = &hfsplus_dentry_operations;
 	dentry->d_fsdata = NULL;
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
 again:
 	err = hfs_brec_read(&fd, &entry, sizeof(entry));
@@ -68,9 +68,9 @@ again:
 		cnid = be32_to_cpu(entry.file.id);
 		if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
 		    entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
-		    (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date ||
-		     entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) &&
-		    HFSPLUS_SB(sb).hidden_dir) {
+		    (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->create_date ||
+		     entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)->create_date) &&
+		    HFSPLUS_SB(sb)->hidden_dir) {
 			struct qstr str;
 			char name[32];
 
@@ -86,7 +86,8 @@ again:
 			linkid = be32_to_cpu(entry.file.permissions.dev);
 			str.len = sprintf(name, "iNode%d", linkid);
 			str.name = name;
-			hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
+			hfsplus_cat_build_key(sb, fd.search_key,
+				HFSPLUS_SB(sb)->hidden_dir->i_ino, &str);
 			goto again;
 		}
 	} else if (!dentry->d_fsdata)
@@ -101,7 +102,7 @@ again:
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 	if (S_ISREG(inode->i_mode))
-		HFSPLUS_I(inode).dev = linkid;
+		HFSPLUS_I(inode)->linkid = linkid;
 out:
 	d_add(dentry, inode);
 	return NULL;
@@ -124,7 +125,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	if (filp->f_pos >= inode->i_size)
 		return 0;
 
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
 	err = hfs_brec_find(&fd);
 	if (err)
@@ -180,8 +181,9 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
 			err = -EIO;
 			goto out;
 		}
-		if (HFSPLUS_SB(sb).hidden_dir &&
-		    HFSPLUS_SB(sb).hidden_dir->i_ino == be32_to_cpu(entry.folder.id))
+		if (HFSPLUS_SB(sb)->hidden_dir &&
+		    HFSPLUS_SB(sb)->hidden_dir->i_ino ==
+				be32_to_cpu(entry.folder.id))
 			goto next;
 		if (filldir(dirent, strbuf, len, filp->f_pos,
 			    be32_to_cpu(entry.folder.id), DT_DIR))
@@ -217,7 +219,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		}
 		filp->private_data = rd;
 		rd->file = filp;
-		list_add(&rd->list, &HFSPLUS_I(inode).open_dir_list);
+		list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
 	}
 	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
 out:
@@ -229,38 +231,18 @@ static int hfsplus_dir_release(struct inode *inode, struct file *file)
 {
 	struct hfsplus_readdir_data *rd = file->private_data;
 	if (rd) {
+		mutex_lock(&inode->i_mutex);
 		list_del(&rd->list);
+		mutex_unlock(&inode->i_mutex);
 		kfree(rd);
 	}
 	return 0;
 }
 
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
-			  struct nameidata *nd)
-{
-	struct inode *inode;
-	int res;
-
-	inode = hfsplus_new_inode(dir->i_sb, mode);
-	if (!inode)
-		return -ENOSPC;
-
-	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
-	if (res) {
-		inode->i_nlink = 0;
-		hfsplus_delete_inode(inode);
-		iput(inode);
-		return res;
-	}
-	hfsplus_instantiate(dentry, inode, inode->i_ino);
-	mark_inode_dirty(inode);
-	return 0;
-}
-
 static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
 			struct dentry *dst_dentry)
 {
-	struct super_block *sb = dst_dir->i_sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb);
 	struct inode *inode = src_dentry->d_inode;
 	struct inode *src_dir = src_dentry->d_parent->d_inode;
 	struct qstr str;
@@ -270,7 +252,10 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
 
 	if (HFSPLUS_IS_RSRC(inode))
 		return -EPERM;
+	if (!S_ISREG(inode->i_mode))
+		return -EPERM;
 
+	mutex_lock(&sbi->vh_mutex);
 	if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) {
 		for (;;) {
 			get_random_bytes(&id, sizeof(cnid));
@@ -279,40 +264,41 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
 			str.len = sprintf(name, "iNode%d", id);
 			res = hfsplus_rename_cat(inode->i_ino,
 						 src_dir, &src_dentry->d_name,
-						 HFSPLUS_SB(sb).hidden_dir, &str);
+						 sbi->hidden_dir, &str);
 			if (!res)
 				break;
 			if (res != -EEXIST)
-				return res;
+				goto out;
 		}
-		HFSPLUS_I(inode).dev = id;
-		cnid = HFSPLUS_SB(sb).next_cnid++;
+		HFSPLUS_I(inode)->linkid = id;
+		cnid = sbi->next_cnid++;
 		src_dentry->d_fsdata = (void *)(unsigned long)cnid;
 		res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode);
 		if (res)
 			/* panic? */
-			return res;
-		HFSPLUS_SB(sb).file_count++;
+			goto out;
+		sbi->file_count++;
 	}
-	cnid = HFSPLUS_SB(sb).next_cnid++;
+	cnid = sbi->next_cnid++;
 	res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
 	if (res)
-		return res;
+		goto out;
 
 	inc_nlink(inode);
 	hfsplus_instantiate(dst_dentry, inode, cnid);
 	atomic_inc(&inode->i_count);
 	inode->i_ctime = CURRENT_TIME_SEC;
 	mark_inode_dirty(inode);
-	HFSPLUS_SB(sb).file_count++;
-	sb->s_dirt = 1;
-
-	return 0;
+	sbi->file_count++;
+	dst_dir->i_sb->s_dirt = 1;
+out:
+	mutex_unlock(&sbi->vh_mutex);
+	return res;
 }
 
 static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
 {
-	struct super_block *sb = dir->i_sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
 	struct inode *inode = dentry->d_inode;
 	struct qstr str;
 	char name[32];
@@ -322,21 +308,22 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
 	if (HFSPLUS_IS_RSRC(inode))
 		return -EPERM;
 
+	mutex_lock(&sbi->vh_mutex);
 	cnid = (u32)(unsigned long)dentry->d_fsdata;
 	if (inode->i_ino == cnid &&
-	    atomic_read(&HFSPLUS_I(inode).opencnt)) {
+	    atomic_read(&HFSPLUS_I(inode)->opencnt)) {
 		str.name = name;
 		str.len = sprintf(name, "temp%lu", inode->i_ino);
 		res = hfsplus_rename_cat(inode->i_ino,
 					 dir, &dentry->d_name,
-					 HFSPLUS_SB(sb).hidden_dir, &str);
+					 sbi->hidden_dir, &str);
 		if (!res)
 			inode->i_flags |= S_DEAD;
-		return res;
+		goto out;
 	}
 	res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
 	if (res)
-		return res;
+		goto out;
 
 	if (inode->i_nlink > 0)
 		drop_nlink(inode);
@@ -344,10 +331,10 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
 		clear_nlink(inode);
 	if (!inode->i_nlink) {
 		if (inode->i_ino != cnid) {
-			HFSPLUS_SB(sb).file_count--;
-			if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
+			sbi->file_count--;
+			if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) {
 				res = hfsplus_delete_cat(inode->i_ino,
-							 HFSPLUS_SB(sb).hidden_dir,
+							 sbi->hidden_dir,
 							 NULL);
 				if (!res)
 					hfsplus_delete_inode(inode);
@@ -356,107 +343,108 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
 		} else
 			hfsplus_delete_inode(inode);
 	} else
-		HFSPLUS_SB(sb).file_count--;
+		sbi->file_count--;
 	inode->i_ctime = CURRENT_TIME_SEC;
 	mark_inode_dirty(inode);
-
+out:
+	mutex_unlock(&sbi->vh_mutex);
 	return res;
 }
 
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
-	struct inode *inode;
-	int res;
-
-	inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode);
-	if (!inode)
-		return -ENOSPC;
-
-	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
-	if (res) {
-		inode->i_nlink = 0;
-		hfsplus_delete_inode(inode);
-		iput(inode);
-		return res;
-	}
-	hfsplus_instantiate(dentry, inode, inode->i_ino);
-	mark_inode_dirty(inode);
-	return 0;
-}
-
 static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
 {
-	struct inode *inode;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+	struct inode *inode = dentry->d_inode;
 	int res;
 
-	inode = dentry->d_inode;
 	if (inode->i_size != 2)
 		return -ENOTEMPTY;
+
+	mutex_lock(&sbi->vh_mutex);
 	res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
 	if (res)
-		return res;
+		goto out;
 	clear_nlink(inode);
 	inode->i_ctime = CURRENT_TIME_SEC;
 	hfsplus_delete_inode(inode);
 	mark_inode_dirty(inode);
-	return 0;
+out:
+	mutex_unlock(&sbi->vh_mutex);
+	return res;
 }
 
 static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
 			   const char *symname)
 {
-	struct super_block *sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
 	struct inode *inode;
-	int res;
+	int res = -ENOSPC;
 
-	sb = dir->i_sb;
-	inode = hfsplus_new_inode(sb, S_IFLNK | S_IRWXUGO);
+	mutex_lock(&sbi->vh_mutex);
+	inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
 	if (!inode)
-		return -ENOSPC;
+		goto out;
 
 	res = page_symlink(inode, symname, strlen(symname) + 1);
-	if (res) {
-		inode->i_nlink = 0;
-		hfsplus_delete_inode(inode);
-		iput(inode);
-		return res;
-	}
+	if (res)
+		goto out_err;
 
-	mark_inode_dirty(inode);
 	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+	if (res)
+		goto out_err;
 
-	if (!res) {
-		hfsplus_instantiate(dentry, inode, inode->i_ino);
-		mark_inode_dirty(inode);
-	}
+	hfsplus_instantiate(dentry, inode, inode->i_ino);
+	mark_inode_dirty(inode);
+	goto out;
 
+out_err:
+	inode->i_nlink = 0;
+	hfsplus_delete_inode(inode);
+	iput(inode);
+out:
+	mutex_unlock(&sbi->vh_mutex);
 	return res;
 }
 
 static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
 			 int mode, dev_t rdev)
 {
-	struct super_block *sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
 	struct inode *inode;
-	int res;
+	int res = -ENOSPC;
 
-	sb = dir->i_sb;
-	inode = hfsplus_new_inode(sb, mode);
+	mutex_lock(&sbi->vh_mutex);
+	inode = hfsplus_new_inode(dir->i_sb, mode);
 	if (!inode)
-		return -ENOSPC;
+		goto out;
+
+	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode))
+		init_special_inode(inode, mode, rdev);
 
 	res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
 	if (res) {
 		inode->i_nlink = 0;
 		hfsplus_delete_inode(inode);
 		iput(inode);
-		return res;
+		goto out;
 	}
-	init_special_inode(inode, mode, rdev);
+
 	hfsplus_instantiate(dentry, inode, inode->i_ino);
 	mark_inode_dirty(inode);
+out:
+	mutex_unlock(&sbi->vh_mutex);
+	return res;
+}
 
-	return 0;
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+			  struct nameidata *nd)
+{
+	return hfsplus_mknod(dir, dentry, mode, 0);
+}
+
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
 }
 
 static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -466,7 +454,10 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
466 454
467 /* Unlink destination if it already exists */ 455 /* Unlink destination if it already exists */
468 if (new_dentry->d_inode) { 456 if (new_dentry->d_inode) {
469 res = hfsplus_unlink(new_dir, new_dentry); 457 if (S_ISDIR(new_dentry->d_inode->i_mode))
458 res = hfsplus_rmdir(new_dir, new_dentry);
459 else
460 res = hfsplus_unlink(new_dir, new_dentry);
470 if (res) 461 if (res)
471 return res; 462 return res;
472 } 463 }
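
The reworked hfsplus_rmdir/_symlink/_mknod above all share one shape: take sbi->vh_mutex up front, then funnel every exit (success or failure) through a single label that drops the lock. A minimal userspace sketch of that lock/goto-unwind pattern, using pthreads and invented stub names for illustration only:

/*
 * Userspace sketch (not kernel code) of the vh_mutex pattern:
 * check cheap preconditions before taking the lock, then make
 * every later exit go through "out" so the lock drops exactly once.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vh_mutex = PTHREAD_MUTEX_INITIALIZER;

static int delete_cat_stub(int ino) { return ino < 0 ? -EIO : 0; }

static int rmdir_like(int ino, int nchildren)
{
	int res;

	if (nchildren != 0)        /* mirrors the i_size != 2 check, */
		return -ENOTEMPTY; /* done before taking the lock   */

	pthread_mutex_lock(&vh_mutex);
	res = delete_cat_stub(ino);
	if (res)
		goto out;          /* error path still unlocks */
	/* ... clear_nlink / delete_inode work would go here ... */
out:
	pthread_mutex_unlock(&vh_mutex);
	return res;
}

int main(void)
{
	printf("%d %d\n", rmdir_like(42, 0), rmdir_like(-1, 0));
	return 0;
}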
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 0022eec63cda..0c9cb1820a52 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -85,35 +85,49 @@ static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
85 85
86static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) 86static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
87{ 87{
88 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
88 int res; 89 int res;
89 90
90 hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start, 91 WARN_ON(!mutex_is_locked(&hip->extents_lock));
91 HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); 92
93 hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
94 HFSPLUS_IS_RSRC(inode) ?
95 HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
96
92 res = hfs_brec_find(fd); 97 res = hfs_brec_find(fd);
93 if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) { 98 if (hip->flags & HFSPLUS_FLG_EXT_NEW) {
94 if (res != -ENOENT) 99 if (res != -ENOENT)
95 return; 100 return;
96 hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec)); 101 hfs_brec_insert(fd, hip->cached_extents,
97 HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); 102 sizeof(hfsplus_extent_rec));
103 hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
98 } else { 104 } else {
99 if (res) 105 if (res)
100 return; 106 return;
101 hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength); 107 hfs_bnode_write(fd->bnode, hip->cached_extents,
102 HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY; 108 fd->entryoffset, fd->entrylength);
109 hip->flags &= ~HFSPLUS_FLG_EXT_DIRTY;
103 } 110 }
104} 111}
105 112
106void hfsplus_ext_write_extent(struct inode *inode) 113static void hfsplus_ext_write_extent_locked(struct inode *inode)
107{ 114{
108 if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) { 115 if (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_EXT_DIRTY) {
109 struct hfs_find_data fd; 116 struct hfs_find_data fd;
110 117
111 hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); 118 hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
112 __hfsplus_ext_write_extent(inode, &fd); 119 __hfsplus_ext_write_extent(inode, &fd);
113 hfs_find_exit(&fd); 120 hfs_find_exit(&fd);
114 } 121 }
115} 122}
116 123
124void hfsplus_ext_write_extent(struct inode *inode)
125{
126 mutex_lock(&HFSPLUS_I(inode)->extents_lock);
127 hfsplus_ext_write_extent_locked(inode);
128 mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
129}
130
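
The hunk above introduces the common locked/unlocked function pair: callers that already hold extents_lock (the insert_extent path in hfsplus_file_extend, later in this patch) call the _locked variant directly, while everyone else uses the public wrapper. A small userspace model of the same split, with illustrative names:

/*
 * _locked/unlocked pair: the wrapper owns the lock, the _locked
 * variant documents (and here just assumes) that it is held.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t extents_lock = PTHREAD_MUTEX_INITIALIZER;
static int dirty;

static void write_extent_locked(void)
{
	/* caller must hold extents_lock */
	if (dirty) {
		dirty = 0;
		puts("flushed cached extent");
	}
}

static void write_extent(void)
{
	pthread_mutex_lock(&extents_lock);
	write_extent_locked();
	pthread_mutex_unlock(&extents_lock);
}

int main(void)
{
	dirty = 1;
	write_extent();           /* external caller: takes the lock */

	pthread_mutex_lock(&extents_lock);
	dirty = 1;
	write_extent_locked();    /* internal caller: lock already held */
	pthread_mutex_unlock(&extents_lock);
	return 0;
}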
117static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, 131static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
118 struct hfsplus_extent *extent, 132 struct hfsplus_extent *extent,
119 u32 cnid, u32 block, u8 type) 133 u32 cnid, u32 block, u8 type)
@@ -136,33 +150,39 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
136 150
137static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block) 151static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
138{ 152{
153 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
139 int res; 154 int res;
140 155
141 if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) 156 WARN_ON(!mutex_is_locked(&hip->extents_lock));
157
158 if (hip->flags & HFSPLUS_FLG_EXT_DIRTY)
142 __hfsplus_ext_write_extent(inode, fd); 159 __hfsplus_ext_write_extent(inode, fd);
143 160
144 res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino, 161 res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
145 block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); 162 block, HFSPLUS_IS_RSRC(inode) ?
163 HFSPLUS_TYPE_RSRC :
164 HFSPLUS_TYPE_DATA);
146 if (!res) { 165 if (!res) {
147 HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block); 166 hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
148 HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents); 167 hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents);
149 } else { 168 } else {
150 HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0; 169 hip->cached_start = hip->cached_blocks = 0;
151 HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); 170 hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
152 } 171 }
153 return res; 172 return res;
154} 173}
155 174
156static int hfsplus_ext_read_extent(struct inode *inode, u32 block) 175static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
157{ 176{
177 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
158 struct hfs_find_data fd; 178 struct hfs_find_data fd;
159 int res; 179 int res;
160 180
161 if (block >= HFSPLUS_I(inode).cached_start && 181 if (block >= hip->cached_start &&
162 block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks) 182 block < hip->cached_start + hip->cached_blocks)
163 return 0; 183 return 0;
164 184
165 hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); 185 hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
166 res = __hfsplus_ext_cache_extent(&fd, inode, block); 186 res = __hfsplus_ext_cache_extent(&fd, inode, block);
167 hfs_find_exit(&fd); 187 hfs_find_exit(&fd);
168 return res; 188 return res;
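
The fast path above skips the B-tree lookup whenever the wanted block already falls inside the cached window [cached_start, cached_start + cached_blocks). A standalone demonstration of that range check, with made-up numbers:

/*
 * Cached-extent hit test, extracted from hfsplus_ext_read_extent.
 */
#include <stdint.h>
#include <stdio.h>

struct extent_cache { uint32_t start, blocks; };

static int cache_hit(const struct extent_cache *c, uint32_t block)
{
	return block >= c->start && block < c->start + c->blocks;
}

int main(void)
{
	struct extent_cache c = { .start = 100, .blocks = 8 };

	printf("%d %d %d\n",
	       cache_hit(&c, 99),   /* 0: below the window   */
	       cache_hit(&c, 100),  /* 1: first cached block */
	       cache_hit(&c, 108)); /* 0: one past the end   */
	return 0;
}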
@@ -172,21 +192,21 @@ static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
172int hfsplus_get_block(struct inode *inode, sector_t iblock, 192int hfsplus_get_block(struct inode *inode, sector_t iblock,
173 struct buffer_head *bh_result, int create) 193 struct buffer_head *bh_result, int create)
174{ 194{
175 struct super_block *sb; 195 struct super_block *sb = inode->i_sb;
196 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
197 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
176 int res = -EIO; 198 int res = -EIO;
177 u32 ablock, dblock, mask; 199 u32 ablock, dblock, mask;
178 int shift; 200 int shift;
179 201
180 sb = inode->i_sb;
181
182 /* Convert inode block to disk allocation block */ 202 /* Convert inode block to disk allocation block */
183 shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits; 203 shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
184 ablock = iblock >> HFSPLUS_SB(sb).fs_shift; 204 ablock = iblock >> sbi->fs_shift;
185 205
186 if (iblock >= HFSPLUS_I(inode).fs_blocks) { 206 if (iblock >= hip->fs_blocks) {
187 if (iblock > HFSPLUS_I(inode).fs_blocks || !create) 207 if (iblock > hip->fs_blocks || !create)
188 return -EIO; 208 return -EIO;
189 if (ablock >= HFSPLUS_I(inode).alloc_blocks) { 209 if (ablock >= hip->alloc_blocks) {
190 res = hfsplus_file_extend(inode); 210 res = hfsplus_file_extend(inode);
191 if (res) 211 if (res)
192 return res; 212 return res;
@@ -194,33 +214,33 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
194 } else 214 } else
195 create = 0; 215 create = 0;
196 216
197 if (ablock < HFSPLUS_I(inode).first_blocks) { 217 if (ablock < hip->first_blocks) {
198 dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock); 218 dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
199 goto done; 219 goto done;
200 } 220 }
201 221
202 if (inode->i_ino == HFSPLUS_EXT_CNID) 222 if (inode->i_ino == HFSPLUS_EXT_CNID)
203 return -EIO; 223 return -EIO;
204 224
205 mutex_lock(&HFSPLUS_I(inode).extents_lock); 225 mutex_lock(&hip->extents_lock);
206 res = hfsplus_ext_read_extent(inode, ablock); 226 res = hfsplus_ext_read_extent(inode, ablock);
207 if (!res) { 227 if (!res) {
208 dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock - 228 dblock = hfsplus_ext_find_block(hip->cached_extents,
209 HFSPLUS_I(inode).cached_start); 229 ablock - hip->cached_start);
210 } else { 230 } else {
211 mutex_unlock(&HFSPLUS_I(inode).extents_lock); 231 mutex_unlock(&hip->extents_lock);
212 return -EIO; 232 return -EIO;
213 } 233 }
214 mutex_unlock(&HFSPLUS_I(inode).extents_lock); 234 mutex_unlock(&hip->extents_lock);
215 235
216done: 236done:
217 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); 237 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
218 mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1; 238 mask = (1 << sbi->fs_shift) - 1;
219 map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask)); 239 map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask));
220 if (create) { 240 if (create) {
221 set_buffer_new(bh_result); 241 set_buffer_new(bh_result);
222 HFSPLUS_I(inode).phys_size += sb->s_blocksize; 242 hip->phys_size += sb->s_blocksize;
223 HFSPLUS_I(inode).fs_blocks++; 243 hip->fs_blocks++;
224 inode_add_bytes(inode, sb->s_blocksize); 244 inode_add_bytes(inode, sb->s_blocksize);
225 mark_inode_dirty(inode); 245 mark_inode_dirty(inode);
226 } 246 }
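
The mapping math in hfsplus_get_block is worth pulling out: with fs_shift the log2 ratio of allocation block to sector size, an inode block maps to allocation block iblock >> fs_shift, and the device block is (dblock << fs_shift) + blockoffset + (iblock & mask). A sketch with invented numbers, purely to exercise the arithmetic:

/*
 * Shift/mask block mapping, as in hfsplus_get_block.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int fs_shift = 3;             /* 4K alloc blocks, 512B sectors */
	uint32_t blockoffset = 16;    /* volume start, in sectors */
	uint64_t iblock = 21;         /* inode-relative block */

	uint32_t ablock = iblock >> fs_shift;        /* = 2 */
	uint32_t dblock = 1000 + ablock;             /* pretend extent lookup */
	uint32_t mask = (1u << fs_shift) - 1;        /* = 7 */
	uint64_t dev = ((uint64_t)dblock << fs_shift)
		       + blockoffset + (iblock & mask);

	printf("ablock=%u dev=%llu\n", ablock, (unsigned long long)dev);
	return 0;
}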
@@ -327,7 +347,7 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw
327 if (total_blocks == blocks) 347 if (total_blocks == blocks)
328 return 0; 348 return 0;
329 349
330 hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd); 350 hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
331 do { 351 do {
332 res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, 352 res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
333 total_blocks, type); 353 total_blocks, type);
@@ -348,29 +368,33 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw
348int hfsplus_file_extend(struct inode *inode) 368int hfsplus_file_extend(struct inode *inode)
349{ 369{
350 struct super_block *sb = inode->i_sb; 370 struct super_block *sb = inode->i_sb;
371 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
372 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
351 u32 start, len, goal; 373 u32 start, len, goal;
352 int res; 374 int res;
353 375
354 if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) { 376 if (sbi->alloc_file->i_size * 8 <
377 sbi->total_blocks - sbi->free_blocks + 8) {
355 // extend alloc file 378 // extend alloc file
356 printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8, 379 printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n",
357 HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks); 380 sbi->alloc_file->i_size * 8,
381 sbi->total_blocks, sbi->free_blocks);
358 return -ENOSPC; 382 return -ENOSPC;
359 } 383 }
360 384
361 mutex_lock(&HFSPLUS_I(inode).extents_lock); 385 mutex_lock(&hip->extents_lock);
362 if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks) 386 if (hip->alloc_blocks == hip->first_blocks)
363 goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents); 387 goal = hfsplus_ext_lastblock(hip->first_extents);
364 else { 388 else {
365 res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks); 389 res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
366 if (res) 390 if (res)
367 goto out; 391 goto out;
368 goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents); 392 goal = hfsplus_ext_lastblock(hip->cached_extents);
369 } 393 }
370 394
371 len = HFSPLUS_I(inode).clump_blocks; 395 len = hip->clump_blocks;
372 start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len); 396 start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
373 if (start >= HFSPLUS_SB(sb).total_blocks) { 397 if (start >= sbi->total_blocks) {
374 start = hfsplus_block_allocate(sb, goal, 0, &len); 398 start = hfsplus_block_allocate(sb, goal, 0, &len);
375 if (start >= goal) { 399 if (start >= goal) {
376 res = -ENOSPC; 400 res = -ENOSPC;
@@ -379,56 +403,56 @@ int hfsplus_file_extend(struct inode *inode)
379 } 403 }
380 404
381 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 405 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
382 if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) { 406
383 if (!HFSPLUS_I(inode).first_blocks) { 407 if (hip->alloc_blocks <= hip->first_blocks) {
408 if (!hip->first_blocks) {
384 dprint(DBG_EXTENT, "first extents\n"); 409 dprint(DBG_EXTENT, "first extents\n");
385 /* no extents yet */ 410 /* no extents yet */
386 HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start); 411 hip->first_extents[0].start_block = cpu_to_be32(start);
387 HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len); 412 hip->first_extents[0].block_count = cpu_to_be32(len);
388 res = 0; 413 res = 0;
389 } else { 414 } else {
390 /* try to append to extents in inode */ 415 /* try to append to extents in inode */
391 res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents, 416 res = hfsplus_add_extent(hip->first_extents,
392 HFSPLUS_I(inode).alloc_blocks, 417 hip->alloc_blocks,
393 start, len); 418 start, len);
394 if (res == -ENOSPC) 419 if (res == -ENOSPC)
395 goto insert_extent; 420 goto insert_extent;
396 } 421 }
397 if (!res) { 422 if (!res) {
398 hfsplus_dump_extent(HFSPLUS_I(inode).first_extents); 423 hfsplus_dump_extent(hip->first_extents);
399 HFSPLUS_I(inode).first_blocks += len; 424 hip->first_blocks += len;
400 } 425 }
401 } else { 426 } else {
402 res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents, 427 res = hfsplus_add_extent(hip->cached_extents,
403 HFSPLUS_I(inode).alloc_blocks - 428 hip->alloc_blocks - hip->cached_start,
404 HFSPLUS_I(inode).cached_start,
405 start, len); 429 start, len);
406 if (!res) { 430 if (!res) {
407 hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); 431 hfsplus_dump_extent(hip->cached_extents);
408 HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY; 432 hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
409 HFSPLUS_I(inode).cached_blocks += len; 433 hip->cached_blocks += len;
410 } else if (res == -ENOSPC) 434 } else if (res == -ENOSPC)
411 goto insert_extent; 435 goto insert_extent;
412 } 436 }
413out: 437out:
414 mutex_unlock(&HFSPLUS_I(inode).extents_lock); 438 mutex_unlock(&hip->extents_lock);
415 if (!res) { 439 if (!res) {
416 HFSPLUS_I(inode).alloc_blocks += len; 440 hip->alloc_blocks += len;
417 mark_inode_dirty(inode); 441 mark_inode_dirty(inode);
418 } 442 }
419 return res; 443 return res;
420 444
421insert_extent: 445insert_extent:
422 dprint(DBG_EXTENT, "insert new extent\n"); 446 dprint(DBG_EXTENT, "insert new extent\n");
423 hfsplus_ext_write_extent(inode); 447 hfsplus_ext_write_extent_locked(inode);
424 448
425 memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); 449 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
426 HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start); 450 hip->cached_extents[0].start_block = cpu_to_be32(start);
427 HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len); 451 hip->cached_extents[0].block_count = cpu_to_be32(len);
428 hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); 452 hfsplus_dump_extent(hip->cached_extents);
429 HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW; 453 hip->flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
430 HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks; 454 hip->cached_start = hip->alloc_blocks;
431 HFSPLUS_I(inode).cached_blocks = len; 455 hip->cached_blocks = len;
432 456
433 res = 0; 457 res = 0;
434 goto out; 458 goto out;
@@ -437,13 +461,15 @@ insert_extent:
437void hfsplus_file_truncate(struct inode *inode) 461void hfsplus_file_truncate(struct inode *inode)
438{ 462{
439 struct super_block *sb = inode->i_sb; 463 struct super_block *sb = inode->i_sb;
464 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
440 struct hfs_find_data fd; 465 struct hfs_find_data fd;
441 u32 alloc_cnt, blk_cnt, start; 466 u32 alloc_cnt, blk_cnt, start;
442 int res; 467 int res;
443 468
444 dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino, 469 dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n",
445 (long long)HFSPLUS_I(inode).phys_size, inode->i_size); 470 inode->i_ino, (long long)hip->phys_size, inode->i_size);
446 if (inode->i_size > HFSPLUS_I(inode).phys_size) { 471
472 if (inode->i_size > hip->phys_size) {
447 struct address_space *mapping = inode->i_mapping; 473 struct address_space *mapping = inode->i_mapping;
448 struct page *page; 474 struct page *page;
449 void *fsdata; 475 void *fsdata;
@@ -460,47 +486,48 @@ void hfsplus_file_truncate(struct inode *inode)
460 return; 486 return;
461 mark_inode_dirty(inode); 487 mark_inode_dirty(inode);
462 return; 488 return;
463 } else if (inode->i_size == HFSPLUS_I(inode).phys_size) 489 } else if (inode->i_size == hip->phys_size)
464 return; 490 return;
465 491
466 blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift; 492 blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
467 alloc_cnt = HFSPLUS_I(inode).alloc_blocks; 493 HFSPLUS_SB(sb)->alloc_blksz_shift;
494 alloc_cnt = hip->alloc_blocks;
468 if (blk_cnt == alloc_cnt) 495 if (blk_cnt == alloc_cnt)
469 goto out; 496 goto out;
470 497
471 mutex_lock(&HFSPLUS_I(inode).extents_lock); 498 mutex_lock(&hip->extents_lock);
472 hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd); 499 hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
473 while (1) { 500 while (1) {
474 if (alloc_cnt == HFSPLUS_I(inode).first_blocks) { 501 if (alloc_cnt == hip->first_blocks) {
475 hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents, 502 hfsplus_free_extents(sb, hip->first_extents,
476 alloc_cnt, alloc_cnt - blk_cnt); 503 alloc_cnt, alloc_cnt - blk_cnt);
477 hfsplus_dump_extent(HFSPLUS_I(inode).first_extents); 504 hfsplus_dump_extent(hip->first_extents);
478 HFSPLUS_I(inode).first_blocks = blk_cnt; 505 hip->first_blocks = blk_cnt;
479 break; 506 break;
480 } 507 }
481 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); 508 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
482 if (res) 509 if (res)
483 break; 510 break;
484 start = HFSPLUS_I(inode).cached_start; 511 start = hip->cached_start;
485 hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents, 512 hfsplus_free_extents(sb, hip->cached_extents,
486 alloc_cnt - start, alloc_cnt - blk_cnt); 513 alloc_cnt - start, alloc_cnt - blk_cnt);
487 hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents); 514 hfsplus_dump_extent(hip->cached_extents);
488 if (blk_cnt > start) { 515 if (blk_cnt > start) {
489 HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY; 516 hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
490 break; 517 break;
491 } 518 }
492 alloc_cnt = start; 519 alloc_cnt = start;
493 HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0; 520 hip->cached_start = hip->cached_blocks = 0;
494 HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW); 521 hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
495 hfs_brec_remove(&fd); 522 hfs_brec_remove(&fd);
496 } 523 }
497 hfs_find_exit(&fd); 524 hfs_find_exit(&fd);
498 mutex_unlock(&HFSPLUS_I(inode).extents_lock); 525 mutex_unlock(&hip->extents_lock);
499 526
500 HFSPLUS_I(inode).alloc_blocks = blk_cnt; 527 hip->alloc_blocks = blk_cnt;
501out: 528out:
502 HFSPLUS_I(inode).phys_size = inode->i_size; 529 hip->phys_size = inode->i_size;
503 HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 530 hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
504 inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits); 531 inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
505 mark_inode_dirty(inode); 532 mark_inode_dirty(inode);
506} 533}
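
The truncate path above rounds i_size up to whole allocation blocks with (size + blksz - 1) >> shift, the standard round-up-then-shift idiom. A two-line demonstration, assuming an arbitrary 4K block size:

/*
 * Round a byte size up to whole 4K blocks, as blk_cnt is computed.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 5000, blksz = 4096;
	int shift = 12;               /* log2(4096) */

	printf("%llu\n", (size + blksz - 1) >> shift);  /* prints 2 */
	return 0;
}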
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index dc856be3c2b0..cb3653efb57a 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -62,7 +62,7 @@ struct hfs_btree {
62 unsigned int depth; 62 unsigned int depth;
63 63
64 //unsigned int map1_size, map_size; 64 //unsigned int map1_size, map_size;
65 struct semaphore tree_lock; 65 struct mutex tree_lock;
66 66
67 unsigned int pages_per_bnode; 67 unsigned int pages_per_bnode;
68 spinlock_t hash_lock; 68 spinlock_t hash_lock;
@@ -121,16 +121,21 @@ struct hfsplus_sb_info {
121 u32 sect_count; 121 u32 sect_count;
122 int fs_shift; 122 int fs_shift;
123 123
124 /* Stuff in host order from Vol Header */ 124 /* immutable data from the volume header */
125 u32 alloc_blksz; 125 u32 alloc_blksz;
126 int alloc_blksz_shift; 126 int alloc_blksz_shift;
127 u32 total_blocks; 127 u32 total_blocks;
128 u32 data_clump_blocks, rsrc_clump_blocks;
129
130 /* mutable data from the volume header, protected by alloc_mutex */
128 u32 free_blocks; 131 u32 free_blocks;
129 u32 next_alloc; 132 struct mutex alloc_mutex;
133
134 /* mutable data from the volume header, protected by vh_mutex */
130 u32 next_cnid; 135 u32 next_cnid;
131 u32 file_count; 136 u32 file_count;
132 u32 folder_count; 137 u32 folder_count;
133 u32 data_clump_blocks, rsrc_clump_blocks; 138 struct mutex vh_mutex;
134 139
135 /* Config options */ 140 /* Config options */
136 u32 creator; 141 u32 creator;
@@ -143,40 +148,50 @@ struct hfsplus_sb_info {
143 int part, session; 148 int part, session;
144 149
145 unsigned long flags; 150 unsigned long flags;
146
147 struct hlist_head rsrc_inodes;
148}; 151};
149 152
150#define HFSPLUS_SB_WRITEBACKUP 0x0001 153#define HFSPLUS_SB_WRITEBACKUP 0
151#define HFSPLUS_SB_NODECOMPOSE 0x0002 154#define HFSPLUS_SB_NODECOMPOSE 1
152#define HFSPLUS_SB_FORCE 0x0004 155#define HFSPLUS_SB_FORCE 2
153#define HFSPLUS_SB_HFSX 0x0008 156#define HFSPLUS_SB_HFSX 3
154#define HFSPLUS_SB_CASEFOLD 0x0010 157#define HFSPLUS_SB_CASEFOLD 4
155 158
156 159
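
Note why the HFSPLUS_SB_* constants change value: the old definitions were masks (0x0001, 0x0002, ...) for use with & and |, while the new ones are bit numbers (0, 1, 2, ...) because set_bit()/test_bit() take an index into the flags word, not a mask. A userspace rendition of the two conventions, with a made-up flag name:

/*
 * Bit-number flags: set_bit(n, &flags) is flags |= 1UL << n.
 */
#include <stdio.h>

#define SB_NODECOMPOSE_BIT 1            /* new style: bit number */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << SB_NODECOMPOSE_BIT;          /* set_bit(1, &flags)  */
	printf("%d\n", !!(flags & (1UL << SB_NODECOMPOSE_BIT)));
	return 0;                                    /* test_bit(1, &flags) */
}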
157struct hfsplus_inode_info { 160struct hfsplus_inode_info {
158 struct mutex extents_lock;
159 u32 clump_blocks, alloc_blocks;
160 sector_t fs_blocks;
161 /* Allocation extents from catalog record or volume header */
162 hfsplus_extent_rec first_extents;
163 u32 first_blocks;
164 hfsplus_extent_rec cached_extents;
165 u32 cached_start, cached_blocks;
166 atomic_t opencnt; 161 atomic_t opencnt;
167 162
168 struct inode *rsrc_inode; 163 /*
164 * Extent allocation information, protected by extents_lock.
165 */
166 u32 first_blocks;
167 u32 clump_blocks;
168 u32 alloc_blocks;
169 u32 cached_start;
170 u32 cached_blocks;
171 hfsplus_extent_rec first_extents;
172 hfsplus_extent_rec cached_extents;
169 unsigned long flags; 173 unsigned long flags;
174 struct mutex extents_lock;
170 175
176 /*
177 * Immutable data.
178 */
179 struct inode *rsrc_inode;
171 __be32 create_date; 180 __be32 create_date;
172 /* Device number in hfsplus_permissions in catalog */
173 u32 dev;
174 /* BSD system and user file flags */
175 u8 rootflags;
176 u8 userflags;
177 181
182 /*
183 * Protected by sbi->vh_mutex.
184 */
185 u32 linkid;
186
187 /*
188 * Protected by i_mutex.
189 */
190 sector_t fs_blocks;
191 u8 userflags; /* BSD user file flags */
178 struct list_head open_dir_list; 192 struct list_head open_dir_list;
179 loff_t phys_size; 193 loff_t phys_size;
194
180 struct inode vfs_inode; 195 struct inode vfs_inode;
181}; 196};
182 197
@@ -184,8 +199,8 @@ struct hfsplus_inode_info {
184#define HFSPLUS_FLG_EXT_DIRTY 0x0002 199#define HFSPLUS_FLG_EXT_DIRTY 0x0002
185#define HFSPLUS_FLG_EXT_NEW 0x0004 200#define HFSPLUS_FLG_EXT_NEW 0x0004
186 201
187#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)) 202#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC))
188#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC) 203#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC)
189 204
190struct hfs_find_data { 205struct hfs_find_data {
191 /* filled by caller */ 206 /* filled by caller */
@@ -311,6 +326,7 @@ int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
311int hfsplus_delete_cat(u32, struct inode *, struct qstr *); 326int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
312int hfsplus_rename_cat(u32, struct inode *, struct qstr *, 327int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
313 struct inode *, struct qstr *); 328 struct inode *, struct qstr *);
329void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
314 330
315/* dir.c */ 331/* dir.c */
316extern const struct inode_operations hfsplus_dir_inode_operations; 332extern const struct inode_operations hfsplus_dir_inode_operations;
@@ -372,26 +388,15 @@ int hfsplus_read_wrapper(struct super_block *);
372int hfs_part_find(struct super_block *, sector_t *, sector_t *); 388int hfs_part_find(struct super_block *, sector_t *, sector_t *);
373 389
374/* access macros */ 390/* access macros */
375/*
376static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb) 391static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
377{ 392{
378 return sb->s_fs_info; 393 return sb->s_fs_info;
379} 394}
395
380static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) 396static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
381{ 397{
382 return list_entry(inode, struct hfsplus_inode_info, vfs_inode); 398 return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
383} 399}
384*/
385#define HFSPLUS_SB(super) (*(struct hfsplus_sb_info *)(super)->s_fs_info)
386#define HFSPLUS_I(inode) (*list_entry(inode, struct hfsplus_inode_info, vfs_inode))
387
388#if 1
389#define hfsplus_kmap(p) ({ struct page *__p = (p); kmap(__p); })
390#define hfsplus_kunmap(p) ({ struct page *__p = (p); kunmap(__p); __p; })
391#else
392#define hfsplus_kmap(p) kmap(p)
393#define hfsplus_kunmap(p) kunmap(p)
394#endif
395 400
396#define sb_bread512(sb, sec, data) ({ \ 401#define sb_bread512(sb, sec, data) ({ \
397 struct buffer_head *__bh; \ 402 struct buffer_head *__bh; \
@@ -419,6 +424,4 @@ static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
419#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec) 424#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
420#define hfsp_now2mt() __hfsp_ut2mt(get_seconds()) 425#define hfsp_now2mt() __hfsp_ut2mt(get_seconds())
421 426
422#define kdev_t_to_nr(x) (x)
423
424#endif 427#endif
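
The header change above replaces the deref-everything HFSPLUS_SB/HFSPLUS_I macros with inline helpers returning pointers. The list_entry() they use is the kernel's container_of(): recover the outer structure from a pointer to an embedded member. A freestanding userspace rendition of the same accessor pattern, with illustrative struct names:

/*
 * container_of-style accessor, as the new HFSPLUS_I helper works.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode { unsigned long ino; };

struct inode_info {
	unsigned long flags;
	struct vfs_inode vfs_inode;   /* embedded, like hfsplus_inode_info */
};

static struct inode_info *INFO(struct vfs_inode *inode)
{
	return container_of(inode, struct inode_info, vfs_inode);
}

int main(void)
{
	struct inode_info info = { .flags = 7, .vfs_inode = { .ino = 16 } };

	printf("%lu\n", INFO(&info.vfs_inode)->flags);   /* prints 7 */
	return 0;
}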
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index fe99fe8db61a..6892899fd6fb 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -200,6 +200,7 @@ struct hfsplus_cat_key {
200 struct hfsplus_unistr name; 200 struct hfsplus_unistr name;
201} __packed; 201} __packed;
202 202
203#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
203 204
204/* Structs from hfs.h */ 205/* Structs from hfs.h */
205struct hfsp_point { 206struct hfsp_point {
@@ -323,7 +324,7 @@ struct hfsplus_ext_key {
323 __be32 start_block; 324 __be32 start_block;
324} __packed; 325} __packed;
325 326
326#define HFSPLUS_EXT_KEYLEN 12 327#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
327 328
328/* HFS+ generic BTree key */ 329/* HFS+ generic BTree key */
329typedef union { 330typedef union {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index c5a979d62c65..78449280dae0 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -36,7 +36,7 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
36 *pagep = NULL; 36 *pagep = NULL;
37 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 37 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
38 hfsplus_get_block, 38 hfsplus_get_block,
39 &HFSPLUS_I(mapping->host).phys_size); 39 &HFSPLUS_I(mapping->host)->phys_size);
40 if (unlikely(ret)) { 40 if (unlikely(ret)) {
41 loff_t isize = mapping->host->i_size; 41 loff_t isize = mapping->host->i_size;
42 if (pos + len > isize) 42 if (pos + len > isize)
@@ -62,13 +62,13 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
62 62
63 switch (inode->i_ino) { 63 switch (inode->i_ino) {
64 case HFSPLUS_EXT_CNID: 64 case HFSPLUS_EXT_CNID:
65 tree = HFSPLUS_SB(sb).ext_tree; 65 tree = HFSPLUS_SB(sb)->ext_tree;
66 break; 66 break;
67 case HFSPLUS_CAT_CNID: 67 case HFSPLUS_CAT_CNID:
68 tree = HFSPLUS_SB(sb).cat_tree; 68 tree = HFSPLUS_SB(sb)->cat_tree;
69 break; 69 break;
70 case HFSPLUS_ATTR_CNID: 70 case HFSPLUS_ATTR_CNID:
71 tree = HFSPLUS_SB(sb).attr_tree; 71 tree = HFSPLUS_SB(sb)->attr_tree;
72 break; 72 break;
73 default: 73 default:
74 BUG(); 74 BUG();
@@ -172,12 +172,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
172 struct hfs_find_data fd; 172 struct hfs_find_data fd;
173 struct super_block *sb = dir->i_sb; 173 struct super_block *sb = dir->i_sb;
174 struct inode *inode = NULL; 174 struct inode *inode = NULL;
175 struct hfsplus_inode_info *hip;
175 int err; 176 int err;
176 177
177 if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) 178 if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
178 goto out; 179 goto out;
179 180
180 inode = HFSPLUS_I(dir).rsrc_inode; 181 inode = HFSPLUS_I(dir)->rsrc_inode;
181 if (inode) 182 if (inode)
182 goto out; 183 goto out;
183 184
@@ -185,12 +186,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
185 if (!inode) 186 if (!inode)
186 return ERR_PTR(-ENOMEM); 187 return ERR_PTR(-ENOMEM);
187 188
189 hip = HFSPLUS_I(inode);
188 inode->i_ino = dir->i_ino; 190 inode->i_ino = dir->i_ino;
189 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 191 INIT_LIST_HEAD(&hip->open_dir_list);
190 mutex_init(&HFSPLUS_I(inode).extents_lock); 192 mutex_init(&hip->extents_lock);
191 HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC; 193 hip->flags = HFSPLUS_FLG_RSRC;
192 194
193 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd); 195 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
194 err = hfsplus_find_cat(sb, dir->i_ino, &fd); 196 err = hfsplus_find_cat(sb, dir->i_ino, &fd);
195 if (!err) 197 if (!err)
196 err = hfsplus_cat_read_inode(inode, &fd); 198 err = hfsplus_cat_read_inode(inode, &fd);
@@ -199,10 +201,18 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
199 iput(inode); 201 iput(inode);
200 return ERR_PTR(err); 202 return ERR_PTR(err);
201 } 203 }
202 HFSPLUS_I(inode).rsrc_inode = dir; 204 hip->rsrc_inode = dir;
203 HFSPLUS_I(dir).rsrc_inode = inode; 205 HFSPLUS_I(dir)->rsrc_inode = inode;
204 igrab(dir); 206 igrab(dir);
205 hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes); 207
208 /*
209 * __mark_inode_dirty expects inodes to be hashed. Since we don't
210 * want resource fork inodes in the regular inode space, we make them
211 * appear hashed, but do not put on any lists. hlist_del()
212 * will work fine and require no locking.
213 */
214 inode->i_hash.pprev = &inode->i_hash.next;
215
206 mark_inode_dirty(inode); 216 mark_inode_dirty(inode);
207out: 217out:
208 d_add(dentry, inode); 218 d_add(dentry, inode);
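
The comment in the hunk above describes a small trick: pointing a node's pprev at its own next field makes it look hashed (hlist_unhashed() returns false) and lets hlist_del() run safely, without the node ever joining a real list. A minimal userspace copy of the relevant helper to show the effect:

/*
 * Fake-hashing an hlist node, as done for resource-fork inodes.
 */
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };

static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

int main(void)
{
	struct hlist_node n = { .next = NULL, .pprev = NULL };

	printf("%d", hlist_unhashed(&n));    /* 1: looks unhashed   */
	n.pprev = &n.next;                   /* the fake-hash trick */
	printf(" %d\n", hlist_unhashed(&n)); /* 0: appears hashed   */
	return 0;
}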
@@ -211,30 +221,27 @@ out:
211 221
212static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir) 222static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
213{ 223{
214 struct super_block *sb = inode->i_sb; 224 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
215 u16 mode; 225 u16 mode;
216 226
217 mode = be16_to_cpu(perms->mode); 227 mode = be16_to_cpu(perms->mode);
218 228
219 inode->i_uid = be32_to_cpu(perms->owner); 229 inode->i_uid = be32_to_cpu(perms->owner);
220 if (!inode->i_uid && !mode) 230 if (!inode->i_uid && !mode)
221 inode->i_uid = HFSPLUS_SB(sb).uid; 231 inode->i_uid = sbi->uid;
222 232
223 inode->i_gid = be32_to_cpu(perms->group); 233 inode->i_gid = be32_to_cpu(perms->group);
224 if (!inode->i_gid && !mode) 234 if (!inode->i_gid && !mode)
225 inode->i_gid = HFSPLUS_SB(sb).gid; 235 inode->i_gid = sbi->gid;
226 236
227 if (dir) { 237 if (dir) {
228 mode = mode ? (mode & S_IALLUGO) : 238 mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
229 (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
230 mode |= S_IFDIR; 239 mode |= S_IFDIR;
231 } else if (!mode) 240 } else if (!mode)
232 mode = S_IFREG | ((S_IRUGO|S_IWUGO) & 241 mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
233 ~(HFSPLUS_SB(sb).umask));
234 inode->i_mode = mode; 242 inode->i_mode = mode;
235 243
236 HFSPLUS_I(inode).rootflags = perms->rootflags; 244 HFSPLUS_I(inode)->userflags = perms->userflags;
237 HFSPLUS_I(inode).userflags = perms->userflags;
238 if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE) 245 if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
239 inode->i_flags |= S_IMMUTABLE; 246 inode->i_flags |= S_IMMUTABLE;
240 else 247 else
@@ -245,30 +252,13 @@ static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, i
245 inode->i_flags &= ~S_APPEND; 252 inode->i_flags &= ~S_APPEND;
246} 253}
247 254
248static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
249{
250 if (inode->i_flags & S_IMMUTABLE)
251 perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
252 else
253 perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
254 if (inode->i_flags & S_APPEND)
255 perms->rootflags |= HFSPLUS_FLG_APPEND;
256 else
257 perms->rootflags &= ~HFSPLUS_FLG_APPEND;
258 perms->userflags = HFSPLUS_I(inode).userflags;
259 perms->mode = cpu_to_be16(inode->i_mode);
260 perms->owner = cpu_to_be32(inode->i_uid);
261 perms->group = cpu_to_be32(inode->i_gid);
262 perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
263}
264
265static int hfsplus_file_open(struct inode *inode, struct file *file) 255static int hfsplus_file_open(struct inode *inode, struct file *file)
266{ 256{
267 if (HFSPLUS_IS_RSRC(inode)) 257 if (HFSPLUS_IS_RSRC(inode))
268 inode = HFSPLUS_I(inode).rsrc_inode; 258 inode = HFSPLUS_I(inode)->rsrc_inode;
269 if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) 259 if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
270 return -EOVERFLOW; 260 return -EOVERFLOW;
271 atomic_inc(&HFSPLUS_I(inode).opencnt); 261 atomic_inc(&HFSPLUS_I(inode)->opencnt);
272 return 0; 262 return 0;
273} 263}
274 264
@@ -277,12 +267,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
277 struct super_block *sb = inode->i_sb; 267 struct super_block *sb = inode->i_sb;
278 268
279 if (HFSPLUS_IS_RSRC(inode)) 269 if (HFSPLUS_IS_RSRC(inode))
280 inode = HFSPLUS_I(inode).rsrc_inode; 270 inode = HFSPLUS_I(inode)->rsrc_inode;
281 if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) { 271 if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
282 mutex_lock(&inode->i_mutex); 272 mutex_lock(&inode->i_mutex);
283 hfsplus_file_truncate(inode); 273 hfsplus_file_truncate(inode);
284 if (inode->i_flags & S_DEAD) { 274 if (inode->i_flags & S_DEAD) {
285 hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL); 275 hfsplus_delete_cat(inode->i_ino,
276 HFSPLUS_SB(sb)->hidden_dir, NULL);
286 hfsplus_delete_inode(inode); 277 hfsplus_delete_inode(inode);
287 } 278 }
288 mutex_unlock(&inode->i_mutex); 279 mutex_unlock(&inode->i_mutex);
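
hfsplus_file_release only truncates and cleans up for the last closer, gated on atomic_dec_and_test(&opencnt). A C11 sketch of the same last-reference gate:

/*
 * Dec-and-test: cleanup runs only when the count drops to zero.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int opencnt;

static void file_release(void)
{
	if (atomic_fetch_sub(&opencnt, 1) == 1)  /* was 1: we are last */
		puts("last close: truncate and clean up");
}

int main(void)
{
	atomic_store(&opencnt, 2);
	file_release();   /* still open elsewhere: no cleanup */
	file_release();   /* last reference: cleanup runs */
	return 0;
}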
@@ -361,47 +352,52 @@ static const struct file_operations hfsplus_file_operations = {
361 352
362struct inode *hfsplus_new_inode(struct super_block *sb, int mode) 353struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
363{ 354{
355 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
364 struct inode *inode = new_inode(sb); 356 struct inode *inode = new_inode(sb);
357 struct hfsplus_inode_info *hip;
358
365 if (!inode) 359 if (!inode)
366 return NULL; 360 return NULL;
367 361
368 inode->i_ino = HFSPLUS_SB(sb).next_cnid++; 362 inode->i_ino = sbi->next_cnid++;
369 inode->i_mode = mode; 363 inode->i_mode = mode;
370 inode->i_uid = current_fsuid(); 364 inode->i_uid = current_fsuid();
371 inode->i_gid = current_fsgid(); 365 inode->i_gid = current_fsgid();
372 inode->i_nlink = 1; 366 inode->i_nlink = 1;
373 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 367 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
374 INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); 368
375 mutex_init(&HFSPLUS_I(inode).extents_lock); 369 hip = HFSPLUS_I(inode);
376 atomic_set(&HFSPLUS_I(inode).opencnt, 0); 370 INIT_LIST_HEAD(&hip->open_dir_list);
377 HFSPLUS_I(inode).flags = 0; 371 mutex_init(&hip->extents_lock);
378 memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec)); 372 atomic_set(&hip->opencnt, 0);
379 memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); 373 hip->flags = 0;
380 HFSPLUS_I(inode).alloc_blocks = 0; 374 memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
381 HFSPLUS_I(inode).first_blocks = 0; 375 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
382 HFSPLUS_I(inode).cached_start = 0; 376 hip->alloc_blocks = 0;
383 HFSPLUS_I(inode).cached_blocks = 0; 377 hip->first_blocks = 0;
384 HFSPLUS_I(inode).phys_size = 0; 378 hip->cached_start = 0;
385 HFSPLUS_I(inode).fs_blocks = 0; 379 hip->cached_blocks = 0;
386 HFSPLUS_I(inode).rsrc_inode = NULL; 380 hip->phys_size = 0;
381 hip->fs_blocks = 0;
382 hip->rsrc_inode = NULL;
387 if (S_ISDIR(inode->i_mode)) { 383 if (S_ISDIR(inode->i_mode)) {
388 inode->i_size = 2; 384 inode->i_size = 2;
389 HFSPLUS_SB(sb).folder_count++; 385 sbi->folder_count++;
390 inode->i_op = &hfsplus_dir_inode_operations; 386 inode->i_op = &hfsplus_dir_inode_operations;
391 inode->i_fop = &hfsplus_dir_operations; 387 inode->i_fop = &hfsplus_dir_operations;
392 } else if (S_ISREG(inode->i_mode)) { 388 } else if (S_ISREG(inode->i_mode)) {
393 HFSPLUS_SB(sb).file_count++; 389 sbi->file_count++;
394 inode->i_op = &hfsplus_file_inode_operations; 390 inode->i_op = &hfsplus_file_inode_operations;
395 inode->i_fop = &hfsplus_file_operations; 391 inode->i_fop = &hfsplus_file_operations;
396 inode->i_mapping->a_ops = &hfsplus_aops; 392 inode->i_mapping->a_ops = &hfsplus_aops;
397 HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks; 393 hip->clump_blocks = sbi->data_clump_blocks;
398 } else if (S_ISLNK(inode->i_mode)) { 394 } else if (S_ISLNK(inode->i_mode)) {
399 HFSPLUS_SB(sb).file_count++; 395 sbi->file_count++;
400 inode->i_op = &page_symlink_inode_operations; 396 inode->i_op = &page_symlink_inode_operations;
401 inode->i_mapping->a_ops = &hfsplus_aops; 397 inode->i_mapping->a_ops = &hfsplus_aops;
402 HFSPLUS_I(inode).clump_blocks = 1; 398 hip->clump_blocks = 1;
403 } else 399 } else
404 HFSPLUS_SB(sb).file_count++; 400 sbi->file_count++;
405 insert_inode_hash(inode); 401 insert_inode_hash(inode);
406 mark_inode_dirty(inode); 402 mark_inode_dirty(inode);
407 sb->s_dirt = 1; 403 sb->s_dirt = 1;
@@ -414,11 +410,11 @@ void hfsplus_delete_inode(struct inode *inode)
414 struct super_block *sb = inode->i_sb; 410 struct super_block *sb = inode->i_sb;
415 411
416 if (S_ISDIR(inode->i_mode)) { 412 if (S_ISDIR(inode->i_mode)) {
417 HFSPLUS_SB(sb).folder_count--; 413 HFSPLUS_SB(sb)->folder_count--;
418 sb->s_dirt = 1; 414 sb->s_dirt = 1;
419 return; 415 return;
420 } 416 }
421 HFSPLUS_SB(sb).file_count--; 417 HFSPLUS_SB(sb)->file_count--;
422 if (S_ISREG(inode->i_mode)) { 418 if (S_ISREG(inode->i_mode)) {
423 if (!inode->i_nlink) { 419 if (!inode->i_nlink) {
424 inode->i_size = 0; 420 inode->i_size = 0;
@@ -434,34 +430,39 @@ void hfsplus_delete_inode(struct inode *inode)
434void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork) 430void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
435{ 431{
436 struct super_block *sb = inode->i_sb; 432 struct super_block *sb = inode->i_sb;
433 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
434 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
437 u32 count; 435 u32 count;
438 int i; 436 int i;
439 437
440 memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents, 438 memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
441 sizeof(hfsplus_extent_rec));
442 for (count = 0, i = 0; i < 8; i++) 439 for (count = 0, i = 0; i < 8; i++)
443 count += be32_to_cpu(fork->extents[i].block_count); 440 count += be32_to_cpu(fork->extents[i].block_count);
444 HFSPLUS_I(inode).first_blocks = count; 441 hip->first_blocks = count;
445 memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec)); 442 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
446 HFSPLUS_I(inode).cached_start = 0; 443 hip->cached_start = 0;
447 HFSPLUS_I(inode).cached_blocks = 0; 444 hip->cached_blocks = 0;
448 445
449 HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks); 446 hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
450 inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size); 447 hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
451 HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 448 hip->fs_blocks =
452 inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits); 449 (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
453 HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift; 450 inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
454 if (!HFSPLUS_I(inode).clump_blocks) 451 hip->clump_blocks =
455 HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks : 452 be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
456 HFSPLUS_SB(sb).data_clump_blocks; 453 if (!hip->clump_blocks) {
454 hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
455 sbi->rsrc_clump_blocks :
456 sbi->data_clump_blocks;
457 }
457} 458}
458 459
459void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork) 460void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
460{ 461{
461 memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents, 462 memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
462 sizeof(hfsplus_extent_rec)); 463 sizeof(hfsplus_extent_rec));
463 fork->total_size = cpu_to_be64(inode->i_size); 464 fork->total_size = cpu_to_be64(inode->i_size);
464 fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks); 465 fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
465} 466}
466 467
467int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) 468int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
@@ -472,7 +473,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
472 473
473 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset); 474 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
474 475
475 HFSPLUS_I(inode).dev = 0; 476 HFSPLUS_I(inode)->linkid = 0;
476 if (type == HFSPLUS_FOLDER) { 477 if (type == HFSPLUS_FOLDER) {
477 struct hfsplus_cat_folder *folder = &entry.folder; 478 struct hfsplus_cat_folder *folder = &entry.folder;
478 479
@@ -486,8 +487,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
486 inode->i_atime = hfsp_mt2ut(folder->access_date); 487 inode->i_atime = hfsp_mt2ut(folder->access_date);
487 inode->i_mtime = hfsp_mt2ut(folder->content_mod_date); 488 inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
488 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); 489 inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
489 HFSPLUS_I(inode).create_date = folder->create_date; 490 HFSPLUS_I(inode)->create_date = folder->create_date;
490 HFSPLUS_I(inode).fs_blocks = 0; 491 HFSPLUS_I(inode)->fs_blocks = 0;
491 inode->i_op = &hfsplus_dir_inode_operations; 492 inode->i_op = &hfsplus_dir_inode_operations;
492 inode->i_fop = &hfsplus_dir_operations; 493 inode->i_fop = &hfsplus_dir_operations;
493 } else if (type == HFSPLUS_FILE) { 494 } else if (type == HFSPLUS_FILE) {
@@ -518,7 +519,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
518 inode->i_atime = hfsp_mt2ut(file->access_date); 519 inode->i_atime = hfsp_mt2ut(file->access_date);
519 inode->i_mtime = hfsp_mt2ut(file->content_mod_date); 520 inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
520 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date); 521 inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
521 HFSPLUS_I(inode).create_date = file->create_date; 522 HFSPLUS_I(inode)->create_date = file->create_date;
522 } else { 523 } else {
523 printk(KERN_ERR "hfs: bad catalog entry used to create inode\n"); 524 printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
524 res = -EIO; 525 res = -EIO;
@@ -533,12 +534,12 @@ int hfsplus_cat_write_inode(struct inode *inode)
533 hfsplus_cat_entry entry; 534 hfsplus_cat_entry entry;
534 535
535 if (HFSPLUS_IS_RSRC(inode)) 536 if (HFSPLUS_IS_RSRC(inode))
536 main_inode = HFSPLUS_I(inode).rsrc_inode; 537 main_inode = HFSPLUS_I(inode)->rsrc_inode;
537 538
538 if (!main_inode->i_nlink) 539 if (!main_inode->i_nlink)
539 return 0; 540 return 0;
540 541
541 if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd)) 542 if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
542 /* panic? */ 543 /* panic? */
543 return -EIO; 544 return -EIO;
544 545
@@ -554,7 +555,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
554 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, 555 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
555 sizeof(struct hfsplus_cat_folder)); 556 sizeof(struct hfsplus_cat_folder));
556 /* simple node checks? */ 557 /* simple node checks? */
557 hfsplus_set_perms(inode, &folder->permissions); 558 hfsplus_cat_set_perms(inode, &folder->permissions);
558 folder->access_date = hfsp_ut2mt(inode->i_atime); 559 folder->access_date = hfsp_ut2mt(inode->i_atime);
559 folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); 560 folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
560 folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); 561 folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
@@ -576,11 +577,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
576 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, 577 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
577 sizeof(struct hfsplus_cat_file)); 578 sizeof(struct hfsplus_cat_file));
578 hfsplus_inode_write_fork(inode, &file->data_fork); 579 hfsplus_inode_write_fork(inode, &file->data_fork);
579 if (S_ISREG(inode->i_mode)) 580 hfsplus_cat_set_perms(inode, &file->permissions);
580 HFSPLUS_I(inode).dev = inode->i_nlink;
581 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
582 HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
583 hfsplus_set_perms(inode, &file->permissions);
584 if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) 581 if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
585 file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); 582 file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
586 else 583 else
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index ac405f099026..5b4667e08ef7 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -17,83 +17,98 @@
17#include <linux/mount.h> 17#include <linux/mount.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/xattr.h> 19#include <linux/xattr.h>
20#include <linux/smp_lock.h>
21#include <asm/uaccess.h> 20#include <asm/uaccess.h>
22#include "hfsplus_fs.h" 21#include "hfsplus_fs.h"
23 22
24long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 23static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
25{ 24{
26 struct inode *inode = filp->f_path.dentry->d_inode; 25 struct inode *inode = file->f_path.dentry->d_inode;
26 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
27 unsigned int flags = 0;
28
29 if (inode->i_flags & S_IMMUTABLE)
30 flags |= FS_IMMUTABLE_FL;
 31 if (inode->i_flags & S_APPEND)
32 flags |= FS_APPEND_FL;
33 if (hip->userflags & HFSPLUS_FLG_NODUMP)
34 flags |= FS_NODUMP_FL;
35
36 return put_user(flags, user_flags);
37}
38
39static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
40{
41 struct inode *inode = file->f_path.dentry->d_inode;
42 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
27 unsigned int flags; 43 unsigned int flags;
44 int err = 0;
28 45
29 lock_kernel(); 46 err = mnt_want_write(file->f_path.mnt);
30 switch (cmd) { 47 if (err)
31 case HFSPLUS_IOC_EXT2_GETFLAGS: 48 goto out;
32 flags = 0;
33 if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE)
34 flags |= FS_IMMUTABLE_FL; /* EXT2_IMMUTABLE_FL */
35 if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND)
36 flags |= FS_APPEND_FL; /* EXT2_APPEND_FL */
37 if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP)
38 flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */
39 return put_user(flags, (int __user *)arg);
40 case HFSPLUS_IOC_EXT2_SETFLAGS: {
41 int err = 0;
42 err = mnt_want_write(filp->f_path.mnt);
43 if (err) {
44 unlock_kernel();
45 return err;
46 }
47 49
48 if (!is_owner_or_cap(inode)) { 50 if (!is_owner_or_cap(inode)) {
49 err = -EACCES; 51 err = -EACCES;
50 goto setflags_out; 52 goto out_drop_write;
51 } 53 }
52 if (get_user(flags, (int __user *)arg)) {
53 err = -EFAULT;
54 goto setflags_out;
55 }
56 if (flags & (FS_IMMUTABLE_FL|FS_APPEND_FL) ||
57 HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) {
58 if (!capable(CAP_LINUX_IMMUTABLE)) {
59 err = -EPERM;
60 goto setflags_out;
61 }
62 }
63 54
64 /* don't silently ignore unsupported ext2 flags */ 55 if (get_user(flags, user_flags)) {
65 if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) { 56 err = -EFAULT;
66 err = -EOPNOTSUPP; 57 goto out_drop_write;
67 goto setflags_out; 58 }
68 } 59
69 if (flags & FS_IMMUTABLE_FL) { /* EXT2_IMMUTABLE_FL */ 60 mutex_lock(&inode->i_mutex);
70 inode->i_flags |= S_IMMUTABLE; 61
71 HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE; 62 if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) ||
72 } else { 63 inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
73 inode->i_flags &= ~S_IMMUTABLE; 64 if (!capable(CAP_LINUX_IMMUTABLE)) {
74 HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE; 65 err = -EPERM;
75 } 66 goto out_unlock_inode;
76 if (flags & FS_APPEND_FL) { /* EXT2_APPEND_FL */
77 inode->i_flags |= S_APPEND;
78 HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND;
79 } else {
80 inode->i_flags &= ~S_APPEND;
81 HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND;
82 } 67 }
83 if (flags & FS_NODUMP_FL) /* EXT2_NODUMP_FL */
84 HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP;
85 else
86 HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP;
87
88 inode->i_ctime = CURRENT_TIME_SEC;
89 mark_inode_dirty(inode);
90setflags_out:
91 mnt_drop_write(filp->f_path.mnt);
92 unlock_kernel();
93 return err;
94 } 68 }
69
70 /* don't silently ignore unsupported ext2 flags */
71 if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
72 err = -EOPNOTSUPP;
73 goto out_unlock_inode;
74 }
75
76 if (flags & FS_IMMUTABLE_FL)
77 inode->i_flags |= S_IMMUTABLE;
78 else
79 inode->i_flags &= ~S_IMMUTABLE;
80
81 if (flags & FS_APPEND_FL)
82 inode->i_flags |= S_APPEND;
83 else
84 inode->i_flags &= ~S_APPEND;
85
86 if (flags & FS_NODUMP_FL)
87 hip->userflags |= HFSPLUS_FLG_NODUMP;
88 else
89 hip->userflags &= ~HFSPLUS_FLG_NODUMP;
90
91 inode->i_ctime = CURRENT_TIME_SEC;
92 mark_inode_dirty(inode);
93
94out_unlock_inode:
 95 mutex_unlock(&inode->i_mutex);
96out_drop_write:
97 mnt_drop_write(file->f_path.mnt);
98out:
99 return err;
100}
101
102long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
103{
104 void __user *argp = (void __user *)arg;
105
106 switch (cmd) {
107 case HFSPLUS_IOC_EXT2_GETFLAGS:
108 return hfsplus_ioctl_getflags(file, argp);
109 case HFSPLUS_IOC_EXT2_SETFLAGS:
110 return hfsplus_ioctl_setflags(file, argp);
95 default: 111 default:
96 unlock_kernel();
97 return -ENOTTY; 112 return -ENOTTY;
98 } 113 }
99} 114}
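
The ioctl rework above replaces one lock_kernel()'d switch with two small helpers and a thin dispatcher that only routes commands. A userspace model of that shape, with made-up command numbers:

/*
 * Split ioctl dispatcher: per-command helpers, trivial switch.
 */
#include <errno.h>
#include <stdio.h>

enum { IOC_GETFLAGS = 1, IOC_SETFLAGS = 2 };

static unsigned int stored_flags = 0x4;

static int ioctl_getflags(unsigned int *arg) { *arg = stored_flags; return 0; }
static int ioctl_setflags(unsigned int *arg) { stored_flags = *arg; return 0; }

static long do_ioctl(unsigned int cmd, unsigned int *argp)
{
	switch (cmd) {
	case IOC_GETFLAGS:
		return ioctl_getflags(argp);
	case IOC_SETFLAGS:
		return ioctl_setflags(argp);
	default:
		return -ENOTTY;       /* unknown command */
	}
}

int main(void)
{
	unsigned int v = 0;

	do_ioctl(IOC_GETFLAGS, &v);
	printf("%u %ld\n", v, do_ioctl(99, &v));  /* prints "4 -25" */
	return 0;
}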
@@ -110,7 +125,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
110 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode)) 125 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
111 return -EOPNOTSUPP; 126 return -EOPNOTSUPP;
112 127
113 res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); 128 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
114 if (res) 129 if (res)
115 return res; 130 return res;
116 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); 131 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -153,7 +168,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
153 return -EOPNOTSUPP; 168 return -EOPNOTSUPP;
154 169
155 if (size) { 170 if (size) {
156 res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd); 171 res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
157 if (res) 172 if (res)
158 return res; 173 return res;
159 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); 174 res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -177,7 +192,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
177 } else 192 } else
178 res = size ? -ERANGE : 4; 193 res = size ? -ERANGE : 4;
179 } else 194 } else
180 res = -ENODATA; 195 res = -EOPNOTSUPP;
181out: 196out:
182 if (size) 197 if (size)
183 hfs_find_exit(&fd); 198 hfs_find_exit(&fd);
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 572628b4b07d..f9ab276a4d8d 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -143,13 +143,13 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
143 kfree(p); 143 kfree(p);
144 break; 144 break;
145 case opt_decompose: 145 case opt_decompose:
146 sbi->flags &= ~HFSPLUS_SB_NODECOMPOSE; 146 clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
147 break; 147 break;
148 case opt_nodecompose: 148 case opt_nodecompose:
149 sbi->flags |= HFSPLUS_SB_NODECOMPOSE; 149 set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
150 break; 150 break;
151 case opt_force: 151 case opt_force:
152 sbi->flags |= HFSPLUS_SB_FORCE; 152 set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
153 break; 153 break;
154 default: 154 default:
155 return 0; 155 return 0;
@@ -171,7 +171,7 @@ done:
171 171
172int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt) 172int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
173{ 173{
174 struct hfsplus_sb_info *sbi = &HFSPLUS_SB(mnt->mnt_sb); 174 struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);
175 175
176 if (sbi->creator != HFSPLUS_DEF_CR_TYPE) 176 if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
177 seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator); 177 seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
@@ -184,7 +184,7 @@ int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
184 seq_printf(seq, ",session=%u", sbi->session); 184 seq_printf(seq, ",session=%u", sbi->session);
185 if (sbi->nls) 185 if (sbi->nls)
186 seq_printf(seq, ",nls=%s", sbi->nls->charset); 186 seq_printf(seq, ",nls=%s", sbi->nls->charset);
187 if (sbi->flags & HFSPLUS_SB_NODECOMPOSE) 187 if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags))
188 seq_printf(seq, ",nodecompose"); 188 seq_printf(seq, ",nodecompose");
189 return 0; 189 return 0;
190} 190}
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index 1528a6fd0299..208b16c645cc 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -74,6 +74,7 @@ struct old_pmap {
74int hfs_part_find(struct super_block *sb, 74int hfs_part_find(struct super_block *sb,
75 sector_t *part_start, sector_t *part_size) 75 sector_t *part_start, sector_t *part_size)
76{ 76{
77 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
77 struct buffer_head *bh; 78 struct buffer_head *bh;
78 __be16 *data; 79 __be16 *data;
79 int i, size, res; 80 int i, size, res;
@@ -95,7 +96,7 @@ int hfs_part_find(struct super_block *sb,
 	for (i = 0; i < size; p++, i++) {
 		if (p->pdStart && p->pdSize &&
 		    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
-		    (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+		    (sbi->part < 0 || sbi->part == i)) {
 			*part_start += be32_to_cpu(p->pdStart);
 			*part_size = be32_to_cpu(p->pdSize);
 			res = 0;
@@ -111,7 +112,7 @@ int hfs_part_find(struct super_block *sb,
 	size = be32_to_cpu(pm->pmMapBlkCnt);
 	for (i = 0; i < size;) {
 		if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
-		    (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+		    (sbi->part < 0 || sbi->part == i)) {
 			*part_start += be32_to_cpu(pm->pmPyPartStart);
 			*part_size = be32_to_cpu(pm->pmPartBlkCnt);
 			res = 0;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 3b55c050c742..9a88d7536103 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -12,7 +12,6 @@
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/vfs.h>
 #include <linux/nls.h>
 
@@ -21,40 +20,11 @@ static void hfsplus_destroy_inode(struct inode *inode);
 
 #include "hfsplus_fs.h"
 
-struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
+static int hfsplus_system_read_inode(struct inode *inode)
 {
-	struct hfs_find_data fd;
-	struct hfsplus_vh *vhdr;
-	struct inode *inode;
-	long err = -EIO;
-
-	inode = iget_locked(sb, ino);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
-		return inode;
+	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;
 
-	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
-	mutex_init(&HFSPLUS_I(inode).extents_lock);
-	HFSPLUS_I(inode).flags = 0;
-	HFSPLUS_I(inode).rsrc_inode = NULL;
-	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
-
-	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
-	read_inode:
-		hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
-		err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
-		if (!err)
-			err = hfsplus_cat_read_inode(inode, &fd);
-		hfs_find_exit(&fd);
-		if (err)
-			goto bad_inode;
-		goto done;
-	}
-	vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
-	switch(inode->i_ino) {
-	case HFSPLUS_ROOT_CNID:
-		goto read_inode;
+	switch (inode->i_ino) {
 	case HFSPLUS_EXT_CNID:
 		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
 		inode->i_mapping->a_ops = &hfsplus_btree_aops;
@@ -75,74 +45,101 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
 		inode->i_mapping->a_ops = &hfsplus_btree_aops;
 		break;
 	default:
-		goto bad_inode;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
+{
+	struct hfs_find_data fd;
+	struct inode *inode;
+	int err;
+
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+	mutex_init(&HFSPLUS_I(inode)->extents_lock);
+	HFSPLUS_I(inode)->flags = 0;
+	HFSPLUS_I(inode)->rsrc_inode = NULL;
+	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+
+	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+	    inode->i_ino == HFSPLUS_ROOT_CNID) {
+		hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+		err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+		if (!err)
+			err = hfsplus_cat_read_inode(inode, &fd);
+		hfs_find_exit(&fd);
+	} else {
+		err = hfsplus_system_read_inode(inode);
+	}
+
+	if (err) {
+		iget_failed(inode);
+		return ERR_PTR(err);
 	}
 
-done:
 	unlock_new_inode(inode);
 	return inode;
-
-bad_inode:
-	iget_failed(inode);
-	return ERR_PTR(err);
 }
 
-static int hfsplus_write_inode(struct inode *inode,
-			       struct writeback_control *wbc)
+static int hfsplus_system_write_inode(struct inode *inode)
 {
-	struct hfsplus_vh *vhdr;
-	int ret = 0;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+	struct hfsplus_vh *vhdr = sbi->s_vhdr;
+	struct hfsplus_fork_raw *fork;
+	struct hfs_btree *tree = NULL;
 
-	dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
-	hfsplus_ext_write_extent(inode);
-	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
-		return hfsplus_cat_write_inode(inode);
-	}
-	vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
 	switch (inode->i_ino) {
-	case HFSPLUS_ROOT_CNID:
-		ret = hfsplus_cat_write_inode(inode);
-		break;
 	case HFSPLUS_EXT_CNID:
-		if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) {
-			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-			inode->i_sb->s_dirt = 1;
-		}
-		hfsplus_inode_write_fork(inode, &vhdr->ext_file);
-		hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree);
+		fork = &vhdr->ext_file;
+		tree = sbi->ext_tree;
 		break;
 	case HFSPLUS_CAT_CNID:
-		if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) {
-			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-			inode->i_sb->s_dirt = 1;
-		}
-		hfsplus_inode_write_fork(inode, &vhdr->cat_file);
-		hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree);
+		fork = &vhdr->cat_file;
+		tree = sbi->cat_tree;
 		break;
 	case HFSPLUS_ALLOC_CNID:
-		if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) {
-			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-			inode->i_sb->s_dirt = 1;
-		}
-		hfsplus_inode_write_fork(inode, &vhdr->alloc_file);
+		fork = &vhdr->alloc_file;
 		break;
 	case HFSPLUS_START_CNID:
-		if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) {
-			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-			inode->i_sb->s_dirt = 1;
-		}
-		hfsplus_inode_write_fork(inode, &vhdr->start_file);
+		fork = &vhdr->start_file;
 		break;
 	case HFSPLUS_ATTR_CNID:
-		if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) {
-			HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-			inode->i_sb->s_dirt = 1;
-		}
-		hfsplus_inode_write_fork(inode, &vhdr->attr_file);
-		hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree);
-		break;
+		fork = &vhdr->attr_file;
+		tree = sbi->attr_tree;
+	default:
+		return -EIO;
+	}
+
+	if (fork->total_size != cpu_to_be64(inode->i_size)) {
+		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
+		inode->i_sb->s_dirt = 1;
 	}
-	return ret;
+	hfsplus_inode_write_fork(inode, fork);
+	if (tree)
+		hfs_btree_write(tree);
+	return 0;
+}
+
+static int hfsplus_write_inode(struct inode *inode,
+		struct writeback_control *wbc)
+{
+	dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+
+	hfsplus_ext_write_extent(inode);
+
+	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+	    inode->i_ino == HFSPLUS_ROOT_CNID)
+		return hfsplus_cat_write_inode(inode);
+	else
+		return hfsplus_system_write_inode(inode);
 }
 
 static void hfsplus_evict_inode(struct inode *inode)
@@ -151,51 +148,53 @@ static void hfsplus_evict_inode(struct inode *inode)
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
 	if (HFSPLUS_IS_RSRC(inode)) {
-		HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
-		iput(HFSPLUS_I(inode).rsrc_inode);
+		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
+		iput(HFSPLUS_I(inode)->rsrc_inode);
 	}
 }
 
 int hfsplus_sync_fs(struct super_block *sb, int wait)
 {
-	struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+	struct hfsplus_vh *vhdr = sbi->s_vhdr;
 
 	dprint(DBG_SUPER, "hfsplus_write_super\n");
 
-	lock_super(sb);
+	mutex_lock(&sbi->vh_mutex);
+	mutex_lock(&sbi->alloc_mutex);
 	sb->s_dirt = 0;
 
-	vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks);
-	vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc);
-	vhdr->next_cnid = cpu_to_be32(HFSPLUS_SB(sb).next_cnid);
-	vhdr->folder_count = cpu_to_be32(HFSPLUS_SB(sb).folder_count);
-	vhdr->file_count = cpu_to_be32(HFSPLUS_SB(sb).file_count);
+	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
+	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
+	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
+	vhdr->file_count = cpu_to_be32(sbi->file_count);
 
-	mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-	if (HFSPLUS_SB(sb).flags & HFSPLUS_SB_WRITEBACKUP) {
-		if (HFSPLUS_SB(sb).sect_count) {
+	mark_buffer_dirty(sbi->s_vhbh);
+	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
+		if (sbi->sect_count) {
 			struct buffer_head *bh;
 			u32 block, offset;
 
-			block = HFSPLUS_SB(sb).blockoffset;
-			block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
-			offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
-			printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
-				HFSPLUS_SB(sb).sect_count, block, offset);
+			block = sbi->blockoffset;
+			block += (sbi->sect_count - 2) >> (sb->s_blocksize_bits - 9);
+			offset = ((sbi->sect_count - 2) << 9) & (sb->s_blocksize - 1);
+			printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n",
+				sbi->blockoffset, sbi->sect_count,
+				block, offset);
 			bh = sb_bread(sb, block);
 			if (bh) {
 				vhdr = (struct hfsplus_vh *)(bh->b_data + offset);
 				if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) {
-					memcpy(vhdr, HFSPLUS_SB(sb).s_vhdr, sizeof(*vhdr));
+					memcpy(vhdr, sbi->s_vhdr, sizeof(*vhdr));
 					mark_buffer_dirty(bh);
 					brelse(bh);
 				} else
 					printk(KERN_WARNING "hfs: backup not found!\n");
 			}
 		}
-		HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
 	}
-	unlock_super(sb);
+	mutex_unlock(&sbi->alloc_mutex);
+	mutex_unlock(&sbi->vh_mutex);
 	return 0;
 }
 
@@ -209,48 +208,48 @@ static void hfsplus_write_super(struct super_block *sb)
 
 static void hfsplus_put_super(struct super_block *sb)
 {
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+
 	dprint(DBG_SUPER, "hfsplus_put_super\n");
+
 	if (!sb->s_fs_info)
 		return;
 
-	lock_kernel();
-
 	if (sb->s_dirt)
 		hfsplus_write_super(sb);
-	if (!(sb->s_flags & MS_RDONLY) && HFSPLUS_SB(sb).s_vhdr) {
-		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+	if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
+		struct hfsplus_vh *vhdr = sbi->s_vhdr;
 
 		vhdr->modify_date = hfsp_now2mt();
 		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
 		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
-		mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-		sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+		mark_buffer_dirty(sbi->s_vhbh);
+		sync_dirty_buffer(sbi->s_vhbh);
 	}
 
-	hfs_btree_close(HFSPLUS_SB(sb).cat_tree);
-	hfs_btree_close(HFSPLUS_SB(sb).ext_tree);
-	iput(HFSPLUS_SB(sb).alloc_file);
-	iput(HFSPLUS_SB(sb).hidden_dir);
-	brelse(HFSPLUS_SB(sb).s_vhbh);
-	unload_nls(HFSPLUS_SB(sb).nls);
+	hfs_btree_close(sbi->cat_tree);
+	hfs_btree_close(sbi->ext_tree);
+	iput(sbi->alloc_file);
+	iput(sbi->hidden_dir);
+	brelse(sbi->s_vhbh);
+	unload_nls(sbi->nls);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
-
-	unlock_kernel();
 }
 
 static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
 	buf->f_type = HFSPLUS_SUPER_MAGIC;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
-	buf->f_bfree = HFSPLUS_SB(sb).free_blocks << HFSPLUS_SB(sb).fs_shift;
+	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
+	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
 	buf->f_bavail = buf->f_bfree;
 	buf->f_files = 0xFFFFFFFF;
-	buf->f_ffree = 0xFFFFFFFF - HFSPLUS_SB(sb).next_cnid;
+	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
 	buf->f_namelen = HFSPLUS_MAX_STRLEN;
@@ -263,11 +262,11 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
 	if (!(*flags & MS_RDONLY)) {
-		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
 		struct hfsplus_sb_info sbi;
 
 		memset(&sbi, 0, sizeof(struct hfsplus_sb_info));
-		sbi.nls = HFSPLUS_SB(sb).nls;
+		sbi.nls = HFSPLUS_SB(sb)->nls;
 		if (!hfsplus_parse_options(data, &sbi))
 			return -EINVAL;
 
@@ -276,7 +275,7 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
276 "running fsck.hfsplus is recommended. leaving read-only.\n"); 275 "running fsck.hfsplus is recommended. leaving read-only.\n");
277 sb->s_flags |= MS_RDONLY; 276 sb->s_flags |= MS_RDONLY;
278 *flags |= MS_RDONLY; 277 *flags |= MS_RDONLY;
279 } else if (sbi.flags & HFSPLUS_SB_FORCE) { 278 } else if (test_bit(HFSPLUS_SB_FORCE, &sbi.flags)) {
280 /* nothing */ 279 /* nothing */
281 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) { 280 } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
282 printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n"); 281 printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
@@ -320,7 +319,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 
 	sb->s_fs_info = sbi;
-	INIT_HLIST_HEAD(&sbi->rsrc_inodes);
+	mutex_init(&sbi->alloc_mutex);
+	mutex_init(&sbi->vh_mutex);
 	hfsplus_fill_defaults(sbi);
 	if (!hfsplus_parse_options(data, sbi)) {
 		printk(KERN_ERR "hfs: unable to parse mount options\n");
@@ -344,7 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		err = -EINVAL;
 		goto cleanup;
 	}
-	vhdr = HFSPLUS_SB(sb).s_vhdr;
+	vhdr = sbi->s_vhdr;
 
 	/* Copy parts of the volume header into the superblock */
 	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
@@ -353,18 +353,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		printk(KERN_ERR "hfs: wrong filesystem version\n");
 		goto cleanup;
 	}
-	HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
-	HFSPLUS_SB(sb).free_blocks = be32_to_cpu(vhdr->free_blocks);
-	HFSPLUS_SB(sb).next_alloc = be32_to_cpu(vhdr->next_alloc);
-	HFSPLUS_SB(sb).next_cnid = be32_to_cpu(vhdr->next_cnid);
-	HFSPLUS_SB(sb).file_count = be32_to_cpu(vhdr->file_count);
-	HFSPLUS_SB(sb).folder_count = be32_to_cpu(vhdr->folder_count);
-	HFSPLUS_SB(sb).data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-	if (!HFSPLUS_SB(sb).data_clump_blocks)
-		HFSPLUS_SB(sb).data_clump_blocks = 1;
-	HFSPLUS_SB(sb).rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-	if (!HFSPLUS_SB(sb).rsrc_clump_blocks)
-		HFSPLUS_SB(sb).rsrc_clump_blocks = 1;
+	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
+	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
+	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
+	sbi->file_count = be32_to_cpu(vhdr->file_count);
+	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
+	sbi->data_clump_blocks =
+		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
+	if (!sbi->data_clump_blocks)
+		sbi->data_clump_blocks = 1;
+	sbi->rsrc_clump_blocks =
+		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
+	if (!sbi->rsrc_clump_blocks)
+		sbi->rsrc_clump_blocks = 1;
 
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
@@ -374,7 +375,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
 		       "running fsck.hfsplus is recommended. mounting read-only.\n");
 		sb->s_flags |= MS_RDONLY;
-	} else if (sbi->flags & HFSPLUS_SB_FORCE) {
+	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
 		/* nothing */
 	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
 		printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
@@ -384,16 +385,15 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
384 "use the force option at your own risk, mounting read-only.\n"); 385 "use the force option at your own risk, mounting read-only.\n");
385 sb->s_flags |= MS_RDONLY; 386 sb->s_flags |= MS_RDONLY;
386 } 387 }
387 sbi->flags &= ~HFSPLUS_SB_FORCE;
388 388
389 /* Load metadata objects (B*Trees) */ 389 /* Load metadata objects (B*Trees) */
390 HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); 390 sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
391 if (!HFSPLUS_SB(sb).ext_tree) { 391 if (!sbi->ext_tree) {
392 printk(KERN_ERR "hfs: failed to load extents file\n"); 392 printk(KERN_ERR "hfs: failed to load extents file\n");
393 goto cleanup; 393 goto cleanup;
394 } 394 }
395 HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); 395 sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
396 if (!HFSPLUS_SB(sb).cat_tree) { 396 if (!sbi->cat_tree) {
397 printk(KERN_ERR "hfs: failed to load catalog file\n"); 397 printk(KERN_ERR "hfs: failed to load catalog file\n");
398 goto cleanup; 398 goto cleanup;
399 } 399 }
@@ -404,7 +404,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		err = PTR_ERR(inode);
 		goto cleanup;
 	}
-	HFSPLUS_SB(sb).alloc_file = inode;
+	sbi->alloc_file = inode;
 
 	/* Load the root directory */
 	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
@@ -423,7 +423,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
 	str.name = HFSP_HIDDENDIR_NAME;
-	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+	hfs_find_init(sbi->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
 		hfs_find_exit(&fd);
@@ -434,7 +434,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 			err = PTR_ERR(inode);
 			goto cleanup;
 		}
-		HFSPLUS_SB(sb).hidden_dir = inode;
+		sbi->hidden_dir = inode;
 	} else
 		hfs_find_exit(&fd);
 
@@ -449,15 +449,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	be32_add_cpu(&vhdr->write_count, 1);
 	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
 	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-	mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-	sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+	mark_buffer_dirty(sbi->s_vhbh);
+	sync_dirty_buffer(sbi->s_vhbh);
 
-	if (!HFSPLUS_SB(sb).hidden_dir) {
+	if (!sbi->hidden_dir) {
 		printk(KERN_DEBUG "hfs: create hidden dir...\n");
-		HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
-		hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
-				   &str, HFSPLUS_SB(sb).hidden_dir);
-		mark_inode_dirty(HFSPLUS_SB(sb).hidden_dir);
+
+		mutex_lock(&sbi->vh_mutex);
+		sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+		hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
+				   &str, sbi->hidden_dir);
+		mutex_unlock(&sbi->vh_mutex);
+
+		mark_inode_dirty(sbi->hidden_dir);
 	}
 out:
 	unload_nls(sbi->nls);
@@ -486,7 +490,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 
 static void hfsplus_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(hfsplus_inode_cachep, &HFSPLUS_I(inode));
+	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
 }
 
 #define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)
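
[Note: super.c above replaces lock_super()/lock_kernel() with two per-superblock mutexes and consumes the write-backup flag with test_and_clear_bit(). A condensed sketch of the resulting sync path (names from the patch, body elided; this is an illustration, not the full function):

	static int example_sync_fs(struct hfsplus_sb_info *sbi)
	{
		mutex_lock(&sbi->vh_mutex);
		mutex_lock(&sbi->alloc_mutex);

		/* Read and clear the flag in one atomic step, so a writer
		 * setting it concurrently is never lost and never serviced
		 * twice. */
		if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
			/* ... rewrite the backup volume header ... */
		}

		mutex_unlock(&sbi->alloc_mutex);	/* reverse acquisition order */
		mutex_unlock(&sbi->vh_mutex);
		return 0;
	}
]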
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 628ccf6fa402..b66d67de882c 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -121,7 +121,7 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
 int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
 {
 	const hfsplus_unichr *ip;
-	struct nls_table *nls = HFSPLUS_SB(sb).nls;
+	struct nls_table *nls = HFSPLUS_SB(sb)->nls;
 	u8 *op;
 	u16 cc, c0, c1;
 	u16 *ce1, *ce2;
@@ -132,7 +132,7 @@ int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, c
 	ustrlen = be16_to_cpu(ustr->length);
 	len = *len_p;
 	ce1 = NULL;
-	compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+	compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
 
 	while (ustrlen > 0) {
 		c0 = be16_to_cpu(*ip++);
@@ -246,7 +246,7 @@ out:
 static inline int asc2unichar(struct super_block *sb, const char *astr, int len,
 			      wchar_t *uc)
 {
-	int size = HFSPLUS_SB(sb).nls->char2uni(astr, len, uc);
+	int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc);
 	if (size <= 0) {
 		*uc = '?';
 		size = 1;
@@ -293,7 +293,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
 	u16 *dstr, outlen = 0;
 	wchar_t c;
 
-	decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+	decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
 	while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
 		size = asc2unichar(sb, astr, len, &c);
 
@@ -330,8 +330,8 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
 	wchar_t c;
 	u16 c2;
 
-	casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
-	decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+	casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+	decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
 	hash = init_name_hash();
 	astr = str->name;
 	len = str->len;
@@ -373,8 +373,8 @@ int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *
 	u16 c1, c2;
 	wchar_t c;
 
-	casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
-	decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+	casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+	decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
 	astr1 = s1->name;
 	len1 = s1->len;
 	astr2 = s2->name;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index bed78ac8f6d1..8972c20b3216 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -65,8 +65,8 @@ static int hfsplus_get_last_session(struct super_block *sb,
 	*start = 0;
 	*size = sb->s_bdev->bd_inode->i_size >> 9;
 
-	if (HFSPLUS_SB(sb).session >= 0) {
-		te.cdte_track = HFSPLUS_SB(sb).session;
+	if (HFSPLUS_SB(sb)->session >= 0) {
+		te.cdte_track = HFSPLUS_SB(sb)->session;
 		te.cdte_format = CDROM_LBA;
 		res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
 		if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
@@ -87,6 +87,7 @@ static int hfsplus_get_last_session(struct super_block *sb,
 /* Takes in super block, returns true if good data read */
 int hfsplus_read_wrapper(struct super_block *sb)
 {
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct buffer_head *bh;
 	struct hfsplus_vh *vhdr;
 	struct hfsplus_wd wd;
@@ -122,7 +123,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
 		if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
 			break;
 		if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
-			HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX;
+			set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
 			break;
 		}
 		brelse(bh);
@@ -143,11 +144,11 @@ int hfsplus_read_wrapper(struct super_block *sb)
 	if (blocksize < HFSPLUS_SECTOR_SIZE ||
 	    ((blocksize - 1) & blocksize))
 		return -EINVAL;
-	HFSPLUS_SB(sb).alloc_blksz = blocksize;
-	HFSPLUS_SB(sb).alloc_blksz_shift = 0;
+	sbi->alloc_blksz = blocksize;
+	sbi->alloc_blksz_shift = 0;
 	while ((blocksize >>= 1) != 0)
-		HFSPLUS_SB(sb).alloc_blksz_shift++;
-	blocksize = min(HFSPLUS_SB(sb).alloc_blksz, (u32)PAGE_SIZE);
+		sbi->alloc_blksz_shift++;
+	blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);
 
 	/* align block size to block offset */
 	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
@@ -158,23 +159,26 @@ int hfsplus_read_wrapper(struct super_block *sb)
 		return -EINVAL;
 	}
 
-	HFSPLUS_SB(sb).blockoffset = part_start >>
-			(sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
-	HFSPLUS_SB(sb).sect_count = part_size;
-	HFSPLUS_SB(sb).fs_shift = HFSPLUS_SB(sb).alloc_blksz_shift -
-			sb->s_blocksize_bits;
+	sbi->blockoffset =
+		part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
+	sbi->sect_count = part_size;
+	sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
 
 	bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
 	if (!bh)
 		return -EIO;
 
 	/* should still be the same... */
-	if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ?
-				cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) :
-				cpu_to_be16(HFSPLUS_VOLHEAD_SIG)))
-		goto error;
-	HFSPLUS_SB(sb).s_vhbh = bh;
-	HFSPLUS_SB(sb).s_vhdr = vhdr;
+	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+		if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
+			goto error;
+	} else {
+		if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
+			goto error;
+	}
+
+	sbi->s_vhbh = bh;
+	sbi->s_vhdr = vhdr;
 
 	return 0;
 error:
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index cdfb8c6a4206..c16f8d8331b5 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -196,8 +196,6 @@ fh_lock(struct svc_fh *fhp)
 static inline void
 fh_unlock(struct svc_fh *fhp)
 {
-	BUG_ON(!fhp->fh_dentry);
-
 	if (fhp->fh_locked) {
 		fill_post_wcc(fhp);
 		mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..b388443c3a09 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -3,4 +3,4 @@ config FSNOTIFY
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
-source "fs/notify/fanotify/Kconfig"
+#source "fs/notify/fanotify/Kconfig"
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index d59c4a65d492..81976ffed7d6 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -668,14 +668,11 @@ xfs_inode_set_reclaim_tag(
 	xfs_perag_put(pag);
 }
 
-void
-__xfs_inode_clear_reclaim_tag(
-	xfs_mount_t	*mp,
+STATIC void
+__xfs_inode_clear_reclaim(
 	xfs_perag_t	*pag,
 	xfs_inode_t	*ip)
 {
-	radix_tree_tag_clear(&pag->pag_ici_root,
-			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
 	pag->pag_ici_reclaimable--;
 	if (!pag->pag_ici_reclaimable) {
 		/* clear the reclaim tag from the perag radix tree */
@@ -689,6 +686,17 @@ __xfs_inode_clear_reclaim_tag(
 	}
 }
 
+void
+__xfs_inode_clear_reclaim_tag(
+	xfs_mount_t	*mp,
+	xfs_perag_t	*pag,
+	xfs_inode_t	*ip)
+{
+	radix_tree_tag_clear(&pag->pag_ici_root,
+			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+	__xfs_inode_clear_reclaim(pag, ip);
+}
+
 /*
  * Inodes in different states need to be treated differently, and the return
  * value of xfs_iflush is not sufficient to get this right. The following table
@@ -838,6 +846,7 @@ reclaim:
 	if (!radix_tree_delete(&pag->pag_ici_root,
 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
 		ASSERT(0);
+	__xfs_inode_clear_reclaim(pag, ip);
 	write_unlock(&pag->pag_ici_lock);
 
 	/*
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 267a86c74e2e..2040e6c4f172 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -246,9 +246,11 @@ struct ttm_buffer_object {
 
 	atomic_t reserved;
 
-
 	/**
 	 * Members protected by the bo::lock
+	 * In addition, setting sync_obj to anything else
+	 * than NULL requires bo::reserved to be held. This allows for
+	 * checking NULL while reserved but not holding bo::lock.
 	 */
 
 	void *sync_obj_arg;
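
[Note: the new comment above documents a mixed locking rule for sync_obj. A hypothetical reader/writer pair showing the intent; example_bo and both functions are invented for illustration and are not TTM API:

	struct example_bo {
		spinlock_t lock;	/* stands in for bo::lock */
		void *sync_obj;		/* fence, protected as described above */
	};

	/* Writer: caller already holds the buffer reservation (bo::reserved). */
	static void example_set_sync_obj(struct example_bo *bo, void *fence)
	{
		spin_lock(&bo->lock);
		bo->sync_obj = fence;	/* non-NULL store needs reserved + lock */
		spin_unlock(&bo->lock);
	}

	/* Reader: the reservation alone is enough for a NULL check. */
	static bool example_bo_is_idle(struct example_bo *bo)
	{
		return bo->sync_obj == NULL;	/* no bo->lock needed here */
	}

The design point is cheap idle checks: a reserved-but-unfenced buffer can be detected without taking the fence lock.]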
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 626b629429ff..4e8ea8c8ec1e 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,7 +118,6 @@ header-y += eventpoll.h
 header-y += ext2_fs.h
 header-y += fadvise.h
 header-y += falloc.h
-header-y += fanotify.h
 header-y += fb.h
 header-y += fcntl.h
 header-y += fd.h
diff --git a/fs/ceph/auth.h b/include/linux/ceph/auth.h
index d38a2fb4a137..7fff521d7eb5 100644
--- a/fs/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -1,8 +1,8 @@
 #ifndef _FS_CEPH_AUTH_H
 #define _FS_CEPH_AUTH_H
 
-#include "types.h"
-#include "buffer.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
 
 /*
  * Abstract interface for communicating with the authenticate module.
diff --git a/fs/ceph/buffer.h b/include/linux/ceph/buffer.h
index 58d19014068f..58d19014068f 100644
--- a/fs/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
diff --git a/fs/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index 1818c2305610..aa2e19182d99 100644
--- a/fs/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -3,7 +3,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#ifdef CONFIG_CEPH_FS_PRETTYDEBUG
+#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
 
 /*
  * wrap pr_debug to include a filename:lineno prefix on each line.
@@ -14,7 +14,8 @@
 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 extern const char *ceph_file_part(const char *s, int len);
 # define dout(fmt, ...)						\
-	pr_debug(" %12.12s:%-4d : " fmt,			\
+	pr_debug("%.*s %12.12s:%-4d : " fmt,			\
+		 8 - (int)sizeof(KBUILD_MODNAME), " ",		\
 		 ceph_file_part(__FILE__, sizeof(__FILE__)),	\
 		 __LINE__, ##__VA_ARGS__)
 # else
diff --git a/fs/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index 5babb8e95352..5babb8e95352 100644
--- a/fs/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
diff --git a/fs/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index d5619ac86711..c3c74aef289d 100644
--- a/fs/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -299,6 +299,7 @@ enum {
 	CEPH_MDS_OP_SETATTR    = 0x01108,
 	CEPH_MDS_OP_SETFILELOCK= 0x01109,
 	CEPH_MDS_OP_GETFILELOCK= 0x00110,
+	CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,
 
 	CEPH_MDS_OP_MKNOD      = 0x01201,
 	CEPH_MDS_OP_LINK       = 0x01202,
diff --git a/fs/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h
index d099c3f90236..d099c3f90236 100644
--- a/fs/ceph/ceph_hash.h
+++ b/include/linux/ceph/ceph_hash.h
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
new file mode 100644
index 000000000000..2a79702e092b
--- /dev/null
+++ b/include/linux/ceph/debugfs.h
@@ -0,0 +1,33 @@
+#ifndef _FS_CEPH_DEBUGFS_H
+#define _FS_CEPH_DEBUGFS_H
+
+#include "ceph_debug.h"
+#include "types.h"
+
+#define CEPH_DEFINE_SHOW_FUNC(name)					\
+static int name##_open(struct inode *inode, struct file *file)		\
+{									\
+	struct seq_file *sf;						\
+	int ret;							\
+									\
+	ret = single_open(file, name, NULL);				\
+	sf = file->private_data;					\
+	sf->private = inode->i_private;					\
+	return ret;							\
+}									\
+									\
+static const struct file_operations name##_fops = {			\
+	.open		= name##_open,					\
+	.read		= seq_read,					\
+	.llseek		= seq_lseek,					\
+	.release	= single_release,				\
+};
+
+/* debugfs.c */
+extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_cleanup(void);
+extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+
+#endif
+
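
[Note: a hypothetical use of the CEPH_DEFINE_SHOW_FUNC macro introduced above. Given a seq_file show callback, the macro generates <name>_open plus <name>_fops; the generated open hands inode->i_private to the show function via seq_file->private, which matches the data argument of debugfs_create_file(). The "monmap_show" name and printed field are assumptions for illustration, not code from this patch:

	static int monmap_show(struct seq_file *s, void *p)
	{
		struct ceph_client *client = s->private;  /* == inode->i_private */

		seq_printf(s, "have_fsid %d\n", client->have_fsid);
		return 0;
	}

	CEPH_DEFINE_SHOW_FUNC(monmap_show)

	/* then, during client init:
	 *	debugfs_create_file("monmap", 0400, client->debugfs_dir,
	 *			    client, &monmap_show_fops);
	 */
]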
diff --git a/fs/ceph/decode.h b/include/linux/ceph/decode.h
index 3d25415afe63..c5b6939fb32a 100644
--- a/fs/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -191,6 +191,11 @@ static inline void ceph_encode_string(void **p, void *end,
 		ceph_encode_need(p, end, n, bad);		\
 		ceph_encode_copy(p, pv, n);			\
 	} while (0)
+#define ceph_encode_string_safe(p, end, s, n, bad)		\
+	do {							\
+		ceph_encode_need(p, end, n, bad);		\
+		ceph_encode_string(p, end, s, n);		\
+	} while (0)
 
 
 #endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
new file mode 100644
index 000000000000..f22b2e941686
--- /dev/null
+++ b/include/linux/ceph/libceph.h
@@ -0,0 +1,249 @@
+#ifndef _FS_CEPH_LIBCEPH_H
+#define _FS_CEPH_LIBCEPH_H
+
+#include "ceph_debug.h"
+
+#include <asm/unaligned.h>
+#include <linux/backing-dev.h>
+#include <linux/completion.h>
+#include <linux/exportfs.h>
+#include <linux/fs.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/writeback.h>
+#include <linux/slab.h>
+
+#include "types.h"
+#include "messenger.h"
+#include "msgpool.h"
+#include "mon_client.h"
+#include "osd_client.h"
+#include "ceph_fs.h"
+
+/*
+ * Supported features
+ */
+#define CEPH_FEATURE_SUPPORTED_DEFAULT	CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_DEFAULT	CEPH_FEATURE_NOSRCADDR
+
+/*
+ * mount options
+ */
+#define CEPH_OPT_FSID             (1<<0)
+#define CEPH_OPT_NOSHARE          (1<<1) /* don't share client with other sbs */
+#define CEPH_OPT_MYIP             (1<<2) /* specified my ip */
+#define CEPH_OPT_NOCRC            (1<<3) /* no data crc on writes */
+
+#define CEPH_OPT_DEFAULT   (0);
+
+#define ceph_set_opt(client, opt) \
+	(client)->options->flags |= CEPH_OPT_##opt;
+#define ceph_test_opt(client, opt) \
+	(!!((client)->options->flags & CEPH_OPT_##opt))
+
+struct ceph_options {
+	int flags;
+	struct ceph_fsid fsid;
+	struct ceph_entity_addr my_addr;
+	int mount_timeout;
+	int osd_idle_ttl;
+	int osd_timeout;
+	int osd_keepalive_timeout;
+
+	/*
+	 * any type that can't be simply compared or doesn't need need
+	 * to be compared should go beyond this point,
+	 * ceph_compare_options() should be updated accordingly
+	 */
+
+	struct ceph_entity_addr *mon_addr; /* should be the first
+					      pointer type of args */
+	int num_mon;
+	char *name;
+	char *secret;
+};
+
+/*
+ * defaults
+ */
+#define CEPH_MOUNT_TIMEOUT_DEFAULT  60
+#define CEPH_OSD_TIMEOUT_DEFAULT    60  /* seconds */
+#define CEPH_OSD_KEEPALIVE_DEFAULT  5
+#define CEPH_OSD_IDLE_TTL_DEFAULT   60
+#define CEPH_MOUNT_RSIZE_DEFAULT    (512*1024) /* readahead */
+
+#define CEPH_MSG_MAX_FRONT_LEN	(16*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN	(16*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT   "guest"
+
+/*
+ * Delay telling the MDS we no longer want caps, in case we reopen
+ * the file.  Delay a minimum amount of time, even if we send a cap
+ * message for some other reason.  Otherwise, take the oppotunity to
+ * update the mds to avoid sending another message later.
+ */
+#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT      5  /* cap release delay */
+#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */
+
+#define CEPH_CAP_RELEASE_SAFETY_DEFAULT        (CEPH_CAPS_PER_RELEASE * 4)
+
+/* mount state */
+enum {
+	CEPH_MOUNT_MOUNTING,
+	CEPH_MOUNT_MOUNTED,
+	CEPH_MOUNT_UNMOUNTING,
+	CEPH_MOUNT_UNMOUNTED,
+	CEPH_MOUNT_SHUTDOWN,
+};
+
+/*
+ * subtract jiffies
+ */
+static inline unsigned long time_sub(unsigned long a, unsigned long b)
+{
+	BUG_ON(time_after(b, a));
+	return (long)a - (long)b;
+}
+
+struct ceph_mds_client;
+
+/*
+ * per client state
+ *
+ * possibly shared by multiple mount points, if they are
+ * mounting the same ceph filesystem/cluster.
+ */
+struct ceph_client {
+	struct ceph_fsid fsid;
+	bool have_fsid;
+
+	void *private;
+
+	struct ceph_options *options;
+
+	struct mutex mount_mutex;      /* serialize mount attempts */
+	wait_queue_head_t auth_wq;
+	int auth_err;
+
+	int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *);
+
+	u32 supported_features;
+	u32 required_features;
+
+	struct ceph_messenger *msgr;   /* messenger instance */
+	struct ceph_mon_client monc;
+	struct ceph_osd_client osdc;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_monmap;
+	struct dentry *debugfs_osdmap;
+#endif
+};
+
+
+
+/*
+ * snapshots
+ */
+
+/*
+ * A "snap context" is the set of existing snapshots when we
+ * write data.  It is used by the OSD to guide its COW behavior.
+ *
+ * The ceph_snap_context is refcounted, and attached to each dirty
+ * page, indicating which context the dirty data belonged when it was
+ * dirtied.
+ */
+struct ceph_snap_context {
+	atomic_t nref;
+	u64 seq;
+	int num_snaps;
+	u64 snaps[];
+};
+
+static inline struct ceph_snap_context *
+ceph_get_snap_context(struct ceph_snap_context *sc)
+{
+	/*
+	printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+	       atomic_read(&sc->nref)+1);
+	*/
+	if (sc)
+		atomic_inc(&sc->nref);
+	return sc;
+}
+
+static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
+{
+	if (!sc)
+		return;
+	/*
+	printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+	       atomic_read(&sc->nref)-1);
+	*/
+	if (atomic_dec_and_test(&sc->nref)) {
+		/*printk(" deleting snap_context %p\n", sc);*/
+		kfree(sc);
+	}
+}
+
+/*
+ * calculate the number of pages a given length and offset map onto,
+ * if we align the data.
+ */
+static inline int calc_pages_for(u64 off, u64 len)
+{
+	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
+		(off >> PAGE_CACHE_SHIFT);
+}
+
+/* ceph_common.c */
+extern const char *ceph_msg_type_name(int type);
+extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
+extern struct kmem_cache *ceph_inode_cachep;
+extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_dentry_cachep;
+extern struct kmem_cache *ceph_file_cachep;
+
+extern int ceph_parse_options(struct ceph_options **popt, char *options,
+			      const char *dev_name, const char *dev_name_end,
+			      int (*parse_extra_token)(char *c, void *private),
+			      void *private);
+extern void ceph_destroy_options(struct ceph_options *opt);
+extern int ceph_compare_options(struct ceph_options *new_opt,
+				struct ceph_client *client);
+extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
+					      void *private);
+extern u64 ceph_client_id(struct ceph_client *client);
+extern void ceph_destroy_client(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client,
+			       unsigned long started);
+extern int ceph_open_session(struct ceph_client *client);
+
+/* pagevec.c */
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+
+extern struct page **ceph_get_direct_page_vector(const char __user *data,
+						 int num_pages,
+						 loff_t off, size_t len);
+extern void ceph_put_page_vector(struct page **pages, int num_pages);
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+extern int ceph_copy_user_to_page_vector(struct page **pages,
+					 const char __user *data,
+					 loff_t off, size_t len);
+extern int ceph_copy_to_page_vector(struct page **pages,
+				    const char *data,
+				    loff_t off, size_t len);
+extern int ceph_copy_from_page_vector(struct page **pages,
+				      char *data,
+				      loff_t off, size_t len);
+extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
+					 loff_t off, size_t len);
+extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
+
+
+#endif /* _FS_CEPH_SUPER_H */
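
[Note: a quick worked check of calc_pages_for() from the new header, assuming 4 KB pages; the values are illustrative:

	/*
	 * off = 0x0ff0, len = 0x20: the extent covers bytes 0x0ff0..0x100f,
	 * i.e. it straddles the page 0 / page 1 boundary.
	 *
	 *   ((0x0ff0 + 0x20 + 0xfff) >> 12) - (0x0ff0 >> 12)
	 * = (0x200f >> 12) - 0
	 * = 2 pages
	 */

Rounding the end up and the start down before subtracting is what makes the count correct for extents that cross page boundaries.]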
diff --git a/fs/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 4c5cb0880bba..4c5cb0880bba 100644
--- a/fs/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
diff --git a/fs/ceph/messenger.h b/include/linux/ceph/messenger.h
index 76fbc957bc13..5956d62c3057 100644
--- a/fs/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -65,6 +65,9 @@ struct ceph_messenger {
 	 */
 	u32 global_seq;
 	spinlock_t global_seq_lock;
+
+	u32 supported_features;
+	u32 required_features;
 };
 
 /*
@@ -82,6 +85,10 @@ struct ceph_msg {
 	struct ceph_pagelist *pagelist; /* instead of pages */
 	struct list_head list_head;
 	struct kref kref;
+	struct bio  *bio;		/* instead of pages/pagelist */
+	struct bio  *bio_iter;		/* bio iterator */
+	int bio_seg;			/* current bio segment */
+	struct ceph_pagelist *trail;	/* the trailing part of the data */
 	bool front_is_vmalloc;
 	bool more_to_follow;
 	bool needs_out_seq;
@@ -205,7 +212,7 @@ struct ceph_connection {
 };
 
 
-extern const char *pr_addr(const struct sockaddr_storage *ss);
+extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
 extern int ceph_parse_ips(const char *c, const char *end,
 			  struct ceph_entity_addr *addr,
 			  int max_count, int *count);
@@ -216,7 +223,8 @@ extern void ceph_msgr_exit(void);
 extern void ceph_msgr_flush(void);
 
 extern struct ceph_messenger *ceph_messenger_create(
-	struct ceph_entity_addr *myaddr);
+	struct ceph_entity_addr *myaddr,
+	u32 features, u32 required);
 extern void ceph_messenger_destroy(struct ceph_messenger *);
 
 extern void ceph_con_init(struct ceph_messenger *msgr,
diff --git a/fs/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 8e396f2c0963..545f85917780 100644
--- a/fs/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -79,6 +79,7 @@ struct ceph_mon_client {
 	u64 last_tid;
 
 	/* mds/osd map */
+	int want_mdsmap;
 	int want_next_osdmap; /* 1 = want, 2 = want+asked */
 	u32 have_osdmap, have_mdsmap;
 
diff --git a/fs/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index a362605f9368..a362605f9368 100644
--- a/fs/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
diff --git a/fs/ceph/msgr.h b/include/linux/ceph/msgr.h
index 680d3d648cac..680d3d648cac 100644
--- a/fs/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
diff --git a/fs/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ce776989ef6a..6c91fb032c39 100644
--- a/fs/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -15,6 +15,7 @@ struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
 struct ceph_authorizer;
+struct ceph_pagelist;
 
 /*
  * completion callback for async writepages
@@ -68,6 +69,7 @@ struct ceph_osd_request {
 	struct list_head r_unsafe_item;
 
 	struct inode *r_inode;		      /* for use by callbacks */
+	void *r_priv;			      /* ditto */
 
 	char r_oid[40];			      /* object name */
 	int r_oid_len;
@@ -80,6 +82,11 @@ struct ceph_osd_request {
 	struct page **r_pages;		      /* pages for data payload */
 	int r_pages_from_pool;
 	int r_own_pages;		      /* if true, i own page list */
+#ifdef CONFIG_BLOCK
+	struct bio *r_bio;		      /* instead of pages */
+#endif
+
+	struct ceph_pagelist *r_trail;	      /* trailing part of the data */
 };
 
 struct ceph_osd_client {
@@ -110,6 +117,42 @@ struct ceph_osd_client {
 	struct ceph_msgpool	msgpool_op_reply;
 };
 
+struct ceph_osd_req_op {
+	u16 op;           /* CEPH_OSD_OP_* */
+	u32 flags;        /* CEPH_OSD_FLAG_* */
+	union {
+		struct {
+			u64 offset, length;
+			u64 truncate_size;
+			u32 truncate_seq;
+		} extent;
+		struct {
+			const char *name;
+			u32 name_len;
+			const char *val;
+			u32 value_len;
+			__u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
+			__u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
+		} xattr;
+		struct {
+			const char *class_name;
+			__u8 class_len;
+			const char *method_name;
+			__u8 method_len;
+			__u8 argc;
+			const char *indata;
+			u32 indata_len;
+		} cls;
+		struct {
+			u64 cookie, count;
+		} pgls;
+		struct {
+			u64 snapid;
+		} snap;
+	};
+	u32 payload_len;
+};
+
 extern int ceph_osdc_init(struct ceph_osd_client *osdc,
 			  struct ceph_client *client);
 extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
@@ -119,6 +162,30 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
 				 struct ceph_msg *msg);
 
+extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+			struct ceph_file_layout *layout,
+			u64 snapid,
+			u64 off, u64 *plen, u64 *bno,
+			struct ceph_osd_request *req,
+			struct ceph_osd_req_op *op);
+
+extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+					       int flags,
+					       struct ceph_snap_context *snapc,
+					       struct ceph_osd_req_op *ops,
+					       bool use_mempool,
+					       gfp_t gfp_flags,
+					       struct page **pages,
+					       struct bio *bio);
+
+extern void ceph_osdc_build_request(struct ceph_osd_request *req,
+				    u64 off, u64 *plen,
+				    struct ceph_osd_req_op *src_ops,
+				    struct ceph_snap_context *snapc,
+				    struct timespec *mtime,
+				    const char *oid,
+				    int oid_len);
+
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
 				      struct ceph_file_layout *layout,
 				      struct ceph_vino vino,
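
[Note: a sketch of how a caller might fill the new ceph_osd_req_op for a plain extent read before passing it to ceph_osdc_build_request(). This is an assumed usage based only on the declarations above; CEPH_OSD_OP_READ comes from rados.h, and the helper name is invented:

	static void example_fill_read_op(struct ceph_osd_req_op *op,
					 u64 off, u64 len)
	{
		memset(op, 0, sizeof(*op));	/* leave unused union members zeroed */
		op->op = CEPH_OSD_OP_READ;
		op->extent.offset = off;
		op->extent.length = len;
		/* truncate_size/truncate_seq stay 0 for a simple read */
	}
]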
diff --git a/fs/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 970b547e510d..ba4c205cbb01 100644
--- a/fs/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -4,7 +4,7 @@
 #include <linux/rbtree.h>
 #include "types.h"
 #include "ceph_fs.h"
-#include "crush/crush.h"
+#include <linux/crush/crush.h>
 
 /*
  * The osd map describes the current membership of the osd cluster and
@@ -125,4 +125,6 @@ extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
 extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
 				struct ceph_pg pgid);
 
+extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+
 #endif
diff --git a/fs/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index e8a4187e1087..9660d6b0a35d 100644
--- a/fs/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -8,6 +8,14 @@ struct ceph_pagelist {
 	void *mapped_tail;
 	size_t length;
 	size_t room;
+	struct list_head free_list;
+	size_t num_pages_free;
+};
+
+struct ceph_pagelist_cursor {
+	struct ceph_pagelist *pl;   /* pagelist, for error checking */
+	struct list_head *page_lru; /* page in list */
+	size_t room;		    /* room remaining to reset to */
 };
 
 static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
@@ -16,10 +24,23 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
 	pl->mapped_tail = NULL;
 	pl->length = 0;
 	pl->room = 0;
+	INIT_LIST_HEAD(&pl->free_list);
+	pl->num_pages_free = 0;
 }
+
 extern int ceph_pagelist_release(struct ceph_pagelist *pl);
 
-extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l);
+extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
+
+extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
+
+extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
+
+extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+				     struct ceph_pagelist_cursor *c);
+
+extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+				  struct ceph_pagelist_cursor *c);
 
 static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
 {
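
[Note: the cursor interface added above suggests a save/append/roll-back pattern. An assumed usage sketch; the helper is invented for illustration and only the ceph_pagelist_* calls are from the header:

	static int example_append_two(struct ceph_pagelist *pl,
				      const void *a, size_t alen,
				      const void *b, size_t blen)
	{
		struct ceph_pagelist_cursor c;
		int ret;

		ceph_pagelist_set_cursor(pl, &c);	/* remember current tail */
		ret = ceph_pagelist_append(pl, a, alen);
		if (!ret)
			ret = ceph_pagelist_append(pl, b, blen);
		if (ret)
			ceph_pagelist_truncate(pl, &c);	/* drop the partial append */
		return ret;
	}

Reserved pages on free_list make the speculative appends cheap to undo, since truncation only returns pages to the list.]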
diff --git a/fs/ceph/rados.h b/include/linux/ceph/rados.h
index 6d5247f2e81b..6d5247f2e81b 100644
--- a/fs/ceph/rados.h
+++ b/include/linux/ceph/rados.h
diff --git a/fs/ceph/types.h b/include/linux/ceph/types.h
index 28b35a005ec2..28b35a005ec2 100644
--- a/fs/ceph/types.h
+++ b/include/linux/ceph/types.h
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0c991023ee47..709dfb901d11 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -75,7 +75,7 @@ struct cgroup_subsys_state {
75 75
76 unsigned long flags; 76 unsigned long flags;
77 /* ID for this css, if possible */ 77 /* ID for this css, if possible */
78 struct css_id *id; 78 struct css_id __rcu *id;
79}; 79};
80 80
81/* bits in struct cgroup_subsys_state flags field */ 81/* bits in struct cgroup_subsys_state flags field */
@@ -205,7 +205,7 @@ struct cgroup {
205 struct list_head children; /* my children */ 205 struct list_head children; /* my children */
206 206
207 struct cgroup *parent; /* my parent */ 207 struct cgroup *parent; /* my parent */
208 struct dentry *dentry; /* cgroup fs entry, RCU protected */ 208 struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */
209 209
210 /* Private pointers for each registered subsystem */ 210 /* Private pointers for each registered subsystem */
211 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; 211 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1a62c56a660..320d6c94ff84 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,7 +16,11 @@
16# define __release(x) __context__(x,-1) 16# define __release(x) __context__(x,-1)
17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
18# define __percpu __attribute__((noderef, address_space(3))) 18# define __percpu __attribute__((noderef, address_space(3)))
19#ifdef CONFIG_SPARSE_RCU_POINTER
20# define __rcu __attribute__((noderef, address_space(4)))
21#else
19# define __rcu 22# define __rcu
23#endif
20extern void __chk_user_ptr(const volatile void __user *); 24extern void __chk_user_ptr(const volatile void __user *);
21extern void __chk_io_ptr(const volatile void __iomem *); 25extern void __chk_io_ptr(const volatile void __iomem *);
22#else 26#else
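
With CONFIG_SPARSE_RCU_POINTER enabled, __rcu becomes a sparse address space (like __user or __iomem), so a direct dereference of an RCU-protected pointer is flagged when the kernel is checked with sparse (make C=1); the rcu_dereference() family casts the address space away with __force. A minimal sketch of what the annotation catches, with gp and struct foo as illustrative names:

struct foo {
	int val;
};

static struct foo __rcu *gp;	/* readers must go through accessors */

static int reader(void)
{
	struct foo *p;
	int v = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* strips __rcu, adds barrier */
	if (p)
		v = p->val;
	rcu_read_unlock();
	return v;
}

Writing "v = gp->val;" directly would draw a sparse warning about mixing address spaces, which is exactly the documentation value the annotation adds.
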
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 8ba66a9d9022..ba4b85a6d9b8 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -9,37 +9,7 @@
9 * These are the only things you should do on a core-file: use only these 9 * These are the only things you should do on a core-file: use only these
10 * functions to write out all the necessary info. 10 * functions to write out all the necessary info.
11 */ 11 */
12static inline int dump_write(struct file *file, const void *addr, int nr) 12extern int dump_write(struct file *file, const void *addr, int nr);
13{ 13extern int dump_seek(struct file *file, loff_t off);
14 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
15}
16
17static inline int dump_seek(struct file *file, loff_t off)
18{
19 int ret = 1;
20
21 if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
22 if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
23 return 0;
24 } else {
25 char *buf = (char *)get_zeroed_page(GFP_KERNEL);
26
27 if (!buf)
28 return 0;
29 while (off > 0) {
30 unsigned long n = off;
31
32 if (n > PAGE_SIZE)
33 n = PAGE_SIZE;
34 if (!dump_write(file, buf, n)) {
35 ret = 0;
36 break;
37 }
38 off -= n;
39 }
40 free_page((unsigned long)buf);
41 }
42 return ret;
43}
44 14
45#endif /* _LINUX_COREDUMP_H */ 15#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4d2c39573f36..4aaeab376446 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -84,7 +84,7 @@ struct thread_group_cred {
84 atomic_t usage; 84 atomic_t usage;
85 pid_t tgid; /* thread group process ID */ 85 pid_t tgid; /* thread group process ID */
86 spinlock_t lock; 86 spinlock_t lock;
87 struct key *session_keyring; /* keyring inherited over fork */ 87 struct key __rcu *session_keyring; /* keyring inherited over fork */
88 struct key *process_keyring; /* keyring private to this process */ 88 struct key *process_keyring; /* keyring private to this process */
89 struct rcu_head rcu; /* RCU deletion hook */ 89 struct rcu_head rcu; /* RCU deletion hook */
90}; 90};
diff --git a/fs/ceph/crush/crush.h b/include/linux/crush/crush.h
index 97e435b191f4..97e435b191f4 100644
--- a/fs/ceph/crush/crush.h
+++ b/include/linux/crush/crush.h
diff --git a/fs/ceph/crush/hash.h b/include/linux/crush/hash.h
index 91e884230d5d..91e884230d5d 100644
--- a/fs/ceph/crush/hash.h
+++ b/include/linux/crush/hash.h
diff --git a/fs/ceph/crush/mapper.h b/include/linux/crush/mapper.h
index c46b99c18bb0..c46b99c18bb0 100644
--- a/fs/ceph/crush/mapper.h
+++ b/include/linux/crush/mapper.h
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 29b3ce3f2a1d..2833452ea01c 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -49,7 +49,6 @@ struct task_struct;
49 49
50#ifdef CONFIG_LOCKDEP 50#ifdef CONFIG_LOCKDEP
51extern void debug_show_all_locks(void); 51extern void debug_show_all_locks(void);
52extern void __debug_show_held_locks(struct task_struct *task);
53extern void debug_show_held_locks(struct task_struct *task); 52extern void debug_show_held_locks(struct task_struct *task);
54extern void debug_check_no_locks_freed(const void *from, unsigned long len); 53extern void debug_check_no_locks_freed(const void *from, unsigned long len);
55extern void debug_check_no_locks_held(struct task_struct *task); 54extern void debug_check_no_locks_held(struct task_struct *task);
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void)
58{ 57{
59} 58}
60 59
61static inline void __debug_show_held_locks(struct task_struct *task)
62{
63}
64
65static inline void debug_show_held_locks(struct task_struct *task) 60static inline void debug_show_held_locks(struct task_struct *task)
66{ 61{
67} 62}
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 926b50322a46..4fd978e7eb83 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -93,6 +93,7 @@ struct elevator_queue
93 struct elevator_type *elevator_type; 93 struct elevator_type *elevator_type;
94 struct mutex sysfs_lock; 94 struct mutex sysfs_lock;
95 struct hlist_head *hash; 95 struct hlist_head *hash;
96 unsigned int registered:1;
96}; 97};
97 98
98/* 99/*
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f59ed297b661..133c0ba25e30 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -31,7 +31,7 @@ struct embedded_fd_set {
31 31
32struct fdtable { 32struct fdtable {
33 unsigned int max_fds; 33 unsigned int max_fds;
34 struct file ** fd; /* current fd array */ 34 struct file __rcu **fd; /* current fd array */
35 fd_set *close_on_exec; 35 fd_set *close_on_exec;
36 fd_set *open_fds; 36 fd_set *open_fds;
37 struct rcu_head rcu; 37 struct rcu_head rcu;
@@ -46,7 +46,7 @@ struct files_struct {
46 * read mostly part 46 * read mostly part
47 */ 47 */
48 atomic_t count; 48 atomic_t count;
49 struct fdtable *fdt; 49 struct fdtable __rcu *fdt;
50 struct fdtable fdtab; 50 struct fdtable fdtab;
51 /* 51 /*
52 * written part on a separate cache line in SMP 52 * written part on a separate cache line in SMP
@@ -55,7 +55,7 @@ struct files_struct {
55 int next_fd; 55 int next_fd;
56 struct embedded_fd_set close_on_exec_init; 56 struct embedded_fd_set close_on_exec_init;
57 struct embedded_fd_set open_fds_init; 57 struct embedded_fd_set open_fds_init;
58 struct file * fd_array[NR_OPEN_DEFAULT]; 58 struct file __rcu * fd_array[NR_OPEN_DEFAULT];
59}; 59};
60 60
61#define rcu_dereference_check_fdtable(files, fdtfd) \ 61#define rcu_dereference_check_fdtable(files, fdtfd) \
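
With ->fd and ->fdt now __rcu-annotated, lockless readers are expected to reach the table through the RCU helpers; the rcu_dereference_check_fdtable() macro in the surrounding context exists for that purpose. A hedged sketch of a typical lockless lookup, modeled on the kernel's files_fdtable() pattern (the function name is illustrative):

static bool fd_is_open_rcu(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt;
	bool open = false;

	rcu_read_lock();
	fdt = files_fdtable(files);	/* RCU-safe fetch of files->fdt */
	if (fd < fdt->max_fds && rcu_dereference(fdt->fd[fd]) != NULL)
		open = true;
	rcu_read_unlock();
	return open;
}
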
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 63d069bd80b7..3168dcfb94f2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1384,7 +1384,7 @@ struct super_block {
1384 * Saved mount options for lazy filesystems using 1384 * Saved mount options for lazy filesystems using
1385 * generic_show_options() 1385 * generic_show_options()
1386 */ 1386 */
1387 char *s_options; 1387 char __rcu *s_options;
1388}; 1388};
1389 1389
1390extern struct timespec current_fs_time(struct super_block *sb); 1390extern struct timespec current_fs_time(struct super_block *sb);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5f2f4c4d8fb0..af3f06b41dc1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,8 +129,8 @@ struct blk_scsi_cmd_filter {
129struct disk_part_tbl { 129struct disk_part_tbl {
130 struct rcu_head rcu_head; 130 struct rcu_head rcu_head;
131 int len; 131 int len;
132 struct hd_struct *last_lookup; 132 struct hd_struct __rcu *last_lookup;
133 struct hd_struct *part[]; 133 struct hd_struct __rcu *part[];
134}; 134};
135 135
136struct gendisk { 136struct gendisk {
@@ -149,7 +149,7 @@ struct gendisk {
149 * non-critical accesses use RCU. Always access through 149 * non-critical accesses use RCU. Always access through
150 * helpers. 150 * helpers.
151 */ 151 */
152 struct disk_part_tbl *part_tbl; 152 struct disk_part_tbl __rcu *part_tbl;
153 struct hd_struct part0; 153 struct hd_struct part0;
154 154
155 const struct block_device_operations *fops; 155 const struct block_device_operations *fops;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d5b387669dab..1f4517d55b19 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,7 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
139#endif 139#endif
140 140
141#if defined(CONFIG_NO_HZ) 141#if defined(CONFIG_NO_HZ)
142#if defined(CONFIG_TINY_RCU) 142#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
143extern void rcu_enter_nohz(void); 143extern void rcu_enter_nohz(void);
144extern void rcu_exit_nohz(void); 144extern void rcu_exit_nohz(void);
145 145
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e968db71e33a..cdb715e58e3e 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -50,14 +50,14 @@
50 50
51struct idr_layer { 51struct idr_layer {
52 unsigned long bitmap; /* A zero bit means "space here" */ 52 unsigned long bitmap; /* A zero bit means "space here" */
53 struct idr_layer *ary[1<<IDR_BITS]; 53 struct idr_layer __rcu *ary[1<<IDR_BITS];
54 int count; /* When zero, we can release it */ 54 int count; /* When zero, we can release it */
55 int layer; /* distance from leaf */ 55 int layer; /* distance from leaf */
56 struct rcu_head rcu_head; 56 struct rcu_head rcu_head;
57}; 57};
58 58
59struct idr { 59struct idr {
60 struct idr_layer *top; 60 struct idr_layer __rcu *top;
61 struct idr_layer *id_free; 61 struct idr_layer *id_free;
62 int layers; /* only valid without concurrent changes */ 62 int layers; /* only valid without concurrent changes */
63 int id_free_cnt; 63 int id_free_cnt;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f43fa56f600..2fea6c8ef6ba 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -82,11 +82,17 @@ extern struct group_info init_groups;
82# define CAP_INIT_BSET CAP_FULL_SET 82# define CAP_INIT_BSET CAP_FULL_SET
83 83
84#ifdef CONFIG_TREE_PREEMPT_RCU 84#ifdef CONFIG_TREE_PREEMPT_RCU
85#define INIT_TASK_RCU_TREE_PREEMPT() \
86 .rcu_blocked_node = NULL,
87#else
88#define INIT_TASK_RCU_TREE_PREEMPT()
89#endif
90#ifdef CONFIG_PREEMPT_RCU
85#define INIT_TASK_RCU_PREEMPT(tsk) \ 91#define INIT_TASK_RCU_PREEMPT(tsk) \
86 .rcu_read_lock_nesting = 0, \ 92 .rcu_read_lock_nesting = 0, \
87 .rcu_read_unlock_special = 0, \ 93 .rcu_read_unlock_special = 0, \
88 .rcu_blocked_node = NULL, \ 94 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
89 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), 95 INIT_TASK_RCU_TREE_PREEMPT()
90#else 96#else
91#define INIT_TASK_RCU_PREEMPT(tsk) 97#define INIT_TASK_RCU_PREEMPT(tsk)
92#endif 98#endif
@@ -137,8 +143,8 @@ extern struct cred init_cred;
137 .children = LIST_HEAD_INIT(tsk.children), \ 143 .children = LIST_HEAD_INIT(tsk.children), \
138 .sibling = LIST_HEAD_INIT(tsk.sibling), \ 144 .sibling = LIST_HEAD_INIT(tsk.sibling), \
139 .group_leader = &tsk, \ 145 .group_leader = &tsk, \
140 .real_cred = &init_cred, \ 146 RCU_INIT_POINTER(.real_cred, &init_cred), \
141 .cred = &init_cred, \ 147 RCU_INIT_POINTER(.cred, &init_cred), \
142 .cred_guard_mutex = \ 148 .cred_guard_mutex = \
143 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ 149 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
144 .comm = "swapper", \ 150 .comm = "swapper", \
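
The initializer above switches to RCU_INIT_POINTER() because ->real_cred and ->cred are now __rcu-annotated in struct task_struct. Unlike rcu_assign_pointer(), RCU_INIT_POINTER() omits the write memory barrier, which is safe only when no reader can yet see the pointer, as in this static initializer, or when the new value is NULL. A minimal sketch of the distinction, with gp and struct foo as illustrative names:

static struct foo __rcu *gp;

static void publish(struct foo *p)
{
	/* p is fully initialized; smp_wmb() orders that against the store */
	rcu_assign_pointer(gp, p);
}

static void early_init(void)
{
	/* no concurrent readers yet (or value is NULL): no barrier needed */
	RCU_INIT_POINTER(gp, NULL);
}
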
diff --git a/include/linux/input.h b/include/linux/input.h
index 896a92227bc4..d6ae1761be97 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1196,7 +1196,7 @@ struct input_dev {
1196 int (*flush)(struct input_dev *dev, struct file *file); 1196 int (*flush)(struct input_dev *dev, struct file *file);
1197 int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); 1197 int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
1198 1198
1199 struct input_handle *grab; 1199 struct input_handle __rcu *grab;
1200 1200
1201 spinlock_t event_lock; 1201 spinlock_t event_lock;
1202 struct mutex mutex; 1202 struct mutex mutex;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 64d529133031..3e70b21884a9 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -53,7 +53,7 @@ struct io_context {
53 53
54 struct radix_tree_root radix_root; 54 struct radix_tree_root radix_root;
55 struct hlist_head cic_list; 55 struct hlist_head cic_list;
56 void *ioc_data; 56 void __rcu *ioc_data;
57}; 57};
58 58
59static inline struct io_context *ioc_task_link(struct io_context *ioc) 59static inline struct io_context *ioc_task_link(struct io_context *ioc)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2b0a35e6bc69..1759ba5adce8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -58,7 +58,18 @@ extern const char linux_proc_banner[];
58 58
59#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 59#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
61#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 61#define roundup(x, y) ( \
62{ \
63 typeof(y) __y = y; \
64 (((x) + (__y - 1)) / __y) * __y; \
65} \
66)
67#define rounddown(x, y) ( \
68{ \
69 typeof(x) __x = (x); \
70 __x - (__x % (y)); \
71} \
72)
62#define DIV_ROUND_CLOSEST(x, divisor)( \ 73#define DIV_ROUND_CLOSEST(x, divisor)( \
63{ \ 74{ \
64 typeof(divisor) __divisor = divisor; \ 75 typeof(divisor) __divisor = divisor; \
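
The rewritten roundup() now evaluates its second argument only once via a typeof() temporary (the old expansion evaluated y three times), and the new rounddown() truncates to the previous multiple. Both still assume integer operands and a nonzero y. A small usage sketch; the values are illustrative:

static size_t example(void)
{
	size_t len = 4000;
	size_t alloc = roundup(len, 4096);	/* -> 4096 */
	size_t base  = rounddown(len, 512);	/* -> 3584 */

	/* Because of the __y temporary, roundup(len, f()) now calls f()
	 * exactly once, so arguments with side effects behave sanely. */
	return alloc + base;
}
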
diff --git a/include/linux/key.h b/include/linux/key.h
index cd50dfa1d4c2..3db0adce1fda 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -178,8 +178,9 @@ struct key {
178 */ 178 */
179 union { 179 union {
180 unsigned long value; 180 unsigned long value;
181 void __rcu *rcudata;
181 void *data; 182 void *data;
182 struct keyring_list *subscriptions; 183 struct keyring_list __rcu *subscriptions;
183 } payload; 184 } payload;
184}; 185};
185 186
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c13cc48697aa..ac740b26eb10 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -205,7 +205,7 @@ struct kvm {
205 205
206 struct mutex irq_lock; 206 struct mutex irq_lock;
207#ifdef CONFIG_HAVE_KVM_IRQCHIP 207#ifdef CONFIG_HAVE_KVM_IRQCHIP
208 struct kvm_irq_routing_table *irq_routing; 208 struct kvm_irq_routing_table __rcu *irq_routing;
209 struct hlist_head mask_notifier_list; 209 struct hlist_head mask_notifier_list;
210 struct hlist_head irq_ack_notifier_list; 210 struct hlist_head irq_ack_notifier_list;
211#endif 211#endif
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06aed8305bf3..2186a64ee4b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -32,6 +32,17 @@ extern int lock_stat;
32#define MAX_LOCKDEP_SUBCLASSES 8UL 32#define MAX_LOCKDEP_SUBCLASSES 8UL
33 33
34/* 34/*
35 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
36 * cached in the instance of lockdep_map
37 *
38 * Currently the main class (subclass == 0) and the single-depth
39 * subclass are cached in lockdep_map. This optimization mainly
40 * targets rq->lock: double_rq_lock() acquires this highly
41 * contended lock with single depth.
42 */
43#define NR_LOCKDEP_CACHING_CLASSES 2
44
45/*
35 * Lock-classes are keyed via unique addresses, by embedding the 46 * Lock-classes are keyed via unique addresses, by embedding the
36 * lockclass-key into the kernel (or module) .data section. (For 47 * lockclass-key into the kernel (or module) .data section. (For
37 * static locks we use the lock address itself as the key.) 48 * static locks we use the lock address itself as the key.)
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class);
138 */ 149 */
139struct lockdep_map { 150struct lockdep_map {
140 struct lock_class_key *key; 151 struct lock_class_key *key;
141 struct lock_class *class_cache; 152 struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
142 const char *name; 153 const char *name;
143#ifdef CONFIG_LOCK_STAT 154#ifdef CONFIG_LOCK_STAT
144 int cpu; 155 int cpu;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ee7e258627f9..cb57d657ce4d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -299,7 +299,7 @@ struct mm_struct {
299 * new_owner->mm == mm 299 * new_owner->mm == mm
300 * new_owner->alloc_lock is held 300 * new_owner->alloc_lock is held
301 */ 301 */
302 struct task_struct *owner; 302 struct task_struct __rcu *owner;
303#endif 303#endif
304 304
305#ifdef CONFIG_PROC_FS 305#ifdef CONFIG_PROC_FS
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 9ed534c991b9..70cd0603911c 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -39,8 +39,9 @@ enum ctattr_type {
39 CTA_TUPLE_MASTER, 39 CTA_TUPLE_MASTER,
40 CTA_NAT_SEQ_ADJ_ORIG, 40 CTA_NAT_SEQ_ADJ_ORIG,
41 CTA_NAT_SEQ_ADJ_REPLY, 41 CTA_NAT_SEQ_ADJ_REPLY,
42 CTA_SECMARK, 42 CTA_SECMARK, /* obsolete */
43 CTA_ZONE, 43 CTA_ZONE,
44 CTA_SECCTX,
44 __CTA_MAX 45 __CTA_MAX
45}; 46};
46#define CTA_MAX (__CTA_MAX - 1) 47#define CTA_MAX (__CTA_MAX - 1)
@@ -172,4 +173,11 @@ enum ctattr_help {
172}; 173};
173#define CTA_HELP_MAX (__CTA_HELP_MAX - 1) 174#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
174 175
176enum ctattr_secctx {
177 CTA_SECCTX_UNSPEC,
178 CTA_SECCTX_NAME,
179 __CTA_SECCTX_MAX
180};
181#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
182
175#endif /* _IPCONNTRACK_NETLINK_H */ 183#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h
index 6fcd3448b186..989092bd6274 100644
--- a/include/linux/netfilter/xt_SECMARK.h
+++ b/include/linux/netfilter/xt_SECMARK.h
@@ -11,18 +11,12 @@
11 * packets are being marked for. 11 * packets are being marked for.
12 */ 12 */
13#define SECMARK_MODE_SEL 0x01 /* SELinux */ 13#define SECMARK_MODE_SEL 0x01 /* SELinux */
14#define SECMARK_SELCTX_MAX 256 14#define SECMARK_SECCTX_MAX 256
15
16struct xt_secmark_target_selinux_info {
17 __u32 selsid;
18 char selctx[SECMARK_SELCTX_MAX];
19};
20 15
21struct xt_secmark_target_info { 16struct xt_secmark_target_info {
22 __u8 mode; 17 __u8 mode;
23 union { 18 __u32 secid;
24 struct xt_secmark_target_selinux_info sel; 19 char secctx[SECMARK_SECCTX_MAX];
25 } u;
26}; 20};
27 21
28#endif /*_XT_SECMARK_H_target */ 22#endif /*_XT_SECMARK_H_target */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 508f8cf6da37..d0edf7d823ae 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,7 +185,7 @@ struct nfs_inode {
185 struct nfs4_cached_acl *nfs4_acl; 185 struct nfs4_cached_acl *nfs4_acl;
186 /* NFSv4 state */ 186 /* NFSv4 state */
187 struct list_head open_states; 187 struct list_head open_states;
188 struct nfs_delegation *delegation; 188 struct nfs_delegation __rcu *delegation;
189 fmode_t delegation_state; 189 fmode_t delegation_state;
190 struct rw_semaphore rwsem; 190 struct rw_semaphore rwsem;
191#endif /* CONFIG_NFS_V4*/ 191#endif /* CONFIG_NFS_V4*/
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b2f1a4d83550..2026f9e1ceb8 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -49,28 +49,28 @@
49 49
50struct notifier_block { 50struct notifier_block {
51 int (*notifier_call)(struct notifier_block *, unsigned long, void *); 51 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
52 struct notifier_block *next; 52 struct notifier_block __rcu *next;
53 int priority; 53 int priority;
54}; 54};
55 55
56struct atomic_notifier_head { 56struct atomic_notifier_head {
57 spinlock_t lock; 57 spinlock_t lock;
58 struct notifier_block *head; 58 struct notifier_block __rcu *head;
59}; 59};
60 60
61struct blocking_notifier_head { 61struct blocking_notifier_head {
62 struct rw_semaphore rwsem; 62 struct rw_semaphore rwsem;
63 struct notifier_block *head; 63 struct notifier_block __rcu *head;
64}; 64};
65 65
66struct raw_notifier_head { 66struct raw_notifier_head {
67 struct notifier_block *head; 67 struct notifier_block __rcu *head;
68}; 68};
69 69
70struct srcu_notifier_head { 70struct srcu_notifier_head {
71 struct mutex mutex; 71 struct mutex mutex;
72 struct srcu_struct srcu; 72 struct srcu_struct srcu;
73 struct notifier_block *head; 73 struct notifier_block __rcu *head;
74}; 74};
75 75
76#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ 76#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
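
The __rcu annotations on ->next and ->head document the existing locking design: atomic notifier chains are mutated under the head's spinlock but traversed under rcu_read_lock(), so callouts may run in atomic context. Usage is unchanged; a short reminder of the pattern, with chain and callback names illustrative:

static int my_event_cb(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	/* chain traversal runs under rcu_read_lock() */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_event_cb,
};

static ATOMIC_NOTIFIER_HEAD(my_chain);

static void example(void)
{
	atomic_notifier_chain_register(&my_chain, &my_nb);  /* takes the lock */
	atomic_notifier_call_chain(&my_chain, 1, NULL);     /* RCU walk */
	atomic_notifier_chain_unregister(&my_chain, &my_nb);
}
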
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 634b8e674ac5..a39cbed9ee17 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr)
47{ 47{
48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); 48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
49} 49}
50#define radix_tree_indirect_to_ptr(ptr) \
51 radix_tree_indirect_to_ptr((void __force *)(ptr))
50 52
51static inline int radix_tree_is_indirect_ptr(void *ptr) 53static inline int radix_tree_is_indirect_ptr(void *ptr)
52{ 54{
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
61struct radix_tree_root { 63struct radix_tree_root {
62 unsigned int height; 64 unsigned int height;
63 gfp_t gfp_mask; 65 gfp_t gfp_mask;
64 struct radix_tree_node *rnode; 66 struct radix_tree_node __rcu *rnode;
65}; 67};
66 68
67#define RADIX_TREE_INIT(mask) { \ 69#define RADIX_TREE_INIT(mask) { \
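
The self-referential macro added above is a sparse-only shim: a call site may now pass a __rcu-annotated slot value, the macro strips the address space with __force, and the name inside the replacement resolves to the inline function because a macro is not re-expanded within its own body. The same trick in isolation, with strip() as an illustrative name:

static inline void *strip(void *p)
{
	return p;
}
/* Accept annotated pointers at every call site without local casts;
 * 'strip' inside the body is the function, not the macro. */
#define strip(p) strip((void __force *)(p))
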
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4ec3b38ce9c5..f31ef61f1c65 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -10,6 +10,21 @@
10#include <linux/rcupdate.h> 10#include <linux/rcupdate.h>
11 11
12/* 12/*
13 * Why is there no list_empty_rcu()? Because list_empty() serves this
14 * purpose. The list_empty() function fetches the RCU-protected pointer
15 * and compares it to the address of the list head, but neither dereferences
16 * this pointer itself nor provides this pointer to the caller. Therefore,
17 * it is not necessary to use rcu_dereference(), so that list_empty() can
18 * be used anywhere you would want to use a list_empty_rcu().
19 */
20
21/*
22 * return the ->next pointer of a list_head in an rcu safe
23 * way; we must not access it directly
24 */
25#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
26
27/*
13 * Insert a new entry between two known consecutive entries. 28 * Insert a new entry between two known consecutive entries.
14 * 29 *
15 * This is only for internal list manipulation where we know 30 * This is only for internal list manipulation where we know
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new,
20{ 35{
21 new->next = next; 36 new->next = next;
22 new->prev = prev; 37 new->prev = prev;
23 rcu_assign_pointer(prev->next, new); 38 rcu_assign_pointer(list_next_rcu(prev), new);
24 next->prev = new; 39 next->prev = new;
25} 40}
26 41
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old,
138{ 153{
139 new->next = old->next; 154 new->next = old->next;
140 new->prev = old->prev; 155 new->prev = old->prev;
141 rcu_assign_pointer(new->prev->next, new); 156 rcu_assign_pointer(list_next_rcu(new->prev), new);
142 new->next->prev = new; 157 new->next->prev = new;
143 old->prev = LIST_POISON2; 158 old->prev = LIST_POISON2;
144} 159}
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
193 */ 208 */
194 209
195 last->next = at; 210 last->next = at;
196 rcu_assign_pointer(head->next, first); 211 rcu_assign_pointer(list_next_rcu(head), first);
197 first->prev = head; 212 first->prev = head;
198 at->prev = last; 213 at->prev = last;
199} 214}
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
208 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 223 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
209 */ 224 */
210#define list_entry_rcu(ptr, type, member) \ 225#define list_entry_rcu(ptr, type, member) \
211 container_of(rcu_dereference_raw(ptr), type, member) 226 ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
227 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
228 })
212 229
213/** 230/**
214 * list_first_entry_rcu - get the first element from a list 231 * list_first_entry_rcu - get the first element from a list
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
225 list_entry_rcu((ptr)->next, type, member) 242 list_entry_rcu((ptr)->next, type, member)
226 243
227#define __list_for_each_rcu(pos, head) \ 244#define __list_for_each_rcu(pos, head) \
228 for (pos = rcu_dereference_raw((head)->next); \ 245 for (pos = rcu_dereference_raw(list_next_rcu(head)); \
229 pos != (head); \ 246 pos != (head); \
230 pos = rcu_dereference_raw(pos->next)) 247 pos = rcu_dereference_raw(list_next_rcu((pos)))
231 248
232/** 249/**
233 * list_for_each_entry_rcu - iterate over rcu list of given type 250 * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
257 * as long as the traversal is guarded by rcu_read_lock(). 274 * as long as the traversal is guarded by rcu_read_lock().
258 */ 275 */
259#define list_for_each_continue_rcu(pos, head) \ 276#define list_for_each_continue_rcu(pos, head) \
260 for ((pos) = rcu_dereference_raw((pos)->next); \ 277 for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
261 prefetch((pos)->next), (pos) != (head); \ 278 prefetch((pos)->next), (pos) != (head); \
262 (pos) = rcu_dereference_raw((pos)->next)) 279 (pos) = rcu_dereference_raw(list_next_rcu(pos)))
263 280
264/** 281/**
265 * list_for_each_entry_continue_rcu - continue iteration over list of given type 282 * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
314 331
315 new->next = next; 332 new->next = next;
316 new->pprev = old->pprev; 333 new->pprev = old->pprev;
317 rcu_assign_pointer(*new->pprev, new); 334 rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
318 if (next) 335 if (next)
319 new->next->pprev = &new->next; 336 new->next->pprev = &new->next;
320 old->pprev = LIST_POISON2; 337 old->pprev = LIST_POISON2;
321} 338}
322 339
340/*
341 * return the first or the next element in an RCU protected hlist
342 */
343#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
344#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
345#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
346
323/** 347/**
324 * hlist_add_head_rcu 348 * hlist_add_head_rcu
325 * @n: the element to add to the hash list. 349 * @n: the element to add to the hash list.
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
346 370
347 n->next = first; 371 n->next = first;
348 n->pprev = &h->first; 372 n->pprev = &h->first;
349 rcu_assign_pointer(h->first, n); 373 rcu_assign_pointer(hlist_first_rcu(h), n);
350 if (first) 374 if (first)
351 first->pprev = &n->next; 375 first->pprev = &n->next;
352} 376}
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
374{ 398{
375 n->pprev = next->pprev; 399 n->pprev = next->pprev;
376 n->next = next; 400 n->next = next;
377 rcu_assign_pointer(*(n->pprev), n); 401 rcu_assign_pointer(hlist_pprev_rcu(n), n);
378 next->pprev = &n->next; 402 next->pprev = &n->next;
379} 403}
380 404
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
401{ 425{
402 n->next = prev->next; 426 n->next = prev->next;
403 n->pprev = &prev->next; 427 n->pprev = &prev->next;
404 rcu_assign_pointer(prev->next, n); 428 rcu_assign_pointer(hlist_next_rcu(prev), n);
405 if (n->next) 429 if (n->next)
406 n->next->pprev = &n->next; 430 n->next->pprev = &n->next;
407} 431}
408 432
409#define __hlist_for_each_rcu(pos, head) \ 433#define __hlist_for_each_rcu(pos, head) \
410 for (pos = rcu_dereference((head)->first); \ 434 for (pos = rcu_dereference(hlist_first_rcu(head)); \
411 pos && ({ prefetch(pos->next); 1; }); \ 435 pos && ({ prefetch(pos->next); 1; }); \
412 pos = rcu_dereference(pos->next)) 436 pos = rcu_dereference(hlist_next_rcu(pos)))
413 437
414/** 438/**
415 * hlist_for_each_entry_rcu - iterate over rcu list of given type 439 * hlist_for_each_entry_rcu - iterate over rcu list of given type
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
422 * the _rcu list-mutation primitives such as hlist_add_head_rcu() 446 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
423 * as long as the traversal is guarded by rcu_read_lock(). 447 * as long as the traversal is guarded by rcu_read_lock().
424 */ 448 */
425#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ 449#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
426 for (pos = rcu_dereference_raw((head)->first); \ 450 for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
427 pos && ({ prefetch(pos->next); 1; }) && \ 451 pos && ({ prefetch(pos->next); 1; }) && \
428 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 452 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
429 pos = rcu_dereference_raw(pos->next)) 453 pos = rcu_dereference_raw(hlist_next_rcu(pos)))
430 454
431/** 455/**
432 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type 456 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
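
list_next_rcu() gives every traversal and mutation primitive one sparse-visible spot where ->next is treated as __rcu, without changing the caller-facing API. A minimal reader/updater sketch under the usual assumptions (readers inside rcu_read_lock(), updaters serialized by a lock); my_list, my_lock, and struct item are illustrative:

struct item {
	int key;
	struct list_head link;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static bool find_key(int key)			/* lockless reader */
{
	struct item *it;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &my_list, link) {
		if (it->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void add_item(struct item *it)		/* updater */
{
	spin_lock(&my_lock);
	list_add_rcu(&it->link, &my_list);	/* rcu_assign_pointer inside */
	spin_unlock(&my_lock);
}
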
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index b70ffe53cb9f..2ae13714828b 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
37 } 37 }
38} 38}
39 39
40#define hlist_nulls_first_rcu(head) \
41 (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
42
43#define hlist_nulls_next_rcu(node) \
44 (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
45
40/** 46/**
41 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization 47 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
42 * @n: the element to delete from the hash list. 48 * @n: the element to delete from the hash list.
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
88 94
89 n->next = first; 95 n->next = first;
90 n->pprev = &h->first; 96 n->pprev = &h->first;
91 rcu_assign_pointer(h->first, n); 97 rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
92 if (!is_a_nulls(first)) 98 if (!is_a_nulls(first))
93 first->pprev = &n->next; 99 first->pprev = &n->next;
94} 100}
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
100 * @member: the name of the hlist_nulls_node within the struct. 106 * @member: the name of the hlist_nulls_node within the struct.
101 * 107 *
102 */ 108 */
103#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ 109#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
104 for (pos = rcu_dereference_raw((head)->first); \ 110 for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
105 (!is_a_nulls(pos)) && \ 111 (!is_a_nulls(pos)) && \
106 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ 112 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
107 pos = rcu_dereference_raw(pos->next)) 113 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
108 114
109#endif 115#endif
110#endif 116#endif
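
hlist_nulls chains end in an encoded "nulls" marker rather than NULL, so a reader whose object was freed and recycled onto another chain (the SLAB_DESTROY_BY_RCU case) can detect the move and restart. A hedged lookup sketch, loosely modeled on the networking hash-table lookups; struct entry and the bucket/slot naming are illustrative:

struct entry {
	int key;
	struct hlist_nulls_node node;	/* objects are SLAB_DESTROY_BY_RCU */
};

static struct entry *find_entry(struct hlist_nulls_head *bucket,
				unsigned long slot, int key)
{
	struct entry *e;
	struct hlist_nulls_node *pos;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(e, pos, bucket, node) {
		if (e->key == key)
			goto found;	/* real code must also take a
					 * reference and re-check the key */
	}
	/* Hit the end: if the nulls value is not this bucket's, the tail
	 * object was recycled onto another chain mid-walk; start over. */
	if (get_nulls_value(pos) != slot)
		goto begin;
	e = NULL;
found:
	rcu_read_unlock();
	return e;
}
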
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 83af1f8d8b74..03cda7bed985 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,11 +41,15 @@
41#include <linux/lockdep.h> 41#include <linux/lockdep.h>
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/debugobjects.h> 43#include <linux/debugobjects.h>
44#include <linux/compiler.h>
44 45
45#ifdef CONFIG_RCU_TORTURE_TEST 46#ifdef CONFIG_RCU_TORTURE_TEST
46extern int rcutorture_runnable; /* for sysctl */ 47extern int rcutorture_runnable; /* for sysctl */
47#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ 48#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
48 49
50#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
51#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
52
49/** 53/**
50 * struct rcu_head - callback structure for use with RCU 54 * struct rcu_head - callback structure for use with RCU
51 * @next: next update requests in a list 55 * @next: next update requests in a list
@@ -57,29 +61,94 @@ struct rcu_head {
57}; 61};
58 62
59/* Exported common interfaces */ 63/* Exported common interfaces */
60extern void rcu_barrier(void); 64extern void call_rcu_sched(struct rcu_head *head,
65 void (*func)(struct rcu_head *rcu));
66extern void synchronize_sched(void);
61extern void rcu_barrier_bh(void); 67extern void rcu_barrier_bh(void);
62extern void rcu_barrier_sched(void); 68extern void rcu_barrier_sched(void);
63extern void synchronize_sched_expedited(void); 69extern void synchronize_sched_expedited(void);
64extern int sched_expedited_torture_stats(char *page); 70extern int sched_expedited_torture_stats(char *page);
65 71
72static inline void __rcu_read_lock_bh(void)
73{
74 local_bh_disable();
75}
76
77static inline void __rcu_read_unlock_bh(void)
78{
79 local_bh_enable();
80}
81
82#ifdef CONFIG_PREEMPT_RCU
83
84extern void __rcu_read_lock(void);
85extern void __rcu_read_unlock(void);
86void synchronize_rcu(void);
87
88/*
89 * Defined as a macro because this is a very low-level header, included
90 * from areas that don't even know about current. It gives the rcu_read_lock()
91 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
92 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
93 */
94#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
95
96#else /* #ifdef CONFIG_PREEMPT_RCU */
97
98static inline void __rcu_read_lock(void)
99{
100 preempt_disable();
101}
102
103static inline void __rcu_read_unlock(void)
104{
105 preempt_enable();
106}
107
108static inline void synchronize_rcu(void)
109{
110 synchronize_sched();
111}
112
113static inline int rcu_preempt_depth(void)
114{
115 return 0;
116}
117
118#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
119
66/* Internal to kernel */ 120/* Internal to kernel */
67extern void rcu_init(void); 121extern void rcu_init(void);
122extern void rcu_sched_qs(int cpu);
123extern void rcu_bh_qs(int cpu);
124extern void rcu_check_callbacks(int cpu, int user);
125struct notifier_block;
126
127#ifdef CONFIG_NO_HZ
128
129extern void rcu_enter_nohz(void);
130extern void rcu_exit_nohz(void);
131
132#else /* #ifdef CONFIG_NO_HZ */
133
134static inline void rcu_enter_nohz(void)
135{
136}
137
138static inline void rcu_exit_nohz(void)
139{
140}
141
142#endif /* #else #ifdef CONFIG_NO_HZ */
68 143
69#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 144#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
70#include <linux/rcutree.h> 145#include <linux/rcutree.h>
71#elif defined(CONFIG_TINY_RCU) 146#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
72#include <linux/rcutiny.h> 147#include <linux/rcutiny.h>
73#else 148#else
74#error "Unknown RCU implementation specified to kernel configuration" 149#error "Unknown RCU implementation specified to kernel configuration"
75#endif 150#endif
76 151
77#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
78#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
79#define INIT_RCU_HEAD(ptr) do { \
80 (ptr)->next = NULL; (ptr)->func = NULL; \
81} while (0)
82
83/* 152/*
84 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic 153 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
85 * initialization and destruction of rcu_head on the stack. rcu_head structures 154 * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map;
120extern int debug_lockdep_rcu_enabled(void); 189extern int debug_lockdep_rcu_enabled(void);
121 190
122/** 191/**
123 * rcu_read_lock_held - might we be in RCU read-side critical section? 192 * rcu_read_lock_held() - might we be in RCU read-side critical section?
124 * 193 *
125 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU 194 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
126 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 195 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
127 * this assumes we are in an RCU read-side critical section unless it can 196 * this assumes we are in an RCU read-side critical section unless it can
128 * prove otherwise. 197 * prove otherwise. This is useful for debug checks in functions that
198 * require that they be called within an RCU read-side critical section.
129 * 199 *
130 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 200 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
131 * and while lockdep is disabled. 201 * and while lockdep is disabled.
132 */ 202 */
133static inline int rcu_read_lock_held(void) 203static inline int rcu_read_lock_held(void)
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void)
144extern int rcu_read_lock_bh_held(void); 214extern int rcu_read_lock_bh_held(void);
145 215
146/** 216/**
147 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? 217 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
148 * 218 *
149 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an 219 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
150 * RCU-sched read-side critical section. In absence of 220 * RCU-sched read-side critical section. In absence of
151 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side 221 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
152 * critical section unless it can prove otherwise. Note that disabling 222 * critical section unless it can prove otherwise. Note that disabling
153 * of preemption (including disabling irqs) counts as an RCU-sched 223 * of preemption (including disabling irqs) counts as an RCU-sched
154 * read-side critical section. 224 * read-side critical section. This is useful for debug checks in functions
225 * that required that they be called within an RCU-sched read-side
226 * critical section.
155 * 227 *
156 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 228 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
157 * and while lockdep is disabled. 229 * and while lockdep is disabled.
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void)
211 283
212extern int rcu_my_thread_group_empty(void); 284extern int rcu_my_thread_group_empty(void);
213 285
214#define __do_rcu_dereference_check(c) \ 286/**
287 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
288 * @c: condition to check
289 */
290#define rcu_lockdep_assert(c) \
215 do { \ 291 do { \
216 static bool __warned; \ 292 static bool __warned; \
217 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ 293 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
@@ -220,41 +296,163 @@ extern int rcu_my_thread_group_empty(void);
220 } \ 296 } \
221 } while (0) 297 } while (0)
222 298
299#else /* #ifdef CONFIG_PROVE_RCU */
300
301#define rcu_lockdep_assert(c) do { } while (0)
302
303#endif /* #else #ifdef CONFIG_PROVE_RCU */
304
305/*
306 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
307 * and rcu_assign_pointer(). Some of these could be folded into their
308 * callers, but they are left separate in order to ease introduction of
309 * multiple flavors of pointers to match the multiple flavors of RCU
310 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
311 * the future.
312 */
313
314#ifdef __CHECKER__
315#define rcu_dereference_sparse(p, space) \
316 ((void)(((typeof(*p) space *)p) == p))
317#else /* #ifdef __CHECKER__ */
318#define rcu_dereference_sparse(p, space)
319#endif /* #else #ifdef __CHECKER__ */
320
321#define __rcu_access_pointer(p, space) \
322 ({ \
323 typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
324 rcu_dereference_sparse(p, space); \
325 ((typeof(*p) __force __kernel *)(_________p1)); \
326 })
327#define __rcu_dereference_check(p, c, space) \
328 ({ \
329 typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
330 rcu_lockdep_assert(c); \
331 rcu_dereference_sparse(p, space); \
332 smp_read_barrier_depends(); \
333 ((typeof(*p) __force __kernel *)(_________p1)); \
334 })
335#define __rcu_dereference_protected(p, c, space) \
336 ({ \
337 rcu_lockdep_assert(c); \
338 rcu_dereference_sparse(p, space); \
339 ((typeof(*p) __force __kernel *)(p)); \
340 })
341
342#define __rcu_dereference_index_check(p, c) \
343 ({ \
344 typeof(p) _________p1 = ACCESS_ONCE(p); \
345 rcu_lockdep_assert(c); \
346 smp_read_barrier_depends(); \
347 (_________p1); \
348 })
349#define __rcu_assign_pointer(p, v, space) \
350 ({ \
351 if (!__builtin_constant_p(v) || \
352 ((v) != NULL)) \
353 smp_wmb(); \
354 (p) = (typeof(*v) __force space *)(v); \
355 })
356
357
358/**
359 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
360 * @p: The pointer to read
361 *
362 * Return the value of the specified RCU-protected pointer, but omit the
363 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
364 * when the value of this pointer is accessed, but the pointer is not
365 * dereferenced, for example, when testing an RCU-protected pointer against
366 * NULL. Although rcu_access_pointer() may also be used in cases where
367 * update-side locks prevent the value of the pointer from changing, you
368 * should instead use rcu_dereference_protected() for this use case.
369 */
370#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
371
223/** 372/**
224 * rcu_dereference_check - rcu_dereference with debug checking 373 * rcu_dereference_check() - rcu_dereference with debug checking
225 * @p: The pointer to read, prior to dereferencing 374 * @p: The pointer to read, prior to dereferencing
226 * @c: The conditions under which the dereference will take place 375 * @c: The conditions under which the dereference will take place
227 * 376 *
228 * Do an rcu_dereference(), but check that the conditions under which the 377 * Do an rcu_dereference(), but check that the conditions under which the
229 * dereference will take place are correct. Typically the conditions indicate 378 * dereference will take place are correct. Typically the conditions
230 * the various locking conditions that should be held at that point. The check 379 * indicate the various locking conditions that should be held at that
231 * should return true if the conditions are satisfied. 380 * point. The check should return true if the conditions are satisfied.
381 * An implicit check for being in an RCU read-side critical section
382 * (rcu_read_lock()) is included.
232 * 383 *
233 * For example: 384 * For example:
234 * 385 *
235 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || 386 * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
236 * lockdep_is_held(&foo->lock));
237 * 387 *
238 * could be used to indicate to lockdep that foo->bar may only be dereferenced 388 * could be used to indicate to lockdep that foo->bar may only be dereferenced
239 * if either the RCU read lock is held, or that the lock required to replace 389 * if either rcu_read_lock() is held, or that the lock required to replace
240 * the bar struct at foo->bar is held. 390 * the bar struct at foo->bar is held.
241 * 391 *
242 * Note that the list of conditions may also include indications of when a lock 392 * Note that the list of conditions may also include indications of when a lock
243 * need not be held, for example during initialisation or destruction of the 393 * need not be held, for example during initialisation or destruction of the
244 * target struct: 394 * target struct:
245 * 395 *
246 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || 396 * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
247 * lockdep_is_held(&foo->lock) ||
248 * atomic_read(&foo->usage) == 0); 397 * atomic_read(&foo->usage) == 0);
398 *
399 * Inserts memory barriers on architectures that require them
400 * (currently only the Alpha), prevents the compiler from refetching
401 * (and from merging fetches), and, more importantly, documents exactly
402 * which pointers are protected by RCU and checks that the pointer is
403 * annotated as __rcu.
249 */ 404 */
250#define rcu_dereference_check(p, c) \ 405#define rcu_dereference_check(p, c) \
251 ({ \ 406 __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
252 __do_rcu_dereference_check(c); \ 407
253 rcu_dereference_raw(p); \ 408/**
254 }) 409 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
410 * @p: The pointer to read, prior to dereferencing
411 * @c: The conditions under which the dereference will take place
412 *
413 * This is the RCU-bh counterpart to rcu_dereference_check().
414 */
415#define rcu_dereference_bh_check(p, c) \
416 __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
255 417
256/** 418/**
257 * rcu_dereference_protected - fetch RCU pointer when updates prevented 419 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
420 * @p: The pointer to read, prior to dereferencing
421 * @c: The conditions under which the dereference will take place
422 *
423 * This is the RCU-sched counterpart to rcu_dereference_check().
424 */
425#define rcu_dereference_sched_check(p, c) \
426 __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
427 __rcu)
428
429#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
430
431/**
432 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
433 * @p: The pointer to read, prior to dereferencing
434 * @c: The conditions under which the dereference will take place
435 *
436 * Similar to rcu_dereference_check(), but omits the sparse checking.
437 * This allows rcu_dereference_index_check() to be used on integers,
438 * which can then be used as array indices. Attempting to use
439 * rcu_dereference_check() on an integer will give compiler warnings
440 * because the sparse address-space mechanism relies on dereferencing
441 * the RCU-protected pointer. Dereferencing integers is not something
442 * that even gcc will put up with.
443 *
444 * Note that this function does not implicitly check for RCU read-side
445 * critical sections. If this function gains lots of uses, it might
446 * make sense to provide versions for each flavor of RCU, but it does
447 * not make sense as of early 2010.
448 */
449#define rcu_dereference_index_check(p, c) \
450 __rcu_dereference_index_check((p), (c))
451
452/**
453 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
454 * @p: The pointer to read, prior to dereferencing
455 * @c: The conditions under which the dereference will take place
258 * 456 *
259 * Return the value of the specified RCU-protected pointer, but omit 457 * Return the value of the specified RCU-protected pointer, but omit
260 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This 458 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
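
The split into __rcu_dereference_check() and __rcu_dereference_protected() separates what each side needs: readers get ACCESS_ONCE() plus smp_read_barrier_depends(), while updaters holding the lock get neither, only the lockdep condition and the sparse __rcu check. A hedged sketch of both sides of one pointer; struct conf, cur_conf, and conf_lock are illustrative:

struct conf {
	int threshold;
};

static struct conf __rcu *cur_conf;
static DEFINE_SPINLOCK(conf_lock);

static int read_threshold(void)			/* reader */
{
	struct conf *c;
	int t = 0;

	rcu_read_lock();
	c = rcu_dereference(cur_conf);	/* implies rcu_read_lock_held() */
	if (c)
		t = c->threshold;
	rcu_read_unlock();
	return t;
}

static void set_threshold(int t)		/* updater */
{
	struct conf *old, *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return;
	new->threshold = t;

	spin_lock(&conf_lock);
	/* the lock, not RCU, forbids changes: no barrier, no ACCESS_ONCE */
	old = rcu_dereference_protected(cur_conf,
					lockdep_is_held(&conf_lock));
	rcu_assign_pointer(cur_conf, new);	/* publish with smp_wmb() */
	spin_unlock(&conf_lock);

	synchronize_rcu();			/* wait out existing readers */
	kfree(old);
}
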
@@ -263,35 +461,61 @@ extern int rcu_my_thread_group_empty(void);
263 * prevent the compiler from repeating this reference or combining it 461 * prevent the compiler from repeating this reference or combining it
264 * with other references, so it should not be used without protection 462 * with other references, so it should not be used without protection
265 * of appropriate locks. 463 * of appropriate locks.
464 *
465 * This function is only for update-side use. Using this function
466 * when protected only by rcu_read_lock() will result in infrequent
467 * but very ugly failures.
266 */ 468 */
267#define rcu_dereference_protected(p, c) \ 469#define rcu_dereference_protected(p, c) \
268 ({ \ 470 __rcu_dereference_protected((p), (c), __rcu)
269 __do_rcu_dereference_check(c); \
270 (p); \
271 })
272 471
273#else /* #ifdef CONFIG_PROVE_RCU */ 472/**
473 * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
474 * @p: The pointer to read, prior to dereferencing
475 * @c: The conditions under which the dereference will take place
476 *
477 * This is the RCU-bh counterpart to rcu_dereference_protected().
478 */
479#define rcu_dereference_bh_protected(p, c) \
480 __rcu_dereference_protected((p), (c), __rcu)
274 481
275#define rcu_dereference_check(p, c) rcu_dereference_raw(p) 482/**
276#define rcu_dereference_protected(p, c) (p) 483 * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
484 * @p: The pointer to read, prior to dereferencing
485 * @c: The conditions under which the dereference will take place
486 *
487 * This is the RCU-sched counterpart to rcu_dereference_protected().
488 */
489#define rcu_dereference_sched_protected(p, c) \
490 __rcu_dereference_protected((p), (c), __rcu)
277 491
278#endif /* #else #ifdef CONFIG_PROVE_RCU */
279 492
280/** 493/**
281 * rcu_access_pointer - fetch RCU pointer with no dereferencing 494 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
495 * @p: The pointer to read, prior to dereferencing
282 * 496 *
283 * Return the value of the specified RCU-protected pointer, but omit the 497 * This is a simple wrapper around rcu_dereference_check().
284 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful 498 */
285 * when the value of this pointer is accessed, but the pointer is not 499#define rcu_dereference(p) rcu_dereference_check(p, 0)
286 * dereferenced, for example, when testing an RCU-protected pointer against 500
287 * NULL. This may also be used in cases where update-side locks prevent 501/**
288 * the value of the pointer from changing, but rcu_dereference_protected() 502 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
289 * is a lighter-weight primitive for this use case. 503 * @p: The pointer to read, prior to dereferencing
504 *
505 * Makes rcu_dereference_check() do the dirty work.
506 */
507#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
508
509/**
510 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
511 * @p: The pointer to read, prior to dereferencing
512 *
513 * Makes rcu_dereference_check() do the dirty work.
290 */ 514 */
291#define rcu_access_pointer(p) ACCESS_ONCE(p) 515#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
292 516
293/** 517/**
294 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 518 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
295 * 519 *
296 * When synchronize_rcu() is invoked on one CPU while other CPUs 520 * When synchronize_rcu() is invoked on one CPU while other CPUs
297 * are within RCU read-side critical sections, then the 521 * are within RCU read-side critical sections, then the
@@ -302,7 +526,7 @@ extern int rcu_my_thread_group_empty(void);
302 * until after all the other CPUs exit their critical sections. 526 * until after all the other CPUs exit their critical sections.
303 * 527 *
304 * Note, however, that RCU callbacks are permitted to run concurrently 528 * Note, however, that RCU callbacks are permitted to run concurrently
305 * with RCU read-side critical sections. One way that this can happen 529 * with new RCU read-side critical sections. One way that this can happen
306 * is via the following sequence of events: (1) CPU 0 enters an RCU 530 * is via the following sequence of events: (1) CPU 0 enters an RCU
307 * read-side critical section, (2) CPU 1 invokes call_rcu() to register 531 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
308 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, 532 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
@@ -317,7 +541,20 @@ extern int rcu_my_thread_group_empty(void);
317 * will be deferred until the outermost RCU read-side critical section 541 * will be deferred until the outermost RCU read-side critical section
318 * completes. 542 * completes.
319 * 543 *
320 * It is illegal to block while in an RCU read-side critical section. 544 * You can avoid reading and understanding the next paragraph by
545 * following this rule: don't put anything in an rcu_read_lock() RCU
546 * read-side critical section that would block in a !PREEMPT kernel.
547 * But if you want the full story, read on!
548 *
549 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
550 * is illegal to block while in an RCU read-side critical section. In
551 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
552 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
553 * be preempted, but explicit blocking is illegal. Finally, in preemptible
554 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
555 * RCU read-side critical sections may be preempted and they may also
556 * block, but only when acquiring spinlocks that are subject to priority
557 * inheritance.
321 */ 558 */
322static inline void rcu_read_lock(void) 559static inline void rcu_read_lock(void)
323{ 560{
@@ -337,7 +574,7 @@ static inline void rcu_read_lock(void)
337 */ 574 */
338 575
339/** 576/**
340 * rcu_read_unlock - marks the end of an RCU read-side critical section. 577 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
341 * 578 *
342 * See rcu_read_lock() for more information. 579 * See rcu_read_lock() for more information.
343 */ 580 */
@@ -349,15 +586,16 @@ static inline void rcu_read_unlock(void)
349} 586}
350 587
351/** 588/**
352 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section 589 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
353 * 590 *
354 * This is the equivalent of rcu_read_lock(), but to be used when updates 591 * This is the equivalent of rcu_read_lock(), but to be used when updates
355 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks 592 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
356 * consider completion of a softirq handler to be a quiescent state, 593 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
357 * a process in RCU read-side critical section must be protected by 594 * softirq handler to be a quiescent state, a process in RCU read-side
358 * disabling softirqs. Read-side critical sections in interrupt context 595 * critical section must be protected by disabling softirqs. Read-side
359 * can use just rcu_read_lock(). 596 * critical sections in interrupt context can use just rcu_read_lock(),
360 * 597 * though this should at least be commented to avoid confusing people
598 * reading the code.
361 */ 599 */
362static inline void rcu_read_lock_bh(void) 600static inline void rcu_read_lock_bh(void)
363{ 601{
@@ -379,13 +617,12 @@ static inline void rcu_read_unlock_bh(void)
379} 617}
380 618
381/** 619/**
382 * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section 620 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
383 * 621 *
384 * Should be used with either 622 * This is equivalent to rcu_read_lock(), but to be used when updates
385 * - synchronize_sched() 623 * are being done using call_rcu_sched() or synchronize_rcu_sched().
386 * or 624 * Read-side critical sections can also be introduced by anything that
387 * - call_rcu_sched() and rcu_barrier_sched() 625 * disables preemption, including local_irq_disable() and friends.
388 on the write-side to ensure proper synchronization.
389 */ 626 */
390static inline void rcu_read_lock_sched(void) 627static inline void rcu_read_lock_sched(void)
391{ 628{
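A matching RCU-sched sketch (editorial, not part of this patch; global_cfg and its type are hypothetical); disabling preemption is what marks the reader, so the updater must use call_rcu_sched() or synchronize_sched() before freeing:

	/* Illustrative only; names are hypothetical. */
	struct cfg {
		int value;
	};

	static struct cfg __rcu *global_cfg;

	static int read_cfg_value(void)
	{
		int val;

		rcu_read_lock_sched();		/* preemption disabled */
		val = rcu_dereference_sched(global_cfg)->value;
		rcu_read_unlock_sched();
		return val;
	}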
@@ -420,54 +657,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
420 preempt_enable_notrace(); 657 preempt_enable_notrace();
421} 658}
422 659
423
424/** 660/**
425 * rcu_dereference_raw - fetch an RCU-protected pointer 661 * rcu_assign_pointer() - assign to RCU-protected pointer
662 * @p: pointer to assign to
663 * @v: value to assign (publish)
426 * 664 *
427 * The caller must be within some flavor of RCU read-side critical 665 * Assigns the specified value to the specified RCU-protected
428 * section, or must be otherwise preventing the pointer from changing, 666 * pointer, ensuring that any concurrent RCU readers will see
429 * for example, by holding an appropriate lock. This pointer may later 667 * any prior initialization. Returns the value assigned.
430 * be safely dereferenced. It is the caller's responsibility to have
431 * done the right thing, as this primitive does no checking of any kind.
432 *
433 * Inserts memory barriers on architectures that require them
434 * (currently only the Alpha), and, more importantly, documents
435 * exactly which pointers are protected by RCU.
436 */
437#define rcu_dereference_raw(p) ({ \
438 typeof(p) _________p1 = ACCESS_ONCE(p); \
439 smp_read_barrier_depends(); \
440 (_________p1); \
441 })
442
443/**
444 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
445 *
446 * Makes rcu_dereference_check() do the dirty work.
447 */
448#define rcu_dereference(p) \
449 rcu_dereference_check(p, rcu_read_lock_held())
450
451/**
452 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
453 *
454 * Makes rcu_dereference_check() do the dirty work.
455 */
456#define rcu_dereference_bh(p) \
457 rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled())
458
459/**
460 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
461 *
462 * Makes rcu_dereference_check() do the dirty work.
463 */
464#define rcu_dereference_sched(p) \
465 rcu_dereference_check(p, rcu_read_lock_sched_held())
466
467/**
468 * rcu_assign_pointer - assign (publicize) a pointer to a newly
469 * initialized structure that will be dereferenced by RCU read-side
470 * critical sections. Returns the value assigned.
471 * 668 *
472 * Inserts memory barriers on architectures that require them 669 * Inserts memory barriers on architectures that require them
473 * (pretty much all of them other than x86), and also prevents 670 * (pretty much all of them other than x86), and also prevents
@@ -476,14 +673,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
476 * call documents which pointers will be dereferenced by RCU read-side 673 * call documents which pointers will be dereferenced by RCU read-side
477 * code. 674 * code.
478 */ 675 */
479
480#define rcu_assign_pointer(p, v) \ 676#define rcu_assign_pointer(p, v) \
481 ({ \ 677 __rcu_assign_pointer((p), (v), __rcu)
482 if (!__builtin_constant_p(v) || \ 678
483 ((v) != NULL)) \ 679/**
484 smp_wmb(); \ 680 * RCU_INIT_POINTER() - initialize an RCU protected pointer
485 (p) = (v); \ 681 *
486 }) 682 * Initialize an RCU-protected pointer in such a way as to avoid RCU-lockdep
683 * splats.
684 */
685#define RCU_INIT_POINTER(p, v) \
686 p = (typeof(*v) __force __rcu *)(v)
487 687
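To show the publish side in context (an editorial sketch, not part of this patch, reusing the hypothetical gbl_foo from above and an invented foo_lock), note how rcu_assign_pointer() is the last step of initialization, so readers never see a half-initialized structure:

	/* Illustrative only; names are hypothetical. */
	static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters */

	static void foo_update_a(int new_a)
	{
		struct foo *newp, *oldp;

		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		if (!newp)
			return;
		spin_lock(&foo_lock);
		oldp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
		*newp = *oldp;				/* copy the old contents... */
		newp->a = new_a;			/* ...then apply the update */
		rcu_assign_pointer(gbl_foo, newp);	/* publish: barrier before store */
		spin_unlock(&foo_lock);
		synchronize_rcu();			/* wait out pre-existing readers */
		kfree(oldp);
	}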
488/* Infrastructure to implement the synchronize_() primitives. */ 688/* Infrastructure to implement the synchronize_() primitives. */
489 689
@@ -494,26 +694,37 @@ struct rcu_synchronize {
494 694
495extern void wakeme_after_rcu(struct rcu_head *head); 695extern void wakeme_after_rcu(struct rcu_head *head);
496 696
697#ifdef CONFIG_PREEMPT_RCU
698
497/** 699/**
498 * call_rcu - Queue an RCU callback for invocation after a grace period. 700 * call_rcu() - Queue an RCU callback for invocation after a grace period.
499 * @head: structure to be used for queueing the RCU updates. 701 * @head: structure to be used for queueing the RCU updates.
500 * @func: actual update function to be invoked after the grace period 702 * @func: actual callback function to be invoked after the grace period
501 * 703 *
502 * The update function will be invoked some time after a full grace 704 * The callback function will be invoked some time after a full grace
503 * period elapses, in other words after all currently executing RCU 705 * period elapses, in other words after all pre-existing RCU read-side
504 * read-side critical sections have completed. RCU read-side critical 706 * critical sections have completed. However, the callback function
707 * might well execute concurrently with RCU read-side critical sections
708 * that started after call_rcu() was invoked. RCU read-side critical
505 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), 709 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
506 * and may be nested. 710 * and may be nested.
507 */ 711 */
508extern void call_rcu(struct rcu_head *head, 712extern void call_rcu(struct rcu_head *head,
509 void (*func)(struct rcu_head *head)); 713 void (*func)(struct rcu_head *head));
510 714
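The classic call_rcu() pattern looks like this (editorial sketch, not part of this patch; foo_reclaim() and foo_delete() are invented names); the callback must not assume that no readers are running, only that none of the pre-existing ones are:

	/* Illustrative only; names are hypothetical. */
	static void foo_reclaim(struct rcu_head *rp)
	{
		struct foo *fp = container_of(rp, struct foo, rcu);

		kfree(fp);			/* runs after a full grace period */
	}

	static void foo_delete(struct foo *fp)
	{
		/* ...unlink fp from every RCU-readable structure first... */
		call_rcu(&fp->rcu, foo_reclaim);	/* non-blocking deferred free */
	}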
715#else /* #ifdef CONFIG_PREEMPT_RCU */
716
717/* In classic RCU, call_rcu() is just call_rcu_sched(). */
718#define call_rcu call_rcu_sched
719
720#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
721
511/** 722/**
512 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period. 723 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
513 * @head: structure to be used for queueing the RCU updates. 724 * @head: structure to be used for queueing the RCU updates.
514 * @func: actual update function to be invoked after the grace period 725 * @func: actual callback function to be invoked after the grace period
515 * 726 *
516 * The update function will be invoked some time after a full grace 727 * The callback function will be invoked some time after a full grace
517 * period elapses, in other words after all currently executing RCU 728 * period elapses, in other words after all currently executing RCU
518 * read-side critical sections have completed. call_rcu_bh() assumes 729 * read-side critical sections have completed. call_rcu_bh() assumes
519 * that the read-side critical sections end on completion of a softirq 730 * that the read-side critical sections end on completion of a softirq
@@ -566,37 +777,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
566} 777}
567#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 778#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
568 779
569#ifndef CONFIG_PROVE_RCU
570#define __do_rcu_dereference_check(c) do { } while (0)
571#endif /* #ifdef CONFIG_PROVE_RCU */
572
573#define __rcu_dereference_index_check(p, c) \
574 ({ \
575 typeof(p) _________p1 = ACCESS_ONCE(p); \
576 __do_rcu_dereference_check(c); \
577 smp_read_barrier_depends(); \
578 (_________p1); \
579 })
580
581/**
582 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
583 * @p: The pointer to read, prior to dereferencing
584 * @c: The conditions under which the dereference will take place
585 *
586 * Similar to rcu_dereference_check(), but omits the sparse checking.
587 * This allows rcu_dereference_index_check() to be used on integers,
588 * which can then be used as array indices. Attempting to use
589 * rcu_dereference_check() on an integer will give compiler warnings
590 * because the sparse address-space mechanism relies on dereferencing
591 * the RCU-protected pointer. Dereferencing integers is not something
592 * that even gcc will put up with.
593 *
594 * Note that this function does not implicitly check for RCU read-side
595 * critical sections. If this function gains lots of uses, it might
596 * make sense to provide versions for each flavor of RCU, but it does
597 * not make sense as of early 2010.
598 */
599#define rcu_dereference_index_check(p, c) \
600 __rcu_dereference_index_check((p), (c))
601
602#endif /* __LINUX_RCUPDATE_H */ 780#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e2e893144a84..13877cb93a60 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,103 +27,101 @@
27 27
28#include <linux/cache.h> 28#include <linux/cache.h>
29 29
30void rcu_sched_qs(int cpu); 30#define rcu_init_sched() do { } while (0)
31void rcu_bh_qs(int cpu);
32static inline void rcu_note_context_switch(int cpu)
33{
34 rcu_sched_qs(cpu);
35}
36 31
37#define __rcu_read_lock() preempt_disable() 32#ifdef CONFIG_TINY_RCU
38#define __rcu_read_unlock() preempt_enable()
39#define __rcu_read_lock_bh() local_bh_disable()
40#define __rcu_read_unlock_bh() local_bh_enable()
41#define call_rcu_sched call_rcu
42 33
43#define rcu_init_sched() do { } while (0) 34static inline void synchronize_rcu_expedited(void)
44extern void rcu_check_callbacks(int cpu, int user); 35{
36 synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
37}
45 38
46static inline int rcu_needs_cpu(int cpu) 39static inline void rcu_barrier(void)
47{ 40{
48 return 0; 41 rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
49} 42}
50 43
51/* 44#else /* #ifdef CONFIG_TINY_RCU */
52 * Return the number of grace periods. 45
53 */ 46void rcu_barrier(void);
54static inline long rcu_batches_completed(void) 47void synchronize_rcu_expedited(void);
48
49#endif /* #else #ifdef CONFIG_TINY_RCU */
50
51static inline void synchronize_rcu_bh(void)
55{ 52{
56 return 0; 53 synchronize_sched();
57} 54}
58 55
59/* 56static inline void synchronize_rcu_bh_expedited(void)
60 * Return the number of bottom-half grace periods.
61 */
62static inline long rcu_batches_completed_bh(void)
63{ 57{
64 return 0; 58 synchronize_sched();
65} 59}
66 60
67static inline void rcu_force_quiescent_state(void) 61#ifdef CONFIG_TINY_RCU
62
63static inline void rcu_preempt_note_context_switch(void)
68{ 64{
69} 65}
70 66
71static inline void rcu_bh_force_quiescent_state(void) 67static inline void exit_rcu(void)
72{ 68{
73} 69}
74 70
75static inline void rcu_sched_force_quiescent_state(void) 71static inline int rcu_needs_cpu(int cpu)
76{ 72{
73 return 0;
77} 74}
78 75
79extern void synchronize_sched(void); 76#else /* #ifdef CONFIG_TINY_RCU */
77
78void rcu_preempt_note_context_switch(void);
79extern void exit_rcu(void);
80int rcu_preempt_needs_cpu(void);
80 81
81static inline void synchronize_rcu(void) 82static inline int rcu_needs_cpu(int cpu)
82{ 83{
83 synchronize_sched(); 84 return rcu_preempt_needs_cpu();
84} 85}
85 86
86static inline void synchronize_rcu_bh(void) 87#endif /* #else #ifdef CONFIG_TINY_RCU */
88
89static inline void rcu_note_context_switch(int cpu)
87{ 90{
88 synchronize_sched(); 91 rcu_sched_qs(cpu);
92 rcu_preempt_note_context_switch();
89} 93}
90 94
91static inline void synchronize_rcu_expedited(void) 95/*
96 * Return the number of grace periods.
97 */
98static inline long rcu_batches_completed(void)
92{ 99{
93 synchronize_sched(); 100 return 0;
94} 101}
95 102
96static inline void synchronize_rcu_bh_expedited(void) 103/*
104 * Return the number of bottom-half grace periods.
105 */
106static inline long rcu_batches_completed_bh(void)
97{ 107{
98 synchronize_sched(); 108 return 0;
99} 109}
100 110
101struct notifier_block; 111static inline void rcu_force_quiescent_state(void)
102
103#ifdef CONFIG_NO_HZ
104
105extern void rcu_enter_nohz(void);
106extern void rcu_exit_nohz(void);
107
108#else /* #ifdef CONFIG_NO_HZ */
109
110static inline void rcu_enter_nohz(void)
111{ 112{
112} 113}
113 114
114static inline void rcu_exit_nohz(void) 115static inline void rcu_bh_force_quiescent_state(void)
115{ 116{
116} 117}
117 118
118#endif /* #else #ifdef CONFIG_NO_HZ */ 119static inline void rcu_sched_force_quiescent_state(void)
119
120static inline void exit_rcu(void)
121{ 120{
122} 121}
123 122
124static inline int rcu_preempt_depth(void) 123static inline void rcu_cpu_stall_reset(void)
125{ 124{
126 return 0;
127} 125}
128 126
129#ifdef CONFIG_DEBUG_LOCK_ALLOC 127#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c0ed1c056f29..95518e628794 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,64 +30,23 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33struct notifier_block;
34
35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu);
37extern void rcu_note_context_switch(int cpu); 33extern void rcu_note_context_switch(int cpu);
38extern int rcu_needs_cpu(int cpu); 34extern int rcu_needs_cpu(int cpu);
35extern void rcu_cpu_stall_reset(void);
39 36
40#ifdef CONFIG_TREE_PREEMPT_RCU 37#ifdef CONFIG_TREE_PREEMPT_RCU
41 38
42extern void __rcu_read_lock(void);
43extern void __rcu_read_unlock(void);
44extern void synchronize_rcu(void);
45extern void exit_rcu(void); 39extern void exit_rcu(void);
46 40
47/*
48 * Defined as macro as it is a very low level header
49 * included from areas that don't even know about current
50 */
51#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
52
53#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 41#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
54 42
55static inline void __rcu_read_lock(void)
56{
57 preempt_disable();
58}
59
60static inline void __rcu_read_unlock(void)
61{
62 preempt_enable();
63}
64
65#define synchronize_rcu synchronize_sched
66
67static inline void exit_rcu(void) 43static inline void exit_rcu(void)
68{ 44{
69} 45}
70 46
71static inline int rcu_preempt_depth(void)
72{
73 return 0;
74}
75
76#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 47#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
77 48
78static inline void __rcu_read_lock_bh(void)
79{
80 local_bh_disable();
81}
82static inline void __rcu_read_unlock_bh(void)
83{
84 local_bh_enable();
85}
86
87extern void call_rcu_sched(struct rcu_head *head,
88 void (*func)(struct rcu_head *rcu));
89extern void synchronize_rcu_bh(void); 49extern void synchronize_rcu_bh(void);
90extern void synchronize_sched(void);
91extern void synchronize_rcu_expedited(void); 50extern void synchronize_rcu_expedited(void);
92 51
93static inline void synchronize_rcu_bh_expedited(void) 52static inline void synchronize_rcu_bh_expedited(void)
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void)
95 synchronize_sched_expedited(); 54 synchronize_sched_expedited();
96} 55}
97 56
98extern void rcu_check_callbacks(int cpu, int user); 57extern void rcu_barrier(void);
99 58
100extern long rcu_batches_completed(void); 59extern long rcu_batches_completed(void);
101extern long rcu_batches_completed_bh(void); 60extern long rcu_batches_completed_bh(void);
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void);
104extern void rcu_bh_force_quiescent_state(void); 63extern void rcu_bh_force_quiescent_state(void);
105extern void rcu_sched_force_quiescent_state(void); 64extern void rcu_sched_force_quiescent_state(void);
106 65
107#ifdef CONFIG_NO_HZ
108void rcu_enter_nohz(void);
109void rcu_exit_nohz(void);
110#else /* CONFIG_NO_HZ */
111static inline void rcu_enter_nohz(void)
112{
113}
114static inline void rcu_exit_nohz(void)
115{
116}
117#endif /* CONFIG_NO_HZ */
118
119/* A context switch is a grace period for RCU-sched and RCU-bh. */ 66/* A context switch is a grace period for RCU-sched and RCU-bh. */
120static inline int rcu_blocking_is_gp(void) 67static inline int rcu_blocking_is_gp(void)
121{ 68{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eb3c1ceec06e..61b4ecf1da50 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1209,11 +1209,13 @@ struct task_struct {
1209 unsigned int policy; 1209 unsigned int policy;
1210 cpumask_t cpus_allowed; 1210 cpumask_t cpus_allowed;
1211 1211
1212#ifdef CONFIG_TREE_PREEMPT_RCU 1212#ifdef CONFIG_PREEMPT_RCU
1213 int rcu_read_lock_nesting; 1213 int rcu_read_lock_nesting;
1214 char rcu_read_unlock_special; 1214 char rcu_read_unlock_special;
1215 struct rcu_node *rcu_blocked_node;
1216 struct list_head rcu_node_entry; 1215 struct list_head rcu_node_entry;
1216#endif /* #ifdef CONFIG_PREEMPT_RCU */
1217#ifdef CONFIG_TREE_PREEMPT_RCU
1218 struct rcu_node *rcu_blocked_node;
1217#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1219#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1218 1220
1219#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1221#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1295,9 +1297,9 @@ struct task_struct {
1295 struct list_head cpu_timers[3]; 1297 struct list_head cpu_timers[3];
1296 1298
1297/* process credentials */ 1299/* process credentials */
1298 const struct cred *real_cred; /* objective and real subjective task 1300 const struct cred __rcu *real_cred; /* objective and real subjective task
1299 * credentials (COW) */ 1301 * credentials (COW) */
1300 const struct cred *cred; /* effective (overridable) subjective task 1302 const struct cred __rcu *cred; /* effective (overridable) subjective task
1301 * credentials (COW) */ 1303 * credentials (COW) */
1302 struct mutex cred_guard_mutex; /* guard against foreign influences on 1304 struct mutex cred_guard_mutex; /* guard against foreign influences on
1303 * credential calculations 1305 * credential calculations
@@ -1425,7 +1427,7 @@ struct task_struct {
1425#endif 1427#endif
1426#ifdef CONFIG_CGROUPS 1428#ifdef CONFIG_CGROUPS
1427 /* Control Group info protected by css_set_lock */ 1429 /* Control Group info protected by css_set_lock */
1428 struct css_set *cgroups; 1430 struct css_set __rcu *cgroups;
1429 /* cg_list protected by css_set_lock and tsk->alloc_lock */ 1431 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1430 struct list_head cg_list; 1432 struct list_head cg_list;
1431#endif 1433#endif
@@ -1747,7 +1749,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1747#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1749#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1748#define used_math() tsk_used_math(current) 1750#define used_math() tsk_used_math(current)
1749 1751
1750#ifdef CONFIG_TREE_PREEMPT_RCU 1752#ifdef CONFIG_PREEMPT_RCU
1751 1753
1752#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1754#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1753#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ 1755#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1756,7 +1758,9 @@ static inline void rcu_copy_process(struct task_struct *p)
1756{ 1758{
1757 p->rcu_read_lock_nesting = 0; 1759 p->rcu_read_lock_nesting = 0;
1758 p->rcu_read_unlock_special = 0; 1760 p->rcu_read_unlock_special = 0;
1761#ifdef CONFIG_TREE_PREEMPT_RCU
1759 p->rcu_blocked_node = NULL; 1762 p->rcu_blocked_node = NULL;
1763#endif
1760 INIT_LIST_HEAD(&p->rcu_node_entry); 1764 INIT_LIST_HEAD(&p->rcu_node_entry);
1761} 1765}
1762 1766
diff --git a/include/linux/security.h b/include/linux/security.h
index a22219afff09..b8246a8df7d2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot,
74extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); 74extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
75extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, 75extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
76 unsigned long arg4, unsigned long arg5); 76 unsigned long arg4, unsigned long arg5);
77extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); 77extern int cap_task_setscheduler(struct task_struct *p);
78extern int cap_task_setioprio(struct task_struct *p, int ioprio); 78extern int cap_task_setioprio(struct task_struct *p, int ioprio);
79extern int cap_task_setnice(struct task_struct *p, int nice); 79extern int cap_task_setnice(struct task_struct *p, int nice);
80extern int cap_syslog(int type, bool from_file); 80extern int cap_syslog(int type, bool from_file);
@@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
959 * Sets the new child socket's sid to the openreq sid. 959 * Sets the new child socket's sid to the openreq sid.
960 * @inet_conn_established: 960 * @inet_conn_established:
961 * Sets the connection's peersid to the secmark on skb. 961 * Sets the connection's peersid to the secmark on skb.
 962 * @secmark_relabel_packet:
 963 * Check if the process should be allowed to relabel packets to the given secid.
 964 * @secmark_refcount_inc:
 965 * Tells the LSM to increment the number of secmark labeling rules loaded.
 966 * @secmark_refcount_dec:
 967 * Tells the LSM to decrement the number of secmark labeling rules loaded.
962 * @req_classify_flow: 968 * @req_classify_flow:
963 * Sets the flow's sid to the openreq sid. 969 * Sets the flow's sid to the openreq sid.
964 * @tun_dev_create: 970 * @tun_dev_create:
@@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1279 * Return 0 if permission is granted. 1285 * Return 0 if permission is granted.
1280 * 1286 *
1281 * @secid_to_secctx: 1287 * @secid_to_secctx:
 1282 * Convert secid to security context. 1288 * Convert secid to security context. If secdata is NULL, the length of
 1289 * the result will be returned in seclen, but no secdata will be returned.
 1290 * This does mean that the length could change between the call that checks
 1291 * the length and the next call, which actually allocates and returns the secdata.
1283 * @secid contains the security ID. 1292 * @secid contains the security ID.
1284 * @secdata contains the pointer that stores the converted security context. 1293 * @secdata contains the pointer that stores the converted security context.
1294 * @seclen pointer which contains the length of the data
1285 * @secctx_to_secid: 1295 * @secctx_to_secid:
1286 * Convert security context to secid. 1296 * Convert security context to secid.
1287 * @secid contains the pointer to the generated security ID. 1297 * @secid contains the pointer to the generated security ID.
@@ -1501,8 +1511,7 @@ struct security_operations {
1501 int (*task_getioprio) (struct task_struct *p); 1511 int (*task_getioprio) (struct task_struct *p);
1502 int (*task_setrlimit) (struct task_struct *p, unsigned int resource, 1512 int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
1503 struct rlimit *new_rlim); 1513 struct rlimit *new_rlim);
1504 int (*task_setscheduler) (struct task_struct *p, int policy, 1514 int (*task_setscheduler) (struct task_struct *p);
1505 struct sched_param *lp);
1506 int (*task_getscheduler) (struct task_struct *p); 1515 int (*task_getscheduler) (struct task_struct *p);
1507 int (*task_movememory) (struct task_struct *p); 1516 int (*task_movememory) (struct task_struct *p);
1508 int (*task_kill) (struct task_struct *p, 1517 int (*task_kill) (struct task_struct *p,
@@ -1594,6 +1603,9 @@ struct security_operations {
1594 struct request_sock *req); 1603 struct request_sock *req);
1595 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); 1604 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
1596 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); 1605 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
1606 int (*secmark_relabel_packet) (u32 secid);
1607 void (*secmark_refcount_inc) (void);
1608 void (*secmark_refcount_dec) (void);
1597 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); 1609 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1598 int (*tun_dev_create)(void); 1610 int (*tun_dev_create)(void);
1599 void (*tun_dev_post_create)(struct sock *sk); 1611 void (*tun_dev_post_create)(struct sock *sk);
@@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio);
1752int security_task_getioprio(struct task_struct *p); 1764int security_task_getioprio(struct task_struct *p);
1753int security_task_setrlimit(struct task_struct *p, unsigned int resource, 1765int security_task_setrlimit(struct task_struct *p, unsigned int resource,
1754 struct rlimit *new_rlim); 1766 struct rlimit *new_rlim);
1755int security_task_setscheduler(struct task_struct *p, 1767int security_task_setscheduler(struct task_struct *p);
1756 int policy, struct sched_param *lp);
1757int security_task_getscheduler(struct task_struct *p); 1768int security_task_getscheduler(struct task_struct *p);
1758int security_task_movememory(struct task_struct *p); 1769int security_task_movememory(struct task_struct *p);
1759int security_task_kill(struct task_struct *p, struct siginfo *info, 1770int security_task_kill(struct task_struct *p, struct siginfo *info,
@@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p,
2320 return 0; 2331 return 0;
2321} 2332}
2322 2333
2323static inline int security_task_setscheduler(struct task_struct *p, 2334static inline int security_task_setscheduler(struct task_struct *p)
2324 int policy,
2325 struct sched_param *lp)
2326{ 2335{
2327 return cap_task_setscheduler(p, policy, lp); 2336 return cap_task_setscheduler(p);
2328} 2337}
2329 2338
2330static inline int security_task_getscheduler(struct task_struct *p) 2339static inline int security_task_getscheduler(struct task_struct *p)
@@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk,
2551 const struct request_sock *req); 2560 const struct request_sock *req);
2552void security_inet_conn_established(struct sock *sk, 2561void security_inet_conn_established(struct sock *sk,
2553 struct sk_buff *skb); 2562 struct sk_buff *skb);
2563int security_secmark_relabel_packet(u32 secid);
2564void security_secmark_refcount_inc(void);
2565void security_secmark_refcount_dec(void);
2554int security_tun_dev_create(void); 2566int security_tun_dev_create(void);
2555void security_tun_dev_post_create(struct sock *sk); 2567void security_tun_dev_post_create(struct sock *sk);
2556int security_tun_dev_attach(struct sock *sk); 2568int security_tun_dev_attach(struct sock *sk);
@@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk,
2705{ 2717{
2706} 2718}
2707 2719
2720static inline int security_secmark_relabel_packet(u32 secid)
2721{
2722 return 0;
2723}
2724
2725static inline void security_secmark_refcount_inc(void)
2726{
2727}
2728
2729static inline void security_secmark_refcount_dec(void)
2730{
2731}
2732
2708static inline int security_tun_dev_create(void) 2733static inline int security_tun_dev_create(void)
2709{ 2734{
2710 return 0; 2735 return 0;
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
index 82e0f26a1299..44f459612690 100644
--- a/include/linux/selinux.h
+++ b/include/linux/selinux.h
@@ -21,74 +21,11 @@ struct kern_ipc_perm;
21#ifdef CONFIG_SECURITY_SELINUX 21#ifdef CONFIG_SECURITY_SELINUX
22 22
23/** 23/**
24 * selinux_string_to_sid - map a security context string to a security ID
25 * @str: the security context string to be mapped
26 * @sid: ID value returned via this.
27 *
28 * Returns 0 if successful, with the SID stored in sid. A value
29 * of zero for sid indicates no SID could be determined (but no error
30 * occurred).
31 */
32int selinux_string_to_sid(char *str, u32 *sid);
33
34/**
35 * selinux_secmark_relabel_packet_permission - secmark permission check
36 * @sid: SECMARK ID value to be applied to network packet
37 *
38 * Returns 0 if the current task is allowed to set the SECMARK label of
39 * packets with the supplied security ID. Note that it is implicit that
40 * the packet is always being relabeled from the default unlabeled value,
41 * and that the access control decision is made in the AVC.
42 */
43int selinux_secmark_relabel_packet_permission(u32 sid);
44
45/**
46 * selinux_secmark_refcount_inc - increments the secmark use counter
47 *
48 * SELinux keeps track of the current SECMARK targets in use so it knows
49 * when to apply SECMARK label access checks to network packets. This
50 * function increments this reference count to indicate that a new SECMARK
51 * target has been configured.
52 */
53void selinux_secmark_refcount_inc(void);
54
55/**
56 * selinux_secmark_refcount_dec - decrements the secmark use counter
57 *
58 * SELinux keeps track of the current SECMARK targets in use so it knows
59 * when to apply SECMARK label access checks to network packets. This
60 * function decrements this reference count to indicate that one of the
61 * existing SECMARK targets has been removed/flushed.
62 */
63void selinux_secmark_refcount_dec(void);
64
65/**
66 * selinux_is_enabled - is SELinux enabled? 24 * selinux_is_enabled - is SELinux enabled?
67 */ 25 */
68bool selinux_is_enabled(void); 26bool selinux_is_enabled(void);
69#else 27#else
70 28
71static inline int selinux_string_to_sid(const char *str, u32 *sid)
72{
73 *sid = 0;
74 return 0;
75}
76
77static inline int selinux_secmark_relabel_packet_permission(u32 sid)
78{
79 return 0;
80}
81
82static inline void selinux_secmark_refcount_inc(void)
83{
84 return;
85}
86
87static inline void selinux_secmark_refcount_dec(void)
88{
89 return;
90}
91
92static inline bool selinux_is_enabled(void) 29static inline bool selinux_is_enabled(void)
93{ 30{
94 return false; 31 return false;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5d2f546dbf..58971e891f48 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
108#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 108#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
109 109
110/** 110/**
111 * srcu_dereference - fetch SRCU-protected pointer with checking 111 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
112 * @p: the pointer to fetch and protect for later dereferencing
113 * @sp: pointer to the srcu_struct, which is used to check that we
114 * really are in an SRCU read-side critical section.
115 * @c: condition to check for update-side use
112 * 116 *
113 * Makes rcu_dereference_check() do the dirty work. 117 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
118 * critical section will result in an RCU-lockdep splat, unless @c evaluates
119 * to 1. The @c argument will normally be a logical expression containing
120 * lockdep_is_held() calls.
114 */ 121 */
115#define srcu_dereference(p, sp) \ 122#define srcu_dereference_check(p, sp, c) \
116 rcu_dereference_check(p, srcu_read_lock_held(sp)) 123 __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
124
125/**
126 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
127 * @p: the pointer to fetch and protect for later dereferencing
128 * @sp: pointer to the srcu_struct, which is used to check that we
129 * really are in an SRCU read-side critical section.
130 *
131 * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
132 * is enabled, invoking this outside of an RCU read-side critical
133 * section will result in an RCU-lockdep splat.
134 */
135#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
117 136
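Putting srcu_dereference_check() to work (an editorial sketch, not part of this patch; cur_ctx, ctx_mutex, and ctx_srcu are hypothetical), the same accessor then serves both SRCU readers and updaters holding the mutex:

	/* Illustrative only; names are hypothetical. */
	struct ctx {
		int setting;
	};

	static struct ctx __rcu *cur_ctx;
	static DEFINE_MUTEX(ctx_mutex);		/* update-side lock */
	static struct srcu_struct ctx_srcu;	/* init_srcu_struct() at boot */

	static struct ctx *get_ctx(void)
	{
		return srcu_dereference_check(cur_ctx, &ctx_srcu,
					      lockdep_is_held(&ctx_mutex));
	}

	static int read_setting(void)
	{
		int idx, val;

		idx = srcu_read_lock(&ctx_srcu);
		val = get_ctx()->setting;
		srcu_read_unlock(&ctx_srcu, idx);
		return val;
	}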
118/** 137/**
119 * srcu_read_lock - register a new reader for an SRCU-protected structure. 138 * srcu_read_lock - register a new reader for an SRCU-protected structure.
120 * @sp: srcu_struct in which to register the new reader. 139 * @sp: srcu_struct in which to register the new reader.
121 * 140 *
122 * Enter an SRCU read-side critical section. Note that SRCU read-side 141 * Enter an SRCU read-side critical section. Note that SRCU read-side
123 * critical sections may be nested. 142 * critical sections may be nested. However, it is illegal to
143 * call anything that waits on an SRCU grace period for the same
144 * srcu_struct, whether directly or indirectly. Please note that
145 * one way to indirectly wait on an SRCU grace period is to acquire
146 * a mutex that is held elsewhere while calling synchronize_srcu() or
147 * synchronize_srcu_expedited().
124 */ 148 */
125static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) 149static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
126{ 150{
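The indirect-wait trap mentioned above, spelled out (editorial sketch, not part of this patch; my_srcu and my_mutex are hypothetical):

	/* Illustrative only; names are hypothetical. */

	/* Reader */
	idx = srcu_read_lock(&my_srcu);
	mutex_lock(&my_mutex);		/* may block behind the updater... */
	/* ... */
	mutex_unlock(&my_mutex);
	srcu_read_unlock(&my_srcu, idx);

	/* Updater */
	mutex_lock(&my_mutex);
	synchronize_srcu(&my_srcu);	/* ...which now waits forever on the reader */
	mutex_unlock(&my_mutex);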
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 671538d25bc1..8eee9dbbfe7a 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -69,7 +69,7 @@ struct gss_cl_ctx {
69 enum rpc_gss_proc gc_proc; 69 enum rpc_gss_proc gc_proc;
70 u32 gc_seq; 70 u32 gc_seq;
71 spinlock_t gc_seq_lock; 71 spinlock_t gc_seq_lock;
72 struct gss_ctx *gc_gss_ctx; 72 struct gss_ctx __rcu *gc_gss_ctx;
73 struct xdr_netobj gc_wire_ctx; 73 struct xdr_netobj gc_wire_ctx;
74 u32 gc_win; 74 u32 gc_win;
75 unsigned long gc_expiry; 75 unsigned long gc_expiry;
@@ -80,7 +80,7 @@ struct gss_upcall_msg;
80struct gss_cred { 80struct gss_cred {
81 struct rpc_cred gc_base; 81 struct rpc_cred gc_base;
82 enum rpc_gss_svc gc_service; 82 enum rpc_gss_svc gc_service;
83 struct gss_cl_ctx *gc_ctx; 83 struct gss_cl_ctx __rcu *gc_ctx;
84 struct gss_upcall_msg *gc_upcall; 84 struct gss_upcall_msg *gc_upcall;
85 unsigned long gc_upcall_timestamp; 85 unsigned long gc_upcall_timestamp;
86 unsigned char gc_machine_cred : 1; 86 unsigned char gc_machine_cred : 1;
diff --git a/include/linux/types.h b/include/linux/types.h
index 01a082f56ef4..357dbc19606f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -121,7 +121,15 @@ typedef __u64 u_int64_t;
121typedef __s64 int64_t; 121typedef __s64 int64_t;
122#endif 122#endif
123 123
124/* this is a special 64bit data type that is 8-byte aligned */ 124/*
125 * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
126 * common 32/64-bit compat problems.
127 * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
128 * architectures) and to 8-byte boundaries on 64-bit architetures. The new
129 * aligned_64 type enforces 8-byte alignment so that structs containing
130 * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
131 * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
132 */
125#define aligned_u64 __u64 __attribute__((aligned(8))) 133#define aligned_u64 __u64 __attribute__((aligned(8)))
126#define aligned_be64 __be64 __attribute__((aligned(8))) 134#define aligned_be64 __be64 __attribute__((aligned(8)))
127#define aligned_le64 __le64 __attribute__((aligned(8))) 135#define aligned_le64 __le64 __attribute__((aligned(8)))
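A concrete instance of the compat problem (editorial sketch, not part of this patch; struct my_ioctl_arg is invented): with a plain __u64, a 32-bit x86 compiler puts val at offset 4 while a 64-bit kernel expects it at offset 8, so the two sides disagree on the layout; aligned_u64 pins it to offset 8 on both.

	/* Illustrative only; the struct is hypothetical. */
	struct my_ioctl_arg {
		__u32 flags;
		aligned_u64 val;	/* offset 8 on both 32-bit and 64-bit builds */
	};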
@@ -178,6 +186,11 @@ typedef __u64 __bitwise __be64;
178typedef __u16 __bitwise __sum16; 186typedef __u16 __bitwise __sum16;
179typedef __u32 __bitwise __wsum; 187typedef __u32 __bitwise __wsum;
180 188
189/* this is a special 64bit data type that is 8-byte aligned */
190#define __aligned_u64 __u64 __attribute__((aligned(8)))
191#define __aligned_be64 __be64 __attribute__((aligned(8)))
192#define __aligned_le64 __le64 __attribute__((aligned(8)))
193
181#ifdef __KERNEL__ 194#ifdef __KERNEL__
182typedef unsigned __bitwise__ gfp_t; 195typedef unsigned __bitwise__ gfp_t;
183typedef unsigned __bitwise__ fmode_t; 196typedef unsigned __bitwise__ fmode_t;
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index 97e07f46a0fa..aa4ebb42a565 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -48,6 +48,7 @@ struct videobuf_dmabuf {
48 48
49 /* for userland buffer */ 49 /* for userland buffer */
50 int offset; 50 int offset;
51 size_t size;
51 struct page **pages; 52 struct page **pages;
52 53
53 /* for kernel buffers */ 54 /* for kernel buffers */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 27a902d9b3a9..30fce0128dd7 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -161,12 +161,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l
161{ 161{
162 struct sk_buff *skb; 162 struct sk_buff *skb;
163 163
164 release_sock(sk);
164 if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { 165 if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
165 skb_reserve(skb, BT_SKB_RESERVE); 166 skb_reserve(skb, BT_SKB_RESERVE);
166 bt_cb(skb)->incoming = 0; 167 bt_cb(skb)->incoming = 0;
167 } 168 }
169 lock_sock(sk);
170
171 if (!skb && *err)
172 return NULL;
173
174 *err = sock_error(sk);
175 if (*err)
176 goto out;
177
178 if (sk->sk_shutdown) {
179 *err = -ECONNRESET;
180 goto out;
181 }
168 182
169 return skb; 183 return skb;
184
185out:
186 kfree_skb(skb);
187 return NULL;
170} 188}
171 189
172int bt_err(__u16 code); 190int bt_err(__u16 code);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index ef6c24a529e1..a4dc5b027bd9 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -51,7 +51,8 @@ static inline u32 task_cls_classid(struct task_struct *p)
51 return 0; 51 return 0;
52 52
53 rcu_read_lock(); 53 rcu_read_lock();
54 id = rcu_dereference(net_cls_subsys_id); 54 id = rcu_dereference_index_check(net_cls_subsys_id,
55 rcu_read_lock_held());
55 if (id >= 0) 56 if (id >= 0)
56 classid = container_of(task_subsys_state(p, id), 57 classid = container_of(task_subsys_state(p, id),
57 struct cgroup_cls_state, css)->classid; 58 struct cgroup_cls_state, css)->classid;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e624dae54fa4..caf17db87dbc 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -75,7 +75,7 @@ struct nf_conntrack_helper;
75/* nf_conn feature for connections that have a helper */ 75/* nf_conn feature for connections that have a helper */
76struct nf_conn_help { 76struct nf_conn_help {
77 /* Helper. if any */ 77 /* Helper. if any */
78 struct nf_conntrack_helper *helper; 78 struct nf_conntrack_helper __rcu *helper;
79 79
80 union nf_conntrack_help help; 80 union nf_conntrack_help help;
81 81
diff --git a/init/Kconfig b/init/Kconfig
index 1ef0b439908e..7b920aafa98a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -347,6 +347,7 @@ choice
347 347
348config TREE_RCU 348config TREE_RCU
349 bool "Tree-based hierarchical RCU" 349 bool "Tree-based hierarchical RCU"
350 depends on !PREEMPT && SMP
350 help 351 help
351 This option selects the RCU implementation that is 352 This option selects the RCU implementation that is
352 designed for very large SMP systems with hundreds or 353 designed for very large SMP systems with hundreds or
@@ -354,7 +355,7 @@ config TREE_RCU
354 smaller systems. 355 smaller systems.
355 356
356config TREE_PREEMPT_RCU 357config TREE_PREEMPT_RCU
357 bool "Preemptable tree-based hierarchical RCU" 358 bool "Preemptible tree-based hierarchical RCU"
358 depends on PREEMPT 359 depends on PREEMPT
359 help 360 help
360 This option selects the RCU implementation that is 361 This option selects the RCU implementation that is
@@ -372,8 +373,22 @@ config TINY_RCU
372 is not required. This option greatly reduces the 373 is not required. This option greatly reduces the
373 memory footprint of RCU. 374 memory footprint of RCU.
374 375
376config TINY_PREEMPT_RCU
377 bool "Preemptible UP-only small-memory-footprint RCU"
378 depends on !SMP && PREEMPT
379 help
380 This option selects the RCU implementation that is designed
381 for real-time UP systems. This option greatly reduces the
382 memory footprint of RCU.
383
375endchoice 384endchoice
376 385
386config PREEMPT_RCU
387 def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
388 help
389 This option enables preemptible-RCU code that is common between
390 the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
391
377config RCU_TRACE 392config RCU_TRACE
378 bool "Enable tracing for RCU" 393 bool "Enable tracing for RCU"
379 depends on TREE_RCU || TREE_PREEMPT_RCU 394 depends on TREE_RCU || TREE_PREEMPT_RCU
@@ -394,9 +409,12 @@ config RCU_FANOUT
394 help 409 help
395 This option controls the fanout of hierarchical implementations 410 This option controls the fanout of hierarchical implementations
396 of RCU, allowing RCU to work efficiently on machines with 411 of RCU, allowing RCU to work efficiently on machines with
397 large numbers of CPUs. This value must be at least the cube 412 large numbers of CPUs. This value must be at least the fourth
398 root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit 413 root of NR_CPUS, which allows NR_CPUS to be insanely large.
399 systems and up to 262,144 for 64-bit systems. 414 The default value of RCU_FANOUT should be used for production
415 systems, but if you are stress-testing the RCU implementation
416 itself, small RCU_FANOUT values allow you to test large-system
417 code paths on small(er) systems.
400 418
401 Select a specific number if testing RCU itself. 419 Select a specific number if testing RCU itself.
402 Take the default if unsure. 420 Take the default if unsure.
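As a quick check of the new wording (editorial note, not part of this patch): the rcu_node tree is at most four levels deep here, so a fanout of F covers up to F*F*F*F CPUs; the 64-bit default RCU_FANOUT of 64 therefore covers 64 * 64 * 64 * 64 = 16,777,216 CPUs, which is why NR_CPUS can be, as the help text puts it, insanely large.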
diff --git a/kernel/Makefile b/kernel/Makefile
index 4d9bf5f8531f..e2c9d52cfe9e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o
87obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o 87obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
88obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o 88obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
89obj-$(CONFIG_TINY_RCU) += rcutiny.o 89obj-$(CONFIG_TINY_RCU) += rcutiny.o
90obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
90obj-$(CONFIG_RELAY) += relay.o 91obj-$(CONFIG_RELAY) += relay.o
91obj-$(CONFIG_SYSCTL) += utsname_sysctl.o 92obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
92obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 93obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c9483d8f6140..291ba3d04bea 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -138,7 +138,7 @@ struct css_id {
138 * is called after synchronize_rcu(). But for safe use, css_is_removed() 138 * is called after synchronize_rcu(). But for safe use, css_is_removed()
139 * css_tryget() should be used for avoiding race. 139 * css_tryget() should be used for avoiding race.
140 */ 140 */
141 struct cgroup_subsys_state *css; 141 struct cgroup_subsys_state __rcu *css;
142 /* 142 /*
143 * ID of this css. 143 * ID of this css.
144 */ 144 */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b23c0979bbe7..51b143e2a07a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1397 if (tsk->flags & PF_THREAD_BOUND) 1397 if (tsk->flags & PF_THREAD_BOUND)
1398 return -EINVAL; 1398 return -EINVAL;
1399 1399
1400 ret = security_task_setscheduler(tsk, 0, NULL); 1400 ret = security_task_setscheduler(tsk);
1401 if (ret) 1401 if (ret)
1402 return ret; 1402 return ret;
1403 if (threadgroup) { 1403 if (threadgroup) {
@@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1405 1405
1406 rcu_read_lock(); 1406 rcu_read_lock();
1407 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { 1407 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1408 ret = security_task_setscheduler(c, 0, NULL); 1408 ret = security_task_setscheduler(c);
1409 if (ret) { 1409 if (ret) {
1410 rcu_read_unlock(); 1410 rcu_read_unlock();
1411 return ret; 1411 return ret;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1decafbb6b1a..72206cf5c6cf 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -931,6 +931,7 @@ static inline int
931remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) 931remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
932{ 932{
933 if (hrtimer_is_queued(timer)) { 933 if (hrtimer_is_queued(timer)) {
934 unsigned long state;
934 int reprogram; 935 int reprogram;
935 936
936 /* 937 /*
@@ -944,8 +945,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
944 debug_deactivate(timer); 945 debug_deactivate(timer);
945 timer_stats_hrtimer_clear_start_info(timer); 946 timer_stats_hrtimer_clear_start_info(timer);
946 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); 947 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
947 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 948 /*
948 reprogram); 949 * We must preserve the CALLBACK state flag here,
950 * otherwise we could move the timer base in
951 * switch_hrtimer_base.
952 */
953 state = timer->state & HRTIMER_STATE_CALLBACK;
954 __remove_hrtimer(timer, base, state, reprogram);
949 return 1; 955 return 1;
950 } 956 }
951 return 0; 957 return 0;
@@ -1231,6 +1237,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
1231 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); 1237 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1232 enqueue_hrtimer(timer, base); 1238 enqueue_hrtimer(timer, base);
1233 } 1239 }
1240
1241 WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
1242
1234 timer->state &= ~HRTIMER_STATE_CALLBACK; 1243 timer->state &= ~HRTIMER_STATE_CALLBACK;
1235} 1244}
1236 1245
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 0c642d51aac2..53ead174da2f 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -98,7 +98,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
98 printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" 98 printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
99 " disables this message.\n"); 99 " disables this message.\n");
100 sched_show_task(t); 100 sched_show_task(t);
101 __debug_show_held_locks(t); 101 debug_show_held_locks(t);
102 102
103 touch_nmi_watchdog(); 103 touch_nmi_watchdog();
104 104
@@ -111,7 +111,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
111 * periodically exit the critical section and enter a new one. 111 * periodically exit the critical section and enter a new one.
112 * 112 *
113 * For preemptible RCU it is sufficient to call rcu_read_unlock in order 113 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
114 * exit the grace period. For classic RCU, a reschedule is required. 114 * to exit the grace period. For classic RCU, a reschedule is required.
115 */ 115 */
116static void rcu_lock_break(struct task_struct *g, struct task_struct *t) 116static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
117{ 117{
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a510232..42ba65dff7d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
639 } 639 }
640#endif 640#endif
641 641
642 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
643 debug_locks_off();
644 printk(KERN_ERR
645 "BUG: looking up invalid subclass: %u\n", subclass);
646 printk(KERN_ERR
647 "turning off the locking correctness validator.\n");
648 dump_stack();
649 return NULL;
650 }
651
642 /* 652 /*
643 * Static locks do not have their class-keys yet - for them the key 653 * Static locks do not have their class-keys yet - for them the key
644 * is the lock object itself: 654 * is the lock object itself:
@@ -774,7 +784,9 @@ out_unlock_set:
774 raw_local_irq_restore(flags); 784 raw_local_irq_restore(flags);
775 785
776 if (!subclass || force) 786 if (!subclass || force)
777 lock->class_cache = class; 787 lock->class_cache[0] = class;
788 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
789 lock->class_cache[subclass] = class;
778 790
779 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 791 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
780 return NULL; 792 return NULL;
@@ -2679,7 +2691,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2679void lockdep_init_map(struct lockdep_map *lock, const char *name, 2691void lockdep_init_map(struct lockdep_map *lock, const char *name,
2680 struct lock_class_key *key, int subclass) 2692 struct lock_class_key *key, int subclass)
2681{ 2693{
2682 lock->class_cache = NULL; 2694 int i;
2695
2696 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
2697 lock->class_cache[i] = NULL;
2698
2683#ifdef CONFIG_LOCK_STAT 2699#ifdef CONFIG_LOCK_STAT
2684 lock->cpu = raw_smp_processor_id(); 2700 lock->cpu = raw_smp_processor_id();
2685#endif 2701#endif
@@ -2739,21 +2755,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2739 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2755 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2740 return 0; 2756 return 0;
2741 2757
2742 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2743 debug_locks_off();
2744 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2745 printk("turning off the locking correctness validator.\n");
2746 dump_stack();
2747 return 0;
2748 }
2749
2750 if (lock->key == &__lockdep_no_validate__) 2758 if (lock->key == &__lockdep_no_validate__)
2751 check = 1; 2759 check = 1;
2752 2760
2753 if (!subclass) 2761 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
2754 class = lock->class_cache; 2762 class = lock->class_cache[subclass];
2755 /* 2763 /*
2756 * Not cached yet or subclass? 2764 * Not cached?
2757 */ 2765 */
2758 if (unlikely(!class)) { 2766 if (unlikely(!class)) {
2759 class = register_lock_class(lock, subclass, 0); 2767 class = register_lock_class(lock, subclass, 0);
@@ -2918,7 +2926,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
2918 return 1; 2926 return 1;
2919 2927
2920 if (hlock->references) { 2928 if (hlock->references) {
2921 struct lock_class *class = lock->class_cache; 2929 struct lock_class *class = lock->class_cache[0];
2922 2930
2923 if (!class) 2931 if (!class)
2924 class = look_up_lock_class(lock, 0); 2932 class = look_up_lock_class(lock, 0);
@@ -3559,7 +3567,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
3559 if (list_empty(head)) 3567 if (list_empty(head))
3560 continue; 3568 continue;
3561 list_for_each_entry_safe(class, next, head, hash_entry) { 3569 list_for_each_entry_safe(class, next, head, hash_entry) {
3562 if (unlikely(class == lock->class_cache)) { 3570 int match = 0;
3571
3572 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3573 match |= class == lock->class_cache[j];
3574
3575 if (unlikely(match)) {
3563 if (debug_locks_off_graph_unlock()) 3576 if (debug_locks_off_graph_unlock())
3564 WARN_ON(1); 3577 WARN_ON(1);
3565 goto out_restore; 3578 goto out_restore;
@@ -3775,7 +3788,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
3775 * Careful: only use this function if you are sure that 3788 * Careful: only use this function if you are sure that
3776 * the task cannot run in parallel! 3789 * the task cannot run in parallel!
3777 */ 3790 */
3778void __debug_show_held_locks(struct task_struct *task) 3791void debug_show_held_locks(struct task_struct *task)
3779{ 3792{
3780 if (unlikely(!debug_locks)) { 3793 if (unlikely(!debug_locks)) {
3781 printk("INFO: lockdep is turned off.\n"); 3794 printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3796,6 @@ void __debug_show_held_locks(struct task_struct *task)
3783 } 3796 }
3784 lockdep_print_held_locks(task); 3797 lockdep_print_held_locks(task);
3785} 3798}
3786EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3787
3788void debug_show_held_locks(struct task_struct *task)
3789{
3790 __debug_show_held_locks(task);
3791}
3792EXPORT_SYMBOL_GPL(debug_show_held_locks); 3799EXPORT_SYMBOL_GPL(debug_show_held_locks);
3793 3800
3794void lockdep_sys_exit(void) 3801void lockdep_sys_exit(void)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 05ecf6f7c672..f309e8014c78 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2509,15 +2509,13 @@ static void perf_event_for_each(struct perf_event *event,
2509static int perf_event_period(struct perf_event *event, u64 __user *arg) 2509static int perf_event_period(struct perf_event *event, u64 __user *arg)
2510{ 2510{
2511 struct perf_event_context *ctx = event->ctx; 2511 struct perf_event_context *ctx = event->ctx;
2512 unsigned long size;
2513 int ret = 0; 2512 int ret = 0;
2514 u64 value; 2513 u64 value;
2515 2514
2516 if (!event->attr.sample_period) 2515 if (!event->attr.sample_period)
2517 return -EINVAL; 2516 return -EINVAL;
2518 2517
2519 size = copy_from_user(&value, arg, sizeof(value)); 2518 if (copy_from_user(&value, arg, sizeof(value)))
2520 if (size != sizeof(value))
2521 return -EFAULT; 2519 return -EFAULT;
2522 2520
2523 if (!value) 2521 if (!value)
diff --git a/kernel/pid.c b/kernel/pid.c
index d55c6fb8d087..39b65b69584f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -401,7 +401,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
401 struct task_struct *result = NULL; 401 struct task_struct *result = NULL;
402 if (pid) { 402 if (pid) {
403 struct hlist_node *first; 403 struct hlist_node *first;
404 first = rcu_dereference_check(pid->tasks[type].first, 404 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
405 rcu_read_lock_held() || 405 rcu_read_lock_held() ||
406 lockdep_tasklist_lock_is_held()); 406 lockdep_tasklist_lock_is_held());
407 if (first) 407 if (first)
@@ -416,6 +416,7 @@ EXPORT_SYMBOL(pid_task);
416 */ 416 */
417struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) 417struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
418{ 418{
419 rcu_lockdep_assert(rcu_read_lock_held());
419 return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); 420 return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
420} 421}
421 422
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fe465ac008a..2531017795f6 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(oops_in_progress);
85 * provides serialisation for access to the entire console 85 * provides serialisation for access to the entire console
86 * driver system. 86 * driver system.
87 */ 87 */
88static DECLARE_MUTEX(console_sem); 88static DEFINE_SEMAPHORE(console_sem);
89struct console *console_drivers; 89struct console *console_drivers;
90EXPORT_SYMBOL_GPL(console_drivers); 90EXPORT_SYMBOL_GPL(console_drivers);
91 91
@@ -556,7 +556,7 @@ static void zap_locks(void)
556 /* If a crash is occurring, make sure we can't deadlock */ 556 /* If a crash is occurring, make sure we can't deadlock */
557 spin_lock_init(&logbuf_lock); 557 spin_lock_init(&logbuf_lock);
558 /* And make sure that we print immediately */ 558 /* And make sure that we print immediately */
559 init_MUTEX(&console_sem); 559 sema_init(&console_sem, 1);
560} 560}
561 561
562#if defined(CONFIG_PRINTK_TIME) 562#if defined(CONFIG_PRINTK_TIME)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 4d169835fb36..a23a57a976d1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -73,12 +73,14 @@ int debug_lockdep_rcu_enabled(void)
73EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); 73EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
74 74
75/** 75/**
76 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? 76 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
77 * 77 *
78 * Check for bottom half being disabled, which covers both the 78 * Check for bottom half being disabled, which covers both the
79 * CONFIG_PROVE_RCU and not cases. Note that if someone uses 79 * CONFIG_PROVE_RCU and not cases. Note that if someone uses
80 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) 80 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
81 * will show the situation. 81 * will show the situation. This is useful for debug checks in functions
82 * that require that they be called within an RCU read-side critical
83 * section.
82 * 84 *
83 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 85 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
84 */ 86 */
@@ -86,7 +88,7 @@ int rcu_read_lock_bh_held(void)
86{ 88{
87 if (!debug_lockdep_rcu_enabled()) 89 if (!debug_lockdep_rcu_enabled())
88 return 1; 90 return 1;
89 return in_softirq(); 91 return in_softirq() || irqs_disabled();
90} 92}
91EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); 93EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
92 94
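This is the helper that such debug checks lean on (editorial sketch, not part of this patch; struct entry and table are hypothetical); rcu_dereference_bh() itself is built from exactly this kind of condition:

	/* Illustrative only; names are hypothetical. */
	struct entry {
		int key;
	};

	static struct entry __rcu *table[16];

	static struct entry *table_lookup(int i)
	{
		/* Splats under PROVE_RCU unless called with BH or IRQs disabled. */
		return rcu_dereference_check(table[i], rcu_read_lock_bh_held());
	}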
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 196ec02f8be0..d806735342ac 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -59,6 +59,14 @@ int rcu_scheduler_active __read_mostly;
59EXPORT_SYMBOL_GPL(rcu_scheduler_active); 59EXPORT_SYMBOL_GPL(rcu_scheduler_active);
60#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 60#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
61 61
62/* Forward declarations for rcutiny_plugin.h. */
63static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
64static void __call_rcu(struct rcu_head *head,
65 void (*func)(struct rcu_head *rcu),
66 struct rcu_ctrlblk *rcp);
67
68#include "rcutiny_plugin.h"
69
62#ifdef CONFIG_NO_HZ 70#ifdef CONFIG_NO_HZ
63 71
64static long rcu_dynticks_nesting = 1; 72static long rcu_dynticks_nesting = 1;
@@ -140,6 +148,7 @@ void rcu_check_callbacks(int cpu, int user)
140 rcu_sched_qs(cpu); 148 rcu_sched_qs(cpu);
141 else if (!in_softirq()) 149 else if (!in_softirq())
142 rcu_bh_qs(cpu); 150 rcu_bh_qs(cpu);
151 rcu_preempt_check_callbacks();
143} 152}
144 153
145/* 154/*
@@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
162 *rcp->donetail = NULL; 171 *rcp->donetail = NULL;
163 if (rcp->curtail == rcp->donetail) 172 if (rcp->curtail == rcp->donetail)
164 rcp->curtail = &rcp->rcucblist; 173 rcp->curtail = &rcp->rcucblist;
174 rcu_preempt_remove_callbacks(rcp);
165 rcp->donetail = &rcp->rcucblist; 175 rcp->donetail = &rcp->rcucblist;
166 local_irq_restore(flags); 176 local_irq_restore(flags);
167 177
@@ -182,6 +192,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
182{ 192{
183 __rcu_process_callbacks(&rcu_sched_ctrlblk); 193 __rcu_process_callbacks(&rcu_sched_ctrlblk);
184 __rcu_process_callbacks(&rcu_bh_ctrlblk); 194 __rcu_process_callbacks(&rcu_bh_ctrlblk);
195 rcu_preempt_process_callbacks();
185} 196}
186 197
187/* 198/*
@@ -223,15 +234,15 @@ static void __call_rcu(struct rcu_head *head,
223} 234}
224 235
225/* 236/*
226 * Post an RCU callback to be invoked after the end of an RCU grace 237 * Post an RCU callback to be invoked after the end of an RCU-sched grace
227 * period. But since we have but one CPU, that would be after any 238 * period. But since we have but one CPU, that would be after any
228 * quiescent state. 239 * quiescent state.
229 */ 240 */
230void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 241void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
231{ 242{
232 __call_rcu(head, func, &rcu_sched_ctrlblk); 243 __call_rcu(head, func, &rcu_sched_ctrlblk);
233} 244}
234EXPORT_SYMBOL_GPL(call_rcu); 245EXPORT_SYMBOL_GPL(call_rcu_sched);
235 246
236/* 247/*
237 * Post an RCU bottom-half callback to be invoked after any subsequent 248 * Post an RCU bottom-half callback to be invoked after any subsequent
@@ -243,20 +254,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
243} 254}
244EXPORT_SYMBOL_GPL(call_rcu_bh); 255EXPORT_SYMBOL_GPL(call_rcu_bh);
245 256
246void rcu_barrier(void)
247{
248 struct rcu_synchronize rcu;
249
250 init_rcu_head_on_stack(&rcu.head);
251 init_completion(&rcu.completion);
252 /* Will wake me after RCU finished. */
253 call_rcu(&rcu.head, wakeme_after_rcu);
254 /* Wait for it. */
255 wait_for_completion(&rcu.completion);
256 destroy_rcu_head_on_stack(&rcu.head);
257}
258EXPORT_SYMBOL_GPL(rcu_barrier);
259
260void rcu_barrier_bh(void) 257void rcu_barrier_bh(void)
261{ 258{
262 struct rcu_synchronize rcu; 259 struct rcu_synchronize rcu;
@@ -289,5 +286,3 @@ void __init rcu_init(void)
289{ 286{
290 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 287 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
291} 288}
292
293#include "rcutiny_plugin.h"
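In rcutiny.c, call_rcu() gives way to call_rcu_sched() (call_rcu() itself now comes from rcutiny_plugin.h, where TINY_PREEMPT_RCU can supply preemptible semantics), rcu_barrier() likewise moves to the plugin, and the plugin is included near the top so that the new rcu_preempt_*() hooks are visible. A usage sketch for the renamed primitive, with hypothetical my_obj names:

	struct my_obj {
		struct rcu_head rcu;
		int payload;
	};

	static void my_obj_free_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_obj, rcu));
	}

	static void my_obj_defer_free(struct my_obj *p)
	{
		/* Explicitly RCU-sched semantics; plain call_rcu() is
		 * now provided by the plugin and may be preemptible. */
		call_rcu_sched(&p->rcu, my_obj_free_cb);
	}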
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index d223a92bc742..6ceca4f745ff 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
3 * Internal non-public definitions that provide either classic 3 * Internal non-public definitions that provide either classic
4 * or preemptable semantics. 4 * or preemptible semantics.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -17,11 +17,587 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * 19 *
20 * Copyright IBM Corporation, 2009 20 * Copyright (c) 2010 Linaro
21 * 21 *
22 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 22 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
23 */ 23 */
24 24
25#ifdef CONFIG_TINY_PREEMPT_RCU
26
27#include <linux/delay.h>
28
29/* Global control variables for preemptible RCU. */
30struct rcu_preempt_ctrlblk {
31 struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */
32 struct rcu_head **nexttail;
33 /* Tasks blocked in a preemptible RCU */
 34 /* read-side critical section while a */
35 /* preemptible-RCU grace period is in */
36 /* progress must wait for a later grace */
37 /* period. This pointer points to the */
38 /* ->next pointer of the last task that */
39 /* must wait for a later grace period, or */
40 /* to &->rcb.rcucblist if there is no */
41 /* such task. */
42 struct list_head blkd_tasks;
43 /* Tasks blocked in RCU read-side critical */
44 /* section. Tasks are placed at the head */
45 /* of this list and age towards the tail. */
46 struct list_head *gp_tasks;
47 /* Pointer to the first task blocking the */
48 /* current grace period, or NULL if there */
 49 /* is no such task. */
50 struct list_head *exp_tasks;
51 /* Pointer to first task blocking the */
52 /* current expedited grace period, or NULL */
53 /* if there is no such task. If there */
54 /* is no current expedited grace period, */
55 /* then there cannot be any such task. */
56 u8 gpnum; /* Current grace period. */
57 u8 gpcpu; /* Last grace period blocked by the CPU. */
58 u8 completed; /* Last grace period completed. */
59 /* If all three are equal, RCU is idle. */
60};
61
62static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
63 .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
64 .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
65 .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
66 .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
67};
68
69static int rcu_preempted_readers_exp(void);
70static void rcu_report_exp_done(void);
71
72/*
73 * Return true if the CPU has not yet responded to the current grace period.
74 */
75static int rcu_cpu_blocking_cur_gp(void)
76{
77 return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
78}
79
80/*
81 * Check for a running RCU reader. Because there is only one CPU,
82 * there can be but one running RCU reader at a time. ;-)
83 */
84static int rcu_preempt_running_reader(void)
85{
86 return current->rcu_read_lock_nesting;
87}
88
89/*
90 * Check for preempted RCU readers blocking any grace period.
91 * If the caller needs a reliable answer, it must disable hard irqs.
92 */
93static int rcu_preempt_blocked_readers_any(void)
94{
95 return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
96}
97
98/*
99 * Check for preempted RCU readers blocking the current grace period.
100 * If the caller needs a reliable answer, it must disable hard irqs.
101 */
102static int rcu_preempt_blocked_readers_cgp(void)
103{
104 return rcu_preempt_ctrlblk.gp_tasks != NULL;
105}
106
107/*
108 * Return true if another preemptible-RCU grace period is needed.
109 */
110static int rcu_preempt_needs_another_gp(void)
111{
112 return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
113}
114
115/*
116 * Return true if a preemptible-RCU grace period is in progress.
117 * The caller must disable hardirqs.
118 */
119static int rcu_preempt_gp_in_progress(void)
120{
121 return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
122}
123
124/*
125 * Record a preemptible-RCU quiescent state for the specified CPU. Note
126 * that this just means that the task currently running on the CPU is
127 * in a quiescent state. There might be any number of tasks blocked
128 * while in an RCU read-side critical section.
129 *
130 * Unlike the other rcu_*_qs() functions, callers to this function
131 * must disable irqs in order to protect the assignment to
132 * ->rcu_read_unlock_special.
133 *
134 * Because this is a single-CPU implementation, the only way a grace
135 * period can end is if the CPU is in a quiescent state. The reason is
136 * that a blocked preemptible-RCU reader can exit its critical section
137 * only if the CPU is running it at the time. Therefore, when the
138 * last task blocking the current grace period exits its RCU read-side
139 * critical section, neither the CPU nor blocked tasks will be stopping
140 * the current grace period. (In contrast, SMP implementations
141 * might have CPUs running in RCU read-side critical sections that
142 * block later grace periods -- but this is not possible given only
143 * one CPU.)
144 */
145static void rcu_preempt_cpu_qs(void)
146{
147 /* Record both CPU and task as having responded to current GP. */
148 rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
149 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
150
151 /*
152 * If there is no GP, or if blocked readers are still blocking GP,
153 * then there is nothing more to do.
154 */
155 if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
156 return;
157
158 /* Advance callbacks. */
159 rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
160 rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
161 rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;
162
163 /* If there are no blocked readers, next GP is done instantly. */
164 if (!rcu_preempt_blocked_readers_any())
165 rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
166
167 /* If there are done callbacks, make RCU_SOFTIRQ process them. */
168 if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
169 raise_softirq(RCU_SOFTIRQ);
170}
171
172/*
173 * Start a new RCU grace period if warranted. Hard irqs must be disabled.
174 */
175static void rcu_preempt_start_gp(void)
176{
177 if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {
178
179 /* Official start of GP. */
180 rcu_preempt_ctrlblk.gpnum++;
181
182 /* Any blocked RCU readers block new GP. */
183 if (rcu_preempt_blocked_readers_any())
184 rcu_preempt_ctrlblk.gp_tasks =
185 rcu_preempt_ctrlblk.blkd_tasks.next;
186
187 /* If there is no running reader, CPU is done with GP. */
188 if (!rcu_preempt_running_reader())
189 rcu_preempt_cpu_qs();
190 }
191}
192
193/*
194 * We have entered the scheduler, and the current task might soon be
195 * context-switched away from. If this task is in an RCU read-side
196 * critical section, we will no longer be able to rely on the CPU to
197 * record that fact, so we enqueue the task on the blkd_tasks list.
198 * If the task started after the current grace period began, as recorded
 199 * by ->gpcpu, we enqueue at the beginning of the list. Otherwise,
 200 * we enqueue it before the element referenced by ->gp_tasks (or at the tail if
201 * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
202 * The task will dequeue itself when it exits the outermost enclosing
203 * RCU read-side critical section. Therefore, the current grace period
204 * cannot be permitted to complete until the ->gp_tasks pointer becomes
205 * NULL.
206 *
207 * Caller must disable preemption.
208 */
209void rcu_preempt_note_context_switch(void)
210{
211 struct task_struct *t = current;
212 unsigned long flags;
213
214 local_irq_save(flags); /* must exclude scheduler_tick(). */
215 if (rcu_preempt_running_reader() &&
216 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
217
218 /* Possibly blocking in an RCU read-side critical section. */
219 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
220
221 /*
222 * If this CPU has already checked in, then this task
223 * will hold up the next grace period rather than the
224 * current grace period. Queue the task accordingly.
225 * If the task is queued for the current grace period
226 * (i.e., this CPU has not yet passed through a quiescent
227 * state for the current grace period), then as long
228 * as that task remains queued, the current grace period
229 * cannot end.
230 */
231 list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
232 if (rcu_cpu_blocking_cur_gp())
233 rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
234 }
235
236 /*
237 * Either we were not in an RCU read-side critical section to
238 * begin with, or we have now recorded that critical section
239 * globally. Either way, we can now note a quiescent state
240 * for this CPU. Again, if we were in an RCU read-side critical
241 * section, and if that critical section was blocking the current
242 * grace period, then the fact that the task has been enqueued
243 * means that current grace period continues to be blocked.
244 */
245 rcu_preempt_cpu_qs();
246 local_irq_restore(flags);
247}
248
249/*
250 * Tiny-preemptible RCU implementation for rcu_read_lock().
251 * Just increment ->rcu_read_lock_nesting, shared state will be updated
252 * if we block.
253 */
254void __rcu_read_lock(void)
255{
256 current->rcu_read_lock_nesting++;
257 barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
258}
259EXPORT_SYMBOL_GPL(__rcu_read_lock);
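/*
 * Editor's sketch, not part of the patch: because __rcu_read_lock()
 * above only increments ->rcu_read_lock_nesting, read-side critical
 * sections nest freely and only the outermost rcu_read_unlock() can
 * trigger the special-case processing defined below.
 */
static void nesting_sketch(void)
{
	rcu_read_lock();
	rcu_read_lock();	/* nesting == 2, still one critical section */
	rcu_read_unlock();	/* nesting == 1, nothing special happens */
	rcu_read_unlock();	/* nesting == 0, special cases handled */
}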
260
261/*
262 * Handle special cases during rcu_read_unlock(), such as needing to
263 * notify RCU core processing or task having blocked during the RCU
264 * read-side critical section.
265 */
266static void rcu_read_unlock_special(struct task_struct *t)
267{
268 int empty;
269 int empty_exp;
270 unsigned long flags;
271 struct list_head *np;
272 int special;
273
274 /*
275 * NMI handlers cannot block and cannot safely manipulate state.
276 * They therefore cannot possibly be special, so just leave.
277 */
278 if (in_nmi())
279 return;
280
281 local_irq_save(flags);
282
283 /*
284 * If RCU core is waiting for this CPU to exit critical section,
285 * let it know that we have done so.
286 */
287 special = t->rcu_read_unlock_special;
288 if (special & RCU_READ_UNLOCK_NEED_QS)
289 rcu_preempt_cpu_qs();
290
291 /* Hardware IRQ handlers cannot block. */
292 if (in_irq()) {
293 local_irq_restore(flags);
294 return;
295 }
296
297 /* Clean up if blocked during RCU read-side critical section. */
298 if (special & RCU_READ_UNLOCK_BLOCKED) {
299 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
300
301 /*
302 * Remove this task from the ->blkd_tasks list and adjust
303 * any pointers that might have been referencing it.
304 */
305 empty = !rcu_preempt_blocked_readers_cgp();
306 empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
307 np = t->rcu_node_entry.next;
308 if (np == &rcu_preempt_ctrlblk.blkd_tasks)
309 np = NULL;
310 list_del(&t->rcu_node_entry);
311 if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
312 rcu_preempt_ctrlblk.gp_tasks = np;
313 if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
314 rcu_preempt_ctrlblk.exp_tasks = np;
315 INIT_LIST_HEAD(&t->rcu_node_entry);
316
317 /*
318 * If this was the last task on the current list, and if
319 * we aren't waiting on the CPU, report the quiescent state
320 * and start a new grace period if needed.
321 */
322 if (!empty && !rcu_preempt_blocked_readers_cgp()) {
323 rcu_preempt_cpu_qs();
324 rcu_preempt_start_gp();
325 }
326
327 /*
328 * If this was the last task on the expedited lists,
 329 * then we need to wake up the waiting task.
330 */
331 if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
332 rcu_report_exp_done();
333 }
334 local_irq_restore(flags);
335}
336
337/*
338 * Tiny-preemptible RCU implementation for rcu_read_unlock().
339 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
340 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
341 * invoke rcu_read_unlock_special() to clean up after a context switch
342 * in an RCU read-side critical section and other special cases.
343 */
344void __rcu_read_unlock(void)
345{
346 struct task_struct *t = current;
347
348 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
349 --t->rcu_read_lock_nesting;
350 barrier(); /* decrement before load of ->rcu_read_unlock_special */
351 if (t->rcu_read_lock_nesting == 0 &&
352 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
353 rcu_read_unlock_special(t);
354#ifdef CONFIG_PROVE_LOCKING
355 WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
356#endif /* #ifdef CONFIG_PROVE_LOCKING */
357}
358EXPORT_SYMBOL_GPL(__rcu_read_unlock);
359
360/*
361 * Check for a quiescent state from the current CPU. When a task blocks,
362 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
363 * checked elsewhere. This is called from the scheduling-clock interrupt.
364 *
365 * Caller must disable hard irqs.
366 */
367static void rcu_preempt_check_callbacks(void)
368{
369 struct task_struct *t = current;
370
371 if (rcu_preempt_gp_in_progress() &&
372 (!rcu_preempt_running_reader() ||
373 !rcu_cpu_blocking_cur_gp()))
374 rcu_preempt_cpu_qs();
375 if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
376 rcu_preempt_ctrlblk.rcb.donetail)
377 raise_softirq(RCU_SOFTIRQ);
378 if (rcu_preempt_gp_in_progress() &&
379 rcu_cpu_blocking_cur_gp() &&
380 rcu_preempt_running_reader())
381 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
382}
383
384/*
385 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
386 * update, so this is invoked from __rcu_process_callbacks() to
387 * handle that case. Of course, it is invoked for all flavors of
388 * RCU, but RCU callbacks can appear only on one of the lists, and
389 * neither ->nexttail nor ->donetail can possibly be NULL, so there
390 * is no need for an explicit check.
391 */
392static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
393{
394 if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
395 rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
396}
397
398/*
399 * Process callbacks for preemptible RCU.
400 */
401static void rcu_preempt_process_callbacks(void)
402{
403 __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
404}
405
406/*
 407 * Queue a preemptible-RCU callback for invocation after a grace period.
408 */
409void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
410{
411 unsigned long flags;
412
413 debug_rcu_head_queue(head);
414 head->func = func;
415 head->next = NULL;
416
417 local_irq_save(flags);
418 *rcu_preempt_ctrlblk.nexttail = head;
419 rcu_preempt_ctrlblk.nexttail = &head->next;
420 rcu_preempt_start_gp(); /* checks to see if GP needed. */
421 local_irq_restore(flags);
422}
423EXPORT_SYMBOL_GPL(call_rcu);
424
425void rcu_barrier(void)
426{
427 struct rcu_synchronize rcu;
428
429 init_rcu_head_on_stack(&rcu.head);
430 init_completion(&rcu.completion);
431 /* Will wake me after RCU finished. */
432 call_rcu(&rcu.head, wakeme_after_rcu);
433 /* Wait for it. */
434 wait_for_completion(&rcu.completion);
435 destroy_rcu_head_on_stack(&rcu.head);
436}
437EXPORT_SYMBOL_GPL(rcu_barrier);
438
439/*
440 * synchronize_rcu - wait until a grace period has elapsed.
441 *
442 * Control will return to the caller some time after a full grace
443 * period has elapsed, in other words after all currently executing RCU
444 * read-side critical sections have completed. RCU read-side critical
445 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
446 * and may be nested.
447 */
448void synchronize_rcu(void)
449{
450#ifdef CONFIG_DEBUG_LOCK_ALLOC
451 if (!rcu_scheduler_active)
452 return;
453#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
454
455 WARN_ON_ONCE(rcu_preempt_running_reader());
456 if (!rcu_preempt_blocked_readers_any())
457 return;
458
459 /* Once we get past the fastpath checks, same code as rcu_barrier(). */
460 rcu_barrier();
461}
462EXPORT_SYMBOL_GPL(synchronize_rcu);
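/*
 * Editor's sketch of the update-side pattern that the synchronize_rcu()
 * above serves; "my_obj" and its list linkage are hypothetical:
 */
struct my_obj {
	struct list_head list;
	int payload;
};

static void my_obj_remove(struct my_obj *p)
{
	list_del_rcu(&p->list);	/* unlink; readers may still hold p */
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(p);		/* no reader can reach p any longer */
}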
463
464static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
465static unsigned long sync_rcu_preempt_exp_count;
466static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
467
468/*
469 * Return non-zero if there are any tasks in RCU read-side critical
470 * sections blocking the current preemptible-RCU expedited grace period.
471 * If there is no preemptible-RCU expedited grace period currently in
472 * progress, returns zero unconditionally.
473 */
474static int rcu_preempted_readers_exp(void)
475{
476 return rcu_preempt_ctrlblk.exp_tasks != NULL;
477}
478
479/*
480 * Report the exit from RCU read-side critical section for the last task
481 * that queued itself during or before the current expedited preemptible-RCU
482 * grace period.
483 */
484static void rcu_report_exp_done(void)
485{
486 wake_up(&sync_rcu_preempt_exp_wq);
487}
488
489/*
490 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
 491 * is to rely on the fact that there is but one CPU, and that it is
492 * illegal for a task to invoke synchronize_rcu_expedited() while in a
493 * preemptible-RCU read-side critical section. Therefore, any such
494 * critical sections must correspond to blocked tasks, which must therefore
495 * be on the ->blkd_tasks list. So just record the current head of the
496 * list in the ->exp_tasks pointer, and wait for all tasks including and
497 * after the task pointed to by ->exp_tasks to drain.
498 */
499void synchronize_rcu_expedited(void)
500{
501 unsigned long flags;
502 struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
503 unsigned long snap;
504
505 barrier(); /* ensure prior action seen before grace period. */
506
507 WARN_ON_ONCE(rcu_preempt_running_reader());
508
509 /*
510 * Acquire lock so that there is only one preemptible RCU grace
511 * period in flight. Of course, if someone does the expedited
512 * grace period for us while we are acquiring the lock, just leave.
513 */
514 snap = sync_rcu_preempt_exp_count + 1;
515 mutex_lock(&sync_rcu_preempt_exp_mutex);
516 if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
517 goto unlock_mb_ret; /* Others did our work for us. */
518
519 local_irq_save(flags);
520
521 /*
522 * All RCU readers have to already be on blkd_tasks because
523 * we cannot legally be executing in an RCU read-side critical
524 * section.
525 */
526
527 /* Snapshot current head of ->blkd_tasks list. */
528 rpcp->exp_tasks = rpcp->blkd_tasks.next;
529 if (rpcp->exp_tasks == &rpcp->blkd_tasks)
530 rpcp->exp_tasks = NULL;
531 local_irq_restore(flags);
532
533 /* Wait for tail of ->blkd_tasks list to drain. */
534 if (rcu_preempted_readers_exp())
535 wait_event(sync_rcu_preempt_exp_wq,
536 !rcu_preempted_readers_exp());
537
538 /* Clean up and exit. */
539 barrier(); /* ensure expedited GP seen before counter increment. */
540 sync_rcu_preempt_exp_count++;
541unlock_mb_ret:
542 mutex_unlock(&sync_rcu_preempt_exp_mutex);
543 barrier(); /* ensure subsequent action seen after grace period. */
544}
545EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
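/*
 * Editor's note on the snapshot/counter idiom used above: the counter
 * is sampled before mutex_lock(), and ULONG_CMP_LT() compares modulo
 * ULONG_MAX, so counter wrap is harmless.  A stand-alone rendition with
 * hypothetical names:
 */
static unsigned long my_seq;
static DEFINE_MUTEX(my_seq_mutex);

static void my_do_once_per_batch(void (*work)(void))
{
	unsigned long snap = my_seq + 1;

	mutex_lock(&my_seq_mutex);
	if (!ULONG_CMP_LT(snap, my_seq)) {	/* nobody did it for us */
		work();
		my_seq++;
	}
	mutex_unlock(&my_seq_mutex);
}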
546
547/*
548 * Does preemptible RCU need the CPU to stay out of dynticks mode?
549 */
550int rcu_preempt_needs_cpu(void)
551{
552 if (!rcu_preempt_running_reader())
553 rcu_preempt_cpu_qs();
554 return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
555}
556
557/*
 558 * Check for a task exiting while in a preemptible-RCU read-side
559 * critical section, clean up if so. No need to issue warnings,
560 * as debug_check_no_locks_held() already does this if lockdep
561 * is enabled.
562 */
563void exit_rcu(void)
564{
565 struct task_struct *t = current;
566
567 if (t->rcu_read_lock_nesting == 0)
568 return;
569 t->rcu_read_lock_nesting = 1;
570 rcu_read_unlock();
571}
572
573#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
574
575/*
576 * Because preemptible RCU does not exist, it never has any callbacks
577 * to check.
578 */
579static void rcu_preempt_check_callbacks(void)
580{
581}
582
583/*
584 * Because preemptible RCU does not exist, it never has any callbacks
585 * to remove.
586 */
587static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
588{
589}
590
591/*
592 * Because preemptible RCU does not exist, it never has any callbacks
593 * to process.
594 */
595static void rcu_preempt_process_callbacks(void)
596{
597}
598
599#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
600
25#ifdef CONFIG_DEBUG_LOCK_ALLOC 601#ifdef CONFIG_DEBUG_LOCK_ALLOC
26 602
27#include <linux/kernel_stat.h> 603#include <linux/kernel_stat.h>
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 2e2726d790b9..9d8e8fb2515f 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -120,7 +120,7 @@ struct rcu_torture {
120}; 120};
121 121
122static LIST_HEAD(rcu_torture_freelist); 122static LIST_HEAD(rcu_torture_freelist);
123static struct rcu_torture *rcu_torture_current; 123static struct rcu_torture __rcu *rcu_torture_current;
124static long rcu_torture_current_version; 124static long rcu_torture_current_version;
125static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 125static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
126static DEFINE_SPINLOCK(rcu_torture_lock); 126static DEFINE_SPINLOCK(rcu_torture_lock);
@@ -153,8 +153,10 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
153#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ 153#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
154#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ 154#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
155static int fullstop = FULLSTOP_RMMOD; 155static int fullstop = FULLSTOP_RMMOD;
156DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */ 156/*
157 /* of kthreads. */ 157 * Protect fullstop transitions and spawning of kthreads.
158 */
159static DEFINE_MUTEX(fullstop_mutex);
158 160
159/* 161/*
160 * Detect and respond to a system shutdown. 162 * Detect and respond to a system shutdown.
@@ -303,6 +305,10 @@ static void rcu_read_delay(struct rcu_random_state *rrsp)
303 mdelay(longdelay_ms); 305 mdelay(longdelay_ms);
304 if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) 306 if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
305 udelay(shortdelay_us); 307 udelay(shortdelay_us);
308#ifdef CONFIG_PREEMPT
309 if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
310 preempt_schedule(); /* No QS if preempt_disable() in effect */
311#endif
306} 312}
307 313
308static void rcu_torture_read_unlock(int idx) __releases(RCU) 314static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -536,6 +542,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
536 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); 542 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
537 if (!delay) 543 if (!delay)
538 schedule_timeout_interruptible(longdelay); 544 schedule_timeout_interruptible(longdelay);
545 else
546 rcu_read_delay(rrsp);
539} 547}
540 548
541static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) 549static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
@@ -731,7 +739,8 @@ rcu_torture_writer(void *arg)
731 continue; 739 continue;
732 rp->rtort_pipe_count = 0; 740 rp->rtort_pipe_count = 0;
733 udelay(rcu_random(&rand) & 0x3ff); 741 udelay(rcu_random(&rand) & 0x3ff);
734 old_rp = rcu_torture_current; 742 old_rp = rcu_dereference_check(rcu_torture_current,
743 current == writer_task);
735 rp->rtort_mbtest = 1; 744 rp->rtort_mbtest = 1;
736 rcu_assign_pointer(rcu_torture_current, rp); 745 rcu_assign_pointer(rcu_torture_current, rp);
737 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 746 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
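The rcutorture hunks tag rcu_torture_current with __rcu and replace its raw load with rcu_dereference_check() (legal for the writer task itself), so that the SPARSE_RCU_POINTER option added at the end of this series can flag unmarked accesses. A sketch of what the annotation buys; gp and struct foo are hypothetical:

	struct foo { int a; };
	static struct foo __rcu *gp;

	static int read_a(void)
	{
		struct foo *p;
		int ret = 0;

		rcu_read_lock();
		p = rcu_dereference(gp);	/* marked access: sparse-clean */
		if (p)
			ret = p->a;
		rcu_read_unlock();
		return ret;	/* a bare "p = gp;" would draw a sparse warning */
	}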
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d5bc43976c5a..ccdc04c47981 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -143,6 +143,11 @@ module_param(blimit, int, 0);
143module_param(qhimark, int, 0); 143module_param(qhimark, int, 0);
144module_param(qlowmark, int, 0); 144module_param(qlowmark, int, 0);
145 145
146#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
147int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
148module_param(rcu_cpu_stall_suppress, int, 0644);
149#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
150
146static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 151static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
147static int rcu_pending(int cpu); 152static int rcu_pending(int cpu);
148 153
@@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
450 455
451#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 456#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
452 457
453int rcu_cpu_stall_panicking __read_mostly; 458int rcu_cpu_stall_suppress __read_mostly;
454 459
455static void record_gp_stall_check_time(struct rcu_state *rsp) 460static void record_gp_stall_check_time(struct rcu_state *rsp)
456{ 461{
@@ -482,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
482 rcu_print_task_stall(rnp); 487 rcu_print_task_stall(rnp);
483 raw_spin_unlock_irqrestore(&rnp->lock, flags); 488 raw_spin_unlock_irqrestore(&rnp->lock, flags);
484 489
485 /* OK, time to rat on our buddy... */ 490 /*
486 491 * OK, time to rat on our buddy...
492 * See Documentation/RCU/stallwarn.txt for info on how to debug
493 * RCU CPU stall warnings.
494 */
487 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", 495 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
488 rsp->name); 496 rsp->name);
489 rcu_for_each_leaf_node(rsp, rnp) { 497 rcu_for_each_leaf_node(rsp, rnp) {
@@ -512,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
512 unsigned long flags; 520 unsigned long flags;
513 struct rcu_node *rnp = rcu_get_root(rsp); 521 struct rcu_node *rnp = rcu_get_root(rsp);
514 522
523 /*
524 * OK, time to rat on ourselves...
525 * See Documentation/RCU/stallwarn.txt for info on how to debug
526 * RCU CPU stall warnings.
527 */
515 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", 528 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
516 rsp->name, smp_processor_id(), jiffies - rsp->gp_start); 529 rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
517 trigger_all_cpu_backtrace(); 530 trigger_all_cpu_backtrace();
@@ -530,11 +543,11 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
530 long delta; 543 long delta;
531 struct rcu_node *rnp; 544 struct rcu_node *rnp;
532 545
533 if (rcu_cpu_stall_panicking) 546 if (rcu_cpu_stall_suppress)
534 return; 547 return;
535 delta = jiffies - rsp->jiffies_stall; 548 delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
536 rnp = rdp->mynode; 549 rnp = rdp->mynode;
537 if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { 550 if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
538 551
539 /* We haven't checked in, so go dump stack. */ 552 /* We haven't checked in, so go dump stack. */
540 print_cpu_stall(rsp); 553 print_cpu_stall(rsp);
@@ -548,10 +561,26 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
548 561
549static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) 562static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
550{ 563{
551 rcu_cpu_stall_panicking = 1; 564 rcu_cpu_stall_suppress = 1;
552 return NOTIFY_DONE; 565 return NOTIFY_DONE;
553} 566}
554 567
568/**
569 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
570 *
571 * Set the stall-warning timeout way off into the future, thus preventing
572 * any RCU CPU stall-warning messages from appearing in the current set of
573 * RCU grace periods.
574 *
575 * The caller must disable hard irqs.
576 */
577void rcu_cpu_stall_reset(void)
578{
579 rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
580 rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
581 rcu_preempt_stall_reset();
582}
583
555static struct notifier_block rcu_panic_block = { 584static struct notifier_block rcu_panic_block = {
556 .notifier_call = rcu_panic, 585 .notifier_call = rcu_panic,
557}; 586};
@@ -571,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
571{ 600{
572} 601}
573 602
603void rcu_cpu_stall_reset(void)
604{
605}
606
574static void __init check_cpu_stall_init(void) 607static void __init check_cpu_stall_init(void)
575{ 608{
576} 609}
@@ -712,7 +745,7 @@ static void
712rcu_start_gp(struct rcu_state *rsp, unsigned long flags) 745rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
713 __releases(rcu_get_root(rsp)->lock) 746 __releases(rcu_get_root(rsp)->lock)
714{ 747{
715 struct rcu_data *rdp = rsp->rda[smp_processor_id()]; 748 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
716 struct rcu_node *rnp = rcu_get_root(rsp); 749 struct rcu_node *rnp = rcu_get_root(rsp);
717 750
718 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) { 751 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
@@ -960,7 +993,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
960static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) 993static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
961{ 994{
962 int i; 995 int i;
963 struct rcu_data *rdp = rsp->rda[smp_processor_id()]; 996 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
964 997
965 if (rdp->nxtlist == NULL) 998 if (rdp->nxtlist == NULL)
966 return; /* irqs disabled, so comparison is stable. */ 999 return; /* irqs disabled, so comparison is stable. */
@@ -971,6 +1004,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
971 for (i = 0; i < RCU_NEXT_SIZE; i++) 1004 for (i = 0; i < RCU_NEXT_SIZE; i++)
972 rdp->nxttail[i] = &rdp->nxtlist; 1005 rdp->nxttail[i] = &rdp->nxtlist;
973 rsp->orphan_qlen += rdp->qlen; 1006 rsp->orphan_qlen += rdp->qlen;
1007 rdp->n_cbs_orphaned += rdp->qlen;
974 rdp->qlen = 0; 1008 rdp->qlen = 0;
975 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 1009 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
976} 1010}
@@ -984,7 +1018,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
984 struct rcu_data *rdp; 1018 struct rcu_data *rdp;
985 1019
986 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1020 raw_spin_lock_irqsave(&rsp->onofflock, flags);
987 rdp = rsp->rda[smp_processor_id()]; 1021 rdp = this_cpu_ptr(rsp->rda);
988 if (rsp->orphan_cbs_list == NULL) { 1022 if (rsp->orphan_cbs_list == NULL) {
989 raw_spin_unlock_irqrestore(&rsp->onofflock, flags); 1023 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
990 return; 1024 return;
@@ -992,6 +1026,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
992 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; 1026 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
993 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; 1027 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
994 rdp->qlen += rsp->orphan_qlen; 1028 rdp->qlen += rsp->orphan_qlen;
1029 rdp->n_cbs_adopted += rsp->orphan_qlen;
995 rsp->orphan_cbs_list = NULL; 1030 rsp->orphan_cbs_list = NULL;
996 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; 1031 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
997 rsp->orphan_qlen = 0; 1032 rsp->orphan_qlen = 0;
@@ -1007,7 +1042,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1007 unsigned long flags; 1042 unsigned long flags;
1008 unsigned long mask; 1043 unsigned long mask;
1009 int need_report = 0; 1044 int need_report = 0;
1010 struct rcu_data *rdp = rsp->rda[cpu]; 1045 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1011 struct rcu_node *rnp; 1046 struct rcu_node *rnp;
1012 1047
1013 /* Exclude any attempts to start a new grace period. */ 1048 /* Exclude any attempts to start a new grace period. */
@@ -1123,6 +1158,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1123 1158
1124 /* Update count, and requeue any remaining callbacks. */ 1159 /* Update count, and requeue any remaining callbacks. */
1125 rdp->qlen -= count; 1160 rdp->qlen -= count;
1161 rdp->n_cbs_invoked += count;
1126 if (list != NULL) { 1162 if (list != NULL) {
1127 *tail = rdp->nxtlist; 1163 *tail = rdp->nxtlist;
1128 rdp->nxtlist = list; 1164 rdp->nxtlist = list;
@@ -1226,7 +1262,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
1226 cpu = rnp->grplo; 1262 cpu = rnp->grplo;
1227 bit = 1; 1263 bit = 1;
1228 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { 1264 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1229 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1265 if ((rnp->qsmask & bit) != 0 &&
1266 f(per_cpu_ptr(rsp->rda, cpu)))
1230 mask |= bit; 1267 mask |= bit;
1231 } 1268 }
1232 if (mask != 0) { 1269 if (mask != 0) {
@@ -1402,7 +1439,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1402 * a quiescent state betweentimes. 1439 * a quiescent state betweentimes.
1403 */ 1440 */
1404 local_irq_save(flags); 1441 local_irq_save(flags);
1405 rdp = rsp->rda[smp_processor_id()]; 1442 rdp = this_cpu_ptr(rsp->rda);
1406 rcu_process_gp_end(rsp, rdp); 1443 rcu_process_gp_end(rsp, rdp);
1407 check_for_new_grace_period(rsp, rdp); 1444 check_for_new_grace_period(rsp, rdp);
1408 1445
@@ -1701,7 +1738,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1701{ 1738{
1702 unsigned long flags; 1739 unsigned long flags;
1703 int i; 1740 int i;
1704 struct rcu_data *rdp = rsp->rda[cpu]; 1741 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1705 struct rcu_node *rnp = rcu_get_root(rsp); 1742 struct rcu_node *rnp = rcu_get_root(rsp);
1706 1743
1707 /* Set up local state, ensuring consistent view of global state. */ 1744 /* Set up local state, ensuring consistent view of global state. */
@@ -1729,7 +1766,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1729{ 1766{
1730 unsigned long flags; 1767 unsigned long flags;
1731 unsigned long mask; 1768 unsigned long mask;
1732 struct rcu_data *rdp = rsp->rda[cpu]; 1769 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1733 struct rcu_node *rnp = rcu_get_root(rsp); 1770 struct rcu_node *rnp = rcu_get_root(rsp);
1734 1771
1735 /* Set up local state, ensuring consistent view of global state. */ 1772 /* Set up local state, ensuring consistent view of global state. */
@@ -1865,7 +1902,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
1865/* 1902/*
1866 * Helper function for rcu_init() that initializes one rcu_state structure. 1903 * Helper function for rcu_init() that initializes one rcu_state structure.
1867 */ 1904 */
1868static void __init rcu_init_one(struct rcu_state *rsp) 1905static void __init rcu_init_one(struct rcu_state *rsp,
1906 struct rcu_data __percpu *rda)
1869{ 1907{
1870 static char *buf[] = { "rcu_node_level_0", 1908 static char *buf[] = { "rcu_node_level_0",
1871 "rcu_node_level_1", 1909 "rcu_node_level_1",
@@ -1918,37 +1956,23 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1918 } 1956 }
1919 } 1957 }
1920 1958
1959 rsp->rda = rda;
1921 rnp = rsp->level[NUM_RCU_LVLS - 1]; 1960 rnp = rsp->level[NUM_RCU_LVLS - 1];
1922 for_each_possible_cpu(i) { 1961 for_each_possible_cpu(i) {
1923 while (i > rnp->grphi) 1962 while (i > rnp->grphi)
1924 rnp++; 1963 rnp++;
1925 rsp->rda[i]->mynode = rnp; 1964 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
1926 rcu_boot_init_percpu_data(i, rsp); 1965 rcu_boot_init_percpu_data(i, rsp);
1927 } 1966 }
1928} 1967}
1929 1968
1930/*
1931 * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
1932 * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
1933 * structure.
1934 */
1935#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1936do { \
1937 int i; \
1938 \
1939 for_each_possible_cpu(i) { \
1940 (rsp)->rda[i] = &per_cpu(rcu_data, i); \
1941 } \
1942 rcu_init_one(rsp); \
1943} while (0)
1944
1945void __init rcu_init(void) 1969void __init rcu_init(void)
1946{ 1970{
1947 int cpu; 1971 int cpu;
1948 1972
1949 rcu_bootup_announce(); 1973 rcu_bootup_announce();
1950 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); 1974 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
1951 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); 1975 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
1952 __rcu_init_preempt(); 1976 __rcu_init_preempt();
1953 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1977 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1954 1978
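Throughout rcutree.c, the NR_CPUS-sized rda[] pointer array becomes a genuine percpu pointer: rsp->rda[cpu] turns into per_cpu_ptr(rsp->rda, cpu), the running CPU's slot is this_cpu_ptr(rsp->rda), and the RCU_INIT_FLAVOR() macro disappears in favor of passing the percpu variable straight to rcu_init_one(). A minimal sketch of the idiom, with hypothetical names:

	struct my_pcpu { unsigned long count; };
	static DEFINE_PER_CPU(struct my_pcpu, my_pcpu_data);

	struct my_state {
		struct my_pcpu __percpu *pd;
	};

	static void my_init(struct my_state *sp)
	{
		sp->pd = &my_pcpu_data;		/* record the percpu base once */
	}

	static struct my_pcpu *my_get(struct my_state *sp, int cpu)
	{
		return per_cpu_ptr(sp->pd, cpu);	/* slot for any CPU */
	}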
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 14c040b18ed0..91d4170c5c13 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -202,6 +202,9 @@ struct rcu_data {
202 long qlen; /* # of queued callbacks */ 202 long qlen; /* # of queued callbacks */
203 long qlen_last_fqs_check; 203 long qlen_last_fqs_check;
204 /* qlen at last check for QS forcing */ 204 /* qlen at last check for QS forcing */
205 unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
206 unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */
207 unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */
205 unsigned long n_force_qs_snap; 208 unsigned long n_force_qs_snap;
206 /* did other CPU force QS recently? */ 209 /* did other CPU force QS recently? */
207 long blimit; /* Upper limit on a processed batch */ 210 long blimit; /* Upper limit on a processed batch */
@@ -254,19 +257,23 @@ struct rcu_data {
254#define RCU_STALL_DELAY_DELTA 0 257#define RCU_STALL_DELAY_DELTA 0
255#endif 258#endif
256 259
257#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA) 260#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
261 RCU_STALL_DELAY_DELTA)
258 /* for rsp->jiffies_stall */ 262 /* for rsp->jiffies_stall */
259#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA) 263#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
260 /* for rsp->jiffies_stall */ 264 /* for rsp->jiffies_stall */
261#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ 265#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
262 /* to take at least one */ 266 /* to take at least one */
263 /* scheduling clock irq */ 267 /* scheduling clock irq */
264 /* before ratting on them. */ 268 /* before ratting on them. */
265 269
266#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 270#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE
271#define RCU_CPU_STALL_SUPPRESS_INIT 0
272#else
273#define RCU_CPU_STALL_SUPPRESS_INIT 1
274#endif
267 275
268#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) 276#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
269#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
270 277
271/* 278/*
272 * RCU global state, including node hierarchy. This hierarchy is 279 * RCU global state, including node hierarchy. This hierarchy is
@@ -283,7 +290,7 @@ struct rcu_state {
283 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ 290 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
284 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ 291 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
285 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ 292 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
 286 struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ 293 struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */
287 294
288 /* The following fields are guarded by the root rcu_node's lock. */ 295 /* The following fields are guarded by the root rcu_node's lock. */
289 296
@@ -365,6 +372,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
365#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 372#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
366static void rcu_print_detail_task_stall(struct rcu_state *rsp); 373static void rcu_print_detail_task_stall(struct rcu_state *rsp);
367static void rcu_print_task_stall(struct rcu_node *rnp); 374static void rcu_print_task_stall(struct rcu_node *rnp);
375static void rcu_preempt_stall_reset(void);
368#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 376#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
369static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 377static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
370#ifdef CONFIG_HOTPLUG_CPU 378#ifdef CONFIG_HOTPLUG_CPU
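The rcutree.h hunk makes the stall-check interval configurable and derives the recheck interval from it. As a worked example (the value 10 is an assumption for illustration, not taken from the patch): with CONFIG_RCU_CPU_STALL_TIMEOUT=10 and RCU_STALL_DELAY_DELTA of zero,

	RCU_SECONDS_TILL_STALL_CHECK   = 10 * HZ
	RCU_SECONDS_TILL_STALL_RECHECK = 3 * (10 * HZ) + 30

which reproduces the old 10-second/30-second behavior plus a 30-jiffy margin. The ULONG_CMP_GE()/ULONG_CMP_LT() helpers being dropped from this header compare jiffies values modulo ULONG_MAX, which is also why rcu_cpu_stall_reset() in rcutree.c sets jiffies_stall to jiffies + ULONG_MAX / 2: that is the farthest future instant the wrap-safe comparison can represent.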
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0e4f420245d9..71a4147473f9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void)
57 printk(KERN_INFO 57 printk(KERN_INFO
58 "\tRCU-based detection of stalled CPUs is disabled.\n"); 58 "\tRCU-based detection of stalled CPUs is disabled.\n");
59#endif 59#endif
60#ifndef CONFIG_RCU_CPU_STALL_VERBOSE 60#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
61 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); 61 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
62#endif 62#endif
63#if NUM_RCU_LVL_4 != 0 63#if NUM_RCU_LVL_4 != 0
@@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu)
154 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { 154 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
155 155
156 /* Possibly blocking in an RCU read-side critical section. */ 156 /* Possibly blocking in an RCU read-side critical section. */
157 rdp = rcu_preempt_state.rda[cpu]; 157 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
158 rnp = rdp->mynode; 158 rnp = rdp->mynode;
159 raw_spin_lock_irqsave(&rnp->lock, flags); 159 raw_spin_lock_irqsave(&rnp->lock, flags);
160 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; 160 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu)
201 */ 201 */
202void __rcu_read_lock(void) 202void __rcu_read_lock(void)
203{ 203{
204 ACCESS_ONCE(current->rcu_read_lock_nesting)++; 204 current->rcu_read_lock_nesting++;
205 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ 205 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
206} 206}
207EXPORT_SYMBOL_GPL(__rcu_read_lock); 207EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -344,7 +344,9 @@ void __rcu_read_unlock(void)
344 struct task_struct *t = current; 344 struct task_struct *t = current;
345 345
346 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ 346 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
347 if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && 347 --t->rcu_read_lock_nesting;
348 barrier(); /* decrement before load of ->rcu_read_unlock_special */
349 if (t->rcu_read_lock_nesting == 0 &&
348 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) 350 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
349 rcu_read_unlock_special(t); 351 rcu_read_unlock_special(t);
350#ifdef CONFIG_PROVE_LOCKING 352#ifdef CONFIG_PROVE_LOCKING
@@ -417,6 +419,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
417 } 419 }
418} 420}
419 421
422/*
423 * Suppress preemptible RCU's CPU stall warnings by pushing the
424 * time of the next stall-warning message comfortably far into the
425 * future.
426 */
427static void rcu_preempt_stall_reset(void)
428{
429 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
430}
431
420#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 432#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
421 433
422/* 434/*
@@ -546,9 +558,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
546 * 558 *
547 * Control will return to the caller some time after a full grace 559 * Control will return to the caller some time after a full grace
548 * period has elapsed, in other words after all currently executing RCU 560 * period has elapsed, in other words after all currently executing RCU
549 * read-side critical sections have completed. RCU read-side critical 561 * read-side critical sections have completed. Note, however, that
550 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), 562 * upon return from synchronize_rcu(), the caller might well be executing
551 * and may be nested. 563 * concurrently with new RCU read-side critical sections that began while
564 * synchronize_rcu() was waiting. RCU read-side critical sections are
565 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
552 */ 566 */
553void synchronize_rcu(void) 567void synchronize_rcu(void)
554{ 568{
@@ -771,7 +785,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void)
771 */ 785 */
772static void __init __rcu_init_preempt(void) 786static void __init __rcu_init_preempt(void)
773{ 787{
774 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); 788 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
775} 789}
776 790
777/* 791/*
@@ -865,6 +879,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
865{ 879{
866} 880}
867 881
882/*
883 * Because preemptible RCU does not exist, there is no need to suppress
884 * its CPU stall warnings.
885 */
886static void rcu_preempt_stall_reset(void)
887{
888}
889
868#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 890#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
869 891
870/* 892/*
@@ -919,15 +941,6 @@ static void rcu_preempt_process_callbacks(void)
919} 941}
920 942
921/* 943/*
922 * In classic RCU, call_rcu() is just call_rcu_sched().
923 */
924void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
925{
926 call_rcu_sched(head, func);
927}
928EXPORT_SYMBOL_GPL(call_rcu);
929
930/*
931 * Wait for an rcu-preempt grace period, but make it happen quickly. 944 * Wait for an rcu-preempt grace period, but make it happen quickly.
 932 * But because preemptible RCU does not exist, map to rcu-sched. 945 * But because preemptible RCU does not exist, map to rcu-sched.
933 */ 946 */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 36c95b45738e..d15430b9d122 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -64,7 +64,9 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
64 rdp->dynticks_fqs); 64 rdp->dynticks_fqs);
65#endif /* #ifdef CONFIG_NO_HZ */ 65#endif /* #ifdef CONFIG_NO_HZ */
66 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 66 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
67 seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit); 67 seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit);
68 seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
69 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
68} 70}
69 71
70#define PRINT_RCU_DATA(name, func, m) \ 72#define PRINT_RCU_DATA(name, func, m) \
@@ -119,7 +121,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
119 rdp->dynticks_fqs); 121 rdp->dynticks_fqs);
120#endif /* #ifdef CONFIG_NO_HZ */ 122#endif /* #ifdef CONFIG_NO_HZ */
121 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 123 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
122 seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit); 124 seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit);
125 seq_printf(m, ",%lu,%lu,%lu\n",
126 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
123} 127}
124 128
125static int show_rcudata_csv(struct seq_file *m, void *unused) 129static int show_rcudata_csv(struct seq_file *m, void *unused)
@@ -128,7 +132,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
128#ifdef CONFIG_NO_HZ 132#ifdef CONFIG_NO_HZ
129 seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); 133 seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
130#endif /* #ifdef CONFIG_NO_HZ */ 134#endif /* #ifdef CONFIG_NO_HZ */
131 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); 135 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
132#ifdef CONFIG_TREE_PREEMPT_RCU 136#ifdef CONFIG_TREE_PREEMPT_RCU
133 seq_puts(m, "\"rcu_preempt:\"\n"); 137 seq_puts(m, "\"rcu_preempt:\"\n");
134 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); 138 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
@@ -262,7 +266,7 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
262 struct rcu_data *rdp; 266 struct rcu_data *rdp;
263 267
264 for_each_possible_cpu(cpu) { 268 for_each_possible_cpu(cpu) {
265 rdp = rsp->rda[cpu]; 269 rdp = per_cpu_ptr(rsp->rda, cpu);
266 if (rdp->beenonline) 270 if (rdp->beenonline)
267 print_one_rcu_pending(m, rdp); 271 print_one_rcu_pending(m, rdp);
268 } 272 }
diff --git a/kernel/sched.c b/kernel/sched.c
index c0d2067f3e0d..5a5cc33e4999 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4645,7 +4645,7 @@ recheck:
4645 } 4645 }
4646 4646
4647 if (user) { 4647 if (user) {
4648 retval = security_task_setscheduler(p, policy, param); 4648 retval = security_task_setscheduler(p);
4649 if (retval) 4649 if (retval)
4650 return retval; 4650 return retval;
4651 } 4651 }
@@ -4887,7 +4887,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4887 if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) 4887 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
4888 goto out_unlock; 4888 goto out_unlock;
4889 4889
4890 retval = security_task_setscheduler(p, 0, NULL); 4890 retval = security_task_setscheduler(p);
4891 if (retval) 4891 if (retval)
4892 goto out_unlock; 4892 goto out_unlock;
4893 4893
@@ -5337,7 +5337,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
5337 idle->se.exec_start = sched_clock(); 5337 idle->se.exec_start = sched_clock();
5338 5338
5339 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 5339 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
5340 /*
5341 * We're having a chicken and egg problem, even though we are
5342 * holding rq->lock, the cpu isn't yet set to this cpu so the
5343 * lockdep check in task_group() will fail.
5344 *
5345 * Similar case to sched_fork(). / Alternatively we could
5346 * use task_rq_lock() here and obtain the other rq->lock.
5347 *
5348 * Silence PROVE_RCU
5349 */
5350 rcu_read_lock();
5340 __set_task_cpu(idle, cpu); 5351 __set_task_cpu(idle, cpu);
5352 rcu_read_unlock();
5341 5353
5342 rq->curr = rq->idle = idle; 5354 rq->curr = rq->idle = idle;
5343#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 5355#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
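The init_idle() hunk wraps __set_task_cpu() in rcu_read_lock()/rcu_read_unlock() purely to silence PROVE_RCU: task_group() performs a lockdep-checked rcu_dereference(), and at this point in boot the condition it checks cannot yet hold even though the access is safe. A hedged sketch of the general quieting pattern; hypothetical_ptr, struct foo, and do_something_safe() are placeholders:

	static void quiet_access(void)
	{
		struct foo *p;

		rcu_read_lock();	/* makes rcu_read_lock_held() true */
		p = rcu_dereference(hypothetical_ptr);
		if (p)
			do_something_safe(p);
		rcu_read_unlock();
	}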
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index db3f674ca49d..5f996d36ac5d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3751,8 +3751,11 @@ static void task_fork_fair(struct task_struct *p)
3751 3751
3752 update_rq_clock(rq); 3752 update_rq_clock(rq);
3753 3753
3754 if (unlikely(task_cpu(p) != this_cpu)) 3754 if (unlikely(task_cpu(p) != this_cpu)) {
3755 rcu_read_lock();
3755 __set_task_cpu(p, this_cpu); 3756 __set_task_cpu(p, this_cpu);
3757 rcu_read_unlock();
3758 }
3756 3759
3757 update_curr(cfs_rq); 3760 update_curr(cfs_rq);
3758 3761
diff --git a/kernel/signal.c b/kernel/signal.c
index bded65187780..919562c3d6b7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2215,6 +2215,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2215#ifdef __ARCH_SI_TRAPNO 2215#ifdef __ARCH_SI_TRAPNO
2216 err |= __put_user(from->si_trapno, &to->si_trapno); 2216 err |= __put_user(from->si_trapno, &to->si_trapno);
2217#endif 2217#endif
2218#ifdef BUS_MCEERR_AO
2219 /*
2220 * Other callers might not initialize the si_lsb field,
 2221 * so check explicitly for the right codes here.
2222 */
2223 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2224 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2225#endif
2218 break; 2226 break;
2219 case __SI_CHLD: 2227 case __SI_CHLD:
2220 err |= __put_user(from->si_pid, &to->si_pid); 2228 err |= __put_user(from->si_pid, &to->si_pid);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2980da3fd509..c71e07500536 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -46,11 +46,9 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
46int __init_srcu_struct(struct srcu_struct *sp, const char *name, 46int __init_srcu_struct(struct srcu_struct *sp, const char *name,
47 struct lock_class_key *key) 47 struct lock_class_key *key)
48{ 48{
49#ifdef CONFIG_DEBUG_LOCK_ALLOC
50 /* Don't re-initialize a lock while it is held. */ 49 /* Don't re-initialize a lock while it is held. */
51 debug_check_no_locks_freed((void *)sp, sizeof(*sp)); 50 debug_check_no_locks_freed((void *)sp, sizeof(*sp));
52 lockdep_init_map(&sp->dep_map, name, key, 0); 51 lockdep_init_map(&sp->dep_map, name, key, 0);
53#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
54 return init_srcu_struct_fields(sp); 52 return init_srcu_struct_fields(sp);
55} 53}
56EXPORT_SYMBOL_GPL(__init_srcu_struct); 54EXPORT_SYMBOL_GPL(__init_srcu_struct);
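The srcu.c cleanup drops the CONFIG_DEBUG_LOCK_ALLOC guards, since both debug_check_no_locks_freed() and lockdep_init_map() already compile to no-ops when lockdep is disabled. For reference, a sketch of the initializer in normal use (standard SRCU API; my_srcu is hypothetical):

	static struct srcu_struct my_srcu;

	static int __init my_setup(void)
	{
		/* init_srcu_struct() expands to __init_srcu_struct()
		 * with a per-site lock_class_key under lockdep. */
		return init_srcu_struct(&my_srcu);
	}

	static void my_read_side(void)
	{
		int idx = srcu_read_lock(&my_srcu);
		/* ... dereference SRCU-protected data ... */
		srcu_read_unlock(&my_srcu, idx);
	}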
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f88552c6d227..3a45c224770f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2485,7 +2485,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2485 kbuf[left] = 0; 2485 kbuf[left] = 0;
2486 } 2486 }
2487 2487
2488 for (; left && vleft--; i++, min++, max++, first=0) { 2488 for (; left && vleft--; i++, first = 0) {
2489 unsigned long val; 2489 unsigned long val;
2490 2490
2491 if (write) { 2491 if (write) {
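Since min and max no longer step per element, a single bounds pair now applies to every entry of the vector; a hedged example of a table this handler would serve (all names hypothetical):

    static unsigned long example_min = 0, example_max = 100;
    static unsigned long example_vec[4];

    static struct ctl_table example_table[] = {
    	{
    		.procname	= "example_vec",
    		.data		= example_vec,
    		.maxlen		= sizeof(example_vec),	/* > sizeof(long): a vector */
    		.mode		= 0644,
    		.proc_handler	= proc_doulongvec_minmax,
    		.extra1		= &example_min,		/* one min for all elements */
    		.extra2		= &example_max,		/* one max for all elements */
    	},
    	{}
    };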
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 04cdcf72c827..10b90d8a03c4 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -143,15 +143,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
143 if (!table->maxlen) 143 if (!table->maxlen)
144 set_fail(&fail, table, "No maxlen"); 144 set_fail(&fail, table, "No maxlen");
145 } 145 }
146 if ((table->proc_handler == proc_doulongvec_minmax) ||
147 (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
148 if (table->maxlen > sizeof (unsigned long)) {
149 if (!table->extra1)
150 set_fail(&fail, table, "No min");
151 if (!table->extra2)
152 set_fail(&fail, table, "No max");
153 }
154 }
155#ifdef CONFIG_PROC_SYSCTL 146#ifdef CONFIG_PROC_SYSCTL
156 if (table->procname && !table->proc_handler) 147 if (table->procname && !table->proc_handler)
157 set_fail(&fail, table, "No proc_handler"); 148 set_fail(&fail, table, "No proc_handler");
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4e2f03410377..c5a632a669e1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta)
405#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 405#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
406 406
407/* Max number of timestamps that can fit on a page */ 407/* Max number of timestamps that can fit on a page */
408#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP) 408#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
409 409
410int ring_buffer_print_page_header(struct trace_seq *s) 410int ring_buffer_print_page_header(struct trace_seq *s)
411{ 411{
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index dc8e16824b51..bafba687a6d8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -196,7 +196,7 @@ static struct perf_event_attr wd_hw_attr = {
196}; 196};
197 197
198/* Callback function for perf event subsystem */ 198/* Callback function for perf event subsystem */
199void watchdog_overflow_callback(struct perf_event *event, int nmi, 199static void watchdog_overflow_callback(struct perf_event *event, int nmi,
200 struct perf_sample_data *data, 200 struct perf_sample_data *data,
201 struct pt_regs *regs) 201 struct pt_regs *regs)
202{ 202{
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e85d549b6eac..21ac83070a80 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -540,6 +540,23 @@ config PROVE_RCU_REPEATEDLY
540 disabling, allowing multiple RCU-lockdep warnings to be printed 540 disabling, allowing multiple RCU-lockdep warnings to be printed
541 on a single reboot. 541 on a single reboot.
542 542
543 Say Y to allow multiple RCU-lockdep warnings per boot.
544
545 Say N if you are unsure.
546
547config SPARSE_RCU_POINTER
548 bool "RCU debugging: sparse-based checks for pointer usage"
549 default n
550 help
551 This feature enables the __rcu sparse annotation for
552 RCU-protected pointers. This annotation will cause sparse
553	  to flag any non-RCU use of annotated pointers. This can be
554 helpful when debugging RCU usage. Please note that this feature
555 is not intended to enforce code cleanliness; it is instead merely
556 a debugging aid.
557
558	  Say Y to make sparse flag questionable use of RCU-protected pointers.
559
543 Say N if you are unsure. 560 Say N if you are unsure.
544 561
545config LOCKDEP 562config LOCKDEP
@@ -832,6 +849,30 @@ config RCU_CPU_STALL_DETECTOR
832 849
833 Say Y if you are unsure. 850 Say Y if you are unsure.
834 851
852config RCU_CPU_STALL_TIMEOUT
853 int "RCU CPU stall timeout in seconds"
854 depends on RCU_CPU_STALL_DETECTOR
855 range 3 300
856 default 60
857 help
858	  If a given RCU grace period extends beyond the specified
859 number of seconds, a CPU stall warning is printed. If the
860 RCU grace period persists, additional CPU stall warnings are
861 printed at more widely spaced intervals.
862
863config RCU_CPU_STALL_DETECTOR_RUNNABLE
864 bool "RCU CPU stall checking starts automatically at boot"
865 depends on RCU_CPU_STALL_DETECTOR
866 default y
867 help
868 If set, start checking for RCU CPU stalls immediately on
869 boot. Otherwise, RCU CPU stall checking must be manually
870 enabled.
871
872 Say Y if you are unsure.
873
874 Say N if you wish to suppress RCU CPU stall checking during boot.
875
835config RCU_CPU_STALL_VERBOSE 876config RCU_CPU_STALL_VERBOSE
836 bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" 877 bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
837 depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU 878 depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1c..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
49 unsigned int height; /* Height from the bottom */ 49 unsigned int height; /* Height from the bottom */
50 unsigned int count; 50 unsigned int count;
51 struct rcu_head rcu_head; 51 struct rcu_head rcu_head;
52 void *slots[RADIX_TREE_MAP_SIZE]; 52 void __rcu *slots[RADIX_TREE_MAP_SIZE];
53 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; 53 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
54}; 54};
55 55
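The __rcu annotation only has teeth when sparse runs with CONFIG_SPARSE_RCU_POINTER (added above); a minimal sketch of what it enforces:

    struct foo {
    	struct bar __rcu *ptr;			/* RCU-protected pointer */
    };

    void reader(struct foo *f)
    {
    	struct bar *b;

    	rcu_read_lock();
    	b = rcu_dereference(f->ptr);	/* ok: strips __rcu inside a reader */
    	rcu_read_unlock();
    }

    void writer(struct foo *f, struct bar *new)
    {
    	rcu_assign_pointer(f->ptr, new);	/* ok: publishes with a barrier */
    	/* a plain "f->ptr = new;" would now draw a sparse warning */
    }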
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3eed583895a6..9be3cf8a5da4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3587,9 +3587,13 @@ unlock:
3587 3587
3588static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3588static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3589{ 3589{
3590 __mem_cgroup_threshold(memcg, false); 3590 while (memcg) {
3591 if (do_swap_account) 3591 __mem_cgroup_threshold(memcg, false);
3592 __mem_cgroup_threshold(memcg, true); 3592 if (do_swap_account)
3593 __mem_cgroup_threshold(memcg, true);
3594
3595 memcg = parent_mem_cgroup(memcg);
3596 }
3593} 3597}
3594 3598
3595static int compare_thresholds(const void *a, const void *b) 3599static int compare_thresholds(const void *a, const void *b)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9c26eeca1342..757f6b0accfe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
183 * signal. 183 * signal.
184 */ 184 */
185static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, 185static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
186 unsigned long pfn) 186 unsigned long pfn, struct page *page)
187{ 187{
188 struct siginfo si; 188 struct siginfo si;
189 int ret; 189 int ret;
@@ -198,7 +198,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
198#ifdef __ARCH_SI_TRAPNO 198#ifdef __ARCH_SI_TRAPNO
199 si.si_trapno = trapno; 199 si.si_trapno = trapno;
200#endif 200#endif
201 si.si_addr_lsb = PAGE_SHIFT; 201 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
202 /* 202 /*
203 * Don't use force here, it's convenient if the signal 203 * Don't use force here, it's convenient if the signal
204 * can be temporarily blocked. 204 * can be temporarily blocked.
@@ -235,7 +235,7 @@ void shake_page(struct page *p, int access)
235 int nr; 235 int nr;
236 do { 236 do {
237 nr = shrink_slab(1000, GFP_KERNEL, 1000); 237 nr = shrink_slab(1000, GFP_KERNEL, 1000);
238 if (page_count(p) == 0) 238 if (page_count(p) == 1)
239 break; 239 break;
240 } while (nr > 10); 240 } while (nr > 10);
241 } 241 }
@@ -327,7 +327,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
327 * wrong earlier. 327 * wrong earlier.
328 */ 328 */
329static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, 329static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
330 int fail, unsigned long pfn) 330 int fail, struct page *page, unsigned long pfn)
331{ 331{
332 struct to_kill *tk, *next; 332 struct to_kill *tk, *next;
333 333
@@ -352,7 +352,7 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
352 * process anyways. 352 * process anyways.
353 */ 353 */
354 else if (kill_proc_ao(tk->tsk, tk->addr, trapno, 354 else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
355 pfn) < 0) 355 pfn, page) < 0)
356 printk(KERN_ERR 356 printk(KERN_ERR
357 "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", 357 "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
358 pfn, tk->tsk->comm, tk->tsk->pid); 358 pfn, tk->tsk->comm, tk->tsk->pid);
@@ -928,7 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
928 * any accesses to the poisoned memory. 928 * any accesses to the poisoned memory.
929 */ 929 */
930 kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, 930 kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
931 ret != SWAP_SUCCESS, pfn); 931 ret != SWAP_SUCCESS, p, pfn);
932 932
933 return ret; 933 return ret;
934} 934}
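A worked example of the new si_addr_lsb value, assuming 4 KiB base pages:

    /* base page:       compound_order() == 0, so lsb = 0 + PAGE_SHIFT = 12 (4 KiB) */
    /* 2 MiB huge page: compound_order(compound_head(page)) == 9,
     *                  so lsb = 9 + 12 = 21 and the signal covers 1 << 21 = 2 MiB  */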
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8cfa9cc6e86..f12ad1836abe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5182,9 +5182,9 @@ void *__init alloc_large_system_hash(const char *tablename,
5182 if (!table) 5182 if (!table)
5183 panic("Failed to allocate %s hash table\n", tablename); 5183 panic("Failed to allocate %s hash table\n", tablename);
5184 5184
5185 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", 5185 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5186 tablename, 5186 tablename,
5187 (1U << log2qty), 5187 (1UL << log2qty),
5188 ilog2(size) - PAGE_SHIFT, 5188 ilog2(size) - PAGE_SHIFT,
5189 size); 5189 size);
5190 5190
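The wider cast is what makes the new %ld honest; an illustration of the failure mode being fixed, assuming a 64-bit kernel:

    /* with log2qty == 32 (a 4G-entry hash on a very large machine):
     *   1U  << log2qty   shifts a 32-bit value by its full width: undefined
     *   1UL << log2qty   == 4294967296, which %ld/%lu can print correctly   */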
diff --git a/net/Kconfig b/net/Kconfig
index e926884c1675..55fd82e9ffd9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -293,6 +293,7 @@ source "net/wimax/Kconfig"
293source "net/rfkill/Kconfig" 293source "net/rfkill/Kconfig"
294source "net/9p/Kconfig" 294source "net/9p/Kconfig"
295source "net/caif/Kconfig" 295source "net/caif/Kconfig"
296source "net/ceph/Kconfig"
296 297
297 298
298endif # if NET 299endif # if NET
diff --git a/net/Makefile b/net/Makefile
index ea60fbce9b1b..6b7bfd7f1416 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@ obj-$(CONFIG_SYSCTL) += sysctl_net.o
68endif 68endif
69obj-$(CONFIG_WIMAX) += wimax/ 69obj-$(CONFIG_WIMAX) += wimax/
70obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ 70obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
71obj-$(CONFIG_CEPH_LIB) += ceph/
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 622b471e14e0..74bcc662c3dd 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -778,7 +778,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
778 eg->packets_rcvd++; 778 eg->packets_rcvd++;
779 mpc->eg_ops->put(eg); 779 mpc->eg_ops->put(eg);
780 780
781 memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); 781 memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
782 netif_rx(new_skb); 782 netif_rx(new_skb);
783} 783}
784 784
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index fadf26b4ed7c..0b54b7dd8401 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1441,33 +1441,23 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1441 1441
1442static void l2cap_streaming_send(struct sock *sk) 1442static void l2cap_streaming_send(struct sock *sk)
1443{ 1443{
1444 struct sk_buff *skb, *tx_skb; 1444 struct sk_buff *skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk); 1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 u16 control, fcs; 1446 u16 control, fcs;
1447 1447
1448 while ((skb = sk->sk_send_head)) { 1448 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1449 tx_skb = skb_clone(skb, GFP_ATOMIC); 1449 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1450
1451 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1452 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1450 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1453 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1451 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1454 1452
1455 if (pi->fcs == L2CAP_FCS_CRC16) { 1453 if (pi->fcs == L2CAP_FCS_CRC16) {
1456 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1454 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1457 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1455 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1458 } 1456 }
1459 1457
1460 l2cap_do_send(sk, tx_skb); 1458 l2cap_do_send(sk, skb);
1461 1459
1462 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1463
1464 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1465 sk->sk_send_head = NULL;
1466 else
1467 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1468
1469 skb = skb_dequeue(TX_QUEUE(sk));
1470 kfree_skb(skb);
1471 } 1461 }
1472} 1462}
1473 1463
@@ -1960,6 +1950,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1960 1950
1961 switch (optname) { 1951 switch (optname) {
1962 case L2CAP_OPTIONS: 1952 case L2CAP_OPTIONS:
1953 if (sk->sk_state == BT_CONNECTED) {
1954 err = -EINVAL;
1955 break;
1956 }
1957
1963 opts.imtu = l2cap_pi(sk)->imtu; 1958 opts.imtu = l2cap_pi(sk)->imtu;
1964 opts.omtu = l2cap_pi(sk)->omtu; 1959 opts.omtu = l2cap_pi(sk)->omtu;
1965 opts.flush_to = l2cap_pi(sk)->flush_to; 1960 opts.flush_to = l2cap_pi(sk)->flush_to;
@@ -2771,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2771 case L2CAP_CONF_MTU: 2766 case L2CAP_CONF_MTU:
2772 if (val < L2CAP_DEFAULT_MIN_MTU) { 2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2773 *result = L2CAP_CONF_UNACCEPT; 2768 *result = L2CAP_CONF_UNACCEPT;
2774 pi->omtu = L2CAP_DEFAULT_MIN_MTU; 2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2775 } else 2770 } else
2776 pi->omtu = val; 2771 pi->imtu = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2778 break; 2773 break;
2779 2774
2780 case L2CAP_CONF_FLUSH_TO: 2775 case L2CAP_CONF_FLUSH_TO:
@@ -3071,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
3071 return 0; 3066 return 0;
3072} 3067}
3073 3068
3069static inline void set_default_fcs(struct l2cap_pinfo *pi)
3070{
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3072 * sides request it.
3073 */
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
3078}
3079
3074static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 3080static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3075{ 3081{
3076 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 3082 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -3088,14 +3094,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3088 if (!sk) 3094 if (!sk)
3089 return -ENOENT; 3095 return -ENOENT;
3090 3096
3091 if (sk->sk_state != BT_CONFIG) { 3097 if (sk->sk_state == BT_DISCONN)
3092 struct l2cap_cmd_rej rej;
3093
3094 rej.reason = cpu_to_le16(0x0002);
3095 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3096 sizeof(rej), &rej);
3097 goto unlock; 3098 goto unlock;
3098 }
3099 3099
3100 /* Reject if config buffer is too small. */ 3100 /* Reject if config buffer is too small. */
3101 len = cmd_len - sizeof(*req); 3101 len = cmd_len - sizeof(*req);
@@ -3135,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3135 goto unlock; 3135 goto unlock;
3136 3136
3137 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 3137 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3138 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || 3138 set_default_fcs(l2cap_pi(sk));
3139 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3140 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3141 3139
3142 sk->sk_state = BT_CONNECTED; 3140 sk->sk_state = BT_CONNECTED;
3143 3141
@@ -3225,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3225 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3226 3224
3227 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3228 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || 3226 set_default_fcs(l2cap_pi(sk));
3229 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3230 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3231 3227
3232 sk->sk_state = BT_CONNECTED; 3228 sk->sk_state = BT_CONNECTED;
3233 l2cap_pi(sk)->next_tx_seq = 0; 3229 l2cap_pi(sk)->next_tx_seq = 0;
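The set_default_fcs() helper introduced above is a restatement of the old inline logic; its decision table, spelled out:

    /* mode                      peer sent "no FCS" option?   resulting pi->fcs
     * basic (not ERTM/stream)   irrelevant                   L2CAP_FCS_NONE
     * ERTM or streaming         no                           L2CAP_FCS_CRC16
     * ERTM or streaming         yes                          left as configured */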
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 44a623275951..194b3a04cfd3 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -82,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
82static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) 82static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
83{ 83{
84 struct sock *sk = d->owner, *parent; 84 struct sock *sk = d->owner, *parent;
85 unsigned long flags;
86
85 if (!sk) 87 if (!sk)
86 return; 88 return;
87 89
88 BT_DBG("dlc %p state %ld err %d", d, d->state, err); 90 BT_DBG("dlc %p state %ld err %d", d, d->state, err);
89 91
92 local_irq_save(flags);
90 bh_lock_sock(sk); 93 bh_lock_sock(sk);
91 94
92 if (err) 95 if (err)
@@ -108,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
108 } 111 }
109 112
110 bh_unlock_sock(sk); 113 bh_unlock_sock(sk);
114 local_irq_restore(flags);
111 115
112 if (parent && sock_flag(sk, SOCK_ZAPPED)) { 116 if (parent && sock_flag(sk, SOCK_ZAPPED)) {
113 /* We have to drop DLC lock here, otherwise 117 /* We have to drop DLC lock here, otherwise
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce904786116..4bf28f25f368 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -827,6 +827,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
827 long timeo; 827 long timeo;
828 int err; 828 int err;
829 int ifindex, headroom, tailroom; 829 int ifindex, headroom, tailroom;
830 unsigned int mtu;
830 struct net_device *dev; 831 struct net_device *dev;
831 832
832 lock_sock(sk); 833 lock_sock(sk);
@@ -896,15 +897,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
896 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 897 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
897 goto out; 898 goto out;
898 } 899 }
899 dev = dev_get_by_index(sock_net(sk), ifindex); 900
901 err = -ENODEV;
902 rcu_read_lock();
903 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
904 if (!dev) {
905 rcu_read_unlock();
906 goto out;
907 }
900 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); 908 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
909 mtu = dev->mtu;
910 rcu_read_unlock();
911
901 cf_sk->tailroom = tailroom; 912 cf_sk->tailroom = tailroom;
902 cf_sk->maxframe = dev->mtu - (headroom + tailroom); 913 cf_sk->maxframe = mtu - (headroom + tailroom);
903 dev_put(dev);
904 if (cf_sk->maxframe < 1) { 914 if (cf_sk->maxframe < 1) {
905 pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n", 915 pr_warning("CAIF: %s(): CAIF Interface MTU too small (%u)\n",
906 __func__, dev->mtu); 916 __func__, mtu);
907 err = -ENODEV;
908 goto out; 917 goto out;
909 } 918 }
910 919
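The caif change follows the standard lock-free device lookup idiom; a minimal sketch of the pattern, assuming only dev->mtu is needed afterwards:

    rcu_read_lock();
    dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
    if (!dev) {
    	rcu_read_unlock();
    	return -ENODEV;
    }
    mtu = dev->mtu;			/* copy fields out inside the section */
    rcu_read_unlock();
    /* no dev_put(): dev_get_by_index_rcu() takes no reference */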
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
new file mode 100644
index 000000000000..ad424049b0cf
--- /dev/null
+++ b/net/ceph/Kconfig
@@ -0,0 +1,28 @@
1config CEPH_LIB
2 tristate "Ceph core library (EXPERIMENTAL)"
3 depends on INET && EXPERIMENTAL
4 select LIBCRC32C
5 select CRYPTO_AES
6 select CRYPTO
7 default n
8 help
9	  Choose Y or M here to include cephlib, which provides the
10	  functionality common to both the Ceph filesystem and
11	  the rados block device (rbd).
12
13 More information at http://ceph.newdream.net/.
14
15 If unsure, say N.
16
17config CEPH_LIB_PRETTYDEBUG
18 bool "Include file:line in ceph debug output"
19 depends on CEPH_LIB
20 default n
21 help
22 If you say Y here, debug output will include a filename and
23 line to aid debugging. This increases kernel size and slows
24 execution slightly when debug call sites are enabled (e.g.,
25 via CONFIG_DYNAMIC_DEBUG).
26
27 If unsure, say N.
28
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
new file mode 100644
index 000000000000..aab1cabb8035
--- /dev/null
+++ b/net/ceph/Makefile
@@ -0,0 +1,37 @@
1#
2# Makefile for the Ceph core library (libceph).
3#
4
5ifneq ($(KERNELRELEASE),)
6
7obj-$(CONFIG_CEPH_LIB) += libceph.o
8
9libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
10 mon_client.o \
11 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
12 debugfs.o \
13 auth.o auth_none.o \
14 crypto.o armor.o \
15 auth_x.o \
16 ceph_fs.o ceph_strings.o ceph_hash.o \
17 pagevec.o
18
19else
20# Otherwise we were called directly from the command
21# line; invoke the kernel build system.
22
23KERNELDIR ?= /lib/modules/$(shell uname -r)/build
24PWD := $(shell pwd)
25
26default: all
27
28all:
29 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
30
31modules_install:
32 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
33
34clean:
35 $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
36
37endif
diff --git a/fs/ceph/armor.c b/net/ceph/armor.c
index eb2a666b0be7..eb2a666b0be7 100644
--- a/fs/ceph/armor.c
+++ b/net/ceph/armor.c
diff --git a/fs/ceph/auth.c b/net/ceph/auth.c
index 6d2e30600627..549c1f43e1d5 100644
--- a/fs/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -1,16 +1,16 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6 6
7#include "types.h" 7#include <linux/ceph/types.h>
8#include <linux/ceph/decode.h>
9#include <linux/ceph/libceph.h>
10#include <linux/ceph/messenger.h>
8#include "auth_none.h" 11#include "auth_none.h"
9#include "auth_x.h" 12#include "auth_x.h"
10#include "decode.h"
11#include "super.h"
12 13
13#include "messenger.h"
14 14
15/* 15/*
16 * get protocol handler 16 * get protocol handler
diff --git a/fs/ceph/auth_none.c b/net/ceph/auth_none.c
index ad1dc21286c7..214c2bb43d62 100644
--- a/fs/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -1,14 +1,15 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/random.h> 6#include <linux/random.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8 8
9#include <linux/ceph/decode.h>
10#include <linux/ceph/auth.h>
11
9#include "auth_none.h" 12#include "auth_none.h"
10#include "auth.h"
11#include "decode.h"
12 13
13static void reset(struct ceph_auth_client *ac) 14static void reset(struct ceph_auth_client *ac)
14{ 15{
diff --git a/fs/ceph/auth_none.h b/net/ceph/auth_none.h
index 8164df1a08be..ed7d088b1bc9 100644
--- a/fs/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -2,8 +2,7 @@
2#define _FS_CEPH_AUTH_NONE_H 2#define _FS_CEPH_AUTH_NONE_H
3 3
4#include <linux/slab.h> 4#include <linux/slab.h>
5 5#include <linux/ceph/auth.h>
6#include "auth.h"
7 6
8/* 7/*
9 * null security mode. 8 * null security mode.
diff --git a/fs/ceph/auth_x.c b/net/ceph/auth_x.c
index a2d002cbdec2..7fd5dfcf6e18 100644
--- a/fs/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -1,16 +1,17 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/random.h> 6#include <linux/random.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8 8
9#include <linux/ceph/decode.h>
10#include <linux/ceph/auth.h>
11
12#include "crypto.h"
9#include "auth_x.h" 13#include "auth_x.h"
10#include "auth_x_protocol.h" 14#include "auth_x_protocol.h"
11#include "crypto.h"
12#include "auth.h"
13#include "decode.h"
14 15
15#define TEMP_TICKET_BUF_LEN 256 16#define TEMP_TICKET_BUF_LEN 256
16 17
diff --git a/fs/ceph/auth_x.h b/net/ceph/auth_x.h
index ff6f8180e681..e02da7a5c5a1 100644
--- a/fs/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -3,8 +3,9 @@
3 3
4#include <linux/rbtree.h> 4#include <linux/rbtree.h>
5 5
6#include <linux/ceph/auth.h>
7
6#include "crypto.h" 8#include "crypto.h"
7#include "auth.h"
8#include "auth_x_protocol.h" 9#include "auth_x_protocol.h"
9 10
10/* 11/*
diff --git a/fs/ceph/auth_x_protocol.h b/net/ceph/auth_x_protocol.h
index 671d30576c4f..671d30576c4f 100644
--- a/fs/ceph/auth_x_protocol.h
+++ b/net/ceph/auth_x_protocol.h
diff --git a/fs/ceph/buffer.c b/net/ceph/buffer.c
index cd39f17021de..53d8abfa25d5 100644
--- a/fs/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -1,10 +1,11 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/module.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
5 6
6#include "buffer.h" 7#include <linux/ceph/buffer.h>
7#include "decode.h" 8#include <linux/ceph/decode.h>
8 9
9struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) 10struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
10{ 11{
@@ -32,6 +33,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
32 dout("buffer_new %p\n", b); 33 dout("buffer_new %p\n", b);
33 return b; 34 return b;
34} 35}
36EXPORT_SYMBOL(ceph_buffer_new);
35 37
36void ceph_buffer_release(struct kref *kref) 38void ceph_buffer_release(struct kref *kref)
37{ 39{
@@ -46,6 +48,7 @@ void ceph_buffer_release(struct kref *kref)
46 } 48 }
47 kfree(b); 49 kfree(b);
48} 50}
51EXPORT_SYMBOL(ceph_buffer_release);
49 52
50int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end) 53int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
51{ 54{
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
new file mode 100644
index 000000000000..f3e4a13fea0c
--- /dev/null
+++ b/net/ceph/ceph_common.c
@@ -0,0 +1,529 @@
1
2#include <linux/ceph/ceph_debug.h>
3#include <linux/backing-dev.h>
4#include <linux/ctype.h>
5#include <linux/fs.h>
6#include <linux/inet.h>
7#include <linux/in6.h>
8#include <linux/module.h>
9#include <linux/mount.h>
10#include <linux/parser.h>
11#include <linux/sched.h>
12#include <linux/seq_file.h>
13#include <linux/slab.h>
14#include <linux/statfs.h>
15#include <linux/string.h>
16
17
18#include <linux/ceph/libceph.h>
19#include <linux/ceph/debugfs.h>
20#include <linux/ceph/decode.h>
21#include <linux/ceph/mon_client.h>
22#include <linux/ceph/auth.h>
23
24
25
26/*
27 * find filename portion of a path (/foo/bar/baz -> baz)
28 */
29const char *ceph_file_part(const char *s, int len)
30{
31 const char *e = s + len;
32
33 while (e != s && *(e-1) != '/')
34 e--;
35 return e;
36}
37EXPORT_SYMBOL(ceph_file_part);
38
39const char *ceph_msg_type_name(int type)
40{
41 switch (type) {
42 case CEPH_MSG_SHUTDOWN: return "shutdown";
43 case CEPH_MSG_PING: return "ping";
44 case CEPH_MSG_AUTH: return "auth";
45 case CEPH_MSG_AUTH_REPLY: return "auth_reply";
46 case CEPH_MSG_MON_MAP: return "mon_map";
47 case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
48 case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
49 case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
50 case CEPH_MSG_STATFS: return "statfs";
51 case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
52 case CEPH_MSG_MDS_MAP: return "mds_map";
53 case CEPH_MSG_CLIENT_SESSION: return "client_session";
54 case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
55 case CEPH_MSG_CLIENT_REQUEST: return "client_request";
56 case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
57 case CEPH_MSG_CLIENT_REPLY: return "client_reply";
58 case CEPH_MSG_CLIENT_CAPS: return "client_caps";
59 case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
60 case CEPH_MSG_CLIENT_SNAP: return "client_snap";
61 case CEPH_MSG_CLIENT_LEASE: return "client_lease";
62 case CEPH_MSG_OSD_MAP: return "osd_map";
63 case CEPH_MSG_OSD_OP: return "osd_op";
64 case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
65 default: return "unknown";
66 }
67}
68EXPORT_SYMBOL(ceph_msg_type_name);
69
70/*
71 * Initially learn our fsid, or verify an fsid matches.
72 */
73int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
74{
75 if (client->have_fsid) {
76 if (ceph_fsid_compare(&client->fsid, fsid)) {
77			pr_err("bad fsid, had %pU got %pU\n",
78 &client->fsid, fsid);
79 return -1;
80 }
81 } else {
82 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
83 memcpy(&client->fsid, fsid, sizeof(*fsid));
84 ceph_debugfs_client_init(client);
85 client->have_fsid = true;
86 }
87 return 0;
88}
89EXPORT_SYMBOL(ceph_check_fsid);
90
91static int strcmp_null(const char *s1, const char *s2)
92{
93 if (!s1 && !s2)
94 return 0;
95 if (s1 && !s2)
96 return -1;
97 if (!s1 && s2)
98 return 1;
99 return strcmp(s1, s2);
100}
101
102int ceph_compare_options(struct ceph_options *new_opt,
103 struct ceph_client *client)
104{
105 struct ceph_options *opt1 = new_opt;
106 struct ceph_options *opt2 = client->options;
107 int ofs = offsetof(struct ceph_options, mon_addr);
108 int i;
109 int ret;
110
111 ret = memcmp(opt1, opt2, ofs);
112 if (ret)
113 return ret;
114
115 ret = strcmp_null(opt1->name, opt2->name);
116 if (ret)
117 return ret;
118
119 ret = strcmp_null(opt1->secret, opt2->secret);
120 if (ret)
121 return ret;
122
123 /* any matching mon ip implies a match */
124 for (i = 0; i < opt1->num_mon; i++) {
125 if (ceph_monmap_contains(client->monc.monmap,
126 &opt1->mon_addr[i]))
127 return 0;
128 }
129 return -1;
130}
131EXPORT_SYMBOL(ceph_compare_options);
132
133
134static int parse_fsid(const char *str, struct ceph_fsid *fsid)
135{
136 int i = 0;
137 char tmp[3];
138 int err = -EINVAL;
139 int d;
140
141 dout("parse_fsid '%s'\n", str);
142 tmp[2] = 0;
143 while (*str && i < 16) {
144 if (ispunct(*str)) {
145 str++;
146 continue;
147 }
148 if (!isxdigit(str[0]) || !isxdigit(str[1]))
149 break;
150 tmp[0] = str[0];
151 tmp[1] = str[1];
152 if (sscanf(tmp, "%x", &d) < 1)
153 break;
154 fsid->fsid[i] = d & 0xff;
155 i++;
156 str += 2;
157 }
158
159 if (i == 16)
160 err = 0;
161	dout("parse_fsid ret %d got fsid %pU\n", err, fsid);
162 return err;
163}
164
165/*
166 * ceph options
167 */
168enum {
169 Opt_osdtimeout,
170 Opt_osdkeepalivetimeout,
171 Opt_mount_timeout,
172 Opt_osd_idle_ttl,
173 Opt_last_int,
174 /* int args above */
175 Opt_fsid,
176 Opt_name,
177 Opt_secret,
178 Opt_ip,
179 Opt_last_string,
180 /* string args above */
181 Opt_noshare,
182 Opt_nocrc,
183};
184
185static match_table_t opt_tokens = {
186 {Opt_osdtimeout, "osdtimeout=%d"},
187 {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
188 {Opt_mount_timeout, "mount_timeout=%d"},
189 {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
190 /* int args above */
191 {Opt_fsid, "fsid=%s"},
192 {Opt_name, "name=%s"},
193 {Opt_secret, "secret=%s"},
194 {Opt_ip, "ip=%s"},
195 /* string args above */
196 {Opt_noshare, "noshare"},
197 {Opt_nocrc, "nocrc"},
198 {-1, NULL}
199};
200
201void ceph_destroy_options(struct ceph_options *opt)
202{
203 dout("destroy_options %p\n", opt);
204 kfree(opt->name);
205 kfree(opt->secret);
206 kfree(opt);
207}
208EXPORT_SYMBOL(ceph_destroy_options);
209
210int ceph_parse_options(struct ceph_options **popt, char *options,
211 const char *dev_name, const char *dev_name_end,
212 int (*parse_extra_token)(char *c, void *private),
213 void *private)
214{
215 struct ceph_options *opt;
216 const char *c;
217 int err = -ENOMEM;
218 substring_t argstr[MAX_OPT_ARGS];
219
220 opt = kzalloc(sizeof(*opt), GFP_KERNEL);
221 if (!opt)
222 return err;
223 opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr),
224 GFP_KERNEL);
225 if (!opt->mon_addr)
226 goto out;
227
228 dout("parse_options %p options '%s' dev_name '%s'\n", opt, options,
229 dev_name);
230
231 /* start with defaults */
232 opt->flags = CEPH_OPT_DEFAULT;
233 opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
234 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
235 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
236 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
237
238 /* get mon ip(s) */
239 /* ip1[:port1][,ip2[:port2]...] */
240 err = ceph_parse_ips(dev_name, dev_name_end, opt->mon_addr,
241 CEPH_MAX_MON, &opt->num_mon);
242 if (err < 0)
243 goto out;
244
245 /* parse mount options */
246 while ((c = strsep(&options, ",")) != NULL) {
247 int token, intval, ret;
248 if (!*c)
249 continue;
250 err = -EINVAL;
251 token = match_token((char *)c, opt_tokens, argstr);
252 if (token < 0 && parse_extra_token) {
253 /* extra? */
254 err = parse_extra_token((char *)c, private);
255 if (err < 0) {
256 pr_err("bad option at '%s'\n", c);
257 goto out;
258 }
259 continue;
260 }
261 if (token < Opt_last_int) {
262 ret = match_int(&argstr[0], &intval);
263 if (ret < 0) {
264 pr_err("bad mount option arg (not int) "
265 "at '%s'\n", c);
266 continue;
267 }
268 dout("got int token %d val %d\n", token, intval);
269 } else if (token > Opt_last_int && token < Opt_last_string) {
270 dout("got string token %d val %s\n", token,
271 argstr[0].from);
272 } else {
273 dout("got token %d\n", token);
274 }
275 switch (token) {
276 case Opt_ip:
277 err = ceph_parse_ips(argstr[0].from,
278 argstr[0].to,
279 &opt->my_addr,
280 1, NULL);
281 if (err < 0)
282 goto out;
283 opt->flags |= CEPH_OPT_MYIP;
284 break;
285
286 case Opt_fsid:
287 err = parse_fsid(argstr[0].from, &opt->fsid);
288 if (err == 0)
289 opt->flags |= CEPH_OPT_FSID;
290 break;
291 case Opt_name:
292 opt->name = kstrndup(argstr[0].from,
293 argstr[0].to-argstr[0].from,
294 GFP_KERNEL);
295 break;
296 case Opt_secret:
297 opt->secret = kstrndup(argstr[0].from,
298 argstr[0].to-argstr[0].from,
299 GFP_KERNEL);
300 break;
301
302 /* misc */
303 case Opt_osdtimeout:
304 opt->osd_timeout = intval;
305 break;
306 case Opt_osdkeepalivetimeout:
307 opt->osd_keepalive_timeout = intval;
308 break;
309 case Opt_osd_idle_ttl:
310 opt->osd_idle_ttl = intval;
311 break;
312 case Opt_mount_timeout:
313 opt->mount_timeout = intval;
314 break;
315
316 case Opt_noshare:
317 opt->flags |= CEPH_OPT_NOSHARE;
318 break;
319
320 case Opt_nocrc:
321 opt->flags |= CEPH_OPT_NOCRC;
322 break;
323
324 default:
325 BUG_ON(token);
326 }
327 }
328
329 /* success */
330 *popt = opt;
331 return 0;
332
333out:
334 ceph_destroy_options(opt);
335 return err;
336}
337EXPORT_SYMBOL(ceph_parse_options);
338
339u64 ceph_client_id(struct ceph_client *client)
340{
341 return client->monc.auth->global_id;
342}
343EXPORT_SYMBOL(ceph_client_id);
344
345/*
346 * create a fresh client instance
347 */
348struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
349{
350 struct ceph_client *client;
351 int err = -ENOMEM;
352
353 client = kzalloc(sizeof(*client), GFP_KERNEL);
354 if (client == NULL)
355 return ERR_PTR(-ENOMEM);
356
357 client->private = private;
358 client->options = opt;
359
360 mutex_init(&client->mount_mutex);
361 init_waitqueue_head(&client->auth_wq);
362 client->auth_err = 0;
363
364 client->extra_mon_dispatch = NULL;
365 client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT;
366 client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT;
367
368 client->msgr = NULL;
369
370 /* subsystems */
371 err = ceph_monc_init(&client->monc, client);
372 if (err < 0)
373 goto fail;
374 err = ceph_osdc_init(&client->osdc, client);
375 if (err < 0)
376 goto fail_monc;
377
378 return client;
379
380fail_monc:
381 ceph_monc_stop(&client->monc);
382fail:
383 kfree(client);
384 return ERR_PTR(err);
385}
386EXPORT_SYMBOL(ceph_create_client);
387
388void ceph_destroy_client(struct ceph_client *client)
389{
390 dout("destroy_client %p\n", client);
391
392 /* unmount */
393 ceph_osdc_stop(&client->osdc);
394
395 /*
396 * make sure mds and osd connections close out before destroying
397 * the auth module, which is needed to free those connections'
398 * ceph_authorizers.
399 */
400 ceph_msgr_flush();
401
402 ceph_monc_stop(&client->monc);
403
404 ceph_debugfs_client_cleanup(client);
405
406 if (client->msgr)
407 ceph_messenger_destroy(client->msgr);
408
409 ceph_destroy_options(client->options);
410
411 kfree(client);
412 dout("destroy_client %p done\n", client);
413}
414EXPORT_SYMBOL(ceph_destroy_client);
415
416/*
417 * true if we have both the mon and osd maps (and have thus joined the cluster)
418 */
419static int have_mon_and_osd_map(struct ceph_client *client)
420{
421 return client->monc.monmap && client->monc.monmap->epoch &&
422 client->osdc.osdmap && client->osdc.osdmap->epoch;
423}
424
425/*
426 * mount: join the ceph cluster, and open root directory.
427 */
428int __ceph_open_session(struct ceph_client *client, unsigned long started)
429{
430 struct ceph_entity_addr *myaddr = NULL;
431 int err;
432 unsigned long timeout = client->options->mount_timeout * HZ;
433
434 /* initialize the messenger */
435 if (client->msgr == NULL) {
436 if (ceph_test_opt(client, MYIP))
437 myaddr = &client->options->my_addr;
438 client->msgr = ceph_messenger_create(myaddr,
439 client->supported_features,
440 client->required_features);
441 if (IS_ERR(client->msgr)) {
442 client->msgr = NULL;
443 return PTR_ERR(client->msgr);
444 }
445 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
446 }
447
448 /* open session, and wait for mon and osd maps */
449 err = ceph_monc_open_session(&client->monc);
450 if (err < 0)
451 return err;
452
453 while (!have_mon_and_osd_map(client)) {
454 err = -EIO;
455 if (timeout && time_after_eq(jiffies, started + timeout))
456 return err;
457
458 /* wait */
459 dout("mount waiting for mon_map\n");
460 err = wait_event_interruptible_timeout(client->auth_wq,
461 have_mon_and_osd_map(client) || (client->auth_err < 0),
462 timeout);
463 if (err == -EINTR || err == -ERESTARTSYS)
464 return err;
465 if (client->auth_err < 0)
466 return client->auth_err;
467 }
468
469 return 0;
470}
471EXPORT_SYMBOL(__ceph_open_session);
472
473
474int ceph_open_session(struct ceph_client *client)
475{
476 int ret;
477 unsigned long started = jiffies; /* note the start time */
478
479 dout("open_session start\n");
480 mutex_lock(&client->mount_mutex);
481
482 ret = __ceph_open_session(client, started);
483
484 mutex_unlock(&client->mount_mutex);
485 return ret;
486}
487EXPORT_SYMBOL(ceph_open_session);
488
489
490static int __init init_ceph_lib(void)
491{
492 int ret = 0;
493
494 ret = ceph_debugfs_init();
495 if (ret < 0)
496 goto out;
497
498 ret = ceph_msgr_init();
499 if (ret < 0)
500 goto out_debugfs;
501
502 pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
503 CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
504 CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
505 CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
506
507 return 0;
508
509out_debugfs:
510 ceph_debugfs_cleanup();
511out:
512 return ret;
513}
514
515static void __exit exit_ceph_lib(void)
516{
517 dout("exit_ceph_lib\n");
518 ceph_msgr_exit();
519 ceph_debugfs_cleanup();
520}
521
522module_init(init_ceph_lib);
523module_exit(exit_ceph_lib);
524
525MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
526MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
527MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
528MODULE_DESCRIPTION("Ceph filesystem for Linux");
529MODULE_LICENSE("GPL");
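A hedged sketch of how a libceph consumer such as rbd is expected to drive this API; the option strings and error handling are illustrative only:

    struct ceph_options *opt;
    struct ceph_client *client;
    int err;

    /* dev_name carries "ip1[:port1][,ip2...]", options e.g. "nocrc,osdtimeout=30" */
    err = ceph_parse_options(&opt, options, dev_name, dev_name_end,
    			 NULL, NULL);		/* no consumer-specific tokens */
    if (err < 0)
    	return err;

    client = ceph_create_client(opt, NULL);
    if (IS_ERR(client)) {
    	ceph_destroy_options(opt);	/* caller still owns opt on failure */
    	return PTR_ERR(client);
    }

    err = ceph_open_session(client);	/* blocks for the mon and osd maps */
    if (err < 0)
    	ceph_destroy_client(client);	/* also frees opt via client->options */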
diff --git a/fs/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 3ac6cc7c1156..a3a3a31d3c37 100644
--- a/fs/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Some non-inline ceph helpers 2 * Some non-inline ceph helpers
3 */ 3 */
4#include "types.h" 4#include <linux/module.h>
5#include <linux/ceph/types.h>
5 6
6/* 7/*
7 * return true if @layout appears to be valid 8 * return true if @layout appears to be valid
@@ -52,6 +53,7 @@ int ceph_flags_to_mode(int flags)
52 53
53 return mode; 54 return mode;
54} 55}
56EXPORT_SYMBOL(ceph_flags_to_mode);
55 57
56int ceph_caps_for_mode(int mode) 58int ceph_caps_for_mode(int mode)
57{ 59{
@@ -70,3 +72,4 @@ int ceph_caps_for_mode(int mode)
70 72
71 return caps; 73 return caps;
72} 74}
75EXPORT_SYMBOL(ceph_caps_for_mode);
diff --git a/fs/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index bd570015d147..815ef8826796 100644
--- a/fs/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,5 @@
1 1
2#include "types.h" 2#include <linux/ceph/types.h>
3 3
4/* 4/*
5 * Robert Jenkin's hash function. 5 * Robert Jenkin's hash function.
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
new file mode 100644
index 000000000000..3fbda04de29c
--- /dev/null
+++ b/net/ceph/ceph_strings.c
@@ -0,0 +1,84 @@
1/*
2 * Ceph string constants
3 */
4#include <linux/module.h>
5#include <linux/ceph/types.h>
6
7const char *ceph_entity_type_name(int type)
8{
9 switch (type) {
10 case CEPH_ENTITY_TYPE_MDS: return "mds";
11 case CEPH_ENTITY_TYPE_OSD: return "osd";
12 case CEPH_ENTITY_TYPE_MON: return "mon";
13 case CEPH_ENTITY_TYPE_CLIENT: return "client";
14 case CEPH_ENTITY_TYPE_AUTH: return "auth";
15 default: return "unknown";
16 }
17}
18
19const char *ceph_osd_op_name(int op)
20{
21 switch (op) {
22 case CEPH_OSD_OP_READ: return "read";
23 case CEPH_OSD_OP_STAT: return "stat";
24
25 case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
26
27 case CEPH_OSD_OP_WRITE: return "write";
28 case CEPH_OSD_OP_DELETE: return "delete";
29 case CEPH_OSD_OP_TRUNCATE: return "truncate";
30 case CEPH_OSD_OP_ZERO: return "zero";
31 case CEPH_OSD_OP_WRITEFULL: return "writefull";
32 case CEPH_OSD_OP_ROLLBACK: return "rollback";
33
34 case CEPH_OSD_OP_APPEND: return "append";
35 case CEPH_OSD_OP_STARTSYNC: return "startsync";
36 case CEPH_OSD_OP_SETTRUNC: return "settrunc";
37 case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
38
39 case CEPH_OSD_OP_TMAPUP: return "tmapup";
40 case CEPH_OSD_OP_TMAPGET: return "tmapget";
41 case CEPH_OSD_OP_TMAPPUT: return "tmapput";
42
43 case CEPH_OSD_OP_GETXATTR: return "getxattr";
44 case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
45 case CEPH_OSD_OP_SETXATTR: return "setxattr";
46 case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
47 case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
48 case CEPH_OSD_OP_RMXATTR: return "rmxattr";
49 case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
50
51 case CEPH_OSD_OP_PULL: return "pull";
52 case CEPH_OSD_OP_PUSH: return "push";
53 case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
54 case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
55 case CEPH_OSD_OP_SCRUB: return "scrub";
56
57 case CEPH_OSD_OP_WRLOCK: return "wrlock";
58 case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
59 case CEPH_OSD_OP_RDLOCK: return "rdlock";
60 case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
61 case CEPH_OSD_OP_UPLOCK: return "uplock";
62 case CEPH_OSD_OP_DNLOCK: return "dnlock";
63
64 case CEPH_OSD_OP_CALL: return "call";
65
66 case CEPH_OSD_OP_PGLS: return "pgls";
67 }
68 return "???";
69}
70
71
72const char *ceph_pool_op_name(int op)
73{
74 switch (op) {
75 case POOL_OP_CREATE: return "create";
76 case POOL_OP_DELETE: return "delete";
77 case POOL_OP_AUID_CHANGE: return "auid change";
78 case POOL_OP_CREATE_SNAP: return "create snap";
79 case POOL_OP_DELETE_SNAP: return "delete snap";
80 case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
81 case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
82 }
83 return "???";
84}
diff --git a/fs/ceph/crush/crush.c b/net/ceph/crush/crush.c
index fabd302e5779..d6ebb13a18a4 100644
--- a/fs/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -8,7 +8,7 @@
8# define BUG_ON(x) assert(!(x)) 8# define BUG_ON(x) assert(!(x))
9#endif 9#endif
10 10
11#include "crush.h" 11#include <linux/crush/crush.h>
12 12
13const char *crush_bucket_alg_name(int alg) 13const char *crush_bucket_alg_name(int alg)
14{ 14{
diff --git a/fs/ceph/crush/hash.c b/net/ceph/crush/hash.c
index 5873aed694bf..5bb63e37a8a1 100644
--- a/fs/ceph/crush/hash.c
+++ b/net/ceph/crush/hash.c
@@ -1,6 +1,6 @@
1 1
2#include <linux/types.h> 2#include <linux/types.h>
3#include "hash.h" 3#include <linux/crush/hash.h>
4 4
5/* 5/*
6 * Robert Jenkins' function for mixing 32-bit values 6 * Robert Jenkins' function for mixing 32-bit values
diff --git a/fs/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index a4eec133258e..42599e31dcad 100644
--- a/fs/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -18,8 +18,8 @@
18# define kfree(x) free(x) 18# define kfree(x) free(x)
19#endif 19#endif
20 20
21#include "crush.h" 21#include <linux/crush/crush.h>
22#include "hash.h" 22#include <linux/crush/hash.h>
23 23
24/* 24/*
25 * Implement the core CRUSH mapping algorithm. 25 * Implement the core CRUSH mapping algorithm.
diff --git a/fs/ceph/crypto.c b/net/ceph/crypto.c
index a3e627f63293..7b505b0c983f 100644
--- a/fs/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -1,13 +1,13 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <crypto/hash.h> 7#include <crypto/hash.h>
8 8
9#include <linux/ceph/decode.h>
9#include "crypto.h" 10#include "crypto.h"
10#include "decode.h"
11 11
12int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) 12int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
13{ 13{
diff --git a/fs/ceph/crypto.h b/net/ceph/crypto.h
index bdf38607323c..f9eccace592b 100644
--- a/fs/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -1,8 +1,8 @@
1#ifndef _FS_CEPH_CRYPTO_H 1#ifndef _FS_CEPH_CRYPTO_H
2#define _FS_CEPH_CRYPTO_H 2#define _FS_CEPH_CRYPTO_H
3 3
4#include "types.h" 4#include <linux/ceph/types.h>
5#include "buffer.h" 5#include <linux/ceph/buffer.h>
6 6
7/* 7/*
8 * cryptographic secret 8 * cryptographic secret
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
new file mode 100644
index 000000000000..27d4ea315d12
--- /dev/null
+++ b/net/ceph/debugfs.c
@@ -0,0 +1,267 @@
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/device.h>
4#include <linux/slab.h>
5#include <linux/module.h>
6#include <linux/ctype.h>
7#include <linux/debugfs.h>
8#include <linux/seq_file.h>
9
10#include <linux/ceph/libceph.h>
11#include <linux/ceph/mon_client.h>
12#include <linux/ceph/auth.h>
13#include <linux/ceph/debugfs.h>
14
15#ifdef CONFIG_DEBUG_FS
16
17/*
18 * Implement /sys/kernel/debug/ceph fun
19 *
20 * /sys/kernel/debug/ceph/client* - an instance of the ceph client
21 * .../osdmap - current osdmap
22 * .../monmap - current monmap
23 * .../osdc - active osd requests
24 * .../monc - mon client state
25 * .../dentry_lru - dump contents of dentry lru
26 * .../caps - expose cap (reservation) stats
27 * .../bdi - symlink to ../../bdi/something
28 */
29
30static struct dentry *ceph_debugfs_dir;
31
32static int monmap_show(struct seq_file *s, void *p)
33{
34 int i;
35 struct ceph_client *client = s->private;
36
37 if (client->monc.monmap == NULL)
38 return 0;
39
40 seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
41 for (i = 0; i < client->monc.monmap->num_mon; i++) {
42 struct ceph_entity_inst *inst =
43 &client->monc.monmap->mon_inst[i];
44
45 seq_printf(s, "\t%s%lld\t%s\n",
46 ENTITY_NAME(inst->name),
47 ceph_pr_addr(&inst->addr.in_addr));
48 }
49 return 0;
50}
51
52static int osdmap_show(struct seq_file *s, void *p)
53{
54 int i;
55 struct ceph_client *client = s->private;
56 struct rb_node *n;
57
58 if (client->osdc.osdmap == NULL)
59 return 0;
60 seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
61 seq_printf(s, "flags%s%s\n",
62 (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
63 " NEARFULL" : "",
64 (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
65 " FULL" : "");
66 for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
67 struct ceph_pg_pool_info *pool =
68 rb_entry(n, struct ceph_pg_pool_info, node);
69 seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
70 pool->id, pool->v.pg_num, pool->pg_num_mask,
71 pool->v.lpg_num, pool->lpg_num_mask);
72 }
73 for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
74 struct ceph_entity_addr *addr =
75 &client->osdc.osdmap->osd_addr[i];
76 int state = client->osdc.osdmap->osd_state[i];
77 char sb[64];
78
79 seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
80 i, ceph_pr_addr(&addr->in_addr),
81 ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
82 ceph_osdmap_state_str(sb, sizeof(sb), state));
83 }
84 return 0;
85}
86
87static int monc_show(struct seq_file *s, void *p)
88{
89 struct ceph_client *client = s->private;
90 struct ceph_mon_generic_request *req;
91 struct ceph_mon_client *monc = &client->monc;
92 struct rb_node *rp;
93
94 mutex_lock(&monc->mutex);
95
96 if (monc->have_mdsmap)
97 seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
98 if (monc->have_osdmap)
99 seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
100 if (monc->want_next_osdmap)
101 seq_printf(s, "want next osdmap\n");
102
103 for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
104 __u16 op;
105 req = rb_entry(rp, struct ceph_mon_generic_request, node);
106 op = le16_to_cpu(req->request->hdr.type);
107 if (op == CEPH_MSG_STATFS)
108 seq_printf(s, "%lld statfs\n", req->tid);
109 else
110 seq_printf(s, "%lld unknown\n", req->tid);
111 }
112
113 mutex_unlock(&monc->mutex);
114 return 0;
115}
116
117static int osdc_show(struct seq_file *s, void *pp)
118{
119 struct ceph_client *client = s->private;
120 struct ceph_osd_client *osdc = &client->osdc;
121 struct rb_node *p;
122
123 mutex_lock(&osdc->request_mutex);
124 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
125 struct ceph_osd_request *req;
126 struct ceph_osd_request_head *head;
127 struct ceph_osd_op *op;
128 int num_ops;
129 int opcode, olen;
130 int i;
131
132 req = rb_entry(p, struct ceph_osd_request, r_node);
133
134 seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
135 req->r_osd ? req->r_osd->o_osd : -1,
136 le32_to_cpu(req->r_pgid.pool),
137 le16_to_cpu(req->r_pgid.ps));
138
139 head = req->r_request->front.iov_base;
140 op = (void *)(head + 1);
141
142 num_ops = le16_to_cpu(head->num_ops);
143 olen = le32_to_cpu(head->object_len);
144 seq_printf(s, "%.*s", olen,
145 (const char *)(head->ops + num_ops));
146
147 if (req->r_reassert_version.epoch)
148 seq_printf(s, "\t%u'%llu",
149 (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
150 le64_to_cpu(req->r_reassert_version.version));
151 else
152 seq_printf(s, "\t");
153
154 for (i = 0; i < num_ops; i++) {
155 opcode = le16_to_cpu(op->op);
156 seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
157 op++;
158 }
159
160 seq_printf(s, "\n");
161 }
162 mutex_unlock(&osdc->request_mutex);
163 return 0;
164}
165
166CEPH_DEFINE_SHOW_FUNC(monmap_show)
167CEPH_DEFINE_SHOW_FUNC(osdmap_show)
168CEPH_DEFINE_SHOW_FUNC(monc_show)
169CEPH_DEFINE_SHOW_FUNC(osdc_show)
170
171int ceph_debugfs_init(void)
172{
173 ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
174 if (!ceph_debugfs_dir)
175 return -ENOMEM;
176 return 0;
177}
178
179void ceph_debugfs_cleanup(void)
180{
181 debugfs_remove(ceph_debugfs_dir);
182}
183
184int ceph_debugfs_client_init(struct ceph_client *client)
185{
186 int ret = -ENOMEM;
187 char name[80];
188
189 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
190 client->monc.auth->global_id);
191
192 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
193 if (!client->debugfs_dir)
194 goto out;
195
196 client->monc.debugfs_file = debugfs_create_file("monc",
197 0600,
198 client->debugfs_dir,
199 client,
200 &monc_show_fops);
201 if (!client->monc.debugfs_file)
202 goto out;
203
204 client->osdc.debugfs_file = debugfs_create_file("osdc",
205 0600,
206 client->debugfs_dir,
207 client,
208 &osdc_show_fops);
209 if (!client->osdc.debugfs_file)
210 goto out;
211
212 client->debugfs_monmap = debugfs_create_file("monmap",
213 0600,
214 client->debugfs_dir,
215 client,
216 &monmap_show_fops);
217 if (!client->debugfs_monmap)
218 goto out;
219
220 client->debugfs_osdmap = debugfs_create_file("osdmap",
221 0600,
222 client->debugfs_dir,
223 client,
224 &osdmap_show_fops);
225 if (!client->debugfs_osdmap)
226 goto out;
227
228 return 0;
229
230out:
231 ceph_debugfs_client_cleanup(client);
232 return ret;
233}
234
235void ceph_debugfs_client_cleanup(struct ceph_client *client)
236{
237 debugfs_remove(client->debugfs_osdmap);
238 debugfs_remove(client->debugfs_monmap);
239 debugfs_remove(client->osdc.debugfs_file);
240 debugfs_remove(client->monc.debugfs_file);
241 debugfs_remove(client->debugfs_dir);
242}
243
244#else /* CONFIG_DEBUG_FS */
245
246int ceph_debugfs_init(void)
247{
248 return 0;
249}
250
251void ceph_debugfs_cleanup(void)
252{
253}
254
255int ceph_debugfs_client_init(struct ceph_client *client)
256{
257 return 0;
258}
259
260void ceph_debugfs_client_cleanup(struct ceph_client *client)
261{
262}
263
264#endif /* CONFIG_DEBUG_FS */
265
266EXPORT_SYMBOL(ceph_debugfs_init);
267EXPORT_SYMBOL(ceph_debugfs_cleanup);
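CEPH_DEFINE_SHOW_FUNC above generates the <name>_fops instances handed to debugfs_create_file(); presumably it is the usual seq_file boilerplate, reconstructed here as an assumption rather than quoted from the header:

    #define CEPH_DEFINE_SHOW_FUNC(name)					\
    static int name##_open(struct inode *inode, struct file *file)	\
    {									\
    	/* single_open() stashes i_private as seq_file->private,	\
    	 * which the show function reads back as s->private */		\
    	return single_open(file, name, inode->i_private);		\
    }									\
    									\
    static const struct file_operations name##_fops = {		\
    	.open		= name##_open,					\
    	.read		= seq_read,					\
    	.llseek		= seq_lseek,					\
    	.release	= single_release,				\
    };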
diff --git a/fs/ceph/messenger.c b/net/ceph/messenger.c
index 2502d76fcec1..0e8157ee5d43 100644
--- a/fs/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/crc32c.h>
 #include <linux/ctype.h>
@@ -9,12 +9,14 @@
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
 #include <net/tcp.h>
 
-#include "super.h"
-#include "messenger.h"
-#include "decode.h"
-#include "pagelist.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/pagelist.h>
 
 /*
  * Ceph uses the messenger to exchange ceph_msg messages with other
@@ -48,7 +50,7 @@ static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
 static DEFINE_SPINLOCK(addr_str_lock);
 static int last_addr_str;
 
-const char *pr_addr(const struct sockaddr_storage *ss)
+const char *ceph_pr_addr(const struct sockaddr_storage *ss)
 {
 	int i;
 	char *s;
@@ -79,6 +81,7 @@ const char *pr_addr(const struct sockaddr_storage *ss)
 
 	return s;
 }
+EXPORT_SYMBOL(ceph_pr_addr);
 
 static void encode_my_addr(struct ceph_messenger *msgr)
 {
@@ -91,7 +94,7 @@ static void encode_my_addr(struct ceph_messenger *msgr)
  */
 struct workqueue_struct *ceph_msgr_wq;
 
-int __init ceph_msgr_init(void)
+int ceph_msgr_init(void)
 {
 	ceph_msgr_wq = create_workqueue("ceph-msgr");
 	if (IS_ERR(ceph_msgr_wq)) {
@@ -102,16 +105,19 @@ int __init ceph_msgr_init(void)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ceph_msgr_init);
 
 void ceph_msgr_exit(void)
 {
 	destroy_workqueue(ceph_msgr_wq);
 }
+EXPORT_SYMBOL(ceph_msgr_exit);
 
 void ceph_msgr_flush(void)
 {
 	flush_workqueue(ceph_msgr_wq);
 }
+EXPORT_SYMBOL(ceph_msgr_flush);
 
 
 /*
@@ -221,19 +227,19 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 
 	set_sock_callbacks(sock, con);
 
-	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
+	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 
 	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
 				 O_NONBLOCK);
 	if (ret == -EINPROGRESS) {
 		dout("connect %s EINPROGRESS sk_state = %u\n",
-		     pr_addr(&con->peer_addr.in_addr),
+		     ceph_pr_addr(&con->peer_addr.in_addr),
 		     sock->sk->sk_state);
 		ret = 0;
 	}
 	if (ret < 0) {
 		pr_err("connect %s error %d\n",
-		       pr_addr(&con->peer_addr.in_addr), ret);
+		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
 		sock_release(sock);
 		con->sock = NULL;
 		con->error_msg = "connect error";
@@ -334,7 +340,8 @@ static void reset_connection(struct ceph_connection *con)
  */
 void ceph_con_close(struct ceph_connection *con)
 {
-	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
+	dout("con_close %p peer %s\n", con,
+	     ceph_pr_addr(&con->peer_addr.in_addr));
 	set_bit(CLOSED, &con->state);  /* in case there's queued work */
 	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
 	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
@@ -347,19 +354,21 @@ void ceph_con_close(struct ceph_connection *con)
 	mutex_unlock(&con->mutex);
 	queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_close);
 
 /*
  * Reopen a closed connection, with a new peer address.
 */
 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
 {
-	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
+	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
 	set_bit(OPENING, &con->state);
 	clear_bit(CLOSED, &con->state);
 	memcpy(&con->peer_addr, addr, sizeof(*addr));
 	con->delay = 0;      /* reset backoff memory */
 	queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_open);
 
 /*
  * return true if this connection ever successfully opened
@@ -406,6 +415,7 @@ void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
 	INIT_LIST_HEAD(&con->out_sent);
 	INIT_DELAYED_WORK(&con->work, con_work);
 }
+EXPORT_SYMBOL(ceph_con_init);
 
 
 /*
@@ -529,8 +539,11 @@ static void prepare_write_message(struct ceph_connection *con)
 	if (le32_to_cpu(m->hdr.data_len) > 0) {
 		/* initialize page iterator */
 		con->out_msg_pos.page = 0;
-		con->out_msg_pos.page_pos =
-			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+		if (m->pages)
+			con->out_msg_pos.page_pos =
+				le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+		else
+			con->out_msg_pos.page_pos = 0;
 		con->out_msg_pos.data_pos = 0;
 		con->out_msg_pos.did_page_crc = 0;
 		con->out_more = 1;  /* data + footer will follow */
@@ -647,7 +660,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
 	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
 	     con->connect_seq, global_seq, proto);
 
-	con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
+	con->out_connect.features = cpu_to_le64(msgr->supported_features);
 	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
 	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
 	con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -712,6 +725,31 @@ out:
 	return ret;  /* done! */
 }
 
+#ifdef CONFIG_BLOCK
+static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+{
+	if (!bio) {
+		*iter = NULL;
+		*seg = 0;
+		return;
+	}
+	*iter = bio;
+	*seg = bio->bi_idx;
+}
+
+static void iter_bio_next(struct bio **bio_iter, int *seg)
+{
+	if (*bio_iter == NULL)
+		return;
+
+	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+
+	(*seg)++;
+	if (*seg == (*bio_iter)->bi_vcnt)
+		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
+}
+#endif
+
 /*
  * Write as much message data payload as we can.  If we finish, queue
  * up the footer.
@@ -726,21 +764,46 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 	size_t len;
 	int crc = con->msgr->nocrc;
 	int ret;
+	int total_max_write;
+	int in_trail = 0;
+	size_t trail_len = (msg->trail ? msg->trail->length : 0);
 
 	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
 	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
 	     con->out_msg_pos.page_pos);
 
-	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
+#ifdef CONFIG_BLOCK
+	if (msg->bio && !msg->bio_iter)
+		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
+#endif
+
+	while (data_len > con->out_msg_pos.data_pos) {
 		struct page *page = NULL;
 		void *kaddr = NULL;
+		int max_write = PAGE_SIZE;
+		int page_shift = 0;
+
+		total_max_write = data_len - trail_len -
+			con->out_msg_pos.data_pos;
 
 		/*
 		 * if we are calculating the data crc (the default), we need
 		 * to map the page.  if our pages[] has been revoked, use the
 		 * zero page.
 		 */
-		if (msg->pages) {
+
+		/* have we reached the trail part of the data? */
+		if (con->out_msg_pos.data_pos >= data_len - trail_len) {
+			in_trail = 1;
+
+			total_max_write = data_len - con->out_msg_pos.data_pos;
+
+			page = list_first_entry(&msg->trail->head,
+						struct page, lru);
+			if (crc)
+				kaddr = kmap(page);
+			max_write = PAGE_SIZE;
+		} else if (msg->pages) {
 			page = msg->pages[con->out_msg_pos.page];
 			if (crc)
 				kaddr = kmap(page);
@@ -749,13 +812,25 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 					 struct page, lru);
 			if (crc)
 				kaddr = kmap(page);
+#ifdef CONFIG_BLOCK
+		} else if (msg->bio) {
+			struct bio_vec *bv;
+
+			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
+			page = bv->bv_page;
+			page_shift = bv->bv_offset;
+			if (crc)
+				kaddr = kmap(page) + page_shift;
+			max_write = bv->bv_len;
+#endif
 		} else {
 			page = con->msgr->zero_page;
 			if (crc)
 				kaddr = page_address(con->msgr->zero_page);
 		}
-		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
-			  (int)(data_len - con->out_msg_pos.data_pos));
+		len = min_t(int, max_write - con->out_msg_pos.page_pos,
+			    total_max_write);
+
 		if (crc && !con->out_msg_pos.did_page_crc) {
 			void *base = kaddr + con->out_msg_pos.page_pos;
 			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
@@ -765,13 +840,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 				cpu_to_le32(crc32c(tmpcrc, base, len));
 			con->out_msg_pos.did_page_crc = 1;
 		}
-
 		ret = kernel_sendpage(con->sock, page,
-				      con->out_msg_pos.page_pos, len,
+				      con->out_msg_pos.page_pos + page_shift,
+				      len,
 				      MSG_DONTWAIT | MSG_NOSIGNAL |
 				      MSG_MORE);
 
-		if (crc && (msg->pages || msg->pagelist))
+		if (crc &&
+		    (msg->pages || msg->pagelist || msg->bio || in_trail))
 			kunmap(page);
 
 		if (ret <= 0)
@@ -783,9 +859,16 @@ static int write_partial_msg_pages(struct ceph_connection *con)
 			con->out_msg_pos.page_pos = 0;
 			con->out_msg_pos.page++;
 			con->out_msg_pos.did_page_crc = 0;
-			if (msg->pagelist)
+			if (in_trail)
+				list_move_tail(&page->lru,
+					       &msg->trail->head);
+			else if (msg->pagelist)
 				list_move_tail(&page->lru,
 					       &msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+			else if (msg->bio)
+				iter_bio_next(&msg->bio_iter, &msg->bio_seg);
+#endif
 		}
 	}
 
@@ -938,7 +1021,7 @@ static int verify_hello(struct ceph_connection *con)
 {
 	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
 		pr_err("connect to %s got bad banner\n",
-		       pr_addr(&con->peer_addr.in_addr));
+		       ceph_pr_addr(&con->peer_addr.in_addr));
 		con->error_msg = "protocol error, bad banner";
 		return -1;
 	}
@@ -1041,7 +1124,7 @@ int ceph_parse_ips(const char *c, const char *end,
 
 		addr_set_port(ss, port);
 
-		dout("parse_ips got %s\n", pr_addr(ss));
+		dout("parse_ips got %s\n", ceph_pr_addr(ss));
 
 		if (p == end)
 			break;
@@ -1061,6 +1144,7 @@ bad:
 	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
 	return -EINVAL;
 }
+EXPORT_SYMBOL(ceph_parse_ips);
 
 static int process_banner(struct ceph_connection *con)
 {
@@ -1082,9 +1166,9 @@ static int process_banner(struct ceph_connection *con)
 	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
 	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
 		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
-			   pr_addr(&con->peer_addr.in_addr),
+			   ceph_pr_addr(&con->peer_addr.in_addr),
 			   (int)le32_to_cpu(con->peer_addr.nonce),
-			   pr_addr(&con->actual_peer_addr.in_addr),
+			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
 			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
 		con->error_msg = "wrong peer at address";
 		return -1;
@@ -1102,7 +1186,7 @@ static int process_banner(struct ceph_connection *con)
 		addr_set_port(&con->msgr->inst.addr.in_addr, port);
 		encode_my_addr(con->msgr);
 		dout("process_banner learned my addr is %s\n",
-		     pr_addr(&con->msgr->inst.addr.in_addr));
+		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
 	}
 
 	set_bit(NEGOTIATING, &con->state);
@@ -1123,8 +1207,8 @@ static void fail_protocol(struct ceph_connection *con)
 
 static int process_connect(struct ceph_connection *con)
 {
-	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
-	u64 req_feat = CEPH_FEATURE_REQUIRED;
+	u64 sup_feat = con->msgr->supported_features;
+	u64 req_feat = con->msgr->required_features;
 	u64 server_feat = le64_to_cpu(con->in_reply.features);
 
 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1134,7 +1218,7 @@ static int process_connect(struct ceph_connection *con)
 		pr_err("%s%lld %s feature set mismatch,"
 		       " my %llx < server's %llx, missing %llx\n",
 		       ENTITY_NAME(con->peer_name),
-		       pr_addr(&con->peer_addr.in_addr),
+		       ceph_pr_addr(&con->peer_addr.in_addr),
 		       sup_feat, server_feat, server_feat & ~sup_feat);
 		con->error_msg = "missing required protocol features";
 		fail_protocol(con);
@@ -1144,7 +1228,7 @@ static int process_connect(struct ceph_connection *con)
 		pr_err("%s%lld %s protocol version mismatch,"
 		       " my %d != server's %d\n",
 		       ENTITY_NAME(con->peer_name),
-		       pr_addr(&con->peer_addr.in_addr),
+		       ceph_pr_addr(&con->peer_addr.in_addr),
 		       le32_to_cpu(con->out_connect.protocol_version),
 		       le32_to_cpu(con->in_reply.protocol_version));
 		con->error_msg = "protocol version mismatch";
@@ -1178,7 +1262,7 @@ static int process_connect(struct ceph_connection *con)
 			le32_to_cpu(con->in_connect.connect_seq));
 		pr_err("%s%lld %s connection reset\n",
 		       ENTITY_NAME(con->peer_name),
-		       pr_addr(&con->peer_addr.in_addr));
+		       ceph_pr_addr(&con->peer_addr.in_addr));
 		reset_connection(con);
 		prepare_write_connect(con->msgr, con, 0);
 		prepare_read_connect(con);
@@ -1223,7 +1307,7 @@ static int process_connect(struct ceph_connection *con)
 		pr_err("%s%lld %s protocol feature mismatch,"
 		       " my required %llx > server's %llx, need %llx\n",
 		       ENTITY_NAME(con->peer_name),
-		       pr_addr(&con->peer_addr.in_addr),
+		       ceph_pr_addr(&con->peer_addr.in_addr),
 		       req_feat, server_feat, req_feat & ~server_feat);
 		con->error_msg = "missing required protocol features";
 		fail_protocol(con);
@@ -1305,8 +1389,7 @@ static int read_partial_message_section(struct ceph_connection *con,
 				  struct kvec *section,
 				  unsigned int sec_len, u32 *crc)
 {
-	int left;
-	int ret;
+	int ret, left;
 
 	BUG_ON(!section);
 
@@ -1329,13 +1412,83 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
 				       struct ceph_msg_header *hdr,
 				       int *skip);
+
+
+static int read_partial_message_pages(struct ceph_connection *con,
+				      struct page **pages,
+				      unsigned data_len, int datacrc)
+{
+	void *p;
+	int ret;
+	int left;
+
+	left = min((int)(data_len - con->in_msg_pos.data_pos),
+		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
+	/* (page) data */
+	BUG_ON(pages == NULL);
+	p = kmap(pages[con->in_msg_pos.page]);
+	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+			       left);
+	if (ret > 0 && datacrc)
+		con->in_data_crc =
+			crc32c(con->in_data_crc,
+			       p + con->in_msg_pos.page_pos, ret);
+	kunmap(pages[con->in_msg_pos.page]);
+	if (ret <= 0)
+		return ret;
+	con->in_msg_pos.data_pos += ret;
+	con->in_msg_pos.page_pos += ret;
+	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
+		con->in_msg_pos.page_pos = 0;
+		con->in_msg_pos.page++;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_BLOCK
+static int read_partial_message_bio(struct ceph_connection *con,
+				    struct bio **bio_iter, int *bio_seg,
+				    unsigned data_len, int datacrc)
+{
+	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
+	void *p;
+	int ret, left;
+
+	if (IS_ERR(bv))
+		return PTR_ERR(bv);
+
+	left = min((int)(data_len - con->in_msg_pos.data_pos),
+		   (int)(bv->bv_len - con->in_msg_pos.page_pos));
+
+	p = kmap(bv->bv_page) + bv->bv_offset;
+
+	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+			       left);
+	if (ret > 0 && datacrc)
+		con->in_data_crc =
+			crc32c(con->in_data_crc,
+			       p + con->in_msg_pos.page_pos, ret);
+	kunmap(bv->bv_page);
+	if (ret <= 0)
+		return ret;
+	con->in_msg_pos.data_pos += ret;
+	con->in_msg_pos.page_pos += ret;
+	if (con->in_msg_pos.page_pos == bv->bv_len) {
+		con->in_msg_pos.page_pos = 0;
+		iter_bio_next(bio_iter, bio_seg);
+	}
+
+	return ret;
+}
+#endif
+
 /*
  * read (part of) a message.
  */
 static int read_partial_message(struct ceph_connection *con)
 {
 	struct ceph_msg *m = con->in_msg;
-	void *p;
 	int ret;
 	int to, left;
 	unsigned front_len, middle_len, data_len, data_off;
@@ -1381,7 +1534,7 @@ static int read_partial_message(struct ceph_connection *con)
 		if ((s64)seq - (s64)con->in_seq < 1) {
 			pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
 				ENTITY_NAME(con->peer_name),
-				pr_addr(&con->peer_addr.in_addr),
+				ceph_pr_addr(&con->peer_addr.in_addr),
 				seq, con->in_seq + 1);
 			con->in_base_pos = -front_len - middle_len - data_len -
 				sizeof(m->footer);
@@ -1422,7 +1575,10 @@ static int read_partial_message(struct ceph_connection *con)
 			m->middle->vec.iov_len = 0;
 
 		con->in_msg_pos.page = 0;
-		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+		if (m->pages)
+			con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+		else
+			con->in_msg_pos.page_pos = 0;
 		con->in_msg_pos.data_pos = 0;
 	}
 
@@ -1440,27 +1596,29 @@ static int read_partial_message(struct ceph_connection *con)
 		if (ret <= 0)
 			return ret;
 	}
+#ifdef CONFIG_BLOCK
+	if (m->bio && !m->bio_iter)
+		init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
+#endif
 
 	/* (page) data */
 	while (con->in_msg_pos.data_pos < data_len) {
-		left = min((int)(data_len - con->in_msg_pos.data_pos),
-			   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
-		BUG_ON(m->pages == NULL);
-		p = kmap(m->pages[con->in_msg_pos.page]);
-		ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
-				       left);
-		if (ret > 0 && datacrc)
-			con->in_data_crc =
-				crc32c(con->in_data_crc,
-				       p + con->in_msg_pos.page_pos, ret);
-		kunmap(m->pages[con->in_msg_pos.page]);
-		if (ret <= 0)
-			return ret;
-		con->in_msg_pos.data_pos += ret;
-		con->in_msg_pos.page_pos += ret;
-		if (con->in_msg_pos.page_pos == PAGE_SIZE) {
-			con->in_msg_pos.page_pos = 0;
-			con->in_msg_pos.page++;
+		if (m->pages) {
+			ret = read_partial_message_pages(con, m->pages,
+						 data_len, datacrc);
+			if (ret <= 0)
+				return ret;
+#ifdef CONFIG_BLOCK
+		} else if (m->bio) {
+
+			ret = read_partial_message_bio(con,
+						 &m->bio_iter, &m->bio_seg,
+						 data_len, datacrc);
+			if (ret <= 0)
+				return ret;
+#endif
+		} else {
+			BUG_ON(1);
 		}
 	}
 
@@ -1874,9 +2032,9 @@ out:
 static void ceph_fault(struct ceph_connection *con)
 {
 	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
-	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
+	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
 	dout("fault %p state %lu to peer %s\n",
-	     con, con->state, pr_addr(&con->peer_addr.in_addr));
+	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
 
 	if (test_bit(LOSSYTX, &con->state)) {
 		dout("fault on LOSSYTX channel\n");
@@ -1936,7 +2094,9 @@ out:
 /*
 * create a new messenger instance
 */
-struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
+struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
+					     u32 supported_features,
+					     u32 required_features)
 {
 	struct ceph_messenger *msgr;
 
@@ -1944,6 +2104,9 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
 	if (msgr == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	msgr->supported_features = supported_features;
+	msgr->required_features = required_features;
+
 	spin_lock_init(&msgr->global_seq_lock);
 
 	/* the zero page is needed if a request is "canceled" while the message
@@ -1966,6 +2129,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
 	dout("messenger_create %p\n", msgr);
 	return msgr;
 }
+EXPORT_SYMBOL(ceph_messenger_create);
 
 void ceph_messenger_destroy(struct ceph_messenger *msgr)
 {
@@ -1975,6 +2139,7 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
 	kfree(msgr);
 	dout("destroyed messenger %p\n", msgr);
 }
+EXPORT_SYMBOL(ceph_messenger_destroy);
 
 /*
 * Queue up an outgoing message on the given connection.
@@ -2011,6 +2176,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_send);
 
 /*
 * Revoke a message that was previously queued for send
@@ -2076,6 +2242,7 @@ void ceph_con_keepalive(struct ceph_connection *con)
 	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
 		queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_keepalive);
 
 
 /*
@@ -2136,6 +2303,10 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
 	m->nr_pages = 0;
 	m->pages = NULL;
 	m->pagelist = NULL;
+	m->bio = NULL;
+	m->bio_iter = NULL;
+	m->bio_seg = 0;
+	m->trail = NULL;
 
 	dout("ceph_msg_new %p front %d\n", m, front_len);
 	return m;
@@ -2146,6 +2317,7 @@ out:
 	pr_err("msg_new can't create type %d front %d\n", type, front_len);
 	return NULL;
 }
+EXPORT_SYMBOL(ceph_msg_new);
 
 /*
 * Allocate "middle" portion of a message, if it is needed and wasn't
@@ -2250,11 +2422,14 @@ void ceph_msg_last_put(struct kref *kref)
 		m->pagelist = NULL;
 	}
 
+	m->trail = NULL;
+
 	if (m->pool)
 		ceph_msgpool_put(m->pool, m);
 	else
 		ceph_msg_kfree(m);
 }
+EXPORT_SYMBOL(ceph_msg_last_put);
 
 void ceph_msg_dump(struct ceph_msg *msg)
 {
@@ -2275,3 +2450,4 @@ void ceph_msg_dump(struct ceph_msg *msg)
 		       DUMP_PREFIX_OFFSET, 16, 1,
 		       &msg->footer, sizeof(msg->footer), true);
 }
+EXPORT_SYMBOL(ceph_msg_dump);
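
Taken together, the messenger changes turn the transport into a reusable library: ceph_msgr_init() drops its __init annotation, the connection entry points gain EXPORT_SYMBOL, and the feature masks used during connection negotiation move from the compile-time CEPH_FEATURE_SUPPORTED/CEPH_FEATURE_REQUIRED constants into per-messenger fields supplied by the caller. A minimal sketch of how a client of the shared module might bring a messenger up under the new three-argument signature (the two feature masks below are illustrative placeholders, not constants from the tree):

	#include <linux/err.h>
	#include <linux/ceph/messenger.h>

	/* illustrative masks only; real clients pass their own bits */
	#define EX_SUPPORTED_FEATURES	0x3fU
	#define EX_REQUIRED_FEATURES	0x0U

	static struct ceph_messenger *
	example_messenger_up(struct ceph_entity_addr *myaddr)
	{
		struct ceph_messenger *msgr;
		int err;

		err = ceph_msgr_init();	/* no longer __init: modules may call it */
		if (err)
			return ERR_PTR(err);

		/* feature masks now come from the caller, per messenger */
		msgr = ceph_messenger_create(myaddr, EX_SUPPORTED_FEATURES,
					     EX_REQUIRED_FEATURES);
		if (IS_ERR(msgr))
			ceph_msgr_exit();
		return msgr;
	}
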
diff --git a/fs/ceph/mon_client.c b/net/ceph/mon_client.c
index b2a5a3e4a671..8a079399174a 100644
--- a/fs/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1,14 +1,16 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/sched.h>
 
-#include "mon_client.h"
-#include "super.h"
-#include "auth.h"
-#include "decode.h"
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/decode.h>
+
+#include <linux/ceph/auth.h>
 
 /*
  * Interact with Ceph monitor cluster.  Handle requests for new map
@@ -74,7 +76,7 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
 	     m->num_mon);
 	for (i = 0; i < m->num_mon; i++)
 		dout("monmap_decode mon%d is %s\n", i,
-		     pr_addr(&m->mon_inst[i].addr.in_addr));
+		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
 	return m;
 
 bad:
@@ -191,30 +193,33 @@ static void __send_subscribe(struct ceph_mon_client *monc)
 	struct ceph_msg *msg = monc->m_subscribe;
 	struct ceph_mon_subscribe_item *i;
 	void *p, *end;
+	int num;
 
 	p = msg->front.iov_base;
 	end = p + msg->front_max;
 
-	dout("__send_subscribe to 'mdsmap' %u+\n",
-	     (unsigned)monc->have_mdsmap);
+	num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
+	ceph_encode_32(&p, num);
+
 	if (monc->want_next_osdmap) {
 		dout("__send_subscribe to 'osdmap' %u\n",
 		     (unsigned)monc->have_osdmap);
-		ceph_encode_32(&p, 3);
 		ceph_encode_string(&p, end, "osdmap", 6);
 		i = p;
 		i->have = cpu_to_le64(monc->have_osdmap);
 		i->onetime = 1;
 		p += sizeof(*i);
 		monc->want_next_osdmap = 2;  /* requested */
-	} else {
-		ceph_encode_32(&p, 2);
 	}
-	ceph_encode_string(&p, end, "mdsmap", 6);
-	i = p;
-	i->have = cpu_to_le64(monc->have_mdsmap);
-	i->onetime = 0;
-	p += sizeof(*i);
+	if (monc->want_mdsmap) {
+		dout("__send_subscribe to 'mdsmap' %u+\n",
+		     (unsigned)monc->have_mdsmap);
+		ceph_encode_string(&p, end, "mdsmap", 6);
+		i = p;
+		i->have = cpu_to_le64(monc->have_mdsmap);
+		i->onetime = 0;
+		p += sizeof(*i);
+	}
 	ceph_encode_string(&p, end, "monmap", 6);
 	i = p;
 	i->have = 0;
@@ -243,7 +248,8 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
 	mutex_lock(&monc->mutex);
 	if (monc->hunting) {
 		pr_info("mon%d %s session established\n",
-			monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
+			monc->cur_mon,
+			ceph_pr_addr(&monc->con->peer_addr.in_addr));
 		monc->hunting = false;
 	}
 	dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -266,6 +272,7 @@ int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
 	mutex_unlock(&monc->mutex);
 	return 0;
 }
+EXPORT_SYMBOL(ceph_monc_got_mdsmap);
 
 int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
 {
@@ -310,6 +317,7 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
 	mutex_unlock(&monc->mutex);
 	return 0;
 }
+EXPORT_SYMBOL(ceph_monc_open_session);
 
 /*
 * The monitor responds with mount ack indicate mount success.  The
@@ -540,6 +548,7 @@ out:
 	kref_put(&req->kref, release_generic_request);
 	return err;
 }
+EXPORT_SYMBOL(ceph_monc_do_statfs);
 
 /*
 * pool ops
@@ -651,6 +660,7 @@ int ceph_monc_create_snapid(struct ceph_mon_client *monc,
 				   pool, 0, (char *)snapid, sizeof(*snapid));
 
 }
+EXPORT_SYMBOL(ceph_monc_create_snapid);
 
 int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
 			    u32 pool, u64 snapid)
@@ -708,9 +718,9 @@ static void delayed_work(struct work_struct *work)
 */
 static int build_initial_monmap(struct ceph_mon_client *monc)
 {
-	struct ceph_mount_args *args = monc->client->mount_args;
-	struct ceph_entity_addr *mon_addr = args->mon_addr;
-	int num_mon = args->num_mon;
+	struct ceph_options *opt = monc->client->options;
+	struct ceph_entity_addr *mon_addr = opt->mon_addr;
+	int num_mon = opt->num_mon;
 	int i;
 
 	/* build initial monmap */
@@ -728,11 +738,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
 	}
 	monc->monmap->num_mon = num_mon;
 	monc->have_fsid = false;
-
-	/* release addr memory */
-	kfree(args->mon_addr);
-	args->mon_addr = NULL;
-	args->num_mon = 0;
 	return 0;
 }
 
@@ -753,8 +758,8 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
 	monc->con = NULL;
 
 	/* authentication */
-	monc->auth = ceph_auth_init(cl->mount_args->name,
-				    cl->mount_args->secret);
+	monc->auth = ceph_auth_init(cl->options->name,
+				    cl->options->secret);
 	if (IS_ERR(monc->auth))
 		return PTR_ERR(monc->auth);
 	monc->auth->want_keys =
@@ -808,6 +813,7 @@ out_monmap:
 out:
 	return err;
 }
+EXPORT_SYMBOL(ceph_monc_init);
 
 void ceph_monc_stop(struct ceph_mon_client *monc)
 {
@@ -832,6 +838,7 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
 	kfree(monc->monmap);
 }
+EXPORT_SYMBOL(ceph_monc_stop);
 
 static void handle_auth_reply(struct ceph_mon_client *monc,
 			      struct ceph_msg *msg)
@@ -889,6 +896,7 @@ int ceph_monc_validate_auth(struct ceph_mon_client *monc)
 	mutex_unlock(&monc->mutex);
 	return ret;
 }
+EXPORT_SYMBOL(ceph_monc_validate_auth);
 
 /*
 * handle incoming message
@@ -922,15 +930,16 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 		ceph_monc_handle_map(monc, msg);
 		break;
 
-	case CEPH_MSG_MDS_MAP:
-		ceph_mdsc_handle_map(&monc->client->mdsc, msg);
-		break;
-
 	case CEPH_MSG_OSD_MAP:
 		ceph_osdc_handle_map(&monc->client->osdc, msg);
 		break;
 
 	default:
+		/* can the chained handler handle it? */
+		if (monc->client->extra_mon_dispatch &&
+		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
+			break;
+
 		pr_err("received unknown message type %d %s\n", type,
 		       ceph_msg_type_name(type));
 	}
@@ -994,7 +1003,7 @@ static void mon_fault(struct ceph_connection *con)
 	if (monc->con && !monc->hunting)
 		pr_info("mon%d %s session lost, "
 			"hunting for new mon\n", monc->cur_mon,
-			pr_addr(&monc->con->peer_addr.in_addr));
+			ceph_pr_addr(&monc->con->peer_addr.in_addr));
 
 	__close_session(monc);
 	if (!monc->hunting) {
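
The rewritten __send_subscribe() no longer hard-codes the subscription count (2 or 3) and no longer unconditionally asks for the MDS map, which only the filesystem client needs: the item count is computed up front and each map is encoded only if wanted, with MDS-specific messages handled through the new extra_mon_dispatch chain. A standalone illustration of the counting idiom (plain userspace C, not kernel code):

	/* Mirrors the subscribe-count logic in __send_subscribe():
	 * "monmap" is always requested; "osdmap" and "mdsmap" only
	 * on demand.  !! collapses any non-zero want flag to 1. */
	#include <stdio.h>

	static int subscribe_count(int want_osdmap, int want_mdsmap)
	{
		return 1 + !!want_osdmap + !!want_mdsmap;
	}

	int main(void)
	{
		printf("%d\n", subscribe_count(2, 0)); /* 2: monmap + osdmap */
		printf("%d\n", subscribe_count(0, 0)); /* 1: monmap only */
		return 0;
	}
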
diff --git a/fs/ceph/msgpool.c b/net/ceph/msgpool.c
index dd65a6438131..d5f2d97ac05c 100644
--- a/fs/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -1,11 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
-#include "msgpool.h"
+#include <linux/ceph/msgpool.h>
 
 static void *alloc_fn(gfp_t gfp_mask, void *arg)
 {
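
The osd_client.c rework that follows replaces the single hard-coded op in each request with a caller-supplied table of struct ceph_osd_req_op entries, terminated by an entry whose .op is zero; get_num_ops() walks the table and op_needs_trail() decides whether a trailing pagelist is required for xattr and class-call payloads. A sketch of how a caller might fill such a table for a write followed by a startsync, mirroring what ceph_osdc_new_request() itself does below (illustrative only, kernel context assumed):

	/* Fill a zero-terminated op table: ops[] must hold 3 entries. */
	static void example_write_with_sync(struct ceph_osd_req_op *ops)
	{
		ops[0].op = CEPH_OSD_OP_WRITE;
		ops[0].extent.truncate_seq = 0;
		ops[0].extent.truncate_size = 0;
		ops[0].payload_len = 0;

		ops[1].op = CEPH_OSD_OP_STARTSYNC;	/* flush quickly */
		ops[1].payload_len = 0;

		ops[2].op = 0;				/* terminator */
	}
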
diff --git a/fs/ceph/osd_client.c b/net/ceph/osd_client.c
index dfced1dacbcd..79391994b3ed 100644
--- a/fs/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1,17 +1,22 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#ifdef CONFIG_BLOCK
+#include <linux/bio.h>
+#endif
 
-#include "super.h"
-#include "osd_client.h"
-#include "messenger.h"
-#include "decode.h"
-#include "auth.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
 
 #define OSD_OP_FRONT_LEN	4096
 #define OSD_OPREPLY_FRONT_LEN	512
@@ -22,6 +27,59 @@ static int __kick_requests(struct ceph_osd_client *osdc,
 
 static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
 
+static int op_needs_trail(int op)
+{
+	switch (op) {
+	case CEPH_OSD_OP_GETXATTR:
+	case CEPH_OSD_OP_SETXATTR:
+	case CEPH_OSD_OP_CMPXATTR:
+	case CEPH_OSD_OP_CALL:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int op_has_extent(int op)
+{
+	return (op == CEPH_OSD_OP_READ ||
+		op == CEPH_OSD_OP_WRITE);
+}
+
+void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+			struct ceph_file_layout *layout,
+			u64 snapid,
+			u64 off, u64 *plen, u64 *bno,
+			struct ceph_osd_request *req,
+			struct ceph_osd_req_op *op)
+{
+	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
+	u64 orig_len = *plen;
+	u64 objoff, objlen;    /* extent in object */
+
+	reqhead->snapid = cpu_to_le64(snapid);
+
+	/* object extent? */
+	ceph_calc_file_object_mapping(layout, off, plen, bno,
+				      &objoff, &objlen);
+	if (*plen < orig_len)
+		dout(" skipping last %llu, final file extent %llu~%llu\n",
+		     orig_len - *plen, off, *plen);
+
+	if (op_has_extent(op->op)) {
+		op->extent.offset = objoff;
+		op->extent.length = objlen;
+	}
+	req->r_num_pages = calc_pages_for(off, *plen);
+	if (op->op == CEPH_OSD_OP_WRITE)
+		op->payload_len = *plen;
+
+	dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
+	     *bno, objoff, objlen, req->r_num_pages);
+
+}
+EXPORT_SYMBOL(ceph_calc_raw_layout);
+
 /*
  * Implement client access to distributed object storage cluster.
  *
@@ -48,34 +106,19 @@ static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
  * fill osd op in request message.
  */
 static void calc_layout(struct ceph_osd_client *osdc,
-			struct ceph_vino vino, struct ceph_file_layout *layout,
+			struct ceph_vino vino,
+			struct ceph_file_layout *layout,
 			u64 off, u64 *plen,
-			struct ceph_osd_request *req)
+			struct ceph_osd_request *req,
+			struct ceph_osd_req_op *op)
 {
-	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
-	struct ceph_osd_op *op = (void *)(reqhead + 1);
-	u64 orig_len = *plen;
-	u64 objoff, objlen;    /* extent in object */
 	u64 bno;
 
-	reqhead->snapid = cpu_to_le64(vino.snap);
-
-	/* object extent? */
-	ceph_calc_file_object_mapping(layout, off, plen, &bno,
-				      &objoff, &objlen);
-	if (*plen < orig_len)
-		dout(" skipping last %llu, final file extent %llu~%llu\n",
-		     orig_len - *plen, off, *plen);
+	ceph_calc_raw_layout(osdc, layout, vino.snap, off,
+			     plen, &bno, req, op);
 
 	sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
 	req->r_oid_len = strlen(req->r_oid);
-
-	op->extent.offset = cpu_to_le64(objoff);
-	op->extent.length = cpu_to_le64(objlen);
-	req->r_num_pages = calc_pages_for(off, *plen);
-
-	dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
-	     req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
 }
 
 /*
@@ -101,56 +144,66 @@ void ceph_osdc_release_request(struct kref *kref)
 	if (req->r_own_pages)
 		ceph_release_page_vector(req->r_pages,
 					 req->r_num_pages);
+#ifdef CONFIG_BLOCK
+	if (req->r_bio)
+		bio_put(req->r_bio);
+#endif
 	ceph_put_snap_context(req->r_snapc);
+	if (req->r_trail) {
+		ceph_pagelist_release(req->r_trail);
+		kfree(req->r_trail);
+	}
 	if (req->r_mempool)
 		mempool_free(req, req->r_osdc->req_mempool);
 	else
 		kfree(req);
 }
+EXPORT_SYMBOL(ceph_osdc_release_request);
 
-/*
- * build new request AND message, calculate layout, and adjust file
- * extent as needed.
- *
- * if the file was recently truncated, we include information about its
- * old and new size so that the object can be updated appropriately. (we
- * avoid synchronously deleting truncated objects because it's slow.)
- *
- * if @do_sync, include a 'startsync' command so that the osd will flush
- * data quickly.
- */
-struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
-					       struct ceph_file_layout *layout,
-					       struct ceph_vino vino,
-					       u64 off, u64 *plen,
-					       int opcode, int flags,
+static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
+{
+	int i = 0;
+
+	if (needs_trail)
+		*needs_trail = 0;
+	while (ops[i].op) {
+		if (needs_trail && op_needs_trail(ops[i].op))
+			*needs_trail = 1;
+		i++;
+	}
+
+	return i;
+}
+
+struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+					       int flags,
 					       struct ceph_snap_context *snapc,
-					       int do_sync,
-					       u32 truncate_seq,
-					       u64 truncate_size,
-					       struct timespec *mtime,
-					       bool use_mempool, int num_reply)
+					       struct ceph_osd_req_op *ops,
+					       bool use_mempool,
+					       gfp_t gfp_flags,
+					       struct page **pages,
+					       struct bio *bio)
 {
 	struct ceph_osd_request *req;
 	struct ceph_msg *msg;
-	struct ceph_osd_request_head *head;
-	struct ceph_osd_op *op;
-	void *p;
-	int num_op = 1 + do_sync;
-	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
-	int i;
+	int needs_trail;
+	int num_op = get_num_ops(ops, &needs_trail);
+	size_t msg_size = sizeof(struct ceph_osd_request_head);
+
+	msg_size += num_op*sizeof(struct ceph_osd_op);
 
 	if (use_mempool) {
-		req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
+		req = mempool_alloc(osdc->req_mempool, gfp_flags);
 		memset(req, 0, sizeof(*req));
 	} else {
-		req = kzalloc(sizeof(*req), GFP_NOFS);
+		req = kzalloc(sizeof(*req), gfp_flags);
 	}
 	if (req == NULL)
 		return NULL;
 
 	req->r_osdc = osdc;
 	req->r_mempool = use_mempool;
+
 	kref_init(&req->r_kref);
 	init_completion(&req->r_completion);
 	init_completion(&req->r_safe_completion);
@@ -164,13 +217,22 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
 	else
 		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
-				   OSD_OPREPLY_FRONT_LEN, GFP_NOFS);
+				   OSD_OPREPLY_FRONT_LEN, gfp_flags);
 	if (!msg) {
 		ceph_osdc_put_request(req);
 		return NULL;
 	}
 	req->r_reply = msg;
 
+	/* allocate space for the trailing data */
+	if (needs_trail) {
+		req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
+		if (!req->r_trail) {
+			ceph_osdc_put_request(req);
+			return NULL;
+		}
+		ceph_pagelist_init(req->r_trail);
+	}
 	/* create request message; allow space for oid */
 	msg_size += 40;
 	if (snapc)
@@ -178,18 +240,115 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	if (use_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
 	else
-		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, GFP_NOFS);
+		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
 	if (!msg) {
 		ceph_osdc_put_request(req);
 		return NULL;
 	}
+
 	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
 	memset(msg->front.iov_base, 0, msg->front.iov_len);
+
+	req->r_request = msg;
+	req->r_pages = pages;
+#ifdef CONFIG_BLOCK
+	if (bio) {
+		req->r_bio = bio;
+		bio_get(req->r_bio);
+	}
+#endif
+
+	return req;
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_request);
+
+static void osd_req_encode_op(struct ceph_osd_request *req,
+			      struct ceph_osd_op *dst,
+			      struct ceph_osd_req_op *src)
+{
+	dst->op = cpu_to_le16(src->op);
+
+	switch (dst->op) {
+	case CEPH_OSD_OP_READ:
+	case CEPH_OSD_OP_WRITE:
+		dst->extent.offset =
+			cpu_to_le64(src->extent.offset);
+		dst->extent.length =
+			cpu_to_le64(src->extent.length);
+		dst->extent.truncate_size =
+			cpu_to_le64(src->extent.truncate_size);
+		dst->extent.truncate_seq =
+			cpu_to_le32(src->extent.truncate_seq);
+		break;
+
+	case CEPH_OSD_OP_GETXATTR:
+	case CEPH_OSD_OP_SETXATTR:
+	case CEPH_OSD_OP_CMPXATTR:
+		BUG_ON(!req->r_trail);
+
+		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+		dst->xattr.cmp_op = src->xattr.cmp_op;
+		dst->xattr.cmp_mode = src->xattr.cmp_mode;
+		ceph_pagelist_append(req->r_trail, src->xattr.name,
+				     src->xattr.name_len);
+		ceph_pagelist_append(req->r_trail, src->xattr.val,
+				     src->xattr.value_len);
+		break;
+	case CEPH_OSD_OP_CALL:
+		BUG_ON(!req->r_trail);
+
+		dst->cls.class_len = src->cls.class_len;
+		dst->cls.method_len = src->cls.method_len;
+		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
+
+		ceph_pagelist_append(req->r_trail, src->cls.class_name,
+				     src->cls.class_len);
+		ceph_pagelist_append(req->r_trail, src->cls.method_name,
+				     src->cls.method_len);
+		ceph_pagelist_append(req->r_trail, src->cls.indata,
+				     src->cls.indata_len);
+		break;
+	case CEPH_OSD_OP_ROLLBACK:
+		dst->snap.snapid = cpu_to_le64(src->snap.snapid);
+		break;
+	case CEPH_OSD_OP_STARTSYNC:
+		break;
+	default:
+		pr_err("unrecognized osd opcode %d\n", dst->op);
+		WARN_ON(1);
+		break;
+	}
+	dst->payload_len = cpu_to_le32(src->payload_len);
+}
+
+/*
+ * build new request AND message
+ *
+ */
+void ceph_osdc_build_request(struct ceph_osd_request *req,
+			     u64 off, u64 *plen,
+			     struct ceph_osd_req_op *src_ops,
+			     struct ceph_snap_context *snapc,
+			     struct timespec *mtime,
+			     const char *oid,
+			     int oid_len)
+{
+	struct ceph_msg *msg = req->r_request;
+	struct ceph_osd_request_head *head;
+	struct ceph_osd_req_op *src_op;
+	struct ceph_osd_op *op;
+	void *p;
+	int num_op = get_num_ops(src_ops, NULL);
+	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+	int flags = req->r_flags;
+	u64 data_len = 0;
+	int i;
+
 	head = msg->front.iov_base;
 	op = (void *)(head + 1);
 	p = (void *)(op + num_op);
 
-	req->r_request = msg;
 	req->r_snapc = ceph_get_snap_context(snapc);
 
 	head->client_inc = cpu_to_le32(1); /* always, for now. */
@@ -197,29 +356,23 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	if (flags & CEPH_OSD_FLAG_WRITE)
 		ceph_encode_timespec(&head->mtime, mtime);
 	head->num_ops = cpu_to_le16(num_op);
-	op->op = cpu_to_le16(opcode);
 
-	/* calculate max write size */
-	calc_layout(osdc, vino, layout, off, plen, req);
-	req->r_file_layout = *layout;  /* keep a copy */
-
-	if (flags & CEPH_OSD_FLAG_WRITE) {
-		req->r_request->hdr.data_off = cpu_to_le16(off);
-		req->r_request->hdr.data_len = cpu_to_le32(*plen);
-		op->payload_len = cpu_to_le32(*plen);
-	}
-	op->extent.truncate_size = cpu_to_le64(truncate_size);
-	op->extent.truncate_seq = cpu_to_le32(truncate_seq);
 
 	/* fill in oid */
-	head->object_len = cpu_to_le32(req->r_oid_len);
-	memcpy(p, req->r_oid, req->r_oid_len);
-	p += req->r_oid_len;
+	head->object_len = cpu_to_le32(oid_len);
+	memcpy(p, oid, oid_len);
+	p += oid_len;
 
-	if (do_sync) {
+	src_op = src_ops;
+	while (src_op->op) {
+		osd_req_encode_op(req, op, src_op);
+		src_op++;
 		op++;
-		op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
 	}
+
+	if (req->r_trail)
+		data_len += req->r_trail->length;
+
 	if (snapc) {
 		head->snap_seq = cpu_to_le64(snapc->seq);
 		head->num_snaps = cpu_to_le32(snapc->num_snaps);
@@ -229,12 +382,79 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 		}
 	}
 
+	if (flags & CEPH_OSD_FLAG_WRITE) {
+		req->r_request->hdr.data_off = cpu_to_le16(off);
+		req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
+	} else if (data_len) {
+		req->r_request->hdr.data_off = 0;
+		req->r_request->hdr.data_len = cpu_to_le32(data_len);
+	}
+
 	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
 	msg_size = p - msg->front.iov_base;
 	msg->front.iov_len = msg_size;
 	msg->hdr.front_len = cpu_to_le32(msg_size);
+	return;
+}
+EXPORT_SYMBOL(ceph_osdc_build_request);
+
+/*
+ * build new request AND message, calculate layout, and adjust file
+ * extent as needed.
+ *
+ * if the file was recently truncated, we include information about its
+ * old and new size so that the object can be updated appropriately. (we
+ * avoid synchronously deleting truncated objects because it's slow.)
+ *
+ * if @do_sync, include a 'startsync' command so that the osd will flush
+ * data quickly.
+ */
+struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+					       struct ceph_file_layout *layout,
+					       struct ceph_vino vino,
+					       u64 off, u64 *plen,
+					       int opcode, int flags,
+					       struct ceph_snap_context *snapc,
+					       int do_sync,
+					       u32 truncate_seq,
+					       u64 truncate_size,
+					       struct timespec *mtime,
+					       bool use_mempool, int num_reply)
+{
+	struct ceph_osd_req_op ops[3];
+	struct ceph_osd_request *req;
+
+	ops[0].op = opcode;
+	ops[0].extent.truncate_seq = truncate_seq;
+	ops[0].extent.truncate_size = truncate_size;
+	ops[0].payload_len = 0;
+
+	if (do_sync) {
+		ops[1].op = CEPH_OSD_OP_STARTSYNC;
+		ops[1].payload_len = 0;
+		ops[2].op = 0;
+	} else
+		ops[1].op = 0;
+
+	req = ceph_osdc_alloc_request(osdc, flags,
+				      snapc, ops,
+				      use_mempool,
+				      GFP_NOFS, NULL, NULL);
+	if (IS_ERR(req))
+		return req;
+
+	/* calculate max write size */
+	calc_layout(osdc, vino, layout, off, plen, req, ops);
+	req->r_file_layout = *layout;  /* keep a copy */
+
+	ceph_osdc_build_request(req, off, plen, ops,
+				snapc,
+				mtime,
+				req->r_oid, req->r_oid_len);
+
 	return req;
 }
+EXPORT_SYMBOL(ceph_osdc_new_request);
 
 /*
  * We keep osd requests in an rbtree, sorted by ->r_tid.
@@ -389,7 +609,7 @@ static void __move_osd_to_lru(struct ceph_osd_client *osdc,
 	dout("__move_osd_to_lru %p\n", osd);
 	BUG_ON(!list_empty(&osd->o_osd_lru));
 	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
-	osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ;
+	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
 }
 
 static void __remove_osd_from_lru(struct ceph_osd *osd)
@@ -483,7 +703,7 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
 static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
 {
 	schedule_delayed_work(&osdc->timeout_work,
-			osdc->client->mount_args->osd_keepalive_timeout * HZ);
+			osdc->client->options->osd_keepalive_timeout * HZ);
 }
 
 static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
@@ -549,7 +769,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
 */
 static void __cancel_request(struct ceph_osd_request *req)
 {
-	if (req->r_sent) {
+	if (req->r_sent && req->r_osd) {
 		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
 		req->r_sent = 0;
 	}
@@ -684,9 +904,9 @@ static void handle_timeout(struct work_struct *work)
 		container_of(work, struct ceph_osd_client, timeout_work.work);
 	struct ceph_osd_request *req, *last_req = NULL;
 	struct ceph_osd *osd;
-	unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
+	unsigned long timeout = osdc->client->options->osd_timeout * HZ;
 	unsigned long keepalive =
-		osdc->client->mount_args->osd_keepalive_timeout * HZ;
+		osdc->client->options->osd_keepalive_timeout * HZ;
 	unsigned long last_stamp = 0;
 	struct rb_node *p;
 	struct list_head slow_osds;
@@ -773,7 +993,7 @@ static void handle_osds_timeout(struct work_struct *work)
 		container_of(work, struct ceph_osd_client,
 			     osds_timeout_work.work);
 	unsigned long delay =
-		osdc->client->mount_args->osd_idle_ttl * HZ >> 2;
+		osdc->client->options->osd_idle_ttl * HZ >> 2;
 
 	dout("osds timeout\n");
 	down_read(&osdc->map_sem);
@@ -1104,6 +1324,10 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 
 	req->r_request->pages = req->r_pages;
 	req->r_request->nr_pages = req->r_num_pages;
+#ifdef CONFIG_BLOCK
+	req->r_request->bio = req->r_bio;
+#endif
+	req->r_request->trail = req->r_trail;
 
 	register_request(osdc, req);
 
@@ -1131,6 +1355,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 	up_read(&osdc->map_sem);
 	return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_start_request);
 
 /*
 * wait for a request to complete
@@ -1153,6 +1378,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
 	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
 	return req->r_result;
 }
+EXPORT_SYMBOL(ceph_osdc_wait_request);
 
 /*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
@@ -1186,6 +1412,7 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
 	mutex_unlock(&osdc->request_mutex);
 	dout("sync done (thru tid %llu)\n", last_tid);
 }
+EXPORT_SYMBOL(ceph_osdc_sync);
 
 /*
 * init, shutdown
@@ -1211,7 +1438,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
 
 	schedule_delayed_work(&osdc->osds_timeout_work,
-	   round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ));
+	   round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
 
 	err = -ENOMEM;
 	osdc->req_mempool = mempool_create_kmalloc_pool(10,
@@ -1237,6 +1464,7 @@ out_mempool:
 out:
 	return err;
 }
+EXPORT_SYMBOL(ceph_osdc_init);
 
 void ceph_osdc_stop(struct ceph_osd_client *osdc)
 {
@@ -1251,6 +1479,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
 	ceph_msgpool_destroy(&osdc->msgpool_op);
 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
 }
+EXPORT_SYMBOL(ceph_osdc_stop);
 
 /*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
@@ -1288,6 +1517,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 	dout("readpages result %d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_readpages);
 
 /*
 * do a synchronous write on N pages
@@ -1330,6 +1560,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 	dout("writepages result %d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_writepages);
 
 /*
 * handle incoming message
@@ -1420,6 +1651,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	}
 	m->pages = req->r_pages;
 	m->nr_pages = req->r_num_pages;
1654#ifdef CONFIG_BLOCK
1655 m->bio = req->r_bio;
1656#endif
1423 } 1657 }
1424 *skip = 0; 1658 *skip = 0;
1425 req->r_con_filling_msg = ceph_con_get(con); 1659 req->r_con_filling_msg = ceph_con_get(con);
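With the symbols above exported, the OSD client can be driven from a separate module such as the new rbd block driver instead of only from fs/ceph. A minimal sketch of that usage, assuming the ceph_osdc_start_request()/ceph_osdc_wait_request() calls shown in this diff; the helper name and the third (nofail) argument are assumptions, not taken from the patch:

/* Hedged sketch: submit a prepared OSD request and wait for its reply. */
static int submit_and_wait(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        int ret;

        ret = ceph_osdc_start_request(osdc, req, false); /* nofail assumed */
        if (ret < 0)
                return ret;
        return ceph_osdc_wait_request(osdc, req); /* returns req->r_result */
}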
diff --git a/fs/ceph/osdmap.c b/net/ceph/osdmap.c
index e31f118f1392..d73f3f6efa36 100644
--- a/fs/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1,14 +1,15 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/module.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
5#include <asm/div64.h> 6#include <asm/div64.h>
6 7
7#include "super.h" 8#include <linux/ceph/libceph.h>
8#include "osdmap.h" 9#include <linux/ceph/osdmap.h>
9#include "crush/hash.h" 10#include <linux/ceph/decode.h>
10#include "crush/mapper.h" 11#include <linux/crush/hash.h>
11#include "decode.h" 12#include <linux/crush/mapper.h>
12 13
13char *ceph_osdmap_state_str(char *str, int len, int state) 14char *ceph_osdmap_state_str(char *str, int len, int state)
14{ 15{
@@ -417,6 +418,20 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
417 return NULL; 418 return NULL;
418} 419}
419 420
421int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
422{
423 struct rb_node *rbp;
424
425 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
426 struct ceph_pg_pool_info *pi =
427 rb_entry(rbp, struct ceph_pg_pool_info, node);
428 if (pi->name && strcmp(pi->name, name) == 0)
429 return pi->id;
430 }
431 return -ENOENT;
432}
433EXPORT_SYMBOL(ceph_pg_poolid_by_name);
434
420static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 435static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
421{ 436{
422 rb_erase(&pi->node, root); 437 rb_erase(&pi->node, root);
@@ -966,6 +981,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
966 981
967 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 982 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
968} 983}
984EXPORT_SYMBOL(ceph_calc_file_object_mapping);
969 985
970/* 986/*
971 * calculate an object layout (i.e. pgid) from an oid, 987 * calculate an object layout (i.e. pgid) from an oid,
@@ -1011,6 +1027,7 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
1011 ol->ol_stripe_unit = fl->fl_object_stripe_unit; 1027 ol->ol_stripe_unit = fl->fl_object_stripe_unit;
1012 return 0; 1028 return 0;
1013} 1029}
1030EXPORT_SYMBOL(ceph_calc_object_layout);
1014 1031
1015/* 1032/*
1016 * Calculate raw osd vector for the given pgid. Return pointer to osd 1033 * Calculate raw osd vector for the given pgid. Return pointer to osd
@@ -1108,3 +1125,4 @@ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
1108 return osds[i]; 1125 return osds[i];
1109 return -1; 1126 return -1;
1110} 1127}
1128EXPORT_SYMBOL(ceph_calc_pg_primary);
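The new ceph_pg_poolid_by_name() does a linear walk of the pool rbtree, so callers would resolve a name once and cache the id. A hedged usage sketch (the pool name "rbd" is illustrative):

/* Resolve a pool name to its id before building requests. */
int pool_id = ceph_pg_poolid_by_name(osdc->osdmap, "rbd");
if (pool_id < 0)
        return pool_id;    /* -ENOENT when no pool has that name */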
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
new file mode 100644
index 000000000000..13cb409a7bba
--- /dev/null
+++ b/net/ceph/pagelist.c
@@ -0,0 +1,154 @@
1
2#include <linux/module.h>
3#include <linux/gfp.h>
4#include <linux/pagemap.h>
5#include <linux/highmem.h>
6#include <linux/ceph/pagelist.h>
7
8static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
9{
10 if (pl->mapped_tail) {
11 struct page *page = list_entry(pl->head.prev, struct page, lru);
12 kunmap(page);
13 pl->mapped_tail = NULL;
14 }
15}
16
17int ceph_pagelist_release(struct ceph_pagelist *pl)
18{
19 ceph_pagelist_unmap_tail(pl);
20 while (!list_empty(&pl->head)) {
21 struct page *page = list_first_entry(&pl->head, struct page,
22 lru);
23 list_del(&page->lru);
24 __free_page(page);
25 }
26 ceph_pagelist_free_reserve(pl);
27 return 0;
28}
29EXPORT_SYMBOL(ceph_pagelist_release);
30
31static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
32{
33 struct page *page;
34
35 if (!pl->num_pages_free) {
36 page = __page_cache_alloc(GFP_NOFS);
37 } else {
38 page = list_first_entry(&pl->free_list, struct page, lru);
39 list_del(&page->lru);
40 --pl->num_pages_free;
41 }
42 if (!page)
43 return -ENOMEM;
44 pl->room += PAGE_SIZE;
45 ceph_pagelist_unmap_tail(pl);
46 list_add_tail(&page->lru, &pl->head);
47 pl->mapped_tail = kmap(page);
48 return 0;
49}
50
51int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
52{
53 while (pl->room < len) {
54 size_t bit = pl->room;
55 int ret;
56
57 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
58 buf, bit);
59 pl->length += bit;
60 pl->room -= bit;
61 buf += bit;
62 len -= bit;
63 ret = ceph_pagelist_addpage(pl);
64 if (ret)
65 return ret;
66 }
67
68 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
69 pl->length += len;
70 pl->room -= len;
71 return 0;
72}
73EXPORT_SYMBOL(ceph_pagelist_append);
74
75/**
76 * Allocate enough pages for a pagelist to append the given amount
 77 * of data without further allocation.
78 * Returns: 0 on success, -ENOMEM on error.
79 */
80int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
81{
82 if (space <= pl->room)
83 return 0;
84 space -= pl->room;
85 space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */
86
87 while (space > pl->num_pages_free) {
88 struct page *page = __page_cache_alloc(GFP_NOFS);
89 if (!page)
90 return -ENOMEM;
91 list_add_tail(&page->lru, &pl->free_list);
92 ++pl->num_pages_free;
93 }
94 return 0;
95}
96EXPORT_SYMBOL(ceph_pagelist_reserve);
97
98/**
99 * Free any pages that have been preallocated.
100 */
101int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
102{
103 while (!list_empty(&pl->free_list)) {
104 struct page *page = list_first_entry(&pl->free_list,
105 struct page, lru);
106 list_del(&page->lru);
107 __free_page(page);
108 --pl->num_pages_free;
109 }
110 BUG_ON(pl->num_pages_free);
111 return 0;
112}
113EXPORT_SYMBOL(ceph_pagelist_free_reserve);
114
115/**
116 * Create a truncation point.
117 */
118void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
119 struct ceph_pagelist_cursor *c)
120{
121 c->pl = pl;
122 c->page_lru = pl->head.prev;
123 c->room = pl->room;
124}
125EXPORT_SYMBOL(ceph_pagelist_set_cursor);
126
127/**
128 * Truncate a pagelist to the given point. Move extra pages to reserve.
129 * This won't sleep.
130 * Returns: 0 on success,
131 * -EINVAL if the pagelist doesn't match the trunc point pagelist
132 */
133int ceph_pagelist_truncate(struct ceph_pagelist *pl,
134 struct ceph_pagelist_cursor *c)
135{
136 struct page *page;
137
138 if (pl != c->pl)
139 return -EINVAL;
140 ceph_pagelist_unmap_tail(pl);
141 while (pl->head.prev != c->page_lru) {
142 page = list_entry(pl->head.prev, struct page, lru);
143 list_del(&page->lru); /* remove from pagelist */
144 list_add_tail(&page->lru, &pl->free_list); /* add to reserve */
145 ++pl->num_pages_free;
146 }
147 pl->room = c->room;
148 if (!list_empty(&pl->head)) {
149 page = list_entry(pl->head.prev, struct page, lru);
150 pl->mapped_tail = kmap(page);
151 }
152 return 0;
153}
154EXPORT_SYMBOL(ceph_pagelist_truncate);
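Taken together, the exported pagelist calls support a reserve/append/rollback pattern: reserve pages up front so later appends draw from the reserve instead of allocating, mark a cursor, and truncate back to it on error. A minimal sketch, assuming pl was initialized elsewhere (empty head and free_list, room == 0) and hdr, hdr_len, and build_failed are placeholders:

struct ceph_pagelist_cursor c;
int ret;

ret = ceph_pagelist_reserve(pl, 128);    /* preallocate; appends won't fail */
if (ret)
        return ret;
ceph_pagelist_set_cursor(pl, &c);        /* remember a truncation point */
ret = ceph_pagelist_append(pl, hdr, hdr_len);
if (ret || build_failed)
        ceph_pagelist_truncate(pl, &c);  /* roll back; pages go to reserve */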
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
new file mode 100644
index 000000000000..54caf0687155
--- /dev/null
+++ b/net/ceph/pagevec.c
@@ -0,0 +1,223 @@
1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/module.h>
4#include <linux/sched.h>
5#include <linux/slab.h>
6#include <linux/file.h>
7#include <linux/namei.h>
8#include <linux/writeback.h>
9
10#include <linux/ceph/libceph.h>
11
12/*
13 * build a vector of user pages
14 */
15struct page **ceph_get_direct_page_vector(const char __user *data,
16 int num_pages,
17 loff_t off, size_t len)
18{
19 struct page **pages;
20 int rc;
21
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
23 if (!pages)
24 return ERR_PTR(-ENOMEM);
25
26 down_read(&current->mm->mmap_sem);
27 rc = get_user_pages(current, current->mm, (unsigned long)data,
28 num_pages, 0, 0, pages, NULL);
29 up_read(&current->mm->mmap_sem);
30 if (rc < 0)
31 goto fail;
32 return pages;
33
34fail:
35 kfree(pages);
36 return ERR_PTR(rc);
37}
38EXPORT_SYMBOL(ceph_get_direct_page_vector);
39
40void ceph_put_page_vector(struct page **pages, int num_pages)
41{
42 int i;
43
44 for (i = 0; i < num_pages; i++)
45 put_page(pages[i]);
46 kfree(pages);
47}
48EXPORT_SYMBOL(ceph_put_page_vector);
49
50void ceph_release_page_vector(struct page **pages, int num_pages)
51{
52 int i;
53
54 for (i = 0; i < num_pages; i++)
55 __free_pages(pages[i], 0);
56 kfree(pages);
57}
58EXPORT_SYMBOL(ceph_release_page_vector);
59
60/*
 61 * allocate a vector of new pages
62 */
63struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
64{
65 struct page **pages;
66 int i;
67
68 pages = kmalloc(sizeof(*pages) * num_pages, flags);
69 if (!pages)
70 return ERR_PTR(-ENOMEM);
71 for (i = 0; i < num_pages; i++) {
72 pages[i] = __page_cache_alloc(flags);
73 if (pages[i] == NULL) {
74 ceph_release_page_vector(pages, i);
75 return ERR_PTR(-ENOMEM);
76 }
77 }
78 return pages;
79}
80EXPORT_SYMBOL(ceph_alloc_page_vector);
81
82/*
83 * copy user data into a page vector
84 */
85int ceph_copy_user_to_page_vector(struct page **pages,
86 const char __user *data,
87 loff_t off, size_t len)
88{
89 int i = 0;
90 int po = off & ~PAGE_CACHE_MASK;
91 int left = len;
92 int l, bad;
93
94 while (left > 0) {
95 l = min_t(int, PAGE_CACHE_SIZE-po, left);
96 bad = copy_from_user(page_address(pages[i]) + po, data, l);
97 if (bad == l)
98 return -EFAULT;
99 data += l - bad;
100 left -= l - bad;
101 po += l - bad;
102 if (po == PAGE_CACHE_SIZE) {
103 po = 0;
104 i++;
105 }
106 }
107 return len;
108}
109EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
110
111int ceph_copy_to_page_vector(struct page **pages,
112 const char *data,
113 loff_t off, size_t len)
114{
115 int i = 0;
116 size_t po = off & ~PAGE_CACHE_MASK;
117 size_t left = len;
118 size_t l;
119
120 while (left > 0) {
121 l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
122 memcpy(page_address(pages[i]) + po, data, l);
123 data += l;
124 left -= l;
125 po += l;
126 if (po == PAGE_CACHE_SIZE) {
127 po = 0;
128 i++;
129 }
130 }
131 return len;
132}
133EXPORT_SYMBOL(ceph_copy_to_page_vector);
134
135int ceph_copy_from_page_vector(struct page **pages,
136 char *data,
137 loff_t off, size_t len)
138{
139 int i = 0;
140 size_t po = off & ~PAGE_CACHE_MASK;
141 size_t left = len;
142 size_t l;
143
144 while (left > 0) {
145 l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
146 memcpy(data, page_address(pages[i]) + po, l);
147 data += l;
148 left -= l;
149 po += l;
150 if (po == PAGE_CACHE_SIZE) {
151 po = 0;
152 i++;
153 }
154 }
155 return len;
156}
157EXPORT_SYMBOL(ceph_copy_from_page_vector);
158
159/*
160 * copy user data from a page vector into a user pointer
161 */
162int ceph_copy_page_vector_to_user(struct page **pages,
163 char __user *data,
164 loff_t off, size_t len)
165{
166 int i = 0;
167 int po = off & ~PAGE_CACHE_MASK;
168 int left = len;
169 int l, bad;
170
171 while (left > 0) {
172 l = min_t(int, left, PAGE_CACHE_SIZE-po);
173 bad = copy_to_user(data, page_address(pages[i]) + po, l);
174 if (bad == l)
175 return -EFAULT;
176 data += l - bad;
177 left -= l - bad;
178 if (po) {
179 po += l - bad;
180 if (po == PAGE_CACHE_SIZE)
181 po = 0;
182 }
183 i++;
184 }
185 return len;
186}
187EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
188
189/*
190 * Zero an extent within a page vector. Offset is relative to the
191 * start of the first page.
192 */
193void ceph_zero_page_vector_range(int off, int len, struct page **pages)
194{
195 int i = off >> PAGE_CACHE_SHIFT;
196
197 off &= ~PAGE_CACHE_MASK;
198
199 dout("zero_page_vector_page %u~%u\n", off, len);
200
201 /* leading partial page? */
202 if (off) {
203 int end = min((int)PAGE_CACHE_SIZE, off + len);
204 dout("zeroing %d %p head from %d\n", i, pages[i],
205 (int)off);
206 zero_user_segment(pages[i], off, end);
207 len -= (end - off);
208 i++;
209 }
210 while (len >= PAGE_CACHE_SIZE) {
211 dout("zeroing %d %p len=%d\n", i, pages[i], len);
212 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
213 len -= PAGE_CACHE_SIZE;
214 i++;
215 }
216 /* trailing partial page? */
217 if (len) {
218 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
219 zero_user_segment(pages[i], 0, len);
220 }
221}
222EXPORT_SYMBOL(ceph_zero_page_vector_range);
223
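A hedged round trip through the page-vector helpers; the page count is computed inline here (covering the leading partial page) rather than with any particular helper, and error handling is abbreviated:

struct page **pages;
int num_pages = ((off & ~PAGE_CACHE_MASK) + len + PAGE_CACHE_SIZE - 1)
                        >> PAGE_CACHE_SHIFT;

pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
if (IS_ERR(pages))
        return PTR_ERR(pages);
if (ceph_copy_user_to_page_vector(pages, data, off, len) < 0)
        goto out;                       /* -EFAULT from copy_from_user */
/* ... hand pages/num_pages to an OSD request ... */
out:
ceph_release_page_vector(pages, num_pages);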
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367b3c2f..8451ab481095 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -348,7 +348,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
348 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 348 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
349 if (info.rule_cnt > 0) { 349 if (info.rule_cnt > 0) {
350 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) 350 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
351 rule_buf = kmalloc(info.rule_cnt * sizeof(u32), 351 rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
352 GFP_USER); 352 GFP_USER);
353 if (!rule_buf) 353 if (!rule_buf)
354 return -ENOMEM; 354 return -ENOMEM;
@@ -397,7 +397,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
397 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) 397 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
398 return -ENOMEM; 398 return -ENOMEM;
399 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; 399 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
400 indir = kmalloc(full_size, GFP_USER); 400 indir = kzalloc(full_size, GFP_USER);
401 if (!indir) 401 if (!indir)
402 return -ENOMEM; 402 return -ENOMEM;
403 403
@@ -538,7 +538,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
538 538
539 gstrings.len = ret; 539 gstrings.len = ret;
540 540
541 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); 541 data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
542 if (!data) 542 if (!data)
543 return -ENOMEM; 543 return -ENOMEM;
544 544
@@ -775,7 +775,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
775 if (regs.len > reglen) 775 if (regs.len > reglen)
776 regs.len = reglen; 776 regs.len = reglen;
777 777
778 regbuf = kmalloc(reglen, GFP_USER); 778 regbuf = kzalloc(reglen, GFP_USER);
779 if (!regbuf) 779 if (!regbuf)
780 return -ENOMEM; 780 return -ENOMEM;
781 781
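All four conversions above share one rationale: each buffer is later copied back to userspace, and when the driver or classifier fills fewer bytes than were allocated, kmalloc()'s undefined contents would leak kernel heap. Zeroing up front bounds what userspace can see. A schematic of the hardened pattern (hedged; fill_regs() is a placeholder, not an ethtool op):

buf = kzalloc(size, GFP_USER);          /* was kmalloc: contents undefined */
if (!buf)
        return -ENOMEM;
fill_regs(dev, buf);                    /* may legitimately write < size */
if (copy_to_user(useraddr, buf, size))  /* pads with zeros, not stale heap */
        ret = -EFAULT;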
diff --git a/net/core/sock.c b/net/core/sock.c
index ef30e9d286e7..7d99e13148e6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1078,8 +1078,11 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
1078#ifdef CONFIG_CGROUPS 1078#ifdef CONFIG_CGROUPS
1079void sock_update_classid(struct sock *sk) 1079void sock_update_classid(struct sock *sk)
1080{ 1080{
1081 u32 classid = task_cls_classid(current); 1081 u32 classid;
1082 1082
1083 rcu_read_lock(); /* doing current task, which cannot vanish. */
1084 classid = task_cls_classid(current);
1085 rcu_read_unlock();
1083 if (classid && classid != sk->sk_classid) 1086 if (classid && classid != sk->sk_classid)
1084 sk->sk_classid = classid; 1087 sk->sk_classid = classid;
1085} 1088}
diff --git a/net/core/stream.c b/net/core/stream.c
index d959e0f41528..f5df85dcd20b 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -141,10 +141,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
141 141
142 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 142 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
143 sk->sk_write_pending++; 143 sk->sk_write_pending++;
144 sk_wait_event(sk, &current_timeo, !sk->sk_err && 144 sk_wait_event(sk, &current_timeo, sk->sk_err ||
145 !(sk->sk_shutdown & SEND_SHUTDOWN) && 145 (sk->sk_shutdown & SEND_SHUTDOWN) ||
146 sk_stream_memory_free(sk) && 146 (sk_stream_memory_free(sk) &&
147 vm_wait); 147 !vm_wait));
148 sk->sk_write_pending--; 148 sk->sk_write_pending--;
149 149
150 if (vm_wait) { 150 if (vm_wait) {
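sk_wait_event(sk, timeo, COND) sleeps until COND becomes true (or the timeout expires), so its third argument must describe the wake-up condition. A hedged restatement of what the fix changes:

/* The old call passed what reads like a keep-waiting condition, so with
 * vm_wait == 0 it could never become true and the task slept out its
 * whole timeout even after memory was freed. The new predicate is the
 * actual wake-up condition:
 *
 *   wake when:  sk_err || SEND_SHUTDOWN || (memory free && !vm_wait)
 */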
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 72380a30d1c8..7cd7760144f7 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -413,7 +413,7 @@ config INET_XFRM_MODE_BEET
413 If unsure, say Y. 413 If unsure, say Y.
414 414
415config INET_LRO 415config INET_LRO
416 bool "Large Receive Offload (ipv4/tcp)" 416 tristate "Large Receive Offload (ipv4/tcp)"
417 default y 417 default y
418 ---help--- 418 ---help---
419 Support for Large Receive Offload (ipv4/tcp). 419 Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fdcacd36ce7..2a4bb76f2132 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
834 int mark = 0; 834 int mark = 0;
835 835
836 836
837 if (len == 8 || IGMP_V2_SEEN(in_dev)) { 837 if (len == 8) {
838 if (ih->code == 0) { 838 if (ih->code == 0) {
839 /* Alas, old v1 router presents here. */ 839 /* Alas, old v1 router presents here. */
840 840
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
856 igmpv3_clear_delrec(in_dev); 856 igmpv3_clear_delrec(in_dev);
857 } else if (len < 12) { 857 } else if (len < 12) {
858 return; /* ignore bogus packet; freed by caller */ 858 return; /* ignore bogus packet; freed by caller */
859 } else if (IGMP_V1_SEEN(in_dev)) {
860 /* This is a v3 query with v1 queriers present */
861 max_delay = IGMP_Query_Response_Interval;
862 group = 0;
863 } else if (IGMP_V2_SEEN(in_dev)) {
864 /* this is a v3 query with v2 queriers present;
865 * Interpretation of the max_delay code is problematic here.
866 * A real v2 host would use ih_code directly, while v3 has a
867 * different encoding. We use the v3 encoding as more likely
868 * to be intended in a v3 query.
869 */
870 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
859 } else { /* v3 */ 871 } else { /* v3 */
860 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) 872 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
861 return; 873 return;
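The IGMPV3_MRC() used in the new v2-queriers branch decodes the IGMPv3 Max Resp Code field (RFC 3376 section 4.1.1): values below 128 are literal tenths of a second, larger values pack a 3-bit exponent and 4-bit mantissa; HZ/IGMP_TIMER_SCALE then converts tenths of a second to jiffies. A hedged stand-alone decoder showing the same arithmetic:

/* Max Resp Code -> max response time in 1/10 s units (RFC 3376). */
static unsigned int mrc_to_tenths(u8 code)
{
        if (code < 128)
                return code;                        /* literal value */
        return ((code & 0x0f) | 0x10)               /* mant | implied bit */
                        << (((code >> 4) & 0x07) + 3);  /* exp + 3 */
}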
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 244f7cb08d68..37f8adb68c79 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -11,6 +11,7 @@
11#include <linux/proc_fs.h> 11#include <linux/proc_fs.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/security.h>
14#include <net/net_namespace.h> 15#include <net/net_namespace.h>
15 16
16#include <linux/netfilter.h> 17#include <linux/netfilter.h>
@@ -87,6 +88,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
87 rcu_read_unlock(); 88 rcu_read_unlock();
88} 89}
89 90
91#ifdef CONFIG_NF_CONNTRACK_SECMARK
92static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
93{
94 int ret;
95 u32 len;
96 char *secctx;
97
98 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
99 if (ret)
100 return ret;
101
102 ret = seq_printf(s, "secctx=%s ", secctx);
103
104 security_release_secctx(secctx, len);
105 return ret;
106}
107#else
108static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
109{
110 return 0;
111}
112#endif
113
90static int ct_seq_show(struct seq_file *s, void *v) 114static int ct_seq_show(struct seq_file *s, void *v)
91{ 115{
92 struct nf_conntrack_tuple_hash *hash = v; 116 struct nf_conntrack_tuple_hash *hash = v;
@@ -148,10 +172,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
148 goto release; 172 goto release;
149#endif 173#endif
150 174
151#ifdef CONFIG_NF_CONNTRACK_SECMARK 175 if (ct_show_secctx(s, ct))
152 if (seq_printf(s, "secmark=%u ", ct->secmark))
153 goto release; 176 goto release;
154#endif
155 177
156 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 178 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
157 goto release; 179 goto release;
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 8c8632d9b93c..957c9241fb0c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -38,7 +38,7 @@ static DEFINE_SPINLOCK(nf_nat_lock);
38static struct nf_conntrack_l3proto *l3proto __read_mostly; 38static struct nf_conntrack_l3proto *l3proto __read_mostly;
39 39
40#define MAX_IP_NAT_PROTO 256 40#define MAX_IP_NAT_PROTO 256
41static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 41static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
42 __read_mostly; 42 __read_mostly;
43 43
44static inline const struct nf_nat_protocol * 44static inline const struct nf_nat_protocol *
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8323136bdc54..a275c6e1e25c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1556,14 +1556,13 @@ out:
1556 * i.e. Path MTU discovery 1556 * i.e. Path MTU discovery
1557 */ 1557 */
1558 1558
1559void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, 1559static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
1560 struct net_device *dev, u32 pmtu) 1560 struct net *net, u32 pmtu, int ifindex)
1561{ 1561{
1562 struct rt6_info *rt, *nrt; 1562 struct rt6_info *rt, *nrt;
1563 struct net *net = dev_net(dev);
1564 int allfrag = 0; 1563 int allfrag = 0;
1565 1564
1566 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0); 1565 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1567 if (rt == NULL) 1566 if (rt == NULL)
1568 return; 1567 return;
1569 1568
@@ -1631,6 +1630,27 @@ out:
1631 dst_release(&rt->dst); 1630 dst_release(&rt->dst);
1632} 1631}
1633 1632
1633void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1634 struct net_device *dev, u32 pmtu)
1635{
1636 struct net *net = dev_net(dev);
1637
1638 /*
1639 * RFC 1981 states that a node "MUST reduce the size of the packets it
1640 * is sending along the path" that caused the Packet Too Big message.
1641 * Since it's not possible in the general case to determine which
1642 * interface was used to send the original packet, we update the MTU
1643 * on the interface that will be used to send future packets. We also
1644 * update the MTU on the interface that received the Packet Too Big in
1645 * case the original packet was forced out that interface with
1646 * SO_BINDTODEVICE or similar. This is the next best thing to the
1647 * correct behaviour, which would be to update the MTU on all
1648 * interfaces.
1649 */
1650 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1651 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1652}
1653
1634/* 1654/*
1635 * Misc support functions 1655 * Misc support functions
1636 */ 1656 */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c893f236acea..8f23401832b7 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -175,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
175 175
176 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); 176 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
177 177
178 del_timer_sync(&tid_tx->addba_resp_timer);
179
178 /* 180 /*
179 * After this packets are no longer handed right through 181 * After this packets are no longer handed right through
180 * to the driver but are put onto tid_tx->pending instead, 182 * to the driver but are put onto tid_tx->pending instead,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 10caec5ea8fa..34da67995d94 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
377 skb2 = skb_clone(skb, GFP_ATOMIC); 377 skb2 = skb_clone(skb, GFP_ATOMIC);
378 if (skb2) { 378 if (skb2) {
379 skb2->dev = prev_dev; 379 skb2->dev = prev_dev;
380 netif_receive_skb(skb2); 380 netif_rx(skb2);
381 } 381 }
382 } 382 }
383 383
@@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
386 } 386 }
387 if (prev_dev) { 387 if (prev_dev) {
388 skb->dev = prev_dev; 388 skb->dev = prev_dev;
389 netif_receive_skb(skb); 389 netif_rx(skb);
390 skb = NULL; 390 skb = NULL;
391 } 391 }
392 rcu_read_unlock(); 392 rcu_read_unlock();
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 78b505d33bfb..fdaec7daff1d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -27,7 +27,7 @@
27 27
28static DEFINE_MUTEX(afinfo_mutex); 28static DEFINE_MUTEX(afinfo_mutex);
29 29
30const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; 30const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
31EXPORT_SYMBOL(nf_afinfo); 31EXPORT_SYMBOL(nf_afinfo);
32 32
33int nf_register_afinfo(const struct nf_afinfo *afinfo) 33int nf_register_afinfo(const struct nf_afinfo *afinfo)
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index cdcc7649476b..5702de35e2bb 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -26,10 +26,10 @@
26 26
27static DEFINE_MUTEX(nf_ct_ecache_mutex); 27static DEFINE_MUTEX(nf_ct_ecache_mutex);
28 28
29struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly; 29struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
30EXPORT_SYMBOL_GPL(nf_conntrack_event_cb); 30EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
31 31
32struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly; 32struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
33EXPORT_SYMBOL_GPL(nf_expect_event_cb); 33EXPORT_SYMBOL_GPL(nf_expect_event_cb);
34 34
35/* deliver cached events and clear cache entry - must be called with locally 35/* deliver cached events and clear cache entry - must be called with locally
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 8d9e4c949b96..bd82450c193f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -16,7 +16,7 @@
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <net/netfilter/nf_conntrack_extend.h> 17#include <net/netfilter/nf_conntrack_extend.h>
18 18
19static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM]; 19static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
20static DEFINE_MUTEX(nf_ct_ext_type_mutex); 20static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21 21
22void __nf_ct_ext_destroy(struct nf_conn *ct) 22void __nf_ct_ext_destroy(struct nf_conn *ct)
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5bae1cd15eea..146476c6441a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -22,6 +22,7 @@
22#include <linux/rculist_nulls.h> 22#include <linux/rculist_nulls.h>
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/timer.h> 24#include <linux/timer.h>
25#include <linux/security.h>
25#include <linux/skbuff.h> 26#include <linux/skbuff.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/netlink.h> 28#include <linux/netlink.h>
@@ -245,16 +246,31 @@ nla_put_failure:
245 246
246#ifdef CONFIG_NF_CONNTRACK_SECMARK 247#ifdef CONFIG_NF_CONNTRACK_SECMARK
247static inline int 248static inline int
248ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct) 249ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
249{ 250{
250 NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark)); 251 struct nlattr *nest_secctx;
251 return 0; 252 int len, ret;
253 char *secctx;
254
255 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
256 if (ret)
257 return ret;
258
259 ret = -1;
260 nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
261 if (!nest_secctx)
262 goto nla_put_failure;
263
264 NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
265 nla_nest_end(skb, nest_secctx);
252 266
267 ret = 0;
253nla_put_failure: 268nla_put_failure:
254 return -1; 269 security_release_secctx(secctx, len);
270 return ret;
255} 271}
256#else 272#else
257#define ctnetlink_dump_secmark(a, b) (0) 273#define ctnetlink_dump_secctx(a, b) (0)
258#endif 274#endif
259 275
260#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) 276#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -391,7 +407,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
391 ctnetlink_dump_protoinfo(skb, ct) < 0 || 407 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
392 ctnetlink_dump_helpinfo(skb, ct) < 0 || 408 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
393 ctnetlink_dump_mark(skb, ct) < 0 || 409 ctnetlink_dump_mark(skb, ct) < 0 ||
394 ctnetlink_dump_secmark(skb, ct) < 0 || 410 ctnetlink_dump_secctx(skb, ct) < 0 ||
395 ctnetlink_dump_id(skb, ct) < 0 || 411 ctnetlink_dump_id(skb, ct) < 0 ||
396 ctnetlink_dump_use(skb, ct) < 0 || 412 ctnetlink_dump_use(skb, ct) < 0 ||
397 ctnetlink_dump_master(skb, ct) < 0 || 413 ctnetlink_dump_master(skb, ct) < 0 ||
@@ -437,6 +453,17 @@ ctnetlink_counters_size(const struct nf_conn *ct)
437 ; 453 ;
438} 454}
439 455
456#ifdef CONFIG_NF_CONNTRACK_SECMARK
457static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct)
458{
459 int len;
460
461 security_secid_to_secctx(ct->secmark, NULL, &len);
462
463 return sizeof(char) * len;
464}
465#endif
466
440static inline size_t 467static inline size_t
441ctnetlink_nlmsg_size(const struct nf_conn *ct) 468ctnetlink_nlmsg_size(const struct nf_conn *ct)
442{ 469{
@@ -453,7 +480,8 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
453 + nla_total_size(0) /* CTA_HELP */ 480 + nla_total_size(0) /* CTA_HELP */
454 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ 481 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
455#ifdef CONFIG_NF_CONNTRACK_SECMARK 482#ifdef CONFIG_NF_CONNTRACK_SECMARK
456 + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */ 483 + nla_total_size(0) /* CTA_SECCTX */
484 + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */
457#endif 485#endif
458#ifdef CONFIG_NF_NAT_NEEDED 486#ifdef CONFIG_NF_NAT_NEEDED
459 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ 487 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
@@ -556,7 +584,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
556 584
557#ifdef CONFIG_NF_CONNTRACK_SECMARK 585#ifdef CONFIG_NF_CONNTRACK_SECMARK
558 if ((events & (1 << IPCT_SECMARK) || ct->secmark) 586 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
559 && ctnetlink_dump_secmark(skb, ct) < 0) 587 && ctnetlink_dump_secctx(skb, ct) < 0)
560 goto nla_put_failure; 588 goto nla_put_failure;
561#endif 589#endif
562 590
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 5886ba1d52a0..ed6d92958023 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -28,8 +28,8 @@
28#include <net/netfilter/nf_conntrack_l4proto.h> 28#include <net/netfilter/nf_conntrack_l4proto.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30 30
31static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly; 31static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
32struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly; 32struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
33EXPORT_SYMBOL_GPL(nf_ct_l3protos); 33EXPORT_SYMBOL_GPL(nf_ct_l3protos);
34 34
35static DEFINE_MUTEX(nf_ct_proto_mutex); 35static DEFINE_MUTEX(nf_ct_proto_mutex);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index eb973fcd67ab..0fb65705b44b 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -15,6 +15,7 @@
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/percpu.h> 16#include <linux/percpu.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/security.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#ifdef CONFIG_SYSCTL 20#ifdef CONFIG_SYSCTL
20#include <linux/sysctl.h> 21#include <linux/sysctl.h>
@@ -108,6 +109,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
108 rcu_read_unlock(); 109 rcu_read_unlock();
109} 110}
110 111
112#ifdef CONFIG_NF_CONNTRACK_SECMARK
113static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
114{
115 int ret;
116 u32 len;
117 char *secctx;
118
119 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
120 if (ret)
121 return ret;
122
123 ret = seq_printf(s, "secctx=%s ", secctx);
124
125 security_release_secctx(secctx, len);
126 return ret;
127}
128#else
129static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
130{
131 return 0;
132}
133#endif
134
111/* return 0 on success, 1 in case of error */ 135/* return 0 on success, 1 in case of error */
112static int ct_seq_show(struct seq_file *s, void *v) 136static int ct_seq_show(struct seq_file *s, void *v)
113{ 137{
@@ -168,10 +192,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
168 goto release; 192 goto release;
169#endif 193#endif
170 194
171#ifdef CONFIG_NF_CONNTRACK_SECMARK 195 if (ct_show_secctx(s, ct))
172 if (seq_printf(s, "secmark=%u ", ct->secmark))
173 goto release; 196 goto release;
174#endif
175 197
176#ifdef CONFIG_NF_CONNTRACK_ZONES 198#ifdef CONFIG_NF_CONNTRACK_ZONES
177 if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) 199 if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 7df37fd786bc..b07393eab88e 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -16,7 +16,7 @@
16#define NF_LOG_PREFIXLEN 128 16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64 17#define NFLOGGER_NAME_LEN 64
18 18
19static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly; 19static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
20static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; 20static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
21static DEFINE_MUTEX(nf_log_mutex); 21static DEFINE_MUTEX(nf_log_mutex);
22 22
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 78b3cf9c519c..74aebed5bd28 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -18,7 +18,7 @@
 18 * long term mutex. The handler must provide an outfn() to accept packets 18 * long term mutex. The handler must provide an outfn() to accept packets
19 * for queueing and must reinject all packets it receives, no matter what. 19 * for queueing and must reinject all packets it receives, no matter what.
20 */ 20 */
21static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly; 21static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
22 22
23static DEFINE_MUTEX(queue_handler_mutex); 23static DEFINE_MUTEX(queue_handler_mutex);
24 24
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0cb6053f02fd..782e51986a6f 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -9,7 +9,6 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/selinux.h>
13#include <linux/netfilter_ipv4/ip_tables.h> 12#include <linux/netfilter_ipv4/ip_tables.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 13#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <linux/netfilter/x_tables.h> 14#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 23b2d6c486b5..9faf5e050b79 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -14,8 +14,8 @@
14 */ 14 */
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/security.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/selinux.h>
19#include <linux/netfilter/x_tables.h> 19#include <linux/netfilter/x_tables.h>
20#include <linux/netfilter/xt_SECMARK.h> 20#include <linux/netfilter/xt_SECMARK.h>
21 21
@@ -39,9 +39,8 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
39 39
40 switch (mode) { 40 switch (mode) {
41 case SECMARK_MODE_SEL: 41 case SECMARK_MODE_SEL:
42 secmark = info->u.sel.selsid; 42 secmark = info->secid;
43 break; 43 break;
44
45 default: 44 default:
46 BUG(); 45 BUG();
47 } 46 }
@@ -50,33 +49,33 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
50 return XT_CONTINUE; 49 return XT_CONTINUE;
51} 50}
52 51
53static int checkentry_selinux(struct xt_secmark_target_info *info) 52static int checkentry_lsm(struct xt_secmark_target_info *info)
54{ 53{
55 int err; 54 int err;
56 struct xt_secmark_target_selinux_info *sel = &info->u.sel;
57 55
58 sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; 56 info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
57 info->secid = 0;
59 58
60 err = selinux_string_to_sid(sel->selctx, &sel->selsid); 59 err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
60 &info->secid);
61 if (err) { 61 if (err) {
62 if (err == -EINVAL) 62 if (err == -EINVAL)
63 pr_info("invalid SELinux context \'%s\'\n", 63 pr_info("invalid security context \'%s\'\n", info->secctx);
64 sel->selctx);
65 return err; 64 return err;
66 } 65 }
67 66
68 if (!sel->selsid) { 67 if (!info->secid) {
69 pr_info("unable to map SELinux context \'%s\'\n", sel->selctx); 68 pr_info("unable to map security context \'%s\'\n", info->secctx);
70 return -ENOENT; 69 return -ENOENT;
71 } 70 }
72 71
73 err = selinux_secmark_relabel_packet_permission(sel->selsid); 72 err = security_secmark_relabel_packet(info->secid);
74 if (err) { 73 if (err) {
75 pr_info("unable to obtain relabeling permission\n"); 74 pr_info("unable to obtain relabeling permission\n");
76 return err; 75 return err;
77 } 76 }
78 77
79 selinux_secmark_refcount_inc(); 78 security_secmark_refcount_inc();
80 return 0; 79 return 0;
81} 80}
82 81
@@ -100,16 +99,16 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
100 99
101 switch (info->mode) { 100 switch (info->mode) {
102 case SECMARK_MODE_SEL: 101 case SECMARK_MODE_SEL:
103 err = checkentry_selinux(info);
104 if (err <= 0)
105 return err;
106 break; 102 break;
107
108 default: 103 default:
109 pr_info("invalid mode: %hu\n", info->mode); 104 pr_info("invalid mode: %hu\n", info->mode);
110 return -EINVAL; 105 return -EINVAL;
111 } 106 }
112 107
108 err = checkentry_lsm(info);
109 if (err)
110 return err;
111
113 if (!mode) 112 if (!mode)
114 mode = info->mode; 113 mode = info->mode;
115 return 0; 114 return 0;
@@ -119,7 +118,7 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
119{ 118{
120 switch (mode) { 119 switch (mode) {
121 case SECMARK_MODE_SEL: 120 case SECMARK_MODE_SEL:
122 selinux_secmark_refcount_dec(); 121 security_secmark_refcount_dec();
123 } 122 }
124} 123}
125 124
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952d4b17..1dfbfea12e9b 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -57,30 +57,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
57 unsigned long ret; 57 unsigned long ret;
58 void *addr; 58 void *addr;
59 59
60 if (to_user) 60 addr = kmap(page);
61 if (to_user) {
61 rds_stats_add(s_copy_to_user, bytes); 62 rds_stats_add(s_copy_to_user, bytes);
62 else 63 ret = copy_to_user(ptr, addr + offset, bytes);
64 } else {
63 rds_stats_add(s_copy_from_user, bytes); 65 rds_stats_add(s_copy_from_user, bytes);
64 66 ret = copy_from_user(addr + offset, ptr, bytes);
65 addr = kmap_atomic(page, KM_USER0);
66 if (to_user)
67 ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
68 else
69 ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
70 kunmap_atomic(addr, KM_USER0);
71
72 if (ret) {
73 addr = kmap(page);
74 if (to_user)
75 ret = copy_to_user(ptr, addr + offset, bytes);
76 else
77 ret = copy_from_user(addr + offset, ptr, bytes);
78 kunmap(page);
79 if (ret)
80 return -EFAULT;
81 } 67 }
68 kunmap(page);
82 69
83 return 0; 70 return ret ? -EFAULT : 0;
84} 71}
85EXPORT_SYMBOL_GPL(rds_page_copy_user); 72EXPORT_SYMBOL_GPL(rds_page_copy_user);
86 73
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 7416a5c73b2a..b0c2a82178af 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -137,7 +137,7 @@ next_knode:
137 int toff = off + key->off + (off2 & key->offmask); 137 int toff = off + key->off + (off2 & key->offmask);
138 __be32 *data, _data; 138 __be32 *data, _data;
139 139
140 if (skb_headroom(skb) + toff < 0) 140 if (skb_headroom(skb) + toff > INT_MAX)
141 goto out; 141 goto out;
142 142
143 data = skb_header_pointer(skb, toff, 4, &_data); 143 data = skb_header_pointer(skb, toff, 4, &_data);
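The one-character-looking change above is a signedness fix; a hedged note on why the old test was dead code:

/* skb_headroom() returns unsigned int, so in "skb_headroom(skb) + toff"
 * the signed toff is converted to unsigned and the sum is unsigned;
 * "x < 0" on an unsigned x is always false. A negative toff instead
 * wraps to a huge unsigned sum, which "> INT_MAX" does detect. */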
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 86366390038a..ddbbf7c81fa1 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
543 id = ntohs(hmacs->hmac_ids[i]); 543 id = ntohs(hmacs->hmac_ids[i]);
544 544
545 /* Check the id is in the supported range */ 545 /* Check the id is in the supported range */
546 if (id > SCTP_AUTH_HMAC_ID_MAX) 546 if (id > SCTP_AUTH_HMAC_ID_MAX) {
547 id = 0;
547 continue; 548 continue;
549 }
548 550
 549 /* See if we support the id. Supported IDs have name and 551 /* See if we support the id. Supported IDs have name and
 550 * length fields set, so that we can allocate and use 552 * length fields set, so that we can allocate and use
 551 * them. We can safely just check for name, since without the 553 * them. We can safely just check for name, since without the
552 * name, we can't allocate the TFM. 554 * name, we can't allocate the TFM.
553 */ 555 */
554 if (!sctp_hmac_list[id].hmac_name) 556 if (!sctp_hmac_list[id].hmac_name) {
557 id = 0;
555 continue; 558 continue;
559 }
556 560
557 break; 561 break;
558 } 562 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca44917872d2..fbb70770ad05 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -916,6 +916,11 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
916 /* Walk through the addrs buffer and count the number of addresses. */ 916 /* Walk through the addrs buffer and count the number of addresses. */
917 addr_buf = kaddrs; 917 addr_buf = kaddrs;
918 while (walk_size < addrs_size) { 918 while (walk_size < addrs_size) {
919 if (walk_size + sizeof(sa_family_t) > addrs_size) {
920 kfree(kaddrs);
921 return -EINVAL;
922 }
923
919 sa_addr = (struct sockaddr *)addr_buf; 924 sa_addr = (struct sockaddr *)addr_buf;
920 af = sctp_get_af_specific(sa_addr->sa_family); 925 af = sctp_get_af_specific(sa_addr->sa_family);
921 926
@@ -1002,9 +1007,13 @@ static int __sctp_connect(struct sock* sk,
1002 /* Walk through the addrs buffer and count the number of addresses. */ 1007 /* Walk through the addrs buffer and count the number of addresses. */
1003 addr_buf = kaddrs; 1008 addr_buf = kaddrs;
1004 while (walk_size < addrs_size) { 1009 while (walk_size < addrs_size) {
1010 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1011 err = -EINVAL;
1012 goto out_free;
1013 }
1014
1005 sa_addr = (union sctp_addr *)addr_buf; 1015 sa_addr = (union sctp_addr *)addr_buf;
1006 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1016 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1007 port = ntohs(sa_addr->v4.sin_port);
1008 1017
1009 /* If the address family is not supported or if this address 1018 /* If the address family is not supported or if this address
1010 * causes the address buffer to overflow return EINVAL. 1019 * causes the address buffer to overflow return EINVAL.
@@ -1014,6 +1023,8 @@ static int __sctp_connect(struct sock* sk,
1014 goto out_free; 1023 goto out_free;
1015 } 1024 }
1016 1025
1026 port = ntohs(sa_addr->v4.sin_port);
1027
1017 /* Save current address so we can work with it */ 1028 /* Save current address so we can work with it */
1018 memcpy(&to, sa_addr, af->sockaddr_len); 1029 memcpy(&to, sa_addr, af->sockaddr_len);
1019 1030
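Both new checks close the same overrun: with a truncated trailing record, even the sa_family field can lie beyond the copied-in buffer, so it must be validated before sctp_get_af_specific() dereferences it. A hedged illustration:

/*   |<-------------- addrs_size -------------->|
 *   [ sockaddr ][ sockaddr ][ 1 stray byte ]
 *                            ^ walk_size points here: reading the
 *                              2-byte sa_family runs off the end of
 *                              the kmalloc'd copy of the user buffer.
 */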
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 5b7c86ea43a1..7ef429cd5cb3 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -427,7 +427,7 @@ static void check_conf(struct menu *menu)
427 if (sym->name && !sym_is_choice_value(sym)) { 427 if (sym->name && !sym_is_choice_value(sym)) {
428 printf("CONFIG_%s\n", sym->name); 428 printf("CONFIG_%s\n", sym->name);
429 } 429 }
430 } else { 430 } else if (input_mode != oldnoconfig) {
431 if (!conf_cnt++) 431 if (!conf_cnt++)
432 printf(_("*\n* Restart config...\n*\n")); 432 printf(_("*\n* Restart config...\n*\n"));
433 rootEntry = menu_get_parent_menu(menu); 433 rootEntry = menu_get_parent_menu(menu);
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 6ee2e4fb1481..170459c224a1 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -165,7 +165,6 @@ struct menu {
165 struct symbol *sym; 165 struct symbol *sym;
166 struct property *prompt; 166 struct property *prompt;
167 struct expr *dep; 167 struct expr *dep;
168 struct expr *dir_dep;
169 unsigned int flags; 168 unsigned int flags;
170 char *help; 169 char *help;
171 struct file *file; 170 struct file *file;
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 4fb590247f33..edda8b49619d 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -107,7 +107,6 @@ static struct expr *menu_check_dep(struct expr *e)
107void menu_add_dep(struct expr *dep) 107void menu_add_dep(struct expr *dep)
108{ 108{
109 current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep)); 109 current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep));
110 current_entry->dir_dep = current_entry->dep;
111} 110}
112 111
113void menu_set_type(int type) 112void menu_set_type(int type)
@@ -291,10 +290,6 @@ void menu_finalize(struct menu *parent)
291 for (menu = parent->list; menu; menu = menu->next) 290 for (menu = parent->list; menu; menu = menu->next)
292 menu_finalize(menu); 291 menu_finalize(menu);
293 } else if (sym) { 292 } else if (sym) {
294 /* ignore inherited dependencies for dir_dep */
295 sym->dir_dep.expr = expr_transform(expr_copy(parent->dir_dep));
296 sym->dir_dep.expr = expr_eliminate_dups(sym->dir_dep.expr);
297
298 basedep = parent->prompt ? parent->prompt->visible.expr : NULL; 293 basedep = parent->prompt ? parent->prompt->visible.expr : NULL;
299 basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no); 294 basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no);
300 basedep = expr_eliminate_dups(expr_transform(basedep)); 295 basedep = expr_eliminate_dups(expr_transform(basedep));
@@ -325,6 +320,8 @@ void menu_finalize(struct menu *parent)
325 parent->next = last_menu->next; 320 parent->next = last_menu->next;
326 last_menu->next = NULL; 321 last_menu->next = NULL;
327 } 322 }
323
324 sym->dir_dep.expr = parent->dep;
328 } 325 }
329 for (menu = parent->list; menu; menu = menu->next) { 326 for (menu = parent->list; menu; menu = menu->next) {
330 if (sym && sym_is_choice(sym) && 327 if (sym && sym_is_choice(sym) &&
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 943712ca6c0a..1f8b305449db 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -350,6 +350,7 @@ void sym_calc_value(struct symbol *sym)
350 } 350 }
351 } 351 }
352 calc_newval: 352 calc_newval:
353#if 0
353 if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { 354 if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) {
354 fprintf(stderr, "warning: ("); 355 fprintf(stderr, "warning: (");
355 expr_fprint(sym->rev_dep.expr, stderr); 356 expr_fprint(sym->rev_dep.expr, stderr);
@@ -358,6 +359,7 @@ void sym_calc_value(struct symbol *sym)
358 expr_fprint(sym->dir_dep.expr, stderr); 359 expr_fprint(sym->dir_dep.expr, stderr);
359 fprintf(stderr, ")\n"); 360 fprintf(stderr, ")\n");
360 } 361 }
362#endif
361 newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); 363 newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri);
362 } 364 }
363 if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) 365 if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN)
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index 0a0a99f3b083..4d995aeaebc0 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -3,3 +3,4 @@
3# 3#
4af_names.h 4af_names.h
5capability_names.h 5capability_names.h
6rlim_names.h
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 7320331b44ab..544ff5837cb6 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -29,7 +29,7 @@
29 * aa_simple_write_to_buffer - common routine for getting policy from user 29 * aa_simple_write_to_buffer - common routine for getting policy from user
30 * @op: operation doing the user buffer copy 30 * @op: operation doing the user buffer copy
31 * @userbuf: user buffer to copy data from (NOT NULL) 31 * @userbuf: user buffer to copy data from (NOT NULL)
32 * @alloc_size: size of user buffer 32 * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
33 * @copy_size: size of data to copy from user buffer 33 * @copy_size: size of data to copy from user buffer
34 * @pos: position write is at in the file (NOT NULL) 34 * @pos: position write is at in the file (NOT NULL)
35 * 35 *
@@ -42,6 +42,8 @@ static char *aa_simple_write_to_buffer(int op, const char __user *userbuf,
42{ 42{
43 char *data; 43 char *data;
44 44
45 BUG_ON(copy_size > alloc_size);
46
45 if (*pos != 0) 47 if (*pos != 0)
46 /* only writes from pos 0, that is complete writes */ 48 /* only writes from pos 0, that is complete writes */
47 return ERR_PTR(-ESPIPE); 49 return ERR_PTR(-ESPIPE);
diff --git a/security/capability.c b/security/capability.c
index 95a6599a37bb..30ae00fbecd5 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -677,7 +677,18 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
677{ 677{
678} 678}
679 679
680static int cap_secmark_relabel_packet(u32 secid)
681{
682 return 0;
683}
680 684
685static void cap_secmark_refcount_inc(void)
686{
687}
688
689static void cap_secmark_refcount_dec(void)
690{
691}
681 692
682static void cap_req_classify_flow(const struct request_sock *req, 693static void cap_req_classify_flow(const struct request_sock *req,
683 struct flowi *fl) 694 struct flowi *fl)
@@ -777,7 +788,8 @@ static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
777 788
778static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) 789static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
779{ 790{
780 return -EOPNOTSUPP; 791 *secid = 0;
792 return 0;
781} 793}
782 794
783static void cap_release_secctx(char *secdata, u32 seclen) 795static void cap_release_secctx(char *secdata, u32 seclen)
@@ -1018,6 +1030,9 @@ void __init security_fixup_ops(struct security_operations *ops)
1018 set_to_cap_if_null(ops, inet_conn_request); 1030 set_to_cap_if_null(ops, inet_conn_request);
1019 set_to_cap_if_null(ops, inet_csk_clone); 1031 set_to_cap_if_null(ops, inet_csk_clone);
1020 set_to_cap_if_null(ops, inet_conn_established); 1032 set_to_cap_if_null(ops, inet_conn_established);
1033 set_to_cap_if_null(ops, secmark_relabel_packet);
1034 set_to_cap_if_null(ops, secmark_refcount_inc);
1035 set_to_cap_if_null(ops, secmark_refcount_dec);
1021 set_to_cap_if_null(ops, req_classify_flow); 1036 set_to_cap_if_null(ops, req_classify_flow);
1022 set_to_cap_if_null(ops, tun_dev_create); 1037 set_to_cap_if_null(ops, tun_dev_create);
1023 set_to_cap_if_null(ops, tun_dev_post_create); 1038 set_to_cap_if_null(ops, tun_dev_post_create);
diff --git a/security/commoncap.c b/security/commoncap.c
index 9d172e6e330c..5e632b4857e4 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -719,14 +719,11 @@ static int cap_safe_nice(struct task_struct *p)
719/** 719/**
 720 * cap_task_setscheduler - Determine if scheduler policy change is permitted 720 * cap_task_setscheduler - Determine if scheduler policy change is permitted
721 * @p: The task to affect 721 * @p: The task to affect
722 * @policy: The policy to effect
723 * @lp: The parameters to the scheduling policy
724 * 722 *
 725 * Determine if the requested scheduler policy change is permitted for the 723 * Determine if the requested scheduler policy change is permitted for the
726 * specified task, returning 0 if permission is granted, -ve if denied. 724 * specified task, returning 0 if permission is granted, -ve if denied.
727 */ 725 */
728int cap_task_setscheduler(struct task_struct *p, int policy, 726int cap_task_setscheduler(struct task_struct *p)
729 struct sched_param *lp)
730{ 727{
731 return cap_safe_nice(p); 728 return cap_safe_nice(p);
732} 729}
diff --git a/security/security.c b/security/security.c
index c53949f17d9e..b50f472061a4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -89,20 +89,12 @@ __setup("security=", choose_lsm);
89 * Return true if: 89 * Return true if:
90 * -The passed LSM is the one chosen by user at boot time, 90 * -The passed LSM is the one chosen by user at boot time,
91 * -or the passed LSM is configured as the default and the user did not 91 * -or the passed LSM is configured as the default and the user did not
92 * choose an alternate LSM at boot time, 92 * choose an alternate LSM at boot time.
93 * -or there is no default LSM set and the user didn't specify a
94 * specific LSM and we're the first to ask for registration permission,
95 * -or the passed LSM is currently loaded.
96 * Otherwise, return false. 93 * Otherwise, return false.
97 */ 94 */
98int __init security_module_enable(struct security_operations *ops) 95int __init security_module_enable(struct security_operations *ops)
99{ 96{
100 if (!*chosen_lsm) 97 return !strcmp(ops->name, chosen_lsm);
101 strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX);
102 else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX))
103 return 0;
104
105 return 1;
106} 98}
107 99
108/** 100/**
@@ -786,10 +778,9 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
786 return security_ops->task_setrlimit(p, resource, new_rlim); 778 return security_ops->task_setrlimit(p, resource, new_rlim);
787} 779}
788 780
789int security_task_setscheduler(struct task_struct *p, 781int security_task_setscheduler(struct task_struct *p)
790 int policy, struct sched_param *lp)
791{ 782{
792 return security_ops->task_setscheduler(p, policy, lp); 783 return security_ops->task_setscheduler(p);
793} 784}
794 785
795int security_task_getscheduler(struct task_struct *p) 786int security_task_getscheduler(struct task_struct *p)
@@ -1145,6 +1136,24 @@ void security_inet_conn_established(struct sock *sk,
1145 security_ops->inet_conn_established(sk, skb); 1136 security_ops->inet_conn_established(sk, skb);
1146} 1137}
1147 1138
1139int security_secmark_relabel_packet(u32 secid)
1140{
1141 return security_ops->secmark_relabel_packet(secid);
1142}
1143EXPORT_SYMBOL(security_secmark_relabel_packet);
1144
1145void security_secmark_refcount_inc(void)
1146{
1147 security_ops->secmark_refcount_inc();
1148}
1149EXPORT_SYMBOL(security_secmark_refcount_inc);
1150
1151void security_secmark_refcount_dec(void)
1152{
1153 security_ops->secmark_refcount_dec();
1154}
1155EXPORT_SYMBOL(security_secmark_refcount_dec);
1156
1148int security_tun_dev_create(void) 1157int security_tun_dev_create(void)
1149{ 1158{
1150 return security_ops->tun_dev_create(); 1159 return security_ops->tun_dev_create();
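With the security_module_enable() rewrite above, registration becomes a pure name match against chosen_lsm: an LSM can no longer claim the slot simply by being the first to ask. A minimal sketch of the init-time idiom this implies, assuming a hypothetical example_ops:

static __init int example_lsm_init(void)
{
	/* bail out quietly if another LSM was selected via security= */
	if (!security_module_enable(&example_ops))
		return 0;

	return register_security(&example_ops);
}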
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 58d80f3bd6f6..ad5cd76ec231 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -2,25 +2,20 @@
2# Makefile for building the SELinux module as part of the kernel tree. 2# Makefile for building the SELinux module as part of the kernel tree.
3# 3#
4 4
5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/ 5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
6 6
7selinux-y := avc.o \ 7selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
8 hooks.o \ 8 netnode.o netport.o exports.o \
9 selinuxfs.o \ 9 ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
10 netlink.o \ 10 ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
11 nlmsgtab.o \
12 netif.o \
13 netnode.o \
14 netport.o \
15 exports.o
16 11
17selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o 12selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
18 13
19selinux-$(CONFIG_NETLABEL) += netlabel.o 14selinux-$(CONFIG_NETLABEL) += netlabel.o
20 15
21EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include 16ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
22 17
23$(obj)/avc.o: $(obj)/flask.h 18$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
24 19
25quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h 20quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
26 cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h 21 cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index c0a454aee1e0..90664385dead 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -11,58 +11,9 @@
11 * it under the terms of the GNU General Public License version 2, 11 * it under the terms of the GNU General Public License version 2,
12 * as published by the Free Software Foundation. 12 * as published by the Free Software Foundation.
13 */ 13 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/module.h> 14#include <linux/module.h>
17#include <linux/selinux.h>
18#include <linux/fs.h>
19#include <linux/ipc.h>
20#include <asm/atomic.h>
21 15
22#include "security.h" 16#include "security.h"
23#include "objsec.h"
24
25/* SECMARK reference count */
26extern atomic_t selinux_secmark_refcount;
27
28int selinux_string_to_sid(char *str, u32 *sid)
29{
30 if (selinux_enabled)
31 return security_context_to_sid(str, strlen(str), sid);
32 else {
33 *sid = 0;
34 return 0;
35 }
36}
37EXPORT_SYMBOL_GPL(selinux_string_to_sid);
38
39int selinux_secmark_relabel_packet_permission(u32 sid)
40{
41 if (selinux_enabled) {
42 const struct task_security_struct *__tsec;
43 u32 tsid;
44
45 __tsec = current_security();
46 tsid = __tsec->sid;
47
48 return avc_has_perm(tsid, sid, SECCLASS_PACKET,
49 PACKET__RELABELTO, NULL);
50 }
51 return 0;
52}
53EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
54
55void selinux_secmark_refcount_inc(void)
56{
57 atomic_inc(&selinux_secmark_refcount);
58}
59EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
60
61void selinux_secmark_refcount_dec(void)
62{
63 atomic_dec(&selinux_secmark_refcount);
64}
65EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
66 17
67bool selinux_is_enabled(void) 18bool selinux_is_enabled(void)
68{ 19{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4796ddd4e721..d9154cf90ae1 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3354,11 +3354,11 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
3354 return 0; 3354 return 0;
3355} 3355}
3356 3356
3357static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp) 3357static int selinux_task_setscheduler(struct task_struct *p)
3358{ 3358{
3359 int rc; 3359 int rc;
3360 3360
3361 rc = cap_task_setscheduler(p, policy, lp); 3361 rc = cap_task_setscheduler(p);
3362 if (rc) 3362 if (rc)
3363 return rc; 3363 return rc;
3364 3364
@@ -4279,6 +4279,27 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
4279 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); 4279 selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
4280} 4280}
4281 4281
4282static int selinux_secmark_relabel_packet(u32 sid)
4283{
4284 const struct task_security_struct *__tsec;
4285 u32 tsid;
4286
4287 __tsec = current_security();
4288 tsid = __tsec->sid;
4289
4290 return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
4291}
4292
4293static void selinux_secmark_refcount_inc(void)
4294{
4295 atomic_inc(&selinux_secmark_refcount);
4296}
4297
4298static void selinux_secmark_refcount_dec(void)
4299{
4300 atomic_dec(&selinux_secmark_refcount);
4301}
4302
4282static void selinux_req_classify_flow(const struct request_sock *req, 4303static void selinux_req_classify_flow(const struct request_sock *req,
4283 struct flowi *fl) 4304 struct flowi *fl)
4284{ 4305{
@@ -5533,6 +5554,9 @@ static struct security_operations selinux_ops = {
5533 .inet_conn_request = selinux_inet_conn_request, 5554 .inet_conn_request = selinux_inet_conn_request,
5534 .inet_csk_clone = selinux_inet_csk_clone, 5555 .inet_csk_clone = selinux_inet_csk_clone,
5535 .inet_conn_established = selinux_inet_conn_established, 5556 .inet_conn_established = selinux_inet_conn_established,
5557 .secmark_relabel_packet = selinux_secmark_relabel_packet,
5558 .secmark_refcount_inc = selinux_secmark_refcount_inc,
5559 .secmark_refcount_dec = selinux_secmark_refcount_dec,
5536 .req_classify_flow = selinux_req_classify_flow, 5560 .req_classify_flow = selinux_req_classify_flow,
5537 .tun_dev_create = selinux_tun_dev_create, 5561 .tun_dev_create = selinux_tun_dev_create,
5538 .tun_dev_post_create = selinux_tun_dev_post_create, 5562 .tun_dev_post_create = selinux_tun_dev_post_create,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index b4c9eb4bd6f9..8858d2b2d4b6 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -17,7 +17,7 @@ struct security_class_mapping secclass_map[] = {
17 { "compute_av", "compute_create", "compute_member", 17 { "compute_av", "compute_create", "compute_member",
18 "check_context", "load_policy", "compute_relabel", 18 "check_context", "load_policy", "compute_relabel",
19 "compute_user", "setenforce", "setbool", "setsecparam", 19 "compute_user", "setenforce", "setbool", "setsecparam",
20 "setcheckreqprot", NULL } }, 20 "setcheckreqprot", "read_policy", NULL } },
21 { "process", 21 { "process",
22 { "fork", "transition", "sigchld", "sigkill", 22 { "fork", "transition", "sigchld", "sigkill",
23 "sigstop", "signull", "signal", "ptrace", "getsched", "setsched", 23 "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 1f7c2491d3dc..671273eb1115 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -9,6 +9,7 @@
9#define _SELINUX_SECURITY_H_ 9#define _SELINUX_SECURITY_H_
10 10
11#include <linux/magic.h> 11#include <linux/magic.h>
12#include <linux/types.h>
12#include "flask.h" 13#include "flask.h"
13 14
14#define SECSID_NULL 0x00000000 /* unspecified SID */ 15#define SECSID_NULL 0x00000000 /* unspecified SID */
@@ -82,6 +83,8 @@ extern int selinux_policycap_openperm;
82int security_mls_enabled(void); 83int security_mls_enabled(void);
83 84
84int security_load_policy(void *data, size_t len); 85int security_load_policy(void *data, size_t len);
86int security_read_policy(void **data, ssize_t *len);
87size_t security_policydb_len(void);
85 88
86int security_policycap_supported(unsigned int req_cap); 89int security_policycap_supported(unsigned int req_cap);
87 90
@@ -191,5 +194,25 @@ static inline int security_netlbl_sid_to_secattr(u32 sid,
191 194
192const char *security_get_initial_sid_context(u32 sid); 195const char *security_get_initial_sid_context(u32 sid);
193 196
197/*
198 * status notifier using mmap interface
199 */
200extern struct page *selinux_kernel_status_page(void);
201
202#define SELINUX_KERNEL_STATUS_VERSION 1
203struct selinux_kernel_status {
204 u32 version; /* version number of this structure */
205 u32 sequence; /* sequence number of seqlock logic */
206 u32 enforcing; /* current setting of enforcing mode */
207 u32 policyload; /* times of policy reloaded */
208 u32 deny_unknown; /* current setting of deny_unknown */
209 /*
210 * Version > 0 of this structure supports the members above.
211 */
212} __attribute__((packed));
213
214extern void selinux_status_update_setenforce(int enforcing);
215extern void selinux_status_update_policyload(int seqno);
216
194#endif /* _SELINUX_SECURITY_H_ */ 217#endif /* _SELINUX_SECURITY_H_ */
195 218
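The structure above is meant to be mmap()ed read-only from userspace, with the sequence member acting like a seqlock generation counter: it is odd while the kernel is mid-update. A hedged userspace sketch, assuming selinuxfs is mounted at /selinux and the page size is 4096; error handling and memory barriers are elided for brevity:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>

struct selinux_kernel_status {
	uint32_t version, sequence, enforcing, policyload, deny_unknown;
} __attribute__((packed));

static uint32_t read_enforcing(void)
{
	int fd = open("/selinux/status", O_RDONLY);
	volatile struct selinux_kernel_status *s =
		mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	uint32_t seq, enforcing;

	do {
		seq = s->sequence;	/* odd while an update is in flight */
		enforcing = s->enforcing;
	} while ((seq & 1) || s->sequence != seq);

	return enforcing;
}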
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 79a1bb635662..87e0556bae70 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -68,6 +68,8 @@ static int *bool_pending_values;
68static struct dentry *class_dir; 68static struct dentry *class_dir;
69static unsigned long last_class_ino; 69static unsigned long last_class_ino;
70 70
71static char policy_opened;
72
71/* global data for policy capabilities */ 73/* global data for policy capabilities */
72static struct dentry *policycap_dir; 74static struct dentry *policycap_dir;
73 75
@@ -110,6 +112,8 @@ enum sel_inos {
110 SEL_COMPAT_NET, /* whether to use old compat network packet controls */ 112 SEL_COMPAT_NET, /* whether to use old compat network packet controls */
111 SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */ 113 SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
112 SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */ 114 SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
115 SEL_STATUS, /* export current status using mmap() */
116 SEL_POLICY, /* allow userspace to read the in kernel policy */
113 SEL_INO_NEXT, /* The next inode number to use */ 117 SEL_INO_NEXT, /* The next inode number to use */
114}; 118};
115 119
@@ -171,6 +175,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
171 if (selinux_enforcing) 175 if (selinux_enforcing)
172 avc_ss_reset(0); 176 avc_ss_reset(0);
173 selnl_notify_setenforce(selinux_enforcing); 177 selnl_notify_setenforce(selinux_enforcing);
178 selinux_status_update_setenforce(selinux_enforcing);
174 } 179 }
175 length = count; 180 length = count;
176out: 181out:
@@ -205,6 +210,59 @@ static const struct file_operations sel_handle_unknown_ops = {
205 .llseek = generic_file_llseek, 210 .llseek = generic_file_llseek,
206}; 211};
207 212
213static int sel_open_handle_status(struct inode *inode, struct file *filp)
214{
215 struct page *status = selinux_kernel_status_page();
216
217 if (!status)
218 return -ENOMEM;
219
220 filp->private_data = status;
221
222 return 0;
223}
224
225static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
226 size_t count, loff_t *ppos)
227{
228 struct page *status = filp->private_data;
229
230 BUG_ON(!status);
231
232 return simple_read_from_buffer(buf, count, ppos,
233 page_address(status),
234 sizeof(struct selinux_kernel_status));
235}
236
237static int sel_mmap_handle_status(struct file *filp,
238 struct vm_area_struct *vma)
239{
240 struct page *status = filp->private_data;
241 unsigned long size = vma->vm_end - vma->vm_start;
242
243 BUG_ON(!status);
244
245 /* only allow mapping the single page at the head */
246 if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
247 return -EIO;
248 /* disallow writable mapping */
249 if (vma->vm_flags & VM_WRITE)
250 return -EPERM;
251 /* disallow mprotect() from making it writable */
252 vma->vm_flags &= ~VM_MAYWRITE;
253
254 return remap_pfn_range(vma, vma->vm_start,
255 page_to_pfn(status),
256 size, vma->vm_page_prot);
257}
258
259static const struct file_operations sel_handle_status_ops = {
260 .open = sel_open_handle_status,
261 .read = sel_read_handle_status,
262 .mmap = sel_mmap_handle_status,
263 .llseek = generic_file_llseek,
264};
265
208#ifdef CONFIG_SECURITY_SELINUX_DISABLE 266#ifdef CONFIG_SECURITY_SELINUX_DISABLE
209static ssize_t sel_write_disable(struct file *file, const char __user *buf, 267static ssize_t sel_write_disable(struct file *file, const char __user *buf,
210 size_t count, loff_t *ppos) 268 size_t count, loff_t *ppos)
@@ -296,6 +354,141 @@ static const struct file_operations sel_mls_ops = {
296 .llseek = generic_file_llseek, 354 .llseek = generic_file_llseek,
297}; 355};
298 356
357struct policy_load_memory {
358 size_t len;
359 void *data;
360};
361
362static int sel_open_policy(struct inode *inode, struct file *filp)
363{
364 struct policy_load_memory *plm = NULL;
365 int rc;
366
367 BUG_ON(filp->private_data);
368
369 mutex_lock(&sel_mutex);
370
371 rc = task_has_security(current, SECURITY__READ_POLICY);
372 if (rc)
373 goto err;
374
375 rc = -EBUSY;
376 if (policy_opened)
377 goto err;
378
379 rc = -ENOMEM;
380 plm = kzalloc(sizeof(*plm), GFP_KERNEL);
381 if (!plm)
382 goto err;
383
384 if (i_size_read(inode) != security_policydb_len()) {
385 mutex_lock(&inode->i_mutex);
386 i_size_write(inode, security_policydb_len());
387 mutex_unlock(&inode->i_mutex);
388 }
389
390 rc = security_read_policy(&plm->data, &plm->len);
391 if (rc)
392 goto err;
393
394 policy_opened = 1;
395
396 filp->private_data = plm;
397
398 mutex_unlock(&sel_mutex);
399
400 return 0;
401err:
402 mutex_unlock(&sel_mutex);
403
404 if (plm)
405 vfree(plm->data);
406 kfree(plm);
407 return rc;
408}
409
410static int sel_release_policy(struct inode *inode, struct file *filp)
411{
412 struct policy_load_memory *plm = filp->private_data;
413
414 BUG_ON(!plm);
415
416 policy_opened = 0;
417
418 vfree(plm->data);
419 kfree(plm);
420
421 return 0;
422}
423
424static ssize_t sel_read_policy(struct file *filp, char __user *buf,
425 size_t count, loff_t *ppos)
426{
427 struct policy_load_memory *plm = filp->private_data;
428 int ret;
429
430 mutex_lock(&sel_mutex);
431
432 ret = task_has_security(current, SECURITY__READ_POLICY);
433 if (ret)
434 goto out;
435
436 ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
437out:
438 mutex_unlock(&sel_mutex);
439 return ret;
440}
441
442static int sel_mmap_policy_fault(struct vm_area_struct *vma,
443 struct vm_fault *vmf)
444{
445 struct policy_load_memory *plm = vma->vm_file->private_data;
446 unsigned long offset;
447 struct page *page;
448
449 if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
450 return VM_FAULT_SIGBUS;
451
452 offset = vmf->pgoff << PAGE_SHIFT;
453 if (offset >= roundup(plm->len, PAGE_SIZE))
454 return VM_FAULT_SIGBUS;
455
456 page = vmalloc_to_page(plm->data + offset);
457 get_page(page);
458
459 vmf->page = page;
460
461 return 0;
462}
463
464static struct vm_operations_struct sel_mmap_policy_ops = {
465 .fault = sel_mmap_policy_fault,
466 .page_mkwrite = sel_mmap_policy_fault,
467};
468
469int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
470{
471 if (vma->vm_flags & VM_SHARED) {
472 /* do not allow mprotect to make mapping writable */
473 vma->vm_flags &= ~VM_MAYWRITE;
474
475 if (vma->vm_flags & VM_WRITE)
476 return -EACCES;
477 }
478
479 vma->vm_flags |= VM_RESERVED;
480 vma->vm_ops = &sel_mmap_policy_ops;
481
482 return 0;
483}
484
485static const struct file_operations sel_policy_ops = {
486 .open = sel_open_policy,
487 .read = sel_read_policy,
488 .mmap = sel_mmap_policy,
489 .release = sel_release_policy,
490};
491
299static ssize_t sel_write_load(struct file *file, const char __user *buf, 492static ssize_t sel_write_load(struct file *file, const char __user *buf,
300 size_t count, loff_t *ppos) 493 size_t count, loff_t *ppos)
301 494
@@ -1612,6 +1805,8 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
1612 [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR}, 1805 [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
1613 [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO}, 1806 [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
1614 [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO}, 1807 [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
1808 [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
1809 [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR},
1615 /* last one */ {""} 1810 /* last one */ {""}
1616 }; 1811 };
1617 ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); 1812 ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
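Together, sel_open_policy() and sel_read_policy() above hand userspace a stable snapshot of the running policy, and the -EBUSY guard limits it to one opener at a time. A hedged userspace sketch of dumping the policy, again assuming a /selinux mount point:

#include <stdio.h>

int main(void)
{
	FILE *in = fopen("/selinux/policy", "r");
	FILE *out = fopen("policy.bin", "w");
	char buf[4096];
	size_t n;

	if (!in || !out)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	return 0;
}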
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
deleted file mode 100644
index 15d4e62917de..000000000000
--- a/security/selinux/ss/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for building the SELinux security server as part of the kernel tree.
3#
4
5EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
6obj-y := ss.o
7
8ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
9
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 929480c6c430..a3dd9faa19c0 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -266,8 +266,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
266 if (shift > 2) 266 if (shift > 2)
267 shift = shift - 2; 267 shift = shift - 2;
268 nslot = 1 << shift; 268 nslot = 1 << shift;
269 if (nslot > MAX_AVTAB_SIZE) 269 if (nslot > MAX_AVTAB_HASH_BUCKETS)
270 nslot = MAX_AVTAB_SIZE; 270 nslot = MAX_AVTAB_HASH_BUCKETS;
271 mask = nslot - 1; 271 mask = nslot - 1;
272 272
273 h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL); 273 h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
@@ -501,6 +501,48 @@ bad:
501 goto out; 501 goto out;
502} 502}
503 503
504int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
505{
506 __le16 buf16[4];
507 __le32 buf32[1];
508 int rc;
509
510 buf16[0] = cpu_to_le16(cur->key.source_type);
511 buf16[1] = cpu_to_le16(cur->key.target_type);
512 buf16[2] = cpu_to_le16(cur->key.target_class);
513 buf16[3] = cpu_to_le16(cur->key.specified);
514 rc = put_entry(buf16, sizeof(u16), 4, fp);
515 if (rc)
516 return rc;
517 buf32[0] = cpu_to_le32(cur->datum.data);
518 rc = put_entry(buf32, sizeof(u32), 1, fp);
519 if (rc)
520 return rc;
521 return 0;
522}
523
524int avtab_write(struct policydb *p, struct avtab *a, void *fp)
525{
526 unsigned int i;
527 int rc = 0;
528 struct avtab_node *cur;
529 __le32 buf[1];
530
531 buf[0] = cpu_to_le32(a->nel);
532 rc = put_entry(buf, sizeof(u32), 1, fp);
533 if (rc)
534 return rc;
535
536 for (i = 0; i < a->nslot; i++) {
537 for (cur = a->htable[i]; cur; cur = cur->next) {
538 rc = avtab_write_item(p, cur, fp);
539 if (rc)
540 return rc;
541 }
542 }
543
544 return rc;
545}
504void avtab_cache_init(void) 546void avtab_cache_init(void)
505{ 547{
506 avtab_node_cachep = kmem_cache_create("avtab_node", 548 avtab_node_cachep = kmem_cache_create("avtab_node",
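For reference, the fixed-size record that avtab_write_item() emits can be pictured with the following layout sketch, read off the buf16/buf32 writes above (all fields little-endian; the struct itself is illustrative, not part of the patch):

struct avtab_disk_entry {
	__le16 source_type;
	__le16 target_type;
	__le16 target_class;
	__le16 specified;	/* AVTAB_ALLOWED, AVTAB_TRANSITION, ... */
	__le32 data;		/* permission bitmap or transition type */
} __attribute__((packed));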
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index cd4f734e2749..dff0c75345c1 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -71,6 +71,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
71 void *p); 71 void *p);
72 72
73int avtab_read(struct avtab *a, void *fp, struct policydb *pol); 73int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
74int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
75int avtab_write(struct policydb *p, struct avtab *a, void *fp);
74 76
75struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, 77struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
76 struct avtab_datum *datum); 78 struct avtab_datum *datum);
@@ -85,7 +87,6 @@ void avtab_cache_destroy(void);
85#define MAX_AVTAB_HASH_BITS 11 87#define MAX_AVTAB_HASH_BITS 11
86#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) 88#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
87#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) 89#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
88#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
89 90
90#endif /* _SS_AVTAB_H_ */ 91#endif /* _SS_AVTAB_H_ */
91 92
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c91e150c3087..655fe1c6cc69 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -490,6 +490,129 @@ err:
490 return rc; 490 return rc;
491} 491}
492 492
493int cond_write_bool(void *vkey, void *datum, void *ptr)
494{
495 char *key = vkey;
496 struct cond_bool_datum *booldatum = datum;
497 struct policy_data *pd = ptr;
498 void *fp = pd->fp;
499 __le32 buf[3];
500 u32 len;
501 int rc;
502
503 len = strlen(key);
504 buf[0] = cpu_to_le32(booldatum->value);
505 buf[1] = cpu_to_le32(booldatum->state);
506 buf[2] = cpu_to_le32(len);
507 rc = put_entry(buf, sizeof(u32), 3, fp);
508 if (rc)
509 return rc;
510 rc = put_entry(key, 1, len, fp);
511 if (rc)
512 return rc;
513 return 0;
514}
515
516/*
517 * cond_write_av_list doesn't write out the av_list nodes.
518 * Instead it writes out the key/value pairs from the avtab. This
519 * is necessary because there is no way to uniquely identify rules
520 * in the avtab, so it is not possible to associate individual rules
521 * in the avtab with a conditional without saving them as part of
522 * the conditional. This means that the avtab with the conditional
523 * rules will not be saved but will be rebuilt on policy load.
524 */
525static int cond_write_av_list(struct policydb *p,
526 struct cond_av_list *list, struct policy_file *fp)
527{
528 __le32 buf[1];
529 struct cond_av_list *cur_list;
530 u32 len;
531 int rc;
532
533 len = 0;
534 for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
535 len++;
536
537 buf[0] = cpu_to_le32(len);
538 rc = put_entry(buf, sizeof(u32), 1, fp);
539 if (rc)
540 return rc;
541
542 if (len == 0)
543 return 0;
544
545 for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
546 rc = avtab_write_item(p, cur_list->node, fp);
547 if (rc)
548 return rc;
549 }
550
551 return 0;
552}
553
554int cond_write_node(struct policydb *p, struct cond_node *node,
555 struct policy_file *fp)
556{
557 struct cond_expr *cur_expr;
558 __le32 buf[2];
559 int rc;
560 u32 len = 0;
561
562 buf[0] = cpu_to_le32(node->cur_state);
563 rc = put_entry(buf, sizeof(u32), 1, fp);
564 if (rc)
565 return rc;
566
567 for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
568 len++;
569
570 buf[0] = cpu_to_le32(len);
571 rc = put_entry(buf, sizeof(u32), 1, fp);
572 if (rc)
573 return rc;
574
575 for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
576 buf[0] = cpu_to_le32(cur_expr->expr_type);
577 buf[1] = cpu_to_le32(cur_expr->bool);
578 rc = put_entry(buf, sizeof(u32), 2, fp);
579 if (rc)
580 return rc;
581 }
582
583 rc = cond_write_av_list(p, node->true_list, fp);
584 if (rc)
585 return rc;
586 rc = cond_write_av_list(p, node->false_list, fp);
587 if (rc)
588 return rc;
589
590 return 0;
591}
592
593int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
594{
595 struct cond_node *cur;
596 u32 len;
597 __le32 buf[1];
598 int rc;
599
600 len = 0;
601 for (cur = list; cur != NULL; cur = cur->next)
602 len++;
603 buf[0] = cpu_to_le32(len);
604 rc = put_entry(buf, sizeof(u32), 1, fp);
605 if (rc)
606 return rc;
607
608 for (cur = list; cur != NULL; cur = cur->next) {
609 rc = cond_write_node(p, cur, fp);
610 if (rc)
611 return rc;
612 }
613
614 return 0;
615}
493/* Determine whether additional permissions are granted by the conditional 616/* Determine whether additional permissions are granted by the conditional
494 * av table, and if so, add them to the result 617 * av table, and if so, add them to the result
495 */ 618 */
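Putting the conditional writers above together, the serialized stream has the following shape. This is a reading of the code, not a format quoted from the patch:

/*
 * cond_write_list() output, all values __le32:
 *
 *   n_nodes
 *   per node (cond_write_node):
 *     cur_state
 *     n_exprs
 *     per expr:   expr_type, bool
 *     true list:  n_entries, then one avtab_write_item() record each
 *     false list: n_entries, then one avtab_write_item() record each
 */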
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 53ddb013ae57..3f209c635295 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -69,6 +69,8 @@ int cond_index_bool(void *key, void *datum, void *datap);
69 69
70int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp); 70int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
71int cond_read_list(struct policydb *p, void *fp); 71int cond_read_list(struct policydb *p, void *fp);
72int cond_write_bool(void *key, void *datum, void *ptr);
73int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
72 74
73void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd); 75void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
74 76
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 04b6145d767f..d42951fcbe87 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -22,6 +22,8 @@
22#include "ebitmap.h" 22#include "ebitmap.h"
23#include "policydb.h" 23#include "policydb.h"
24 24
25#define BITS_PER_U64 (sizeof(u64) * 8)
26
25int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) 27int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
26{ 28{
27 struct ebitmap_node *n1, *n2; 29 struct ebitmap_node *n1, *n2;
@@ -363,10 +365,10 @@ int ebitmap_read(struct ebitmap *e, void *fp)
363 e->highbit = le32_to_cpu(buf[1]); 365 e->highbit = le32_to_cpu(buf[1]);
364 count = le32_to_cpu(buf[2]); 366 count = le32_to_cpu(buf[2]);
365 367
366 if (mapunit != sizeof(u64) * 8) { 368 if (mapunit != BITS_PER_U64) {
367 printk(KERN_ERR "SELinux: ebitmap: map size %u does not " 369 printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
368 "match my size %Zd (high bit was %d)\n", 370 "match my size %Zd (high bit was %d)\n",
369 mapunit, sizeof(u64) * 8, e->highbit); 371 mapunit, BITS_PER_U64, e->highbit);
370 goto bad; 372 goto bad;
371 } 373 }
372 374
@@ -446,3 +448,78 @@ bad:
446 ebitmap_destroy(e); 448 ebitmap_destroy(e);
447 goto out; 449 goto out;
448} 450}
451
452int ebitmap_write(struct ebitmap *e, void *fp)
453{
454 struct ebitmap_node *n;
455 u32 count;
456 __le32 buf[3];
457 u64 map;
458 int bit, last_bit, last_startbit, rc;
459
460 buf[0] = cpu_to_le32(BITS_PER_U64);
461
462 count = 0;
463 last_bit = 0;
464 last_startbit = -1;
465 ebitmap_for_each_positive_bit(e, n, bit) {
466 if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
467 count++;
468 last_startbit = rounddown(bit, BITS_PER_U64);
469 }
470 last_bit = roundup(bit + 1, BITS_PER_U64);
471 }
472 buf[1] = cpu_to_le32(last_bit);
473 buf[2] = cpu_to_le32(count);
474
475 rc = put_entry(buf, sizeof(u32), 3, fp);
476 if (rc)
477 return rc;
478
479 map = 0;
480 last_startbit = INT_MIN;
481 ebitmap_for_each_positive_bit(e, n, bit) {
482 if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
483 __le64 buf64[1];
484
485 /* this is the very first bit */
486 if (!map) {
487 last_startbit = rounddown(bit, BITS_PER_U64);
488 map = (u64)1 << (bit - last_startbit);
489 continue;
490 }
491
492 /* write the last node */
493 buf[0] = cpu_to_le32(last_startbit);
494 rc = put_entry(buf, sizeof(u32), 1, fp);
495 if (rc)
496 return rc;
497
498 buf64[0] = cpu_to_le64(map);
499 rc = put_entry(buf64, sizeof(u64), 1, fp);
500 if (rc)
501 return rc;
502
503 /* set up for the next node */
504 map = 0;
505 last_startbit = rounddown(bit, BITS_PER_U64);
506 }
507 map |= (u64)1 << (bit - last_startbit);
508 }
509 /* write the last node */
510 if (map) {
511 __le64 buf64[1];
512
513 /* write the last node */
514 buf[0] = cpu_to_le32(last_startbit);
515 rc = put_entry(buf, sizeof(u32), 1, fp);
516 if (rc)
517 return rc;
518
519 buf64[0] = cpu_to_le64(map);
520 rc = put_entry(buf64, sizeof(u64), 1, fp);
521 if (rc)
522 return rc;
523 }
524 return 0;
525}
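A worked example helps check ebitmap_write(): for a bitmap with bits 3 and 70 set, the two passes above produce the values below (my arithmetic, derived from the code, not output captured from the patch):

/*
 * header: mapunit = 64, highbit = roundup(70 + 1, 64) = 128, count = 2
 * node 0: startbit =  0, map = 0x0000000000000008   (bit 3)
 * node 1: startbit = 64, map = 0x0000000000000040   (bit 70 - 64 = 6)
 */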
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index f283b4367f54..1f4e93c2ae86 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -123,6 +123,7 @@ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
123int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); 123int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
124void ebitmap_destroy(struct ebitmap *e); 124void ebitmap_destroy(struct ebitmap *e);
125int ebitmap_read(struct ebitmap *e, void *fp); 125int ebitmap_read(struct ebitmap *e, void *fp);
126int ebitmap_write(struct ebitmap *e, void *fp);
126 127
127#ifdef CONFIG_NETLABEL 128#ifdef CONFIG_NETLABEL
128int ebitmap_netlbl_export(struct ebitmap *ebmap, 129int ebitmap_netlbl_export(struct ebitmap *ebmap,
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 3a29704be8ce..94f630d93a5c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -37,6 +37,7 @@
37#include "policydb.h" 37#include "policydb.h"
38#include "conditional.h" 38#include "conditional.h"
39#include "mls.h" 39#include "mls.h"
40#include "services.h"
40 41
41#define _DEBUG_HASHES 42#define _DEBUG_HASHES
42 43
@@ -185,9 +186,19 @@ static u32 rangetr_hash(struct hashtab *h, const void *k)
185static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2) 186static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
186{ 187{
187 const struct range_trans *key1 = k1, *key2 = k2; 188 const struct range_trans *key1 = k1, *key2 = k2;
188 return (key1->source_type != key2->source_type || 189 int v;
189 key1->target_type != key2->target_type || 190
190 key1->target_class != key2->target_class); 191 v = key1->source_type - key2->source_type;
192 if (v)
193 return v;
194
195 v = key1->target_type - key2->target_type;
196 if (v)
197 return v;
198
199 v = key1->target_class - key2->target_class;
200
201 return v;
191} 202}
192 203
193/* 204/*
@@ -1624,11 +1635,11 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
1624 1635
1625static int type_bounds_sanity_check(void *key, void *datum, void *datap) 1636static int type_bounds_sanity_check(void *key, void *datum, void *datap)
1626{ 1637{
1627 struct type_datum *upper, *type; 1638 struct type_datum *upper;
1628 struct policydb *p = datap; 1639 struct policydb *p = datap;
1629 int depth = 0; 1640 int depth = 0;
1630 1641
1631 upper = type = datum; 1642 upper = datum;
1632 while (upper->bounds) { 1643 while (upper->bounds) {
1633 if (++depth == POLICYDB_BOUNDS_MAXDEPTH) { 1644 if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
1634 printk(KERN_ERR "SELinux: type %s: " 1645 printk(KERN_ERR "SELinux: type %s: "
@@ -2306,3 +2317,843 @@ bad:
2306 policydb_destroy(p); 2317 policydb_destroy(p);
2307 goto out; 2318 goto out;
2308} 2319}
2320
2321/*
2322 * Write a MLS level structure to a policydb binary
2323 * representation file.
2324 */
2325static int mls_write_level(struct mls_level *l, void *fp)
2326{
2327 __le32 buf[1];
2328 int rc;
2329
2330 buf[0] = cpu_to_le32(l->sens);
2331 rc = put_entry(buf, sizeof(u32), 1, fp);
2332 if (rc)
2333 return rc;
2334
2335 rc = ebitmap_write(&l->cat, fp);
2336 if (rc)
2337 return rc;
2338
2339 return 0;
2340}
2341
2342/*
2343 * Write a MLS range structure to a policydb binary
2344 * representation file.
2345 */
2346static int mls_write_range_helper(struct mls_range *r, void *fp)
2347{
2348 __le32 buf[3];
2349 size_t items;
2350 int rc, eq;
2351
2352 eq = mls_level_eq(&r->level[1], &r->level[0]);
2353
2354 if (eq)
2355 items = 2;
2356 else
2357 items = 3;
2358 buf[0] = cpu_to_le32(items-1);
2359 buf[1] = cpu_to_le32(r->level[0].sens);
2360 if (!eq)
2361 buf[2] = cpu_to_le32(r->level[1].sens);
2362
2363 BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
2364
2365 rc = put_entry(buf, sizeof(u32), items, fp);
2366 if (rc)
2367 return rc;
2368
2369 rc = ebitmap_write(&r->level[0].cat, fp);
2370 if (rc)
2371 return rc;
2372 if (!eq) {
2373 rc = ebitmap_write(&r->level[1].cat, fp);
2374 if (rc)
2375 return rc;
2376 }
2377
2378 return 0;
2379}
2380
2381static int sens_write(void *vkey, void *datum, void *ptr)
2382{
2383 char *key = vkey;
2384 struct level_datum *levdatum = datum;
2385 struct policy_data *pd = ptr;
2386 void *fp = pd->fp;
2387 __le32 buf[2];
2388 size_t len;
2389 int rc;
2390
2391 len = strlen(key);
2392 buf[0] = cpu_to_le32(len);
2393 buf[1] = cpu_to_le32(levdatum->isalias);
2394 rc = put_entry(buf, sizeof(u32), 2, fp);
2395 if (rc)
2396 return rc;
2397
2398 rc = put_entry(key, 1, len, fp);
2399 if (rc)
2400 return rc;
2401
2402 rc = mls_write_level(levdatum->level, fp);
2403 if (rc)
2404 return rc;
2405
2406 return 0;
2407}
2408
2409static int cat_write(void *vkey, void *datum, void *ptr)
2410{
2411 char *key = vkey;
2412 struct cat_datum *catdatum = datum;
2413 struct policy_data *pd = ptr;
2414 void *fp = pd->fp;
2415 __le32 buf[3];
2416 size_t len;
2417 int rc;
2418
2419 len = strlen(key);
2420 buf[0] = cpu_to_le32(len);
2421 buf[1] = cpu_to_le32(catdatum->value);
2422 buf[2] = cpu_to_le32(catdatum->isalias);
2423 rc = put_entry(buf, sizeof(u32), 3, fp);
2424 if (rc)
2425 return rc;
2426
2427 rc = put_entry(key, 1, len, fp);
2428 if (rc)
2429 return rc;
2430
2431 return 0;
2432}
2433
2434static int role_trans_write(struct role_trans *r, void *fp)
2435{
2436 struct role_trans *tr;
2437 u32 buf[3];
2438 size_t nel;
2439 int rc;
2440
2441 nel = 0;
2442 for (tr = r; tr; tr = tr->next)
2443 nel++;
2444 buf[0] = cpu_to_le32(nel);
2445 rc = put_entry(buf, sizeof(u32), 1, fp);
2446 if (rc)
2447 return rc;
2448 for (tr = r; tr; tr = tr->next) {
2449 buf[0] = cpu_to_le32(tr->role);
2450 buf[1] = cpu_to_le32(tr->type);
2451 buf[2] = cpu_to_le32(tr->new_role);
2452 rc = put_entry(buf, sizeof(u32), 3, fp);
2453 if (rc)
2454 return rc;
2455 }
2456
2457 return 0;
2458}
2459
2460static int role_allow_write(struct role_allow *r, void *fp)
2461{
2462 struct role_allow *ra;
2463 u32 buf[2];
2464 size_t nel;
2465 int rc;
2466
2467 nel = 0;
2468 for (ra = r; ra; ra = ra->next)
2469 nel++;
2470 buf[0] = cpu_to_le32(nel);
2471 rc = put_entry(buf, sizeof(u32), 1, fp);
2472 if (rc)
2473 return rc;
2474 for (ra = r; ra; ra = ra->next) {
2475 buf[0] = cpu_to_le32(ra->role);
2476 buf[1] = cpu_to_le32(ra->new_role);
2477 rc = put_entry(buf, sizeof(u32), 2, fp);
2478 if (rc)
2479 return rc;
2480 }
2481 return 0;
2482}
2483
2484/*
2485 * Write a security context structure
2486 * to a policydb binary representation file.
2487 */
2488static int context_write(struct policydb *p, struct context *c,
2489 void *fp)
2490{
2491 int rc;
2492 __le32 buf[3];
2493
2494 buf[0] = cpu_to_le32(c->user);
2495 buf[1] = cpu_to_le32(c->role);
2496 buf[2] = cpu_to_le32(c->type);
2497
2498 rc = put_entry(buf, sizeof(u32), 3, fp);
2499 if (rc)
2500 return rc;
2501
2502 rc = mls_write_range_helper(&c->range, fp);
2503 if (rc)
2504 return rc;
2505
2506 return 0;
2507}
2508
2509/*
2510 * The following *_write functions are used to
2511 * write the symbol data to a policy database
2512 * binary representation file.
2513 */
2514
2515static int perm_write(void *vkey, void *datum, void *fp)
2516{
2517 char *key = vkey;
2518 struct perm_datum *perdatum = datum;
2519 __le32 buf[2];
2520 size_t len;
2521 int rc;
2522
2523 len = strlen(key);
2524 buf[0] = cpu_to_le32(len);
2525 buf[1] = cpu_to_le32(perdatum->value);
2526 rc = put_entry(buf, sizeof(u32), 2, fp);
2527 if (rc)
2528 return rc;
2529
2530 rc = put_entry(key, 1, len, fp);
2531 if (rc)
2532 return rc;
2533
2534 return 0;
2535}
2536
2537static int common_write(void *vkey, void *datum, void *ptr)
2538{
2539 char *key = vkey;
2540 struct common_datum *comdatum = datum;
2541 struct policy_data *pd = ptr;
2542 void *fp = pd->fp;
2543 __le32 buf[4];
2544 size_t len;
2545 int rc;
2546
2547 len = strlen(key);
2548 buf[0] = cpu_to_le32(len);
2549 buf[1] = cpu_to_le32(comdatum->value);
2550 buf[2] = cpu_to_le32(comdatum->permissions.nprim);
2551 buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
2552 rc = put_entry(buf, sizeof(u32), 4, fp);
2553 if (rc)
2554 return rc;
2555
2556 rc = put_entry(key, 1, len, fp);
2557 if (rc)
2558 return rc;
2559
2560 rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
2561 if (rc)
2562 return rc;
2563
2564 return 0;
2565}
2566
2567static int write_cons_helper(struct policydb *p, struct constraint_node *node,
2568 void *fp)
2569{
2570 struct constraint_node *c;
2571 struct constraint_expr *e;
2572 __le32 buf[3];
2573 u32 nel;
2574 int rc;
2575
2576 for (c = node; c; c = c->next) {
2577 nel = 0;
2578 for (e = c->expr; e; e = e->next)
2579 nel++;
2580 buf[0] = cpu_to_le32(c->permissions);
2581 buf[1] = cpu_to_le32(nel);
2582 rc = put_entry(buf, sizeof(u32), 2, fp);
2583 if (rc)
2584 return rc;
2585 for (e = c->expr; e; e = e->next) {
2586 buf[0] = cpu_to_le32(e->expr_type);
2587 buf[1] = cpu_to_le32(e->attr);
2588 buf[2] = cpu_to_le32(e->op);
2589 rc = put_entry(buf, sizeof(u32), 3, fp);
2590 if (rc)
2591 return rc;
2592
2593 switch (e->expr_type) {
2594 case CEXPR_NAMES:
2595 rc = ebitmap_write(&e->names, fp);
2596 if (rc)
2597 return rc;
2598 break;
2599 default:
2600 break;
2601 }
2602 }
2603 }
2604
2605 return 0;
2606}
2607
2608static int class_write(void *vkey, void *datum, void *ptr)
2609{
2610 char *key = vkey;
2611 struct class_datum *cladatum = datum;
2612 struct policy_data *pd = ptr;
2613 void *fp = pd->fp;
2614 struct policydb *p = pd->p;
2615 struct constraint_node *c;
2616 __le32 buf[6];
2617 u32 ncons;
2618 size_t len, len2;
2619 int rc;
2620
2621 len = strlen(key);
2622 if (cladatum->comkey)
2623 len2 = strlen(cladatum->comkey);
2624 else
2625 len2 = 0;
2626
2627 ncons = 0;
2628 for (c = cladatum->constraints; c; c = c->next)
2629 ncons++;
2630
2631 buf[0] = cpu_to_le32(len);
2632 buf[1] = cpu_to_le32(len2);
2633 buf[2] = cpu_to_le32(cladatum->value);
2634 buf[3] = cpu_to_le32(cladatum->permissions.nprim);
2635 if (cladatum->permissions.table)
2636 buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
2637 else
2638 buf[4] = 0;
2639 buf[5] = cpu_to_le32(ncons);
2640 rc = put_entry(buf, sizeof(u32), 6, fp);
2641 if (rc)
2642 return rc;
2643
2644 rc = put_entry(key, 1, len, fp);
2645 if (rc)
2646 return rc;
2647
2648 if (cladatum->comkey) {
2649 rc = put_entry(cladatum->comkey, 1, len2, fp);
2650 if (rc)
2651 return rc;
2652 }
2653
2654 rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
2655 if (rc)
2656 return rc;
2657
2658 rc = write_cons_helper(p, cladatum->constraints, fp);
2659 if (rc)
2660 return rc;
2661
2662 /* write out the validatetrans rules */
2663 ncons = 0;
2664 for (c = cladatum->validatetrans; c; c = c->next)
2665 ncons++;
2666
2667 buf[0] = cpu_to_le32(ncons);
2668 rc = put_entry(buf, sizeof(u32), 1, fp);
2669 if (rc)
2670 return rc;
2671
2672 rc = write_cons_helper(p, cladatum->validatetrans, fp);
2673 if (rc)
2674 return rc;
2675
2676 return 0;
2677}
2678
2679static int role_write(void *vkey, void *datum, void *ptr)
2680{
2681 char *key = vkey;
2682 struct role_datum *role = datum;
2683 struct policy_data *pd = ptr;
2684 void *fp = pd->fp;
2685 struct policydb *p = pd->p;
2686 __le32 buf[3];
2687 size_t items, len;
2688 int rc;
2689
2690 len = strlen(key);
2691 items = 0;
2692 buf[items++] = cpu_to_le32(len);
2693 buf[items++] = cpu_to_le32(role->value);
2694 if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
2695 buf[items++] = cpu_to_le32(role->bounds);
2696
2697 BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
2698
2699 rc = put_entry(buf, sizeof(u32), items, fp);
2700 if (rc)
2701 return rc;
2702
2703 rc = put_entry(key, 1, len, fp);
2704 if (rc)
2705 return rc;
2706
2707 rc = ebitmap_write(&role->dominates, fp);
2708 if (rc)
2709 return rc;
2710
2711 rc = ebitmap_write(&role->types, fp);
2712 if (rc)
2713 return rc;
2714
2715 return 0;
2716}
2717
2718static int type_write(void *vkey, void *datum, void *ptr)
2719{
2720 char *key = vkey;
2721 struct type_datum *typdatum = datum;
2722 struct policy_data *pd = ptr;
2723 struct policydb *p = pd->p;
2724 void *fp = pd->fp;
2725 __le32 buf[4];
2726 int rc;
2727 size_t items, len;
2728
2729 len = strlen(key);
2730 items = 0;
2731 buf[items++] = cpu_to_le32(len);
2732 buf[items++] = cpu_to_le32(typdatum->value);
2733 if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
2734 u32 properties = 0;
2735
2736 if (typdatum->primary)
2737 properties |= TYPEDATUM_PROPERTY_PRIMARY;
2738
2739 if (typdatum->attribute)
2740 properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
2741
2742 buf[items++] = cpu_to_le32(properties);
2743 buf[items++] = cpu_to_le32(typdatum->bounds);
2744 } else {
2745 buf[items++] = cpu_to_le32(typdatum->primary);
2746 }
2747 BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
2748 rc = put_entry(buf, sizeof(u32), items, fp);
2749 if (rc)
2750 return rc;
2751
2752 rc = put_entry(key, 1, len, fp);
2753 if (rc)
2754 return rc;
2755
2756 return 0;
2757}
2758
2759static int user_write(void *vkey, void *datum, void *ptr)
2760{
2761 char *key = vkey;
2762 struct user_datum *usrdatum = datum;
2763 struct policy_data *pd = ptr;
2764 struct policydb *p = pd->p;
2765 void *fp = pd->fp;
2766 __le32 buf[3];
2767 size_t items, len;
2768 int rc;
2769
2770 len = strlen(key);
2771 items = 0;
2772 buf[items++] = cpu_to_le32(len);
2773 buf[items++] = cpu_to_le32(usrdatum->value);
2774 if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
2775 buf[items++] = cpu_to_le32(usrdatum->bounds);
2776 BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
2777 rc = put_entry(buf, sizeof(u32), items, fp);
2778 if (rc)
2779 return rc;
2780
2781 rc = put_entry(key, 1, len, fp);
2782 if (rc)
2783 return rc;
2784
2785 rc = ebitmap_write(&usrdatum->roles, fp);
2786 if (rc)
2787 return rc;
2788
2789 rc = mls_write_range_helper(&usrdatum->range, fp);
2790 if (rc)
2791 return rc;
2792
2793 rc = mls_write_level(&usrdatum->dfltlevel, fp);
2794 if (rc)
2795 return rc;
2796
2797 return 0;
2798}
2799
2800static int (*write_f[SYM_NUM]) (void *key, void *datum,
2801 void *datap) =
2802{
2803 common_write,
2804 class_write,
2805 role_write,
2806 type_write,
2807 user_write,
2808 cond_write_bool,
2809 sens_write,
2810 cat_write,
2811};
2812
2813static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
2814 void *fp)
2815{
2816 unsigned int i, j, rc;
2817 size_t nel, len;
2818 __le32 buf[3];
2819 u32 nodebuf[8];
2820 struct ocontext *c;
2821 for (i = 0; i < info->ocon_num; i++) {
2822 nel = 0;
2823 for (c = p->ocontexts[i]; c; c = c->next)
2824 nel++;
2825 buf[0] = cpu_to_le32(nel);
2826 rc = put_entry(buf, sizeof(u32), 1, fp);
2827 if (rc)
2828 return rc;
2829 for (c = p->ocontexts[i]; c; c = c->next) {
2830 switch (i) {
2831 case OCON_ISID:
2832 buf[0] = cpu_to_le32(c->sid[0]);
2833 rc = put_entry(buf, sizeof(u32), 1, fp);
2834 if (rc)
2835 return rc;
2836 rc = context_write(p, &c->context[0], fp);
2837 if (rc)
2838 return rc;
2839 break;
2840 case OCON_FS:
2841 case OCON_NETIF:
2842 len = strlen(c->u.name);
2843 buf[0] = cpu_to_le32(len);
2844 rc = put_entry(buf, sizeof(u32), 1, fp);
2845 if (rc)
2846 return rc;
2847 rc = put_entry(c->u.name, 1, len, fp);
2848 if (rc)
2849 return rc;
2850 rc = context_write(p, &c->context[0], fp);
2851 if (rc)
2852 return rc;
2853 rc = context_write(p, &c->context[1], fp);
2854 if (rc)
2855 return rc;
2856 break;
2857 case OCON_PORT:
2858 buf[0] = cpu_to_le32(c->u.port.protocol);
2859 buf[1] = cpu_to_le32(c->u.port.low_port);
2860 buf[2] = cpu_to_le32(c->u.port.high_port);
2861 rc = put_entry(buf, sizeof(u32), 3, fp);
2862 if (rc)
2863 return rc;
2864 rc = context_write(p, &c->context[0], fp);
2865 if (rc)
2866 return rc;
2867 break;
2868 case OCON_NODE:
2869 nodebuf[0] = c->u.node.addr; /* network order */
2870 nodebuf[1] = c->u.node.mask; /* network order */
2871 rc = put_entry(nodebuf, sizeof(u32), 2, fp);
2872 if (rc)
2873 return rc;
2874 rc = context_write(p, &c->context[0], fp);
2875 if (rc)
2876 return rc;
2877 break;
2878 case OCON_FSUSE:
2879 buf[0] = cpu_to_le32(c->v.behavior);
2880 len = strlen(c->u.name);
2881 buf[1] = cpu_to_le32(len);
2882 rc = put_entry(buf, sizeof(u32), 2, fp);
2883 if (rc)
2884 return rc;
2885 rc = put_entry(c->u.name, 1, len, fp);
2886 if (rc)
2887 return rc;
2888 rc = context_write(p, &c->context[0], fp);
2889 if (rc)
2890 return rc;
2891 break;
2892 case OCON_NODE6:
2893 for (j = 0; j < 4; j++)
2894 nodebuf[j] = c->u.node6.addr[j]; /* network order */
2895 for (j = 0; j < 4; j++)
2896 nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
2897 rc = put_entry(nodebuf, sizeof(u32), 8, fp);
2898 if (rc)
2899 return rc;
2900 rc = context_write(p, &c->context[0], fp);
2901 if (rc)
2902 return rc;
2903 break;
2904 }
2905 }
2906 }
2907 return 0;
2908}
2909
2910static int genfs_write(struct policydb *p, void *fp)
2911{
2912 struct genfs *genfs;
2913 struct ocontext *c;
2914 size_t len;
2915 __le32 buf[1];
2916 int rc;
2917
2918 len = 0;
2919 for (genfs = p->genfs; genfs; genfs = genfs->next)
2920 len++;
2921 buf[0] = cpu_to_le32(len);
2922 rc = put_entry(buf, sizeof(u32), 1, fp);
2923 if (rc)
2924 return rc;
2925 for (genfs = p->genfs; genfs; genfs = genfs->next) {
2926 len = strlen(genfs->fstype);
2927 buf[0] = cpu_to_le32(len);
2928 rc = put_entry(buf, sizeof(u32), 1, fp);
2929 if (rc)
2930 return rc;
2931 rc = put_entry(genfs->fstype, 1, len, fp);
2932 if (rc)
2933 return rc;
2934 len = 0;
2935 for (c = genfs->head; c; c = c->next)
2936 len++;
2937 buf[0] = cpu_to_le32(len);
2938 rc = put_entry(buf, sizeof(u32), 1, fp);
2939 if (rc)
2940 return rc;
2941 for (c = genfs->head; c; c = c->next) {
2942 len = strlen(c->u.name);
2943 buf[0] = cpu_to_le32(len);
2944 rc = put_entry(buf, sizeof(u32), 1, fp);
2945 if (rc)
2946 return rc;
2947 rc = put_entry(c->u.name, 1, len, fp);
2948 if (rc)
2949 return rc;
2950 buf[0] = cpu_to_le32(c->v.sclass);
2951 rc = put_entry(buf, sizeof(u32), 1, fp);
2952 if (rc)
2953 return rc;
2954 rc = context_write(p, &c->context[0], fp);
2955 if (rc)
2956 return rc;
2957 }
2958 }
2959 return 0;
2960}
2961
2962static int range_count(void *key, void *data, void *ptr)
2963{
2964 int *cnt = ptr;
2965 *cnt = *cnt + 1;
2966
2967 return 0;
2968}
2969
2970static int range_write_helper(void *key, void *data, void *ptr)
2971{
2972 __le32 buf[2];
2973 struct range_trans *rt = key;
2974 struct mls_range *r = data;
2975 struct policy_data *pd = ptr;
2976 void *fp = pd->fp;
2977 struct policydb *p = pd->p;
2978 int rc;
2979
2980 buf[0] = cpu_to_le32(rt->source_type);
2981 buf[1] = cpu_to_le32(rt->target_type);
2982 rc = put_entry(buf, sizeof(u32), 2, fp);
2983 if (rc)
2984 return rc;
2985 if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
2986 buf[0] = cpu_to_le32(rt->target_class);
2987 rc = put_entry(buf, sizeof(u32), 1, fp);
2988 if (rc)
2989 return rc;
2990 }
2991 rc = mls_write_range_helper(r, fp);
2992 if (rc)
2993 return rc;
2994
2995 return 0;
2996}
2997
2998static int range_write(struct policydb *p, void *fp)
2999{
3000 size_t nel;
3001 __le32 buf[1];
3002 int rc;
3003 struct policy_data pd;
3004
3005 pd.p = p;
3006 pd.fp = fp;
3007
3008 /* count the number of entries in the hashtab */
3009 nel = 0;
3010 rc = hashtab_map(p->range_tr, range_count, &nel);
3011 if (rc)
3012 return rc;
3013
3014 buf[0] = cpu_to_le32(nel);
3015 rc = put_entry(buf, sizeof(u32), 1, fp);
3016 if (rc)
3017 return rc;
3018
3019 /* actually write all of the entries */
3020 rc = hashtab_map(p->range_tr, range_write_helper, &pd);
3021 if (rc)
3022 return rc;
3023
3024 return 0;
3025}
3026
3027/*
3028 * Write the configuration data in a policy database
3029 * structure to a policy database binary representation
3030 * file.
3031 */
3032int policydb_write(struct policydb *p, void *fp)
3033{
3034 unsigned int i, num_syms;
3035 int rc;
3036 __le32 buf[4];
3037 u32 config;
3038 size_t len;
3039 struct policydb_compat_info *info;
3040
3041 /*
3042 * refuse to write policy older than compressed avtab
3043 * to simplify the writer. There are other tests dropped
3044 * since we assume this throughout the writer code. Be
3045 * careful if you ever try to remove this restriction
3046 */
3047 if (p->policyvers < POLICYDB_VERSION_AVTAB) {
3048 printk(KERN_ERR "SELinux: refusing to write policy version %d;"
3049 " it is less than version %d\n", p->policyvers,
3050 POLICYDB_VERSION_AVTAB);
3051 return -EINVAL;
3052 }
3053
3054 config = 0;
3055 if (p->mls_enabled)
3056 config |= POLICYDB_CONFIG_MLS;
3057
3058 if (p->reject_unknown)
3059 config |= REJECT_UNKNOWN;
3060 if (p->allow_unknown)
3061 config |= ALLOW_UNKNOWN;
3062
3063 /* Write the magic number and string identifiers. */
3064 buf[0] = cpu_to_le32(POLICYDB_MAGIC);
3065 len = strlen(POLICYDB_STRING);
3066 buf[1] = cpu_to_le32(len);
3067 rc = put_entry(buf, sizeof(u32), 2, fp);
3068 if (rc)
3069 return rc;
3070 rc = put_entry(POLICYDB_STRING, 1, len, fp);
3071 if (rc)
3072 return rc;
3073
3074 /* Write the version, config, and table sizes. */
3075 info = policydb_lookup_compat(p->policyvers);
3076 if (!info) {
3077 printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
3078 "version %d", p->policyvers);
3079 return rc;
3080 }
3081
3082 buf[0] = cpu_to_le32(p->policyvers);
3083 buf[1] = cpu_to_le32(config);
3084 buf[2] = cpu_to_le32(info->sym_num);
3085 buf[3] = cpu_to_le32(info->ocon_num);
3086
3087 rc = put_entry(buf, sizeof(u32), 4, fp);
3088 if (rc)
3089 return rc;
3090
3091 if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
3092 rc = ebitmap_write(&p->policycaps, fp);
3093 if (rc)
3094 return rc;
3095 }
3096
3097 if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
3098 rc = ebitmap_write(&p->permissive_map, fp);
3099 if (rc)
3100 return rc;
3101 }
3102
3103 num_syms = info->sym_num;
3104 for (i = 0; i < num_syms; i++) {
3105 struct policy_data pd;
3106
3107 pd.fp = fp;
3108 pd.p = p;
3109
3110 buf[0] = cpu_to_le32(p->symtab[i].nprim);
3111 buf[1] = cpu_to_le32(p->symtab[i].table->nel);
3112
3113 rc = put_entry(buf, sizeof(u32), 2, fp);
3114 if (rc)
3115 return rc;
3116 rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
3117 if (rc)
3118 return rc;
3119 }
3120
3121 rc = avtab_write(p, &p->te_avtab, fp);
3122 if (rc)
3123 return rc;
3124
3125 rc = cond_write_list(p, p->cond_list, fp);
3126 if (rc)
3127 return rc;
3128
3129 rc = role_trans_write(p->role_tr, fp);
3130 if (rc)
3131 return rc;
3132
3133 rc = role_allow_write(p->role_allow, fp);
3134 if (rc)
3135 return rc;
3136
3137 rc = ocontext_write(p, info, fp);
3138 if (rc)
3139 return rc;
3140
3141 rc = genfs_write(p, fp);
3142 if (rc)
3143 return rc;
3144
3145 rc = range_write(p, fp);
3146 if (rc)
3147 return rc;
3148
3149 for (i = 0; i < p->p_types.nprim; i++) {
3150 struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
3151
3152 BUG_ON(!e);
3153 rc = ebitmap_write(e, fp);
3154 if (rc)
3155 return rc;
3156 }
3157
3158 return 0;
3159}
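Read end to end, policydb_write() lays the image out in the order policydb_read() consumes it. A sketch of the overall structure, assembled from the calls above:

/*
 * POLICYDB_MAGIC, strlen(POLICYDB_STRING), POLICYDB_STRING
 * version, config, sym_num, ocon_num
 * policycaps ebitmap          (>= POLICYDB_VERSION_POLCAP)
 * permissive ebitmap          (>= POLICYDB_VERSION_PERMISSIVE)
 * symbol tables: commons, classes, roles, types, users,
 *                booleans, sensitivities, categories
 * te_avtab, conditional lists, role transitions, role allows,
 * ocontexts, genfs entries, range transitions,
 * one type_attr_map ebitmap per type
 */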
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 310e94442cb8..95d3d7de361e 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -254,6 +254,9 @@ struct policydb {
254 254
255 struct ebitmap permissive_map; 255 struct ebitmap permissive_map;
256 256
257 /* length of this policy when it was loaded */
258 size_t len;
259
257 unsigned int policyvers; 260 unsigned int policyvers;
258 261
259 unsigned int reject_unknown : 1; 262 unsigned int reject_unknown : 1;
@@ -270,6 +273,7 @@ extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
270extern int policydb_type_isvalid(struct policydb *p, unsigned int type); 273extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
271extern int policydb_role_isvalid(struct policydb *p, unsigned int role); 274extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
272extern int policydb_read(struct policydb *p, void *fp); 275extern int policydb_read(struct policydb *p, void *fp);
276extern int policydb_write(struct policydb *p, void *fp);
273 277
274#define PERM_SYMTAB_SIZE 32 278#define PERM_SYMTAB_SIZE 32
275 279
@@ -290,6 +294,11 @@ struct policy_file {
290 size_t len; 294 size_t len;
291}; 295};
292 296
297struct policy_data {
298 struct policydb *p;
299 void *fp;
300};
301
293static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) 302static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
294{ 303{
295 if (bytes > fp->len) 304 if (bytes > fp->len)
@@ -301,6 +310,17 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
301 return 0; 310 return 0;
302} 311}
303 312
313static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp)
314{
315 size_t len = bytes * num;
316
317 memcpy(fp->data, buf, len);
318 fp->data += len;
319 fp->len -= len;
320
321 return 0;
322}
323
304extern u16 string_to_security_class(struct policydb *p, const char *name); 324extern u16 string_to_security_class(struct policydb *p, const char *name);
305extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); 325extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
306 326
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9ea2feca3cd4..223c1ff6ef23 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -51,6 +51,7 @@
51#include <linux/mutex.h> 51#include <linux/mutex.h>
52#include <linux/selinux.h> 52#include <linux/selinux.h>
53#include <linux/flex_array.h> 53#include <linux/flex_array.h>
54#include <linux/vmalloc.h>
54#include <net/netlabel.h> 55#include <net/netlabel.h>
55 56
56#include "flask.h" 57#include "flask.h"
@@ -991,7 +992,8 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
991{ 992{
992 char *scontextp; 993 char *scontextp;
993 994
994 *scontext = NULL; 995 if (scontext)
996 *scontext = NULL;
995 *scontext_len = 0; 997 *scontext_len = 0;
996 998
997 if (context->len) { 999 if (context->len) {
@@ -1008,6 +1010,9 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
1008 *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1; 1010 *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
1009 *scontext_len += mls_compute_context_len(context); 1011 *scontext_len += mls_compute_context_len(context);
1010 1012
1013 if (!scontext)
1014 return 0;
1015
1011 /* Allocate space for the context; caller must free this space. */ 1016 /* Allocate space for the context; caller must free this space. */
1012 scontextp = kmalloc(*scontext_len, GFP_ATOMIC); 1017 scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
1013 if (!scontextp) 1018 if (!scontextp)
@@ -1047,7 +1052,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
 	struct context *context;
 	int rc = 0;
 
-	*scontext = NULL;
+	if (scontext)
+		*scontext = NULL;
 	*scontext_len = 0;
 
 	if (!ss_initialized) {
@@ -1055,6 +1061,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
 		char *scontextp;
 
 		*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
+		if (!scontext)
+			goto out;
 		scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
 		if (!scontextp) {
 			rc = -ENOMEM;
@@ -1769,6 +1777,7 @@ int security_load_policy(void *data, size_t len)
 		return rc;
 	}
 
+	policydb.len = len;
 	rc = selinux_set_mapping(&policydb, secclass_map,
 				 &current_mapping,
 				 &current_mapping_size);
@@ -1791,6 +1800,7 @@ int security_load_policy(void *data, size_t len)
 		selinux_complete_init();
 		avc_ss_reset(seqno);
 		selnl_notify_policyload(seqno);
+		selinux_status_update_policyload(seqno);
 		selinux_netlbl_cache_invalidate();
 		selinux_xfrm_notify_policyload();
 		return 0;
@@ -1804,6 +1814,7 @@ int security_load_policy(void *data, size_t len)
 	if (rc)
 		return rc;
 
+	newpolicydb.len = len;
 	/* If switching between different policy types, log MLS status */
 	if (policydb.mls_enabled && !newpolicydb.mls_enabled)
 		printk(KERN_INFO "SELinux: Disabling MLS support...\n");
@@ -1870,6 +1881,7 @@ int security_load_policy(void *data, size_t len)
 
 	avc_ss_reset(seqno);
 	selnl_notify_policyload(seqno);
+	selinux_status_update_policyload(seqno);
 	selinux_netlbl_cache_invalidate();
 	selinux_xfrm_notify_policyload();
 
@@ -1883,6 +1895,17 @@ err:
 
 }
 
+size_t security_policydb_len(void)
+{
+	size_t len;
+
+	read_lock(&policy_rwlock);
+	len = policydb.len;
+	read_unlock(&policy_rwlock);
+
+	return len;
+}
+
 /**
  * security_port_sid - Obtain the SID for a port.
  * @protocol: protocol number
@@ -2374,6 +2397,7 @@ out:
 	if (!rc) {
 		avc_ss_reset(seqno);
 		selnl_notify_policyload(seqno);
+		selinux_status_update_policyload(seqno);
 		selinux_xfrm_notify_policyload();
 	}
 	return rc;
@@ -3129,3 +3153,38 @@ netlbl_sid_to_secattr_failure:
 	return rc;
 }
 #endif /* CONFIG_NETLABEL */
+
+/**
+ * security_read_policy - read the policy.
+ * @data: binary policy data
+ * @len: length of data in bytes
+ *
+ */
+int security_read_policy(void **data, ssize_t *len)
+{
+	int rc;
+	struct policy_file fp;
+
+	if (!ss_initialized)
+		return -EINVAL;
+
+	*len = security_policydb_len();
+
+	*data = vmalloc_user(*len);
+	if (!*data)
+		return -ENOMEM;
+
+	fp.data = *data;
+	fp.len = *len;
+
+	read_lock(&policy_rwlock);
+	rc = policydb_write(&policydb, &fp);
+	read_unlock(&policy_rwlock);
+
+	if (rc)
+		return rc;
+
+	*len = (unsigned long)fp.data - (unsigned long)*data;
+	return 0;
+
+}
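
Two things happen in services.c above. First, context_struct_to_string() and security_sid_to_context_core() now accept a NULL scontext, turning them into pure length queries: the length is computed but the kmalloc/copy is skipped. Second, security_read_policy() serializes the loaded policy into a vmalloc_user() buffer via policydb_write() under the read lock. A hedged sketch of a length-only caller; ctx_len_only is a hypothetical wrapper around the existing security_sid_to_context() interface:

/* With a NULL scontext, only the length is computed; no buffer is
 * allocated, so there is nothing to kfree. (Kernel-style sketch;
 * u32 and security_sid_to_context() come from SELinux headers.) */
static u32 ctx_len_only(u32 sid)
{
	u32 len = 0;

	security_sid_to_context(sid, NULL, &len);
	return len;
}
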
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
new file mode 100644
index 000000000000..d982365f9d1a
--- /dev/null
+++ b/security/selinux/ss/status.c
@@ -0,0 +1,126 @@
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "services.h"
+
+/*
+ * The selinux_status_page shall be exposed to userspace applications
+ * using mmap interface on /selinux/status.
+ * It enables to notify applications a few events that will cause reset
+ * of userspace access vector without context switching.
+ *
+ * The selinux_kernel_status structure on the head of status page is
+ * protected from concurrent accesses using seqlock logic, so userspace
+ * application should reference the status page according to the seqlock
+ * logic.
+ *
+ * Typically, application checks status->sequence at the head of access
+ * control routine. If it is odd-number, kernel is updating the status,
+ * so please wait for a moment. If it is changed from the last sequence
+ * number, it means something happen, so application will reset userspace
+ * avc, if needed.
+ * In most cases, application shall confirm the kernel status is not
+ * changed without any system call invocations.
+ */
+static struct page *selinux_status_page;
+static DEFINE_MUTEX(selinux_status_lock);
+
+/*
+ * selinux_kernel_status_page
+ *
+ * It returns a reference to selinux_status_page. If the status page is
+ * not allocated yet, it also tries to allocate it at the first time.
+ */
+struct page *selinux_kernel_status_page(void)
+{
+	struct selinux_kernel_status *status;
+	struct page *result = NULL;
+
+	mutex_lock(&selinux_status_lock);
+	if (!selinux_status_page) {
+		selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+		if (selinux_status_page) {
+			status = page_address(selinux_status_page);
+
+			status->version = SELINUX_KERNEL_STATUS_VERSION;
+			status->sequence = 0;
+			status->enforcing = selinux_enforcing;
+			/*
+			 * NOTE: the next policyload event shall set
+			 * a positive value on the status->policyload,
+			 * although it may not be 1, but never zero.
+			 * So, application can know it was updated.
+			 */
+			status->policyload = 0;
+			status->deny_unknown = !security_get_allow_unknown();
+		}
+	}
+	result = selinux_status_page;
+	mutex_unlock(&selinux_status_lock);
+
+	return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * It updates status of the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(int enforcing)
+{
+	struct selinux_kernel_status *status;
+
+	mutex_lock(&selinux_status_lock);
+	if (selinux_status_page) {
+		status = page_address(selinux_status_page);
+
+		status->sequence++;
+		smp_wmb();
+
+		status->enforcing = enforcing;
+
+		smp_wmb();
+		status->sequence++;
+	}
+	mutex_unlock(&selinux_status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * It updates status of the times of policy reloaded, and current
+ * setting of deny_unknown.
+ */
+void selinux_status_update_policyload(int seqno)
+{
+	struct selinux_kernel_status *status;
+
+	mutex_lock(&selinux_status_lock);
+	if (selinux_status_page) {
+		status = page_address(selinux_status_page);
+
+		status->sequence++;
+		smp_wmb();
+
+		status->policyload = seqno;
+		status->deny_unknown = !security_get_allow_unknown();
+
+		smp_wmb();
+		status->sequence++;
+	}
+	mutex_unlock(&selinux_status_lock);
+}
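
Concretely, the protocol the comment above describes is a classic seqlock read side: a consumer mmaps /selinux/status read-only, reads the sequence counter, reads the fields, then rereads the counter, retrying whenever the counter is odd or has changed. A user-space sketch; the struct below mirrors the fields the kernel code initializes (version, sequence, enforcing, policyload, deny_unknown) and assumes they are consecutive 32-bit values, since the real declaration lives in a header not shown here:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

struct status_map {			/* assumed layout */
	uint32_t version;
	uint32_t sequence;		/* odd while an update is in flight */
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
};

int main(void)
{
	int fd = open("/selinux/status", O_RDONLY);
	struct status_map *st;
	uint32_t seq, policyload;

	if (fd < 0)
		return 1;
	st = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (st == MAP_FAILED)
		return 1;

	do {
		seq = st->sequence;
		__sync_synchronize();	/* pairs with the kernel's smp_wmb() */
		policyload = st->policyload;
		__sync_synchronize();
	} while ((seq & 1) || seq != st->sequence);

	/* If policyload differs from the value cached on the previous
	 * check, the policy was reloaded: reset the user-space AVC. */
	(void)policyload;
	close(fd);
	return 0;
}
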
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c448d57ae2b7..bc39f4067af6 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1281,12 +1281,11 @@ static int smack_task_getioprio(struct task_struct *p)
  *
  * Return 0 if read access is permitted
  */
-static int smack_task_setscheduler(struct task_struct *p, int policy,
-				   struct sched_param *lp)
+static int smack_task_setscheduler(struct task_struct *p)
 {
 	int rc;
 
-	rc = cap_task_setscheduler(p, policy, lp);
+	rc = cap_task_setscheduler(p);
 	if (rc == 0)
 		rc = smk_curacc_on_task(p, MAY_WRITE);
 	return rc;
@@ -3005,7 +3004,8 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 {
 	char *sp = smack_from_secid(secid);
 
-	*secdata = sp;
+	if (secdata)
+		*secdata = sp;
 	*seclen = strlen(sp);
 	return 0;
 }
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index c668b447c725..7556315c1978 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -768,8 +768,10 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data)
 		return true; /* Do nothing if open(O_WRONLY). */
 	memset(&head->r, 0, sizeof(head->r));
 	head->r.print_this_domain_only = true;
-	head->r.eof = !domain;
-	head->r.domain = &domain->list;
+	if (domain)
+		head->r.domain = &domain->list;
+	else
+		head->r.eof = 1;
 	tomoyo_io_printf(head, "# select %s\n", data);
 	if (domain && domain->is_deleted)
 		tomoyo_io_printf(head, "# This is a deleted domain.\n");
@@ -2051,13 +2053,22 @@ void tomoyo_check_profile(void)
 		const u8 profile = domain->profile;
 		if (tomoyo_profile_ptr[profile])
 			continue;
+		printk(KERN_ERR "You need to define profile %u before using it.\n",
+		       profile);
+		printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+		       "for more information.\n");
 		panic("Profile %u (used by '%s') not defined.\n",
 		      profile, domain->domainname->name);
 	}
 	tomoyo_read_unlock(idx);
-	if (tomoyo_profile_version != 20090903)
+	if (tomoyo_profile_version != 20090903) {
+		printk(KERN_ERR "You need to install userland programs for "
+		       "TOMOYO 2.3 and initialize policy configuration.\n");
+		printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+		       "for more information.\n");
 		panic("Profile version %u is not supported.\n",
 		      tomoyo_profile_version);
+	}
 	printk(KERN_INFO "TOMOYO: 2.3.0\n");
 	printk(KERN_INFO "Mandatory Access Control activated.\n");
 }
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index a7868ad4d530..cbbed0db9e56 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -535,13 +535,15 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file)
 {
 	struct snd_rawmidi_file *rfile;
 	struct snd_rawmidi *rmidi;
+	struct module *module;
 
 	rfile = file->private_data;
 	rmidi = rfile->rmidi;
 	rawmidi_release_priv(rfile);
 	kfree(rfile);
+	module = rmidi->card->module;
 	snd_card_file_remove(rmidi->card, file);
-	module_put(rmidi->card->module);
+	module_put(module);
 	return 0;
 }
 
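
The reordering above fixes a use-after-free: snd_card_file_remove() can drop the last reference to the card and free it (for example when the device was unplugged while the file was open), so rmidi->card->module must be read while the card is still alive. The same defensive shape in miniature, with hypothetical names throughout:

#include <stdlib.h>

struct owner;				/* stands in for struct module */
extern void owner_put(struct owner *o);	/* stands in for module_put() */

struct obj {
	struct owner *owner;
};

static void release_obj(struct obj *o)
{
	struct owner *owner = o->owner;	/* read before 'o' can go away */

	free(o);		/* stands in for snd_card_file_remove() */
	owner_put(owner);	/* no dereference of freed memory */
}
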
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 92aa762ffb7e..07f803e6d203 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -391,11 +391,11 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case SND_DEV_DSP:
 	case SND_DEV_DSP16:
 	case SND_DEV_AUDIO:
-		return audio_ioctl(dev, file, cmd, p);
+		ret = audio_ioctl(dev, file, cmd, p);
 		break;
 
 	case SND_DEV_MIDIN:
-		return MIDIbuf_ioctl(dev, file, cmd, p);
+		ret = MIDIbuf_ioctl(dev, file, cmd, p);
 		break;
 
 	}
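
Turning the direct returns into "ret = ...; break;" routes every case through the code that follows the switch, presumably because sound_ioctl() holds a lock (or has some other cleanup obligation) across the switch that the early returns skipped. The restored single-exit shape, sketched with hypothetical helpers:

#include <errno.h>

extern void lock_x(void);		/* hypothetical */
extern void unlock_x(void);
extern long do_audio(int dev), do_midi(int dev);

static long ioctl_like(int dev, unsigned int cmd)
{
	long ret = -EINVAL;

	lock_x();
	switch (cmd) {
	case 1:
		ret = do_audio(dev);	/* was: return do_audio(dev); */
		break;
	case 2:
		ret = do_midi(dev);
		break;
	}
	unlock_x();			/* now runs on every path */
	return ret;
}
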
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 95148e58026c..c16c5ba0fda0 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1747,6 +1747,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
 		      "HP dv6", STAC_HP_DV5),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
 		      "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
+	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e,
+		      "HP DV6", STAC_HP_DV5),
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
 			   "HP", STAC_HP_DV5),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index ef7aa0a0c526..95aaf565c704 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -73,6 +73,18 @@ void get_term_dimensions(struct winsize *ws);
 #define cpu_relax()	asm volatile("":::"memory")
 #endif
 
+#ifdef __mips__
+#include "../../arch/mips/include/asm/unistd.h"
+#define rmb()		asm volatile(					\
+				".set	mips2\n\t"			\
+				"sync\n\t"				\
+				".set	mips0"				\
+				: /* no output */			\
+				: /* no input */			\
+				: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory")
+#endif
+
 #include <time.h>
 #include <unistd.h>
 #include <sys/types.h>
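
On MIPS, rmb() expands to a full sync, bracketed by .set mips2/.set mips0 so the assembler accepts the instruction even when building for an ISA level that predates it. perf needs the barrier when draining the mmap ring buffer: data_head must be observed before the event bytes it covers. A sketch of that read side; field names follow struct perf_event_mmap_page, rmb() is the macro defined above, and the pairing barrier before the tail store is elided:

#include <stdint.h>
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */

static uint64_t ring_buffer_head(struct perf_event_mmap_page *pc)
{
	uint64_t head = pc->data_head;

	rmb();		/* order the head load before reading records */
	return head;
}

/* After consuming records in [data_tail, head), publish the new tail
 * so the kernel may reuse the space. */
static void ring_buffer_set_tail(struct perf_event_mmap_page *pc,
				 uint64_t tail)
{
	pc->data_tail = tail;
}
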